| library (stringclasses, 1 value) | test_file (stringclasses, 785 values) | test_function (stringlengths 1-295) | before (stringlengths 0-448k) | after (stringlengths 0-487k) | context_before (stringclasses, 947 values) | context_after (stringlengths 0-16.3k) | commit_before (stringclasses, 1 value) | commit_after (stringclasses, 1 value) | change_type (stringclasses, 3 values) |
|---|---|---|---|---|---|---|---|---|---|
torch
|
test/test_nvfuser_dynamo.py
|
get_fw_bw_graph
|
def get_fw_bw_graph(f, inps, partitioner):
from functorch.compile import aot_function
# Helper functions are taken from functorch/test_aotdispatch.py
def extract_graph(fx_g, _, graph_cell):
graph_cell[0] = fx_g
return fx_g
fw_graph_cell = [None]
bw_graph_cell = [None]
aot_function(
f,
fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
partition_fn=partitioner,
)(*inps).sum().backward()
return (fw_graph_cell[0], bw_graph_cell[0])
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
extract_graph
|
def extract_graph(fx_g, _, graph_cell):
graph_cell[0] = fx_g
return fx_g
fw_graph_cell = [None]
bw_graph_cell = [None]
aot_function(
f,
fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
partition_fn=partitioner,
)(*inps).sum().backward()
return (fw_graph_cell[0], bw_graph_cell[0])
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
get_ins_outs
|
def get_ins_outs(fx_g):
ins = []
outs = []
for n in fx_g.graph.nodes:
if n.op == "placeholder":
ins.append(n)
elif n.op == "output":
outs = tuple(n.args[0])
return ins, outs
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
get_num_ins_outs
|
def get_num_ins_outs(fx_g):
return tuple(len(i) for i in get_ins_outs(fx_g))
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
func
|
def func(a, b):
return a.sin() + b.cos()
# No warnings and no errors
with warnings.catch_warnings(record=True) as w:
nvfuser_result = func(input1, input2)
self.assertEqual(len(w), 0)
eager_result = func.__wrapped__(input1, input2)
self.assertEqual(eager_result, nvfuser_result)
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
test_batch_norm_implicit_dtype_promotion
|
def test_batch_norm_implicit_dtype_promotion(self):
input1 = make_tensor((2, 3, 4, 5), device="cuda", dtype=torch.float32)
input2 = make_tensor((5, 5), device="cuda", dtype=torch.float32)
w = make_tensor((3), device="cuda", dtype=torch.float32)
b = make_tensor((3), device="cuda", dtype=torch.float32)
@torchdynamo.optimize("nvprims_nvfuser")
def func(mat1, mat2, w, b):
o = torch.matmul(mat1, mat2)
return torch.batch_norm(o, w, b, None, None, True, 1e-2, 1e-5, True)
# No warnings and no errors
with torch.cuda.amp.autocast():
with warnings.catch_warnings(record=True) as warning:
nvfuser_result = func(input1, input2, w, b)
self.assertEqual(len(warning), 0)
eager_result = func.__wrapped__(input1, input2, w, b)
self.assertEqual(eager_result, nvfuser_result)
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
@unittest.skipIf(IS_WINDOWS, "TorchDynamo is not supported on Windows")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(is_pre_volta(), "Only supported on Volta and newer devices.")
class TestNvFuserDynamo(TestCase):
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
func
|
def func(a, b):
return a.sin() + b.cos()
# No warnings and no errors
with warnings.catch_warnings(record=True) as w:
nvfuser_result = func(input1, input2)
self.assertEqual(len(w), 0)
eager_result = func.__wrapped__(input1, input2)
self.assertEqual(eager_result, nvfuser_result)
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
test_dtype_correctness
|
def test_dtype_correctness(self):
input1 = make_tensor((2, 4, 8), device="cuda", dtype=torch.float16)
@torchdynamo.optimize("nvprims_nvfuser")
def func(a):
tmp = a + 1.0
# nvfuser would promote output to fp32 in math, FusionDefinition should cast output dtype back
return torch.where(tmp > 0, tmp, 0.0)
# No warnings and no errors
with warnings.catch_warnings(record=True) as w:
nvfuser_result = func(input1)
self.assertEqual(len(w), 0)
eager_result = func.__wrapped__(input1)
self.assertEqual(eager_result, nvfuser_result)
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
@unittest.skipIf(IS_WINDOWS, "TorchDynamo is not supported on Windows")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(is_pre_volta(), "Only supported on Volta and newer devices.")
class TestNvFuserDynamo(TestCase):
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_dynamo.py
|
func
|
def func(a, b):
return a.sin() + b.cos()
# No warnings and no errors
with warnings.catch_warnings(record=True) as w:
nvfuser_result = func(input1, input2)
self.assertEqual(len(w), 0)
eager_result = func.__wrapped__(input1, input2)
self.assertEqual(eager_result, nvfuser_result)
|
import unittest
import warnings
from functools import partial
import torch
import torch._dynamo as torchdynamo
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TEST_WITH_ROCM,
TestCase,
)
from torch.testing._internal.jit_utils import RUN_CUDA
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
import networkx # noqa: F401
from functorch.compile import default_partition
from torch._dynamo.backends.nvfuser import nvprims_fw_bw_partition_fn
from functorch.compile import aot_function
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_nvfuser_frontend.py
|
is_pre_volta
|
def is_pre_volta():
if not RUN_NVFUSER:
return False
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
return prop.major < 7
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(is_pre_volta(), "Only supported on Volta and newer devices.")
class TestNvFuserFrontend(TestCase):
def test_basic(self) :
input1 = torch.ones(2, 4, 8, device='cuda')
input2 = torch.ones(2, 4, 8, device='cuda')
fc = FusionCache.get()
before_fusions = fc.num_fusions()
fs1 = Fusion()
with FusionDefinition(fs1) as fd :
t0 = fd.define_tensor(3)
t1 = fd.define_tensor(3)
c0 = fd.define_constant(3.0)
t2 = fd.ops.add(t0, t1)
t3 = fd.ops.mul(t2, c0)
t4 = fd.ops.sum(t3, [-1], False, DataType.Float)
fd.add_output(t4)
# Expected Output is a tensor of 48's
nvf_out1 = fs1.execute([input1, input2])[0]
# Create a new fusion with the same definition, it should hit the cache!
fs2 = Fusion()
with FusionDefinition(fs2) as fd :
t0 = fd.define_tensor(3)
t1 = fd.define_tensor(3)
c0 = fd.define_constant(3.0)
t2 = fd.ops.add(t0, t1)
t3 = fd.ops.mul(t2, c0)
t4 = fd.ops.sum(t3, [-1], False, DataType.Float)
fd.add_output(t4)
nvf_out2 = fs2.execute([input1, input2])[0]
# Check there is still only 1 cache entry
fc = FusionCache.get()
self.assertEqual(fc.num_fusions() - before_fusions, 1)
# Create a fusion from a fusion id and make sure it executes!
fs3 = Fusion(fs2.id())
nvf_out3 = fs3.execute([input1, input2])[0]
eager_out = torch.sum((input1 + input2) * 3.0, dim=-1)
self.assertEqual(eager_out, nvf_out1)
self.assertEqual(eager_out, nvf_out2)
self.assertEqual(eager_out, nvf_out3)
def test_basic_fp16(self) :
fs = Fusion()
with FusionDefinition(fs) as fd :
t0 = fd.define_tensor(3, DataType.Half)
t1 = fd.define_tensor(3, DataType.Half)
c0 = fd.define_constant(3.0)
t2 = fd.ops.add(t0, t1)
t3 = fd.ops.mul(t2, c0)
t4 = fd.ops.sum(t3, [-1], False, DataType.Float)
t5 = fd.ops.cast(t4, DataType.Half)
fd.add_output(t5)
input1 = torch.ones(2, 4, 8, device='cuda', dtype=torch.float16)
input2 = torch.ones(2, 4, 8, device='cuda', dtype=torch.float16)
# Expected Output is a tensor of 48's
nvf_out = fs.execute([input1, input2])[0]
eager_out = torch.sum((input1 + input2) * 3.0, dim=-1)
self.assertEqual(eager_out, nvf_out)
def test_cast_double_to_half(self) :
fs = Fusion()
with FusionDefinition(fs) as fd :
t0 = fd.define_tensor(2, DataType.Double)
t1 = fd.define_tensor(2, DataType.Double)
t0h = fd.ops.cast(t0, DataType.Half)
t1h = fd.ops.cast(t1, DataType.Half)
t2 = fd.ops.add(t0h, t1h)
t3 = fd.ops.relu(t2)
t4 = fd.ops.cast(t3, DataType.Half)
fd.add_output(t4)
input1 = torch.randn(2, 4, device='cuda', dtype=torch.float64)
input2 = torch.randn(2, 4, device='cuda', dtype=torch.float64)
nvf_out = fs.execute([input1, input2])[0]
eager_out = torch.relu(input1.to(torch.half) + input2.to(torch.half))
self.assertEqual(eager_out, nvf_out)
def test_promote_to_double(self) :
fs = Fusion()
with FusionDefinition(fs) as fd :
t0 = fd.define_tensor(2, DataType.Half)
t1 = fd.define_tensor(2, DataType.Double)
t2 = fd.ops.add(t0, t1)
t5 = fd.ops.relu(t2)
fd.add_output(t5)
input1 = torch.randn(2, 4, device='cuda', dtype=torch.float16)
input2 = torch.randn(2, 4, device='cuda', dtype=torch.float64)
nvf_out = fs.execute([input1, input2])[0]
eager_out = torch.relu(input1 + input2)
self.assertEqual(eager_out, nvf_out)
def test_implicit_broadcast_input(self) :
fs = Fusion()
with FusionDefinition(fs) as fd :
t0 = fd.define_tensor(1)
t1 = fd.define_tensor(3)
t0_b = fd.ops.broadcast_in_dim(t0, [2, 3, 4], [1])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
input1 = torch.randn(3, device='cuda')
input2 = torch.randn(2, 3, 4, device='cuda')
nvf_out = fs.execute([input1, input2])[0]
eager_out = refs.add(prims.broadcast_in_dim(input1, [2, 3, 4], [1]), input2)
self.assertEqual(eager_out, nvf_out)
def test_explicit_broadcast_input(self) :
input1 = torch.randn(1, 1, 4, device='cuda')
input2 = torch.randn(2, 3, 4, device='cuda')
fs = Fusion()
with FusionDefinition(fs) as fd :
t0 = fd.define_tensor(sizes=input1.size(), strides=input1.stride())
t1 = fd.define_tensor(sizes=input2.size(), strides=input2.stride())
t0_b = fd.ops.broadcast_in_dim(t0, [2, 3, 4], [0, 1, 2])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
nvf_out = fs.execute([input1, input2])[0]
eager_out = refs.add(prims.broadcast_in_dim(input1, [2, 3, 4], [0, 1, 2]), input2)
self.assertEqual(eager_out, nvf_out)
def test_broadcast_mixing(self) :
fs = Fusion()
with FusionDefinition(fs) as fd :
t0 = fd.define_tensor([3, 1], [1, 1])
t1 = fd.define_tensor(1)
t1_b = fd.ops.broadcast_in_dim(t1, [3, 3], [0])
t2 = fd.ops.add(t0, t1_b)
fd.add_output(t2)
input1 = torch.randn(3, 1, device='cuda')
input2 = torch.randn(3, device='cuda')
nvf_out = fs.execute([input1, input2])[0]
eager_out = refs.add(input1, prims.broadcast_in_dim(input2, [3, 3], [0]))
self.assertEqual(eager_out, nvf_out)
def test_ops_broadcast(self) :
fs = Fusion()
with FusionDefinition(fs) as fd :
t0 = fd.define_tensor(1)
t1 = fd.define_tensor(3)
t0_b = fd.ops.broadcast(t0, [True, False, True])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
input1 = torch.randn(3, device='cuda')
input2 = torch.randn(2, 3, 4, device='cuda')
nvf_out = fs.execute([input1, input2])[0]
eager_out = refs.add(prims.broadcast_in_dim(input1, [2, 3, 4], [1]), input2)
self.assertEqual(eager_out, nvf_out)
def test_prim_layer_norm_fwd(self) :
def primitive_definition(
inputs: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
normalization_axis: int,
keepdim: bool,
) -> torch.Tensor:
mean = inputs.mean(normalization_axis, keepdim=keepdim)
diff = inputs - mean
diff_sq = diff * diff
var = diff_sq.mean(normalization_axis, keepdim=keepdim)
pre_shift_scale_norm_output = (inputs - mean) / torch.sqrt(var + 1e-12)
norm_output = weight * pre_shift_scale_norm_output + bias
return norm_output
def nvfuser_fusion(
fd: FusionDefinition,
normalization_axis: int,
norm_size: int,
input_shape: List[int],
eps: float,
keepDim: bool
) -> None :
inputs = fd.define_tensor(symbolic_sizes=[-1, -1, -1], contiguous=[True, True, True], dtype=DataType.Float)
weights = fd.define_tensor(symbolic_sizes=[-1], contiguous=[True], dtype=DataType.Float)
bias = fd.define_tensor(symbolic_sizes=[-1], contiguous=[True], dtype=DataType.Float)
sum0 = fd.ops.sum(inputs, axes=[normalization_axis], keepdim=keepDim)
norm_const = fd.define_constant(norm_size)
mean = fd.ops.div(sum0, norm_const)
diff = fd.ops.sub(inputs, mean)
diff_sq = fd.ops.mul(diff, diff)
sum1 = fd.ops.sum(diff_sq, axes=[normalization_axis], keepdim=keepDim)
var = fd.ops.div(sum1, norm_const)
eps_const = fd.define_constant(eps)
var_eps = fd.ops.add(var, eps_const)
invstd = fd.ops.rsqrt(var_eps)
pre_scale_bias = fd.ops.mul(diff, invstd)
weights_bcast = fd.ops.broadcast_in_dim(weights, output_shape=input_shape, broadcast_dims=[2])
scale = fd.ops.mul(pre_scale_bias, weights_bcast)
bias_bcast = fd.ops.broadcast_in_dim(bias, output_shape=input_shape, broadcast_dims=[2])
out = fd.ops.add(scale, bias_bcast)
fd.add_output(out)
fd.add_output(mean)
fd.add_output(invstd)
def nvfuser_fusion_var_mean(
fd: FusionDefinition,
normalization_axis: int,
norm_size: int,
input_shape: List[int],
eps: float,
keepDim: bool
) -> None :
inputs = fd.define_tensor(symbolic_sizes=[-1, -1, -1], contiguous=[True, True, True], dtype=DataType.Float)
weights = fd.define_tensor(symbolic_sizes=[-1], contiguous=[True], dtype=DataType.Float)
bias = fd.define_tensor(symbolic_sizes=[-1], contiguous=[True], dtype=DataType.Float)
var, mean = fd.ops.var_mean(inputs, axes=[normalization_axis], correction=0, keepdim=keepDim)
eps_const = fd.define_constant(eps)
var_eps = fd.ops.add(var, eps_const)
invstd = fd.ops.rsqrt(var_eps)
diff = fd.ops.sub(inputs, mean)
pre_scale_bias = fd.ops.mul(diff, invstd)
weights_bcast = fd.ops.broadcast_in_dim(weights, output_shape=input_shape, broadcast_dims=[2])
scale = fd.ops.mul(pre_scale_bias, weights_bcast)
bias_bcast = fd.ops.broadcast_in_dim(bias, output_shape=input_shape, broadcast_dims=[2])
out = fd.ops.add(scale, bias_bcast)
fd.add_output(out)
fd.add_output(mean)
fd.add_output(invstd)
input_size = [64, 128, 1024]
dtype = torch.float32
device = 'cuda'
inputs = torch.randn(*input_size, device=device, requires_grad=True)
weights = torch.nn.Parameter(torch.randn(input_size[2], dtype=dtype, device=device))
biases = torch.nn.Parameter(torch.randn(input_size[2], dtype=dtype, device=device))
fc = FusionCache.get()
before_fusions = fc.num_fusions()
for _ in range(5) :
nvf_fusion = Fusion()
with FusionDefinition(nvf_fusion) as fd:
nvfuser_fusion(fd, 2, inputs.size()[2], inputs.size(), 1e-12, True)
nvf_out = nvf_fusion.execute([inputs, weights, biases])
for _ in range(5) :
nvf_var_mean_fusion = Fusion()
with FusionDefinition(nvf_var_mean_fusion) as fd:
nvfuser_fusion_var_mean(fd, 2, inputs.size()[2], inputs.size(), 1e-12, True)
nvf_var_mean_out = nvf_var_mean_fusion.execute([inputs, weights, biases])
for _ in range(5) :
eager_out = primitive_definition(inputs, weights, biases, 2, True)
self.assertEqual(eager_out, nvf_out[0])
self.assertEqual(eager_out, nvf_var_mean_out[0])
fusion_cache = FusionCache.get()
self.assertEqual(fc.num_fusions() - before_fusions, 2)
def test_prim_rms_norm_fwd(self) :
def primitive_definition(
inputs: torch.Tensor,
weight: torch.Tensor,
normalization_axis: int,
keepdim: bool,
) -> torch.Tensor:
var = inputs.mul(inputs).mean(normalization_axis, keepdim)
pre_shift_scale_norm_output = inputs / torch.sqrt(var + 1e-12)
norm_output = weight * pre_shift_scale_norm_output
return norm_output
def nvfuser_fusion(
fd: FusionDefinition,
normalization_axis: int,
norm_size: int,
input_shape: List[int],
eps: float,
keepDim: bool
) -> None :
inputs = fd.define_tensor(symbolic_sizes=[-1, -1, -1], contiguous=[True, True, True], dtype=DataType.Float)
weights = fd.define_tensor(symbolic_sizes=[-1], contiguous=[True], dtype=DataType.Float)
inputs_sq = fd.ops.mul(inputs, inputs)
sum0 = fd.ops.sum(inputs_sq, axes=[normalization_axis], keepdim=keepDim)
norm_const = fd.define_constant(norm_size)
var = fd.ops.div(sum0, norm_const)
eps_const = fd.define_constant(eps)
var_eps = fd.ops.add(var, eps_const)
invstd = fd.ops.rsqrt(var_eps)
pre_scale = fd.ops.mul(inputs, invstd)
weights_bcast = fd.ops.broadcast_in_dim(weights, output_shape=input_shape, broadcast_dims=[2])
out = fd.ops.mul(pre_scale, weights_bcast)
fd.add_output(out)
fd.add_output(invstd)
input_size = [64, 128, 1024]
dtype = torch.float32
device = 'cuda'
inputs = torch.randn(*input_size, device=device, requires_grad=True)
weights = torch.nn.Parameter(torch.randn(input_size[2], dtype=dtype, device=device))
fc = FusionCache.get()
before_fusions = fc.num_fusions()
for _ in range(5) :
nvf_fusion = Fusion()
with FusionDefinition(nvf_fusion) as fd:
nvfuser_fusion(fd, 2, inputs.size()[2], inputs.size(), 1e-12, True)
nvf_out = nvf_fusion.execute([inputs, weights])
for _ in range(5) :
eager_out = primitive_definition(inputs, weights, 2, True)
self.assertEqual(eager_out, nvf_out[0])
self.assertEqual(fc.num_fusions() - before_fusions, 1)
if __name__ == '__main__':
run_tests()
|
import unittest
from typing import List
import torch
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM, TestCase
from torch.testing._internal.jit_utils import RUN_CUDA
import torch._refs as refs
import torch._prims as prims
from nvfuser._C import Fusion, FusionCache, FusionDefinition, DataType
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_openmp.py
|
func_rss
|
def func_rss(self, runs):
last_rss = list(self.func(runs))
# Check that the sequence is not strictly increasing
is_increasing = True
for idx in range(len(last_rss)):
if idx == 0:
continue
is_increasing = is_increasing and (last_rss[idx] > last_rss[idx - 1])
self.assertTrue(not is_increasing,
msg='memory usage is increasing, {}'.format(str(last_rss)))
|
def func_rss(self, runs):
last_rss = list(self.func(runs))
# Check that the sequence is not strictly increasing
is_increasing = True
for idx in range(len(last_rss)):
if idx == 0:
continue
is_increasing = is_increasing and (last_rss[idx] > last_rss[idx - 1])
self.assertTrue(
not is_increasing, msg=f"memory usage is increasing, {str(last_rss)}"
)
|
import collections
import unittest
import torch
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN)
import psutil
device = torch.device('cpu')
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
|
import collections
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ASAN, TestCase
import psutil
device = torch.device("cpu")
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_openmp.py
|
test_one_thread
|
def test_one_thread(self):
"""Make sure there is no memory leak with one thread: issue gh-32284
"""
torch.set_num_threads(1)
self.func_rss(300)
|
def test_one_thread(self):
"""Make sure there is no memory leak with one thread: issue gh-32284"""
torch.set_num_threads(1)
self.func_rss(300)
|
import collections
import unittest
import torch
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN)
import psutil
device = torch.device('cpu')
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
|
import collections
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ASAN, TestCase
import psutil
device = torch.device("cpu")
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_openmp.py
|
test_n_threads
|
def test_n_threads(self):
"""Make sure there is no memory leak with many threads
"""
ncores = min(5, psutil.cpu_count(logical=False))
torch.set_num_threads(ncores)
self.func_rss(300)
|
def test_n_threads(self):
"""Make sure there is no memory leak with many threads"""
ncores = min(5, psutil.cpu_count(logical=False))
torch.set_num_threads(ncores)
self.func_rss(300)
|
import collections
import unittest
import torch
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN)
import psutil
device = torch.device('cpu')
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
|
import collections
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ASAN, TestCase
import psutil
device = torch.device("cpu")
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
_to_tensormeta
|
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
out = FakeTensor.from_tensor(x, mode)
return out
return x
# TODO: iterate over requires_grad true/false
for sample in op.reference_inputs(device, dtype, requires_grad=False):
result = op(sample.input, *sample.args, **sample.kwargs)
meta_sample = sample.transform(_to_tensormeta)
try:
with mode:
meta_result = op(meta_sample.input, *meta_sample.args, **meta_sample.kwargs)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
except torch._subclasses.fake_tensor.DataDependentOutputException:
continue
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
prims.utils.compare_tensor_meta(result, meta_result)
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
prims.utils.compare_tensor_meta(a, b)
|
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
out = FakeTensor.from_tensor(x, mode)
return out
return x
# TODO: iterate over requires_grad true/false
for sample in op.reference_inputs(device, dtype, requires_grad=False):
result = op(sample.input, *sample.args, **sample.kwargs)
meta_sample = sample.transform(_to_tensormeta)
try:
with mode:
meta_result = op(
meta_sample.input, *meta_sample.args, **meta_sample.kwargs
)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
except torch._subclasses.fake_tensor.DataDependentOutputException:
continue
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
continue
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
prims.utils.compare_tensor_meta(
result, meta_result, check_conj=op.op not in CHECK_CONJ_SKIPS
)
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
prims.utils.compare_tensor_meta(
a, b, check_conj=op.op not in CHECK_CONJ_SKIPS
)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
test_multiple_devices
|
def test_multiple_devices(self, devices, dtype, op):
for cuda_device_str in devices:
cuda_device = torch.device(cuda_device_str)
# NOTE: only tests on first sample
samples = op.sample_inputs(cuda_device, dtype)
sample = first_sample(self, samples)
result = op(sample.input, *sample.args, **sample.kwargs)
if isinstance(result, torch.Tensor):
self.assertTrue(result.device == cuda_device)
elif is_iterable_of_tensors(result):
self.assertTrue(all(map(lambda t: t.device == cuda_device, result)))
else:
self.skipTest(
"Skipped! Only supports single tensor or iterable of tensor outputs."
)
|
def test_multiple_devices(self, devices, dtype, op):
for cuda_device_str in devices:
cuda_device = torch.device(cuda_device_str)
# NOTE: only tests on first sample
samples = op.sample_inputs(cuda_device, dtype)
sample = first_sample(self, samples)
result = op(sample.input, *sample.args, **sample.kwargs)
if isinstance(result, torch.Tensor):
self.assertTrue(result.device == cuda_device)
elif is_iterable_of_tensors(result):
self.assertTrue(all(t.device == cuda_device for t in result))
else:
self.skipTest(
"Skipped! Only supports single tensor or iterable of tensor outputs."
)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
class TestCommon(TestCase):
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
@unMarkDynamoStrictTest
class TestCommon(TestCase):
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
test_python_ref_meta
|
def test_python_ref_meta(self, device, dtype, op):
with FakeTensorMode() as mode:
pass
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
out = FakeTensor.from_tensor(x, mode)
return out
return x
# TODO: iterate over requires_grad true/false
for sample in op.reference_inputs(device, dtype, requires_grad=False):
result = op(sample.input, *sample.args, **sample.kwargs)
meta_sample = sample.transform(_to_tensormeta)
try:
with mode:
meta_result = op(meta_sample.input, *meta_sample.args, **meta_sample.kwargs)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
except torch._subclasses.fake_tensor.DataDependentOutputException:
continue
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
prims.utils.compare_tensor_meta(result, meta_result)
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
prims.utils.compare_tensor_meta(a, b)
|
def test_python_ref_meta(self, device, dtype, op):
CHECK_CONJ_SKIPS = {
torch._refs.linalg.svd,
}
with FakeTensorMode() as mode:
pass
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
out = FakeTensor.from_tensor(x, mode)
return out
return x
# TODO: iterate over requires_grad true/false
for sample in op.reference_inputs(device, dtype, requires_grad=False):
result = op(sample.input, *sample.args, **sample.kwargs)
meta_sample = sample.transform(_to_tensormeta)
try:
with mode:
meta_result = op(
meta_sample.input, *meta_sample.args, **meta_sample.kwargs
)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
except torch._subclasses.fake_tensor.DataDependentOutputException:
continue
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
continue
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
prims.utils.compare_tensor_meta(
result, meta_result, check_conj=op.op not in CHECK_CONJ_SKIPS
)
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
prims.utils.compare_tensor_meta(
a, b, check_conj=op.op not in CHECK_CONJ_SKIPS
)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
class TestCommon(TestCase):
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
@unMarkDynamoStrictTest
class TestCommon(TestCase):
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
_to_tensormeta
|
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
out = FakeTensor.from_tensor(x, mode)
return out
return x
# TODO: iterate over requires_grad true/false
for sample in op.reference_inputs(device, dtype, requires_grad=False):
result = op(sample.input, *sample.args, **sample.kwargs)
meta_sample = sample.transform(_to_tensormeta)
try:
with mode:
meta_result = op(meta_sample.input, *meta_sample.args, **meta_sample.kwargs)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
except torch._subclasses.fake_tensor.DataDependentOutputException:
continue
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
prims.utils.compare_tensor_meta(result, meta_result)
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
prims.utils.compare_tensor_meta(a, b)
|
def _to_tensormeta(x):
if isinstance(x, torch.Tensor):
out = FakeTensor.from_tensor(x, mode)
return out
return x
# TODO: iterate over requires_grad true/false
for sample in op.reference_inputs(device, dtype, requires_grad=False):
result = op(sample.input, *sample.args, **sample.kwargs)
meta_sample = sample.transform(_to_tensormeta)
try:
with mode:
meta_result = op(
meta_sample.input, *meta_sample.args, **meta_sample.kwargs
)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
except torch._subclasses.fake_tensor.DataDependentOutputException:
continue
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
continue
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
prims.utils.compare_tensor_meta(
result, meta_result, check_conj=op.op not in CHECK_CONJ_SKIPS
)
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
prims.utils.compare_tensor_meta(
a, b, check_conj=op.op not in CHECK_CONJ_SKIPS
)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
helper
|
def helper(self, size, dtype, mixed_dtype=False):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=torch.channels_last)
bn = nn.BatchNorm2d(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
helper(self, shape, torch.float, False)
helper(self, shape, torch.bfloat16, False)
helper(self, shape, torch.bfloat16, True)
|
def helper(self, mod, size, dtype, mixed_dtype=False, format=torch.channels_last, precision=None):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=format).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=format)
bn = mod(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = mod(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=format))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad, atol=precision, rtol=precision)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm2d, shape, dtype, mixed_dtype, torch.channels_last)
precisons = {torch.float: 1e-4, torch.bfloat16: 1e-4, torch.float16: None}
for shape in [(4, 8, 2, 10, 10), (4, 1, 2, 9, 9), (4, 9, 1, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm3d, shape, dtype, mixed_dtype, torch.channels_last_3d, precisons[dtype])
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
_extract_strides
|
def _extract_strides(out):
if isinstance(out, torch.Tensor):
return (out.stride(),)
# assumes (see above) that out is an iterable of tensors
return tuple(map(lambda t: t.stride(), out))
# Extracts data pointers from a tensor or iterable of tensors into a tuple
# NOTE: only extracts on the CPU and CUDA device types since some
# device types don't have storage
|
def _extract_strides(out):
if isinstance(out, torch.Tensor):
return (out.stride(),)
# assumes (see above) that out is an iterable of tensors
return tuple(t.stride() for t in out)
# Extracts data pointers from a tensor or iterable of tensors into a tuple
# NOTE: only extracts on the CPU and CUDA device types since some
# device types don't have storage
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
_compare_out
|
def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
out = _apply_out_transform(transform, expected)
original_strides = _extract_strides(out)
original_ptrs = _extract_data_ptrs(out)
op_out(out=out)
final_strides = _extract_strides(out)
final_ptrs = _extract_data_ptrs(out)
self.assertEqual(expected, out)
if compare_strides_and_data_ptrs:
stride_msg = "Strides are not the same! Original strides were {0} and strides are now {1}".format(
original_strides, final_strides
)
self.assertEqual(original_strides, final_strides, msg=stride_msg)
self.assertEqual(original_ptrs, final_ptrs)
# Case Zero: out= with the correct dtype and device, but the wrong shape
# Expected behavior: if nonempty, resize with a warning.
|
def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
out = _apply_out_transform(transform, expected)
original_strides = _extract_strides(out)
original_ptrs = _extract_data_ptrs(out)
op_out(out=out)
final_strides = _extract_strides(out)
final_ptrs = _extract_data_ptrs(out)
self.assertEqual(expected, out)
if compare_strides_and_data_ptrs:
stride_msg = (
f"Strides are not the same! Original strides were {original_strides} "
f"and strides are now {final_strides}"
)
self.assertEqual(original_strides, final_strides, msg=stride_msg)
self.assertEqual(original_ptrs, final_ptrs)
# Case Zero: out= with the correct dtype and device, but the wrong shape
# Expected behavior: if nonempty, resize with a warning.
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
test_upsamplingnearest2d_backward_64bit_indexing
|
def test_upsamplingnearest2d_backward_64bit_indexing(self, device, dtype):
x = torch.randn(size=(36, 128, 512, 512), device=device, dtype=dtype).requires_grad_()
y = F.interpolate(x, scale_factor=2, mode="nearest")
y.backward(torch.randn_like(y))
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
class TestNNDeviceType(NNTestCase):
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_nn.py
|
test_masked_softmax_lowp
|
def test_masked_softmax_lowp(self, dtype):
sizes = [(1, 1, 32), (3, 16, 310), (12, 4, 1024), (4, 2, 1200)]
for (B, num_heads, L) in sizes:
for dim in [0, 3]:
input_lowp = torch.randn((B, num_heads, L, L), dtype=dtype).requires_grad_()
input_ref = input_lowp.float().detach().requires_grad_()
mask = torch.randint(0, 2, (B, L))
mask = mask.reshape(B, 1, 1, L).expand(B, num_heads, L, L).bool()
for mask_type in [1, 2]:
res_ref = torch._masked_softmax(input_ref, mask, dim, mask_type)
res = torch._masked_softmax(input_lowp, mask, dim, mask_type)
self.assertEqual(res_ref.to(dtype), res)
grad_lowp = torch.randn_like(res_ref).to(dtype=dtype)
grad_ref = grad_lowp.float()
res_ref.backward(grad_ref)
res.backward(grad_lowp)
self.assertEqual(input_ref.grad.to(dtype), input_lowp.grad)
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
class TestNNDeviceType(NNTestCase):
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_nn.py
|
issue_24823_1
|
def issue_24823_1(dtype):
image = torch.arange(27, 0, -1, dtype=dtype, device=device).view(1, 1, 3, 3, 3)
image.requires_grad_()
grid = torch.nn.functional.affine_grid(
torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]], dtype=dtype, device=device),
(1, 1, 3, 3, 3))
grid[:, 1, 1, 1, 0] = float('inf')
result = torch.nn.functional.grid_sample(image, grid, padding_mode='zeros')
self.assertEqual(result, torch.tensor([[[[[27., 26., 25.], [24., 23., 22.], [21., 20., 19.]],
[[18., 17., 16.], [15., 0., 13.], [12., 11., 10.]],
[[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]]]]],
device=device, dtype=dtype))
result.backward(torch.ones_like(result))
expected_grad = torch.ones_like(image)
expected_grad[0, 0, 1, 1, 1] = 0
self.assertEqual(image.grad, expected_grad, atol=0.005, rtol=0)
issue_24823_1(torch.half)
issue_24823_1(torch.float)
issue_24823_1(torch.double)
|
def issue_24823_1(dtype):
image = torch.arange(27, 0, -1, dtype=dtype, device=device).view(1, 1, 3, 3, 3)
image.requires_grad_()
grid = torch.nn.functional.affine_grid(
torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]], dtype=dtype, device=device),
(1, 1, 3, 3, 3))
grid[:, 1, 1, 1, 0] = float('inf')
result = torch.nn.functional.grid_sample(image, grid, padding_mode='zeros')
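            # fp16 grid_sample is noticeably less precise, so the comparison tolerance is loosened for half only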
tol_override = {'atol': 0.005, 'rtol': 0} if dtype == torch.half else {}
self.assertEqual(result, torch.tensor([[[[[27., 26., 25.], [24., 23., 22.], [21., 20., 19.]],
[[18., 17., 16.], [15., 0., 13.], [12., 11., 10.]],
[[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]]]]],
device=device, dtype=dtype), **tol_override)
result.backward(torch.ones_like(result))
expected_grad = torch.ones_like(image)
expected_grad[0, 0, 1, 1, 1] = 0
self.assertEqual(image.grad, expected_grad, atol=0.005, rtol=0)
issue_24823_1(torch.half)
issue_24823_1(torch.float)
issue_24823_1(torch.double)
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
helper
|
def helper(self, size, dtype, mixed_dtype=False):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=torch.channels_last)
bn = nn.BatchNorm2d(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
helper(self, shape, torch.float, False)
helper(self, shape, torch.bfloat16, False)
helper(self, shape, torch.bfloat16, True)
|
def helper(self, mod, size, dtype, mixed_dtype=False, format=torch.channels_last, precision=None):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=format).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=format)
bn = mod(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = mod(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=format))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad, atol=precision, rtol=precision)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
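                    # mixed dtype (fp32 module with reduced-precision input) only applies to bf16/fp16; keep it off for fp32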
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm2d, shape, dtype, mixed_dtype, torch.channels_last)
precisons = {torch.float: 1e-4, torch.bfloat16: 1e-4, torch.float16: None}
for shape in [(4, 8, 2, 10, 10), (4, 1, 2, 9, 9), (4, 9, 1, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm3d, shape, dtype, mixed_dtype, torch.channels_last_3d, precisons[dtype])
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
test_hardswish_grad
|
def test_hardswish_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardswish, (inputs,)))
|
def test_hardswish_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device, dtype=torch.double) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardswish, (inputs,)))
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
class TestNNDeviceType(NNTestCase):
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
class TestNNDeviceType(NNTestCase):
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
set_requires_grad
|
def set_requires_grad(x):
nonlocal any_requires_grad
if isinstance(x, torch.Tensor) and (
x.is_floating_point() or x.is_complex()
):
any_requires_grad = True
x.requires_grad_(True)
return x
out = pytree.tree_map_(set_requires_grad, expect)
if not any_requires_grad:
# Skip ops without any floating point outputs, e.g. isnan
return
msg = (
"functions with out=... arguments don't support automatic "
"differentiation, but one of the arguments requires grad."
)
with self.assertRaises(RuntimeError, msg=msg):
op(sample.input, *sample.args, **sample.kwargs, out=out)
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_ops.py
|
_test_consistency_helper
|
def _test_consistency_helper(samples, variants):
for sample in samples:
# TODO: Check grad for all Tensors requiring grad if sample.input is TensorList
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
# Computes function forward and backward values
tensor.grad = None
expected_forward = op(sample.input, *sample.args, **sample.kwargs)
expected_grad = None
output_process_fn_grad = (
sample.output_process_fn_grad
if sample.output_process_fn_grad
else lambda x: x
)
# Skips inplace variants if the output dtype is not the same as
# the input dtype
skip_inplace = False
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.dtype is not tensor.dtype
):
skip_inplace = True
# TODO: backward consistency only supported for single tensor outputs
# TODO: backward consistency only checked on sample.input, not all
# tensor inputs
# TODO: update to handle checking grads of all tensor inputs as
# derived from each tensor output
if isinstance(
expected_forward, torch.Tensor
) and dtype in op.supported_backward_dtypes(torch.device(device).type):
output_process_fn_grad(expected_forward).sum().backward()
expected_grad = tensor.grad
# Test eager consistency
for variant in variants:
# Skips inplace ops
if variant in inplace_ops and skip_inplace:
continue
# Compares variant's forward
# Note: copies the to-be-modified input when testing the inplace variant
tensor.grad = None
cloned = (
clone_input_helper(sample.input)
if variant in inplace_ops
else sample.input
)
if variant in inplace_ops and sample.broadcasts_input:
with self.assertRaises(
RuntimeError,
msg=(
"inplace variant either incorrectly allowed "
"resizing or you have marked the sample {}"
" incorrectly with `broadcasts_self=True".format(
sample.summary()
)
),
):
variant_forward = variant(
cloned, *sample.args, **sample.kwargs
)
continue
if variant in operators and sample.kwargs:
# skip samples with kwargs for operator variants
continue
variant_forward = variant(cloned, *sample.args, **sample.kwargs)
self.assertEqual(expected_forward, variant_forward)
# Compares variant's backward
if expected_grad is not None and (
variant not in inplace_ops or op.supports_inplace_autograd
):
output_process_fn_grad(variant_forward).sum().backward()
self.assertEqual(expected_grad, tensor.grad)
_test_consistency_helper(samples, variants)
|
def _test_consistency_helper(samples, variants):
for sample in samples:
# TODO: Check grad for all Tensors requiring grad if sample.input is TensorList
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
# Computes function forward and backward values
tensor.grad = None
expected_forward = op(sample.input, *sample.args, **sample.kwargs)
expected_grad = None
output_process_fn_grad = (
sample.output_process_fn_grad
if sample.output_process_fn_grad
else lambda x: x
)
# Skips inplace variants if the output dtype is not the same as
# the input dtype
skip_inplace = False
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.dtype is not tensor.dtype
):
skip_inplace = True
# TODO: backward consistency only supported for single tensor outputs
# TODO: backward consistency only checked on sample.input, not all
# tensor inputs
# TODO: update to handle checking grads of all tensor inputs as
# derived from each tensor output
if isinstance(
expected_forward, torch.Tensor
) and dtype in op.supported_backward_dtypes(torch.device(device).type):
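                    # .backward() needs a real-valued scalar; for complex outputs reduce via abs() first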
out = output_process_fn_grad(expected_forward).sum()
if out.dtype.is_complex:
out = out.abs()
out.backward()
expected_grad = tensor.grad
# Test eager consistency
for variant in variants:
# Skips inplace ops
if variant in inplace_ops and skip_inplace:
continue
# Compares variant's forward
# Note: copies the to-be-modified input when testing the inplace variant
tensor.grad = None
cloned = (
clone_input_helper(sample.input)
if variant in inplace_ops
else sample.input
)
if variant in inplace_ops and sample.broadcasts_input:
with self.assertRaises(
RuntimeError,
msg=(
"inplace variant either incorrectly allowed "
f"resizing or you have marked the sample {sample.summary()}"
" incorrectly with `broadcasts_self=True"
),
):
variant_forward = variant(
cloned, *sample.args, **sample.kwargs
)
continue
if variant in operators and sample.kwargs:
# skip samples with kwargs for operator variants
continue
variant_forward = variant(cloned, *sample.args, **sample.kwargs)
self.assertEqual(expected_forward, variant_forward)
# Compares variant's backward
if expected_grad is not None and (
variant not in inplace_ops or op.supports_inplace_autograd
):
out = output_process_fn_grad(variant_forward).sum()
if out.dtype.is_complex:
out = out.abs()
out.backward()
self.assertEqual(expected_grad, tensor.grad)
_test_consistency_helper(samples, variants)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
test_channel_shuffle
|
def test_channel_shuffle(self):
# 3D tensor
x = torch.tensor(
[[[1, 2],
[5, 6],
[9, 10],
[13, 14],
]]
)
y_ref = torch.tensor(
[[[1, 2],
[9, 10],
[5, 6],
[13, 14],
]]
)
# ChannelsFirst
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast not supported for 3dim
# 4D tensor
x = torch.tensor(
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
)
y_ref = torch.tensor(
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
)
# ChannelsFirst NCHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last), 2)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
# 5D tensor
x = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[5, 6],
[7, 8]]],
[[[9, 10],
[11, 12]]],
[[[13, 14],
[15, 16]]],
]]
)
y_ref = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[9, 10],
[11, 12]]],
[[[5, 6],
[7, 8]]],
[[[13, 14],
[15, 16]]],
]]
)
# ChannelsFirst NCHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last_3d), 2)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
|
def test_channel_shuffle(self, device):
# 3D tensor
x = torch.tensor(
[[[1, 2],
[5, 6],
[9, 10],
[13, 14],
]], device=device
)
y_ref = torch.tensor(
[[[1, 2],
[9, 10],
[5, 6],
[13, 14],
]], device=device
)
# ChannelsFirst
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2).to(device)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast not supported for 3dim
# 4D tensor
x = torch.tensor(
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]], device=device
)
y_ref = torch.tensor(
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]], device=device
)
# ChannelsFirst NCHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2).to(device)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last), 2).to(device)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
# 5D tensor
x = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[5, 6],
[7, 8]]],
[[[9, 10],
[11, 12]]],
[[[13, 14],
[15, 16]]],
]], device=device
)
y_ref = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[9, 10],
[11, 12]]],
[[[5, 6],
[7, 8]]],
[[[13, 14],
[15, 16]]],
]], device=device
)
# ChannelsFirst NCHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2).to(device)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last_3d), 2).to(device)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
class TestNN(NNTestCase):
from torch.serialization import SourceChangeWarning
import gc
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
class TestNNDeviceType(NNTestCase):
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
test_consume_prefix_in_state_dict_if_present
|
instantiate_parametrized_tests(TestNN)
if __name__ == '__main__':
run_tests()
|
def test_consume_prefix_in_state_dict_if_present(self):
class Block(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 3, bias=True)
self.conv2 = nn.Conv2d(3, 3, 3, bias=False)
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = nn.Linear(5, 5)
self.linear2 = nn.Linear(5, 5)
                self.bn = nn.BatchNorm2d(2)
self.block = Block()
# 0. Case non-DDP model empty state_dict
net = nn.Module()
state_dict = net.state_dict()
nn.modules.utils.consume_prefix_in_state_dict_if_present(state_dict, 'module.')
# check they are the same preserving order
self.assertEqual(list(state_dict.keys()), list(net.state_dict().keys()))
self.assertEqual(list(state_dict._metadata.keys()), list(net.state_dict()._metadata.keys()))
# 1. Case non-DDP model test example state_dict
net = Net()
state_dict = net.state_dict()
nn.modules.utils.consume_prefix_in_state_dict_if_present(state_dict, 'module.')
# Check they are the same preserving order
self.assertEqual(list(state_dict.keys()), list(net.state_dict().keys()))
self.assertEqual(list(state_dict._metadata.keys()), list(net.state_dict()._metadata.keys()))
# 2. Case DDP model test example state_dict
state_dict = net.state_dict()
metadata = state_dict._metadata
ddp_state_dict = OrderedDict((f'module.{k}', v) for k, v in state_dict.items())
ddp_state_dict._metadata = OrderedDict({'': metadata['']})
ddp_state_dict._metadata.update(('module' if k == '' else f'module.{k}', v) for k, v in metadata.items())
nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, 'module.')
# Check they are the same preserving order
self.assertEqual(list(state_dict.keys()), list(ddp_state_dict.keys()))
self.assertEqual(list(state_dict._metadata.keys()), list(ddp_state_dict._metadata.keys()))
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
import copy
import copy
class TestUtils(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
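The record above adds coverage for torch.nn.modules.utils.consume_prefix_in_state_dict_if_present, which strips a key prefix (typically the 'module.' prefix that DistributedDataParallel adds) from a state_dict in place while keeping key order. Below is a minimal standalone sketch of that behavior, separate from the dataset record and using a plain nn.Linear instead of the test's Net/Block modules.

# Hedged sketch (not part of the record above): strip a DDP-style "module."
# prefix from a checkpoint so it loads into the unwrapped model.
import torch.nn as nn
from collections import OrderedDict

model = nn.Linear(4, 2)
plain_sd = model.state_dict()

# Simulate a checkpoint saved from a DistributedDataParallel wrapper.
ddp_sd = OrderedDict((f"module.{k}", v) for k, v in plain_sd.items())

nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_sd, "module.")

assert list(ddp_sd.keys()) == list(plain_sd.keys())  # prefix gone, order preserved
model.load_state_dict(ddp_sd)  # now loads cleanly into the unwrapped model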
torch
|
test/test_nnapi.py
|
nhwc
|
def nhwc(t):
t = t.clone().contiguous(memory_format=torch.channels_last)
t.nnapi_nhwc = True
return t
@unittest.skipUnless('qnnpack' in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK")
class TestNNAPI(TestCase):
def setUp(self):
# Avoid saturation in fbgemm
torch.backends.quantized.engine = 'qnnpack'
libneuralnetworks_path = os.environ.get("LIBNEURALNETWORKS_PATH")
if libneuralnetworks_path:
ctypes.cdll.LoadLibrary(libneuralnetworks_path)
print("Will attempt to run NNAPI models.")
self.can_run_nnapi = True
else:
self.can_run_nnapi = False
# Created for easy override by subclasses (eg TestNnapiBackend)
def call_lowering_to_nnapi(self, traced_module, args):
return convert_model_to_nnapi(traced_module, args)
# Created for subclasses to set can_run_nnapi (eg TestNnapiBackend)
def set_can_run_nnapi(self, can_run):
self.can_run_nnapi = can_run
def check(
self,
module,
arg_or_args,
*,
trace_args=None,
convert_args=None,
atol_rtol=None,
limit=None,
expected_memory_format=None
):
with torch.no_grad():
if isinstance(arg_or_args, torch.Tensor):
args = [arg_or_args]
else:
args = arg_or_args
module.eval()
traced = torch.jit.trace(module, trace_args or args)
nnapi_module = self.call_lowering_to_nnapi(traced, convert_args or args)
if not self.can_run_nnapi:
# Only test that the model was converted successfully.
return
eager_output = module(*args)
nnapi_output = nnapi_module(*args)
kwargs = {}
if atol_rtol is not None:
kwargs["atol"] = atol_rtol[0]
kwargs["rtol"] = atol_rtol[1]
self.assertEqual(eager_output, nnapi_output, **kwargs)
if limit is not None:
mismatches = \
eager_output.int_repr().to(torch.int32) - \
nnapi_output.int_repr().to(torch.int32)
if mismatches.count_nonzero() > limit:
# Too many mismatches. Re-run the check with no tolerance
# to get a nice message.
self.assertEqual(eager_output, nnapi_output, atol=0, rtol=0)
if expected_memory_format:
self.assertTrue(nnapi_output.is_contiguous(memory_format=expected_memory_format))
def float_and_quant_and_nhwc(self, inp_float, scale, zero_point):
torch.manual_seed(29)
inp_quant = qpt(inp_float, 0.03, 128)
return [
("float", inp_float),
("float-nhwc", nhwc(inp_float)),
("quant", inp_quant),
("quant-nhwc", nhwc(inp_quant)),
]
def test_prelu(self):
arg = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
single_a = torch.nn.PReLU()
self.check(single_a, arg)
multi_a = torch.nn.PReLU(4)
with torch.no_grad():
multi_a.weight.copy_(torch.tensor([.1, .2, .3, .4]))
self.check(multi_a, nhwc(arg))
# Test flexible size
self.check(
multi_a,
arg,
trace_args=[torch.zeros(1, 4, 3, 3)],
convert_args=[nhwc(torch.zeros(1, 4, 0, 0))],
)
def test_quantize(self):
self.check(
torch.ao.nn.quantized.Quantize(0.25, 2, torch.quint8),
nhwc(torch.tensor([[[[1.0]], [[2.0]]]])))
def test_dequantize(self):
self.check(
torch.ao.nn.quantized.DeQuantize(),
nhwc(qpt([[[[1.0]], [[2.0]]]], 0.25, 2)))
def test_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, arg):
return arg.unsqueeze(self.dim)
self.check(UnsqueezeModule(-2), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(-1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(0), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(2), torch.randn(4, 2, 2))
def test_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, arg):
return arg.reshape(self.shape)
self.check(
ReshapeModule((2, 4)),
torch.randn(4, 2, 1, 1))
self.check(
ReshapeModule((8, -1)),
nhwc(torch.randn(4, 2, 1, 1)))
with self.assertRaisesRegex(Exception, "target size"):
self.check(
ReshapeModule((2, 4)),
nhwc(torch.randn(4, 2, 1, 1)))
def test_flatten(self):
for mod in [
torch.nn.Flatten(),
torch.nn.Flatten(start_dim=2, end_dim=3),
torch.nn.Flatten(start_dim=2, end_dim=4),
torch.nn.Flatten(start_dim=0, end_dim=-2),
torch.nn.Flatten(start_dim=0, end_dim=4)
]:
self.check(mod, torch.randn(4, 2, 1, 3, 7))
# flex inputs
self.check(
torch.nn.Flatten(),
torch.randn(4, 2, 1, 3, 7),
convert_args=[torch.zeros(0, 2, 1, 3, 7)]
)
# channels last
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(2, 1, 4, 7))
)
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(2, 3, 1, 1))
)
# Exceptions
with self.assertRaisesRegex(Exception, "not supported on NHWC"):
self.check(
torch.nn.Flatten(),
nhwc(torch.randn(1, 3, 4, 4))
)
with self.assertRaisesRegex(Exception, "Flattening flexible dims is not supported yet"):
self.check(torch.nn.Flatten(), torch.randn(4, 2, 0, 0, 7))
with self.assertRaisesRegex(Exception, "Only 1 dim"):
self.check(
torch.nn.Flatten(start_dim=1, end_dim=-2),
torch.randn(0, 2, 1, 3, 0))
def test_slice(self):
class SliceModule(torch.nn.Module):
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, t):
return t[1:, self.start:self.stop:self.step, :]
class SliceModule2(torch.nn.Module):
def forward(self, t):
return t[3:]
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2)
)
self.check(
SliceModule2(),
torch.randn(5)
)
# flex inputs
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(4, 6, 0)]
)
with self.assertRaisesRegex(Exception, "slice with flexible shape"):
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(0, 0, 0)]
)
def test_cat(self):
class CatModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, t1, t2):
return torch.cat([t1, t2], self.dim)
self.check(
CatModule(0),
[
torch.randn(1, 2, 3, 3),
torch.randn(2, 2, 3, 3),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
])
self.check(
CatModule(1),
[
nhwc(torch.randn(1, 2, 3, 3)),
nhwc(torch.randn(1, 4, 3, 3)),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
],
convert_args=[
torch.zeros(0, 0, 0, 0),
torch.zeros(0, 0, 0, 0)
])
def test_pointwise_unary(self):
for op in ["relu", "sigmoid"]:
with self.subTest(op):
class UnaryModule(torch.nn.Module):
def forward(self, arg):
if op == "relu":
return torch.nn.functional.relu(arg)
if op == "sigmoid":
return torch.sigmoid(arg)
raise Exception("Bad op")
self.check(UnaryModule(), torch.tensor([-1.0, 1.0]))
self.check(
UnaryModule(),
qpt(torch.tensor([-1.0, 1.0]), 1. / 256, 0),
)
def test_pointwise_binary(self):
for op in ["add", "sub", "mul", "div"]:
with self.subTest(op):
class BinaryModule(torch.nn.Module):
def forward(self, lhs, rhs):
if op == "add":
return lhs + rhs
if op == "sub":
return lhs - rhs
if op == "mul":
return lhs * rhs
if op == "div":
return lhs / rhs
raise Exception("Bad op")
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([3.0, 4.0]),
])
self.check(
BinaryModule(),
[
torch.tensor([[1.0, 2.0]]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
with self.assertRaisesRegex(Exception, "Non-equal-rank broadcast"):
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
def test_pointwise_binary_const(self):
const = torch.randn(1, 4, 6, 6)
class ArgPlusConst(torch.nn.Module):
def forward(self, arg):
return arg + const
class ConstPlusArg(torch.nn.Module):
def forward(self, arg):
return const + arg
arg_contig = torch.randn(2, 4, 6, 6)
arg_nhwc = nhwc(torch.randn(2, 4, 6, 6))
for mod_class in [ArgPlusConst, ConstPlusArg]:
for use_nhwc in [False, True]:
with self.subTest(mod_class=mod_class.__name__, use_nhwc=use_nhwc):
arg = arg_nhwc if use_nhwc else arg_contig
memory_format = torch.channels_last if use_nhwc else torch.contiguous_format
self.check(mod_class(), arg,
expected_memory_format=memory_format)
def test_hardtanh(self):
inp = torch.tensor([-2.0, -0.5, 0.5, 2.0, 7.0])
self.check(torch.nn.Hardtanh(), inp)
self.check(torch.nn.Hardtanh(0.0, 6.0), inp)
with self.assertRaisesRegex(Exception, "hardtanh with args"):
self.check(torch.nn.Hardtanh(0.0, 5.0), inp)
def test_softmax(self):
inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
self.check(torch.nn.Softmax(), inp)
self.check(torch.nn.Softmax(dim=0), inp)
# Test flexible size
self.check(
torch.nn.Softmax(),
inp,
convert_args=[torch.zeros(0, 0)],
)
def test_to(self):
class ToCPU(torch.nn.Module):
def __init__(self):
super().__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
y = x.to("cpu")
# add prelu since input operand can't be output
return self.prelu(y)
arg = torch.randn(1, 2, 3, 3)
self.check(ToCPU(), arg)
# Test flexible size
self.check(
ToCPU(),
arg,
convert_args=[torch.zeros(1, 2, 0, 0)],
)
def test_detach(self):
class DetachModule(torch.nn.Module):
def forward(self, x):
y = x.detach()
return torch.nn.functional.relu(y)
self.check(DetachModule(), torch.randn(1, 2, 3, 3))
self.check(
DetachModule(), torch.randn(1, 2, 3, 3),
convert_args=[torch.zeros(1, 2, 0, 0)])
def test_log_softmax(self):
inp = torch.randn(3, 10)
self.check(torch.nn.LogSoftmax(), inp)
self.check(torch.nn.LogSoftmax(0), inp)
def test_mean(self):
class MeanModule(torch.nn.Module):
def __init__(self, dim, keep=False):
super().__init__()
self.dim = dim
self.keep = keep
def forward(self, t):
return torch.mean(t, dim=self.dim, keepdim=self.keep)
self.check(MeanModule(0), torch.randn(2, 3))
self.check(MeanModule(1), torch.randn(2, 3))
self.check(MeanModule([2, 3]), torch.randn(2, 3, 6, 6))
self.check(MeanModule([2, 3]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2], keep=True), nhwc(torch.randn(2, 3, 6, 6)))
def test_max_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.MaxPool2d(2), inp)
self.check(torch.nn.MaxPool2d((3, 4)), inp)
self.check(torch.nn.MaxPool2d((3, 4), (1, 2)), inp)
def test_avg_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
atol_rtol = None
limit = None
convert_dims = (2, 3, 0, 0)
convert_arg = torch.zeros(*convert_dims)
for model in (
torch.nn.AvgPool2d(2),
torch.nn.AvgPool2d((3, 4)),
torch.nn.AvgPool2d((3, 4), (1, 2))):
if "quant" in name:
atol_rtol = (1, 0)
limit = model(inp).numel()
convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
if "nhwc" in name:
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit
)
def test_adaptive_avg_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.AdaptiveAvgPool2d((1, 1)), inp)
with self.assertRaisesRegex(Exception, "with output size"):
self.check(torch.nn.AdaptiveAvgPool2d((2, 2)), inp)
def test_upsample_nearest2d(self):
convert_args = dict(self.float_and_quant_and_nhwc(torch.randn(2, 3, 0, 0), 0.3, 128))
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.UpsamplingNearest2d(size=(16, 20)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(24, 32)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(36, 48)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(1.5, 1.5)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(3.0, 3.0)), inp)
self.check(
torch.nn.UpsamplingNearest2d(size=(24, 32)), inp,
convert_args=[convert_args[name]]
)
self.check(
torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp,
convert_args=[convert_args[name]]
)
def test_linear(self):
torch.manual_seed(29)
self.check(torch.nn.Linear(16, 32), torch.randn(2, 16))
self.check(
torch.nn.Linear(16, 32), torch.randn(2, 16),
convert_args=[torch.zeros(0, 16)])
def test_conv2d(self):
cases = [
# in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name
( 4, 8, (3, 3), 1, 0, 1, 1, (2, 4, 16, 16), "3x3"), # noqa: E201,E241
( 4, 8, (3, 3), 1, 0, 1, 0, (2, 4, 16, 16), "3x3nobias"), # noqa: E201,E241
( 4, 16, (3, 3), 1, 1, 1, 1, (2, 4, 16, 16), "3x3p1"), # noqa: E201,E241
( 8, 8, (3, 3), 2, 0, 1, 1, (2, 8, 16, 16), "3x3s2"), # noqa: E201,E241
( 4, 8, (5, 5), 1, 0, 1, 1, (2, 4, 16, 16), "5x5"), # noqa: E201,E241
( 4, 4, (3, 3), 1, 0, 4, 1, (2, 4, 16, 16), "3x3dw"), # noqa: E201,E241
( 8, 4, (1, 1), 1, 0, 1, 1, (2, 8, 16, 16), "1x1"), # noqa: E201,E241
]
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
for case in cases:
in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name = case
with self.subTest("{}-{}".format(kind, name)):
inp = torch.randn(input_dim)
model = torch.nn.Conv2d(in_ch, out_ch, kernel, stride, padding, groups=groups, bias=bool(bias))
output_size = model(inp).numel()
atol_rtol = None
limit = None
convert_dims = (0, in_ch, 0, 0)
convert_arg = torch.zeros(*convert_dims)
if "quant" in kind:
model = torch.nn.Sequential(model)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
model = torch.ao.quantization.prepare(model)
model(inp)
model = torch.ao.quantization.convert(model)
inp = qpt(inp, 1.0 / 16, 128)
# I've seen numerical differences between QNNPACK and NNAPI,
# but never more than 1 quantum, and never more than ~1% of
# the output in this test.
atol_rtol = (1, 0)
limit = output_size * 0.03
convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
if "nhwc" in kind:
inp = nhwc(inp)
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit
)
def test_conv2d_transpose(self):
torch.manual_seed(29)
in_ch, out_ch, kernel = (5, 7, (2, 2))
input_dim = (4, 5, 3, 3)
convert_dims = input_dim[:2] + (0, 0)
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
with self.subTest(kind):
inp = torch.randn(input_dim)
model = torch.nn.ConvTranspose2d(in_ch, out_ch, kernel)
output_size = model(inp).numel()
atol_rtol = (0.0002, 0)
limit = None
convert_arg = torch.zeros(*convert_dims)
if "quant" in kind:
model = torch.ao.nn.quantized.ConvTranspose2d(in_ch, out_ch, kernel)
model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
inp = qpt(inp, 1.0 / 16, 128)
# I've seen numerical differences between QNNPACK and NNAPI,
# but never more than 1 quantum, and never more than ~10% of
# the output in this test.
atol_rtol = (1, 0)
limit = output_size * 0.1
convert_arg = qpt(convert_arg, 1.0 / 16, 128)
if "nhwc" in kind:
inp = nhwc(inp)
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit
)
def test_qadd(self):
func = torch.ao.nn.quantized.QFunctional()
func.scale = 0.5
func.zero_point = 120
class AddMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.add(lhs, rhs)
class AddReluMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.add_relu(lhs, rhs)
class MulMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.mul(lhs, rhs)
for (name, mod) in [("add", AddMod), ("add_relu", AddReluMod), ("mul", MulMod)]:
with self.subTest(name):
self.check(
mod(),
[
qpt([1.0, 2.0], 0.25, 128),
qpt([3.0, 4.0], 0.25, 128),
])
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt([[1.0, 2.0]], 0.25, 128),
qpt(torch.zeros((1, 2)), 0.25, 128),
]
)
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt(torch.zeros((1, 2)), 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
]
)
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt(torch.zeros((1, 2)), 0.25, 128),
qpt(torch.zeros((1, 2)), 0.25, 128),
]
)
# NOTE: NNAPI qadd supports broadcast, but PT does not.
def test_qlinear(self):
torch.manual_seed(29)
weight = qpt(torch.randn(16, 32), 0.125, 0, torch.qint8)
bias = torch.randn(16)
mod = torch.ao.nn.quantized.Linear(32, 16)
mod.set_weight_bias(weight, bias)
inp = qpt(torch.randn(2, 32), 0.05, 130, torch.quint8)
self.check(mod, inp)
def test_seblock_mul(self):
class MulModel(torch.nn.Module):
def forward(self, lhs, rhs):
return lhs * rhs
self.check(
MulModel(),
[
nhwc(torch.randn(2, 3, 4, 4)),
torch.randn(1, 3, 1, 1),
])
def test_multi_output(self):
class MultiModel(torch.nn.Module):
def forward(self, lhs, rhs) -> Tuple[torch.Tensor, torch.Tensor]:
the_sum = lhs + rhs
the_diff = lhs - rhs
return the_sum, the_diff
self.check(MultiModel(), [torch.tensor([1.0, 2.0]), torch.tensor([1.0, 3.0])])
if __name__ == '__main__':
run_tests()
|
def nhwc(t):
t = t.clone().contiguous(memory_format=torch.channels_last)
t.nnapi_nhwc = True
return t
@unittest.skipUnless(
"qnnpack" in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK",
)
class TestNNAPI(TestCase):
def setUp(self):
# Avoid saturation in fbgemm
torch.backends.quantized.engine = "qnnpack"
libneuralnetworks_path = os.environ.get("LIBNEURALNETWORKS_PATH")
if libneuralnetworks_path:
ctypes.cdll.LoadLibrary(libneuralnetworks_path)
print("Will attempt to run NNAPI models.")
self.can_run_nnapi = True
else:
self.can_run_nnapi = False
# Created for easy override by subclasses (eg TestNnapiBackend)
def call_lowering_to_nnapi(self, traced_module, args):
return convert_model_to_nnapi(traced_module, args)
# Created for subclasses to set can_run_nnapi (eg TestNnapiBackend)
def set_can_run_nnapi(self, can_run):
self.can_run_nnapi = can_run
def check(
self,
module,
arg_or_args,
*,
trace_args=None,
convert_args=None,
atol_rtol=None,
limit=None,
expected_memory_format=None,
):
with torch.no_grad():
if isinstance(arg_or_args, torch.Tensor):
args = [arg_or_args]
else:
args = arg_or_args
module.eval()
traced = torch.jit.trace(module, trace_args or args)
nnapi_module = self.call_lowering_to_nnapi(traced, convert_args or args)
if not self.can_run_nnapi:
# Only test that the model was converted successfully.
return
eager_output = module(*args)
nnapi_output = nnapi_module(*args)
kwargs = {}
if atol_rtol is not None:
kwargs["atol"] = atol_rtol[0]
kwargs["rtol"] = atol_rtol[1]
self.assertEqual(eager_output, nnapi_output, **kwargs)
if limit is not None:
mismatches = eager_output.int_repr().to(
torch.int32
) - nnapi_output.int_repr().to(torch.int32)
if mismatches.count_nonzero() > limit:
# Too many mismatches. Re-run the check with no tolerance
# to get a nice message.
self.assertEqual(eager_output, nnapi_output, atol=0, rtol=0)
if expected_memory_format:
self.assertTrue(
nnapi_output.is_contiguous(memory_format=expected_memory_format)
)
def float_and_quant_and_nhwc(self, inp_float, scale, zero_point):
torch.manual_seed(29)
inp_quant = qpt(inp_float, 0.03, 128)
return [
("float", inp_float),
("float-nhwc", nhwc(inp_float)),
("quant", inp_quant),
("quant-nhwc", nhwc(inp_quant)),
]
def test_prelu(self):
arg = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
single_a = torch.nn.PReLU()
self.check(single_a, arg)
multi_a = torch.nn.PReLU(4)
with torch.no_grad():
multi_a.weight.copy_(torch.tensor([0.1, 0.2, 0.3, 0.4]))
self.check(multi_a, nhwc(arg))
# Test flexible size
self.check(
multi_a,
arg,
trace_args=[torch.zeros(1, 4, 3, 3)],
convert_args=[nhwc(torch.zeros(1, 4, 0, 0))],
)
def test_quantize(self):
self.check(
torch.ao.nn.quantized.Quantize(0.25, 2, torch.quint8),
nhwc(torch.tensor([[[[1.0]], [[2.0]]]])),
)
def test_dequantize(self):
self.check(
torch.ao.nn.quantized.DeQuantize(), nhwc(qpt([[[[1.0]], [[2.0]]]], 0.25, 2))
)
def test_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, arg):
return arg.unsqueeze(self.dim)
self.check(UnsqueezeModule(-2), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(-1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(0), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(2), torch.randn(4, 2, 2))
def test_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, arg):
return arg.reshape(self.shape)
self.check(ReshapeModule((2, 4)), torch.randn(4, 2, 1, 1))
self.check(ReshapeModule((8, -1)), nhwc(torch.randn(4, 2, 1, 1)))
with self.assertRaisesRegex(Exception, "target size"):
self.check(ReshapeModule((2, 4)), nhwc(torch.randn(4, 2, 1, 1)))
def test_flatten(self):
for mod in [
torch.nn.Flatten(),
torch.nn.Flatten(start_dim=2, end_dim=3),
torch.nn.Flatten(start_dim=2, end_dim=4),
torch.nn.Flatten(start_dim=0, end_dim=-2),
torch.nn.Flatten(start_dim=0, end_dim=4),
]:
self.check(mod, torch.randn(4, 2, 1, 3, 7))
# flex inputs
self.check(
torch.nn.Flatten(),
torch.randn(4, 2, 1, 3, 7),
convert_args=[torch.zeros(0, 2, 1, 3, 7)],
)
# channels last
self.check(torch.nn.Flatten(), nhwc(torch.randn(2, 1, 4, 7)))
self.check(torch.nn.Flatten(), nhwc(torch.randn(2, 3, 1, 1)))
# Exceptions
with self.assertRaisesRegex(Exception, "not supported on NHWC"):
self.check(torch.nn.Flatten(), nhwc(torch.randn(1, 3, 4, 4)))
with self.assertRaisesRegex(
Exception, "Flattening flexible dims is not supported yet"
):
self.check(torch.nn.Flatten(), torch.randn(4, 2, 0, 0, 7))
with self.assertRaisesRegex(Exception, "Only 1 dim"):
self.check(
torch.nn.Flatten(start_dim=1, end_dim=-2), torch.randn(0, 2, 1, 3, 0)
)
def test_slice(self):
class SliceModule(torch.nn.Module):
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, t):
return t[1:, self.start : self.stop : self.step, :]
class SliceModule2(torch.nn.Module):
def forward(self, t):
return t[3:]
self.check(SliceModule(1, 5, 2), torch.randn(4, 6, 2))
self.check(SliceModule2(), torch.randn(5))
# flex inputs
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(4, 6, 0)],
)
with self.assertRaisesRegex(Exception, "slice with flexible shape"):
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(0, 0, 0)],
)
def test_cat(self):
class CatModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, t1, t2):
return torch.cat([t1, t2], self.dim)
self.check(
CatModule(0),
[
torch.randn(1, 2, 3, 3),
torch.randn(2, 2, 3, 3),
],
)
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
],
)
self.check(
CatModule(1),
[
nhwc(torch.randn(1, 2, 3, 3)),
nhwc(torch.randn(1, 4, 3, 3)),
],
)
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
],
convert_args=[torch.zeros(0, 0, 0, 0), torch.zeros(0, 0, 0, 0)],
)
def test_pointwise_unary(self):
for op in ["relu", "sigmoid"]:
with self.subTest(op):
class UnaryModule(torch.nn.Module):
def forward(self, arg):
if op == "relu":
return torch.nn.functional.relu(arg)
if op == "sigmoid":
return torch.sigmoid(arg)
raise Exception("Bad op") # noqa: TRY002
self.check(UnaryModule(), torch.tensor([-1.0, 1.0]))
self.check(
UnaryModule(),
qpt(torch.tensor([-1.0, 1.0]), 1.0 / 256, 0),
)
def test_pointwise_binary(self):
for op in ["add", "sub", "mul", "div"]:
with self.subTest(op):
class BinaryModule(torch.nn.Module):
def forward(self, lhs, rhs):
if op == "add":
return lhs + rhs
if op == "sub":
return lhs - rhs
if op == "mul":
return lhs * rhs
if op == "div":
return lhs / rhs
raise Exception("Bad op") # noqa: TRY002
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([3.0, 4.0]),
],
)
self.check(
BinaryModule(),
[
torch.tensor([[1.0, 2.0]]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
],
)
with self.assertRaisesRegex(Exception, "Non-equal-rank broadcast"):
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
],
)
def test_pointwise_binary_const(self):
const = torch.randn(1, 4, 6, 6)
class ArgPlusConst(torch.nn.Module):
def forward(self, arg):
return arg + const
class ConstPlusArg(torch.nn.Module):
def forward(self, arg):
return const + arg
arg_contig = torch.randn(2, 4, 6, 6)
arg_nhwc = nhwc(torch.randn(2, 4, 6, 6))
for mod_class in [ArgPlusConst, ConstPlusArg]:
for use_nhwc in [False, True]:
with self.subTest(mod_class=mod_class.__name__, use_nhwc=use_nhwc):
arg = arg_nhwc if use_nhwc else arg_contig
memory_format = (
torch.channels_last if use_nhwc else torch.contiguous_format
)
self.check(mod_class(), arg, expected_memory_format=memory_format)
def test_hardtanh(self):
inp = torch.tensor([-2.0, -0.5, 0.5, 2.0, 7.0])
self.check(torch.nn.Hardtanh(), inp)
self.check(torch.nn.Hardtanh(0.0, 6.0), inp)
with self.assertRaisesRegex(Exception, "hardtanh with args"):
self.check(torch.nn.Hardtanh(0.0, 5.0), inp)
def test_softmax(self):
inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
self.check(torch.nn.Softmax(), inp)
self.check(torch.nn.Softmax(dim=0), inp)
# Test flexible size
self.check(
torch.nn.Softmax(),
inp,
convert_args=[torch.zeros(0, 0)],
)
def test_to(self):
class ToCPU(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
y = x.to("cpu")
# add prelu since input operand can't be output
return self.prelu(y)
arg = torch.randn(1, 2, 3, 3)
self.check(ToCPU(), arg)
# Test flexible size
self.check(
ToCPU(),
arg,
convert_args=[torch.zeros(1, 2, 0, 0)],
)
def test_detach(self):
class DetachModule(torch.nn.Module):
def forward(self, x):
y = x.detach()
return torch.nn.functional.relu(y)
self.check(DetachModule(), torch.randn(1, 2, 3, 3))
self.check(
DetachModule(),
torch.randn(1, 2, 3, 3),
convert_args=[torch.zeros(1, 2, 0, 0)],
)
def test_log_softmax(self):
inp = torch.randn(3, 10)
self.check(torch.nn.LogSoftmax(), inp)
self.check(torch.nn.LogSoftmax(0), inp)
def test_mean(self):
class MeanModule(torch.nn.Module):
def __init__(self, dim, keep=False):
super().__init__()
self.dim = dim
self.keep = keep
def forward(self, t):
return torch.mean(t, dim=self.dim, keepdim=self.keep)
self.check(MeanModule(0), torch.randn(2, 3))
self.check(MeanModule(1), torch.randn(2, 3))
self.check(MeanModule([2, 3]), torch.randn(2, 3, 6, 6))
self.check(MeanModule([2, 3]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2], keep=True), nhwc(torch.randn(2, 3, 6, 6)))
def test_max_pool2d(self):
for name, inp in self.float_and_quant_and_nhwc(
torch.randn(2, 3, 12, 16), 0.3, 128
):
with self.subTest(name):
self.check(torch.nn.MaxPool2d(2), inp)
self.check(torch.nn.MaxPool2d((3, 4)), inp)
self.check(torch.nn.MaxPool2d((3, 4), (1, 2)), inp)
def test_avg_pool2d(self):
for name, inp in self.float_and_quant_and_nhwc(
torch.randn(2, 3, 12, 16), 0.3, 128
):
with self.subTest(name):
atol_rtol = None
limit = None
convert_dims = (2, 3, 0, 0)
convert_arg = torch.zeros(*convert_dims)
for model in (
torch.nn.AvgPool2d(2),
torch.nn.AvgPool2d((3, 4)),
torch.nn.AvgPool2d((3, 4), (1, 2)),
):
if "quant" in name:
atol_rtol = (1, 0)
limit = model(inp).numel()
convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
if "nhwc" in name:
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit,
)
def test_adaptive_avg_pool2d(self):
for name, inp in self.float_and_quant_and_nhwc(
torch.randn(2, 3, 12, 16), 0.3, 128
):
with self.subTest(name):
self.check(torch.nn.AdaptiveAvgPool2d((1, 1)), inp)
with self.assertRaisesRegex(Exception, "with output size"):
self.check(torch.nn.AdaptiveAvgPool2d((2, 2)), inp)
def test_upsample_nearest2d(self):
convert_args = dict(
self.float_and_quant_and_nhwc(torch.randn(2, 3, 0, 0), 0.3, 128)
)
for name, inp in self.float_and_quant_and_nhwc(
torch.randn(2, 3, 12, 16), 0.3, 128
):
with self.subTest(name):
self.check(torch.nn.UpsamplingNearest2d(size=(16, 20)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(24, 32)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(36, 48)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(1.5, 1.5)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(3.0, 3.0)), inp)
self.check(
torch.nn.UpsamplingNearest2d(size=(24, 32)),
inp,
convert_args=[convert_args[name]],
)
self.check(
torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)),
inp,
convert_args=[convert_args[name]],
)
def test_linear(self):
torch.manual_seed(29)
self.check(torch.nn.Linear(16, 32), torch.randn(2, 16))
self.check(
torch.nn.Linear(16, 32),
torch.randn(2, 16),
convert_args=[torch.zeros(0, 16)],
)
def test_conv2d(self):
cases = [
# in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name
(4, 8, (3, 3), 1, 0, 1, 1, (2, 4, 16, 16), "3x3"), # noqa: E201,E241
(4, 8, (3, 3), 1, 0, 1, 0, (2, 4, 16, 16), "3x3nobias"), # noqa: E201,E241
(4, 16, (3, 3), 1, 1, 1, 1, (2, 4, 16, 16), "3x3p1"), # noqa: E201,E241
(8, 8, (3, 3), 2, 0, 1, 1, (2, 8, 16, 16), "3x3s2"), # noqa: E201,E241
(4, 8, (5, 5), 1, 0, 1, 1, (2, 4, 16, 16), "5x5"), # noqa: E201,E241
(4, 4, (3, 3), 1, 0, 4, 1, (2, 4, 16, 16), "3x3dw"), # noqa: E201,E241
(8, 4, (1, 1), 1, 0, 1, 1, (2, 8, 16, 16), "1x1"), # noqa: E201,E241
]
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
for case in cases:
(
in_ch,
out_ch,
kernel,
stride,
padding,
groups,
bias,
input_dim,
name,
) = case
with self.subTest(f"{kind}-{name}"):
inp = torch.randn(input_dim)
model = torch.nn.Conv2d(
in_ch,
out_ch,
kernel,
stride,
padding,
groups=groups,
bias=bool(bias),
)
output_size = model(inp).numel()
atol_rtol = None
limit = None
convert_dims = (0, in_ch, 0, 0)
convert_arg = torch.zeros(*convert_dims)
if "quant" in kind:
model = torch.nn.Sequential(model)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig(
"qnnpack"
)
model = torch.ao.quantization.prepare(model)
model(inp)
model = torch.ao.quantization.convert(model)
inp = qpt(inp, 1.0 / 16, 128)
# I've seen numerical differences between QNNPACK and NNAPI,
# but never more than 1 quantum, and never more than ~1% of
# the output in this test.
atol_rtol = (1, 0)
limit = output_size * 0.03
convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
if "nhwc" in kind:
inp = nhwc(inp)
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit,
)
def test_conv2d_transpose(self):
torch.manual_seed(29)
in_ch, out_ch, kernel = (5, 7, (2, 2))
input_dim = (4, 5, 3, 3)
convert_dims = input_dim[:2] + (0, 0)
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
with self.subTest(kind):
inp = torch.randn(input_dim)
model = torch.nn.ConvTranspose2d(in_ch, out_ch, kernel)
output_size = model(inp).numel()
atol_rtol = (0.0002, 0)
limit = None
convert_arg = torch.zeros(*convert_dims)
if "quant" in kind:
model = torch.ao.nn.quantized.ConvTranspose2d(in_ch, out_ch, kernel)
model.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
inp = qpt(inp, 1.0 / 16, 128)
# I've seen numerical differences between QNNPACK and NNAPI,
# but never more than 1 quantum, and never more than ~10% of
# the output in this test.
atol_rtol = (1, 0)
limit = output_size * 0.1
convert_arg = qpt(convert_arg, 1.0 / 16, 128)
if "nhwc" in kind:
inp = nhwc(inp)
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit,
)
def test_qadd(self):
func = torch.ao.nn.quantized.QFunctional()
func.scale = 0.5
func.zero_point = 120
class AddMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.add(lhs, rhs)
class AddReluMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.add_relu(lhs, rhs)
class MulMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.mul(lhs, rhs)
for name, mod in [("add", AddMod), ("add_relu", AddReluMod), ("mul", MulMod)]:
with self.subTest(name):
self.check(
mod(),
[
qpt([1.0, 2.0], 0.25, 128),
qpt([3.0, 4.0], 0.25, 128),
],
)
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt([[1.0, 2.0]], 0.25, 128),
qpt(torch.zeros((1, 2)), 0.25, 128),
],
)
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt(torch.zeros((1, 2)), 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
)
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt(torch.zeros((1, 2)), 0.25, 128),
qpt(torch.zeros((1, 2)), 0.25, 128),
],
)
# NOTE: NNAPI qadd supports broadcast, but PT does not.
def test_qlinear(self):
torch.manual_seed(29)
weight = qpt(torch.randn(16, 32), 0.125, 0, torch.qint8)
bias = torch.randn(16)
mod = torch.ao.nn.quantized.Linear(32, 16)
mod.set_weight_bias(weight, bias)
inp = qpt(torch.randn(2, 32), 0.05, 130, torch.quint8)
self.check(mod, inp)
def test_seblock_mul(self):
class MulModel(torch.nn.Module):
def forward(self, lhs, rhs):
return lhs * rhs
self.check(
MulModel(),
[
nhwc(torch.randn(2, 3, 4, 4)),
torch.randn(1, 3, 1, 1),
],
)
def test_multi_output(self):
class MultiModel(torch.nn.Module):
def forward(self, lhs, rhs) -> Tuple[torch.Tensor, torch.Tensor]:
the_sum = lhs + rhs
the_diff = lhs - rhs
return the_sum, the_diff
self.check(MultiModel(), [torch.tensor([1.0, 2.0]), torch.tensor([1.0, 3.0])])
if __name__ == "__main__":
run_tests()
|
import os
import ctypes
import torch
import unittest
from typing import Tuple
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_quantized import supported_qengines
from torch.testing._internal.common_utils import TestCase, run_tests
|
import ctypes
import os
import unittest
from typing import Tuple
import torch
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_quantized import supported_qengines
from torch.testing._internal.common_utils import run_tests, TestCase
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
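The NNAPI record above leans on two small helpers from test_nnapi.py: nhwc (shown in its before/after fields) and qpt, which is not shown. Judging from the call sites (qpt(tensor_or_list, scale, zero_point[, dtype])), qpt presumably wraps torch.quantize_per_tensor; the sketch below reflects that assumption and is not the file's actual definition.

# Assumed shape of the qpt helper used throughout the record above.
import torch

def qpt(t, scale, zero_point, dtype=torch.quint8):
    t = torch.as_tensor(t)  # accept lists as well as tensors, matching the call sites
    return torch.quantize_per_tensor(t, scale, zero_point, dtype)

q = qpt([[1.0, 2.0]], 0.25, 128)  # quint8, scale=0.25, zero_point=128
assert q.is_quantized and q.q_scale() == 0.25
torch.testing.assert_close(q.dequantize(), torch.tensor([[1.0, 2.0]]))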
torch
|
test/test_nn.py
|
test_cross_entropy_64bit
|
def test_cross_entropy_64bit(self, device, reduction):
labels = torch.zeros(190, 50, dtype=torch.long, device=device)
logits = torch.ones(190, 229000, 50, dtype=torch.float, device=device)
loss = torch.nn.functional.cross_entropy(logits, labels)
loss_cpu = torch.nn.functional.cross_entropy(logits.cpu(), labels.cpu())
print(logits.numel(), labels.numel(), loss.numel())
self.assertTrue(torch.allclose(loss_cpu, loss.cpu(), rtol=1e-4, atol=1e-4))
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
class TestNNDeviceType(NNTestCase):
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
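The record above stresses F.cross_entropy with roughly 2.2e9 logit elements (190 x 229000 x 50), i.e. past the 2**31 32-bit indexing limit, which is far too large to reproduce casually. The sketch below is a tiny stand-in with the same call shape: for 3-D logits the class dimension is dim 1 and the target carries the remaining dims.

# Tiny stand-in for the 64-bit-indexing test above (same call shape, small tensors).
import torch
import torch.nn.functional as F

labels = torch.zeros(4, 5, dtype=torch.long)      # (N, d1)
logits = torch.ones(4, 10, 5, dtype=torch.float)  # (N, C, d1), class dim is 1
loss = F.cross_entropy(logits, labels)
# Uniform logits give -log(1/C) = log(C) per element, so the mean is log(10).
torch.testing.assert_close(loss, torch.tensor(10.0).log())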
torch
|
test/test_nn.py
|
helper
|
def helper(self, size, dtype, mixed_dtype=False):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=torch.channels_last)
bn = nn.BatchNorm2d(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
helper(self, shape, torch.float, False)
helper(self, shape, torch.bfloat16, False)
helper(self, shape, torch.bfloat16, True)
|
def helper(self, mod, size, dtype, mixed_dtype=False, format=torch.channels_last, precision=None):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=format).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=format)
bn = mod(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = mod(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=format))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad, atol=precision, rtol=precision)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm2d, shape, dtype, mixed_dtype, torch.channels_last)
precisons = {torch.float: 1e-4, torch.bfloat16: 1e-4, torch.float16: None}
for shape in [(4, 8, 2, 10, 10), (4, 1, 2, 9, 9), (4, 9, 1, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm3d, shape, dtype, mixed_dtype, torch.channels_last_3d, precisons[dtype])
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
import copy
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
helper
|
def helper(self, size, dtype, mixed_dtype=False):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=torch.channels_last)
bn = nn.BatchNorm2d(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
helper(self, shape, torch.float, False)
helper(self, shape, torch.bfloat16, False)
helper(self, shape, torch.bfloat16, True)
|
def helper(self, mod, size, dtype, mixed_dtype=False, format=torch.channels_last, precision=None):
channels = size[1]
input = torch.randn(size, dtype=dtype, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=format).to(dtype)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device='cpu')
grad = grad.contiguous(memory_format=format)
bn = mod(channels).cpu().to(dtype)
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = mod(channels).cpu().to(dtype)
ref_bn.load_state_dict(bn.state_dict())
if mixed_dtype:
bn.float()
ref_bn.float()
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=format))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad, atol=precision, rtol=precision)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# test NC11 and N1HW; test mixed dtype
for shape in [(4, 8, 10, 10), (4, 1, 9, 9), (4, 9, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm2d, shape, dtype, mixed_dtype, torch.channels_last)
precisons = {torch.float: 1e-4, torch.bfloat16: 1e-4, torch.float16: None}
for shape in [(4, 8, 2, 10, 10), (4, 1, 2, 9, 9), (4, 9, 1, 1, 1)]:
for dtype in [torch.float, torch.bfloat16, torch.float16]:
for mixed_dtype in [False, True]:
if dtype == torch.float:
mixed_dtype = False
helper(self, nn.BatchNorm3d, shape, dtype, mixed_dtype, torch.channels_last_3d, precisons[dtype])
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
test_smoothl1loss_backward_zero_beta
|
def test_smoothl1loss_backward_zero_beta(self, device):
input = torch.randn(300, 256, requires_grad=True, device=device)
target = input.detach()
loss = F.smooth_l1_loss(input, target, beta=0.0, reduction='sum')
loss.backward()
grad_max_abs = input.grad.abs().max().item()
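# with beta == 0, smooth_l1_loss reduces to l1_loss, so each gradient element is sign(input - target);
# its magnitude must stay within 1.0 (a NaN or Inf gradient would also fail this bound)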
self.assertLessEqual(grad_max_abs, 1.0)
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
class TestNNDeviceType(NNTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_nn.py
|
test_clip_grad_norm_multi_device
|
def test_clip_grad_norm_multi_device(self, devices, foreach):
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.layer1 = nn.Linear(10, 10)
self.layer2 = nn.Linear(10, 10)
test_model = TestModel()
test_model.layer1.to(devices[0])
test_model.layer2.to(devices[1])
ref_model = TestModel().to(devices[0])
for norm_type in [2., math.inf]:
for p in test_model.parameters():
p.grad = torch.ones_like(p)
for p in ref_model.parameters():
p.grad = torch.ones_like(p)
norm = clip_grad_norm_(test_model.parameters(), 0.5, norm_type=norm_type, foreach=foreach)
expected = clip_grad_norm_(ref_model.parameters(), 0.5, norm_type=norm_type, foreach=foreach)
self.assertEqual(norm, expected)
for p, pe in zip(test_model.parameters(), ref_model.parameters()):
self.assertEqual(p.grad.to(devices[0]), pe.grad)
|
def test_clip_grad_norm_multi_device(self, devices, foreach):
class TestModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer1 = nn.Linear(10, 10)
self.layer2 = nn.Linear(10, 10)
test_model = TestModel()
test_model.layer1.to(devices[0])
test_model.layer2.to(devices[1])
ref_model = TestModel().to(devices[0])
for norm_type in [2., math.inf]:
for p in test_model.parameters():
p.grad = torch.ones_like(p)
for p in ref_model.parameters():
p.grad = torch.ones_like(p)
norm = clip_grad_norm_(test_model.parameters(), 0.5, norm_type=norm_type, foreach=foreach)
expected = clip_grad_norm_(ref_model.parameters(), 0.5, norm_type=norm_type, foreach=foreach)
self.assertEqual(norm, expected)
for p, pe in zip(test_model.parameters(), ref_model.parameters()):
self.assertEqual(p.grad.to(devices[0]), pe.grad)
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
class TestNNDeviceType(NNTestCase):
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
class TestNNDeviceType(NNTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_nn.py
|
cosine_distance
|
def cosine_distance(x, y):
return 1.0 - F.cosine_similarity(x, y)
distance_functions = (pairwise_distance, cosine_distance,
lambda x, y: 1.0 - F.cosine_similarity(x, y))
reductions = ('mean', 'none', 'sum')
margins = (1.0, 1.5, 0.5)
swaps = (True, False)
for distance_fn, reduction, margin, swap \
in itertools.product(distance_functions, reductions, margins, swaps):
anchor = torch.randn(5, 10, device=device, requires_grad=True)
positive = torch.randn(5, 10, device=device, requires_grad=True)
negative = torch.randn(5, 10, device=device, requires_grad=True)
# Test backward
self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
a, p, n, distance_function=distance_fn, reduction=reduction, margin=margin, swap=swap),
(anchor, positive, negative)))
loss_op = nn.TripletMarginWithDistanceLoss(distance_function=distance_fn,
reduction=reduction, margin=margin, swap=swap)
self.assertTrue(gradcheck(lambda a, p, n: loss_op(
a, p, n), (anchor, positive, negative)))
traced_loss_op = torch.jit.trace(loss_op, (anchor, positive, negative))
self.assertTrue(gradcheck(lambda a, p, n: traced_loss_op(
a, p, n), (anchor, positive, negative)))
# Test forward parity
functional = F.triplet_margin_with_distance_loss(anchor, positive, negative,
distance_function=distance_fn,
reduction=reduction, margin=margin, swap=swap)
modular = loss_op(anchor, positive, negative)
traced = traced_loss_op(anchor, positive, negative)
self.assertEqual(functional, modular, atol=1e-6, rtol=1e-6)
self.assertEqual(traced, modular, atol=1e-6, rtol=1e-6)
|
def cosine_distance(x, y):
return 1.0 - F.cosine_similarity(x, y)
distance_functions = (pairwise_distance, cosine_distance,
lambda x, y: 1.0 - F.cosine_similarity(x, y))
reductions = ('mean', 'none', 'sum')
margins = (1.0, 1.5, 0.5)
swaps = (True, False)
for distance_fn, reduction, margin, swap \
in itertools.product(distance_functions, reductions, margins, swaps):
anchor = torch.randn(5, 10, device=device, requires_grad=True, dtype=torch.double)
positive = torch.randn(5, 10, device=device, requires_grad=True, dtype=torch.double)
negative = torch.randn(5, 10, device=device, requires_grad=True, dtype=torch.double)
# Test backward
self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
a, p, n, distance_function=distance_fn, reduction=reduction, margin=margin, swap=swap),
(anchor, positive, negative)))
loss_op = nn.TripletMarginWithDistanceLoss(distance_function=distance_fn,
reduction=reduction, margin=margin, swap=swap)
self.assertTrue(gradcheck(lambda a, p, n: loss_op(
a, p, n), (anchor, positive, negative)))
traced_loss_op = torch.jit.trace(loss_op, (anchor, positive, negative))
self.assertTrue(gradcheck(lambda a, p, n: traced_loss_op(
a, p, n), (anchor, positive, negative)))
# Test forward parity
functional = F.triplet_margin_with_distance_loss(anchor, positive, negative,
distance_function=distance_fn,
reduction=reduction, margin=margin, swap=swap)
modular = loss_op(anchor, positive, negative)
traced = traced_loss_op(anchor, positive, negative)
self.assertEqual(functional, modular, atol=1e-6, rtol=1e-6)
self.assertEqual(traced, modular, atol=1e-6, rtol=1e-6)
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps,\
TEST_WITH_UBSAN, IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, IS_WINDOWS
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import gc
import copy
|
import contextlib
import math
import random
import unittest
import io
import itertools
import warnings
import pickle
import re
from copy import deepcopy
from itertools import product
from functools import partial
from collections import OrderedDict
from unittest import SkipTest
import torch
from torch import inf, nan
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn.utils.fusion import fuse_conv_bn_weights
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn import Buffer, Parameter
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_math_dtypes, floating_types
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \
download_file, get_function_arglist, load_tests, skipIfMps, \
IS_PPC, \
parametrize as parametrize_test, subtest, instantiate_parametrized_tests, \
skipIfTorchDynamo, gcIfJetson, set_default_dtype
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, _create_basic_net, \
ctcloss_reference, new_module_tests, single_batch_reference_fn, _test_bfloat16_ops, _test_module_empty_input
from torch.testing._internal.common_device_type import dtypesIfMPS, instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, \
onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, expectedFailureMPS, \
skipMeta, get_all_device_types
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
from torch.testing._internal.common_mkldnn import bf32_on_and_off
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
load_tests = load_tests
import scipy.signal
import scipy.ndimage
import numpy as np
from torch.serialization import SourceChangeWarning
import copy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
is_strided_tensor
|
def is_strided_tensor(arg):
return torch.is_tensor(arg) and arg.layout == torch.strided
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_ops.py
|
check_ignore_materialize
|
def check_ignore_materialize(idx_or_kw, allow_list):
return (allow_list is not None) and (idx_or_kw in allow_list)
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_ops.py
|
_assert_match_metadata
|
class TestMathBits(TestCase):
# Tests that
# 1. The operator's output for physically conjugated/negated tensors and conjugate/negative view tensors
# produces the same value
# 2. The gradients are the same in both cases mentioned in (1)
# 3. If the operator's inplace variant is supported, tests that the inplace operation
# produces the correct value when called on a conjugate/negative view tensor and that the output
# has its conj/neg bit set to true
# This test only runs for C -> R and C -> C functions
# TODO: add tests for `R->C` functions
# Note: This test runs for functions that take both tensors and tensorlists as input.
def _test_math_view(
self,
device,
dtype,
op,
samples,
math_op_physical,
math_op_view,
is_bit_set,
out_type,
):
inplace_variant = op.inplace_variant
# helper function to clone and conjugate/negate the input if it's a tensor
# else clone the sequence and conjugate/negate the first element in the sequence
# If a requires_grad argument is provided the tensor being conjugated/negated will
# have its requires_grad set to that value.
|
def _assert_match_metadata(a, b):
self.assertEqual(a.size(), b.size())
self.assertEqual(a.stride(), b.stride())
self.assertEqual(a.storage_offset(), b.storage_offset())
self.assertEqual(a.device, b.device)
self.assertEqual(a.dtype, b.dtype)
# ensure view replay is enabled
with torch.autograd._force_original_view_tracking(True):
for sample in op.sample_inputs(device, dtype, requires_grad=False):
inp = sample.input
outs = op(inp, *sample.args, **sample.kwargs)
if not isinstance(outs, (tuple, List)):
outs = [outs]
# for all outputs that are views of the input, we should be able to replay the
# forward and reverse views via a functioning view_func() / rev_view_func().
for out in outs:
if not (
isinstance(out, torch.Tensor)
and out._is_view()
and out._base is inp
):
continue
# forward view_func
new_inp = inp.clone()
_assert_match_metadata(new_inp, inp)
new_out = out._view_func_unsafe(new_inp)
_assert_match_metadata(new_out, out)
self.assertEqual(new_out, out)
# reverse view_func
new_out = out.detach()
new_inp = out._rev_view_func_unsafe(new_out)
_assert_match_metadata(new_inp, inp)
self.assertTrue(new_inp._is_view())
self.assertTrue(new_inp._base is new_out)
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_ops.py
|
clone_and_perform_view
|
def clone_and_perform_view(input, **kwargs):
if isinstance(input, torch.Tensor):
requires_grad = kwargs.get("requires_grad", input.requires_grad)
with torch.no_grad():
# Ensure view represents the original sample input
input = math_op_physical(input)
# Note: .conj() is not called under no_grad mode since it's not allowed to modify a
# view created in no_grad mode. Here it's ok to do so, so as a workaround we call conj
# before resetting the requires_grad field for input
input = math_op_view(input)
assert input.is_leaf
return input.requires_grad_(requires_grad)
if isinstance(input, Sequence):
out = list(map(clone_input_helper, input))
out[0] = clone_and_perform_view(out[0])
return tuple(out)
for sample in samples:
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
cloned1 = clone_and_perform_view(sample.input)
# Computes function forward value with a physically conjugated/negated tensor and
# a conj/neg view tensor and verifies that the output in both case are equal.
expected_forward = op(sample.input, *sample.args, **sample.kwargs)
forward_with_mathview = op(cloned1, *sample.args, **sample.kwargs)
self.assertEqual(expected_forward, forward_with_mathview)
# If the op has an inplace variant, and the input doesn't require broadcasting
# and has the same dtype as output, verify that the inplace operation on a conjugated/negated
# input produces correct output, and the output tensor has the conj/neg bit set to True
if inplace_variant is not None and not sample.broadcasts_input:
cloned2 = clone_and_perform_view(tensor, requires_grad=False)
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.dtype is tensor.dtype
):
inplace_forward = inplace_variant(
cloned2, *sample.args, **sample.kwargs
)
self.assertTrue(is_bit_set(inplace_forward))
self.assertEqual(inplace_forward, expected_forward)
# TODO: backward consistency only supported for single tensor outputs
# TODO: backward consistency only checked on sample.input, not all
# tensor inputs
# TODO: update to handle checking grads of all tensor inputs as
# derived from each tensor output
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.requires_grad
):
output_process_fn_grad = sample.output_process_fn_grad or (lambda x: x)
expected_forward = output_process_fn_grad(expected_forward)
forward_with_mathview = output_process_fn_grad(forward_with_mathview)
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
expected_forward.sum().backward(retain_graph=True)
forward_with_mathview.sum().backward(retain_graph=True)
if tensor.grad is not None:
cloned1_tensor = (
cloned1 if isinstance(cloned1, torch.Tensor) else cloned1[0]
)
self.assertEqual(tensor.grad, cloned1_tensor.grad)
tensor.grad, cloned1_tensor.grad = None, None
# a repeat of the above test if output is not complex valued
if out_type(expected_forward):
grad = torch.randn_like(expected_forward)
expected_forward.backward(grad)
forward_with_mathview.backward(
math_op_view(math_op_physical(grad))
)
self.assertEqual(tensor.grad, cloned1_tensor.grad)
|
def clone_and_perform_view(input, **kwargs):
if isinstance(input, torch.Tensor):
requires_grad = kwargs.get("requires_grad", input.requires_grad)
with torch.no_grad():
# Ensure view represents the original sample input
input = math_op_physical(input)
# Note: .conj() is not called under no_grad mode since it's not allowed to modify a
# view created in no_grad mode. Here it's ok to do so, so as a workaround we call conj
# before resetting the requires_grad field for input
input = math_op_view(input)
assert input.is_leaf
return input.requires_grad_(requires_grad)
if isinstance(input, Sequence):
out = list(map(clone_input_helper, input))
out[0] = clone_and_perform_view(out[0])
return tuple(out)
for sample in samples:
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
cloned1 = clone_and_perform_view(sample.input)
# Computes function forward value with a physically conjugated/negated tensor and
# a conj/neg view tensor and verifies that the output in both case are equal.
expected_forward = op(sample.input, *sample.args, **sample.kwargs)
forward_with_mathview = op(cloned1, *sample.args, **sample.kwargs)
self.assertEqual(expected_forward, forward_with_mathview)
# If the op has an inplace variant, and the input doesn't require broadcasting
# and has the same dtype as output, verify that the inplace operation on a conjugated/negated
# input produces correct output, and the output tensor has the conj/neg bit set to True
if inplace_variant is not None and not sample.broadcasts_input:
cloned2 = clone_and_perform_view(tensor, requires_grad=False)
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.dtype is tensor.dtype
):
inplace_forward = inplace_variant(
cloned2, *sample.args, **sample.kwargs
)
self.assertTrue(is_bit_set(inplace_forward))
self.assertEqual(inplace_forward, expected_forward)
# TODO: backward consistency only supported for single tensor outputs
# TODO: backward consistency only checked on sample.input, not all
# tensor inputs
# TODO: update to handle checking grads of all tensor inputs as
# derived from each tensor output
if (
isinstance(expected_forward, torch.Tensor)
and expected_forward.requires_grad
):
output_process_fn_grad = sample.output_process_fn_grad or (lambda x: x)
expected_forward = output_process_fn_grad(expected_forward)
forward_with_mathview = output_process_fn_grad(forward_with_mathview)
tensor = (
sample.input
if isinstance(sample.input, torch.Tensor)
else sample.input[0]
)
expected_forward.sum().abs().backward(retain_graph=True)
forward_with_mathview.sum().abs().backward(retain_graph=True)
if tensor.grad is not None:
cloned1_tensor = (
cloned1 if isinstance(cloned1, torch.Tensor) else cloned1[0]
)
self.assertEqual(tensor.grad, cloned1_tensor.grad)
tensor.grad, cloned1_tensor.grad = None, None
# a repeat of the above test if output is not complex valued
if out_type(expected_forward):
grad = torch.randn_like(expected_forward)
expected_forward.backward(grad)
forward_with_mathview.backward(
math_op_view(math_op_physical(grad))
)
self.assertEqual(tensor.grad, cloned1_tensor.grad)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
check_inplace_view
|
def check_inplace_view(func, input, rs, input_size, input_strides):
if func is None:
return
# TODO: extend this test to test ops with multiple outputs and ops like native_batch_norm(_legit).out
# which may mutate an input other than the first one.
if isinstance(rs, torch.Tensor) and rs is input:
unequal_size = rs.size() != input_size
unequal_strides = rs.stride() != input_strides
# resize_ should probably have inplace_view tag. Not adding the tag since it
# breaks some codegen logic
if (unequal_size or unequal_strides):
if isinstance(func, torch._ops.OpOverloadPacket):
func = func.default
# Reference: https://github.com/pytorch/pytorch/issues/78759
if func is not torch.ops.aten.resize_.default:
# TODO: use self.assertIn when we have separate tests for each tag
assert torch.Tag.inplace_view in func.tags
# A mode that when enabled runs correctness checks to ensure
# that operators have expected tags based on their input and
# ouput tensor properties
class TestTagsMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if isinstance(args[0], torch.Tensor):
old_size = args[0].size()
old_stride = args[0].stride()
rs = func(*args, **kwargs)
check_inplace_view(func, args[0], rs, old_size, old_stride)
else:
rs = func(*args, **kwargs)
return rs
# Test to verify the correctness for tags in `tags.yaml`, also available for access through `torch.Tags`
class TestTags(TestCase):
@onlyCPU
@ops(ops_and_refs, dtypes=OpDTypes.any_one)
def test_tags(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
# TODO: Test tags for ops that return a list of tensors
input = sample.input
if isinstance(input, torch.Tensor):
old_size = input.size()
old_stride = input.stride()
with TestTagsMode():
rs = op(input, *sample.args, **sample.kwargs)
# TODO: add test for aliases: https://github.com/pytorch/pytorch/issues/78761
aten_name = op.aten_name if op.aten_name is not None else op.name
opoverloadpacket = getattr(torch.ops.aten, aten_name, None)
check_inplace_view(opoverloadpacket, input, rs, old_size, old_stride)
class TestRefsOpsInfo(TestCase):
import_paths = ["_refs", "_refs.special", "_refs.nn.functional", "_refs.fft", "_refs._conversions"]
module_alls = [(path, import_module(f"torch.{path}").__all__) for path in import_paths]
ref_ops_names = tuple(itertools.chain.from_iterable(
[f"{path}.{op}" for op in module_all] for path, module_all in module_alls))
ref_db_names = {ref_op.name for ref_op in python_ref_db}
# TODO: References that do not have an entry in python_ref_db
skip_ref_ops = {
'_refs.bitwise_right_shift',
'_refs.copy_to',
'_refs.empty_strided',
'_refs.equal',
'_refs.full',
'_refs.full_like',
'_refs.item',
'_refs.to',
'_refs.ones',
'_refs.ones_like',
'_refs.special.expit',
'_refs.std_var',
'_refs.swap_axes',
'_refs.uniform',
'_refs.scalar_tensor',
'_refs.trunc_divide',
'_refs.zeros',
'_refs.zeros_like',
'_refs.rfloordiv',
'_refs.rtruediv',
'_refs.rpow',
# These should be tested with their out-of-place counterparts
'_refs.index_add_',
'_refs.index_copy_',
'_refs.index_fill_',
'_refs.native_group_norm',
}
not_in_decomp_table = {
# duplicated in _decomp and _refs
'_refs.nn.functional.group_norm',
'_refs.nn.functional.mse_loss',
'_refs.rsub',
# duplicated as refs do not have decent support for advanced indexing
'_refs.index_copy',
'_refs.index_copy_',
'_refs.index_add',
'_refs.index_add_',
# these are not aten ops?
'_refs._conversions.bfloat16',
'_refs._conversions.bool',
'_refs._conversions.byte',
'_refs._conversions.char',
'_refs._conversions.double',
'_refs._conversions.float',
'_refs._conversions.half',
'_refs._conversions.int',
'_refs._conversions.long',
'_refs._conversions.short',
'_refs._conversions.chalf',
'_refs._conversions.cfloat',
'_refs._conversions.cdouble',
'_refs.broadcast_shapes',
'_refs.broadcast_tensors',
'_refs.nn.functional.tanhshrink',
'_refs.nn.functional.triplet_margin_loss',
'_refs.rfloordiv',
'_refs.rtruediv',
'_refs.rpow',
# CompositeImplicitAutograd
'_refs.allclose',
'_refs.atleast_1d',
'_refs.atleast_2d',
'_refs.atleast_3d',
'_refs.broadcast_to',
'_refs.chunk',
'_refs.column_stack',
'_refs.contiguous',
'_refs.dsplit',
'_refs.dstack',
'_refs.fill',
'_refs.flatten',
'_refs.fliplr',
'_refs.flipud',
'_refs.float_power',
'_refs.hsplit',
'_refs.hstack',
'_refs.isclose',
'_refs.isfinite',
'_refs.isreal',
'_refs.log_softmax',
'_refs.movedim',
'_refs.narrow',
'_refs.nn.functional.l1_loss',
'_refs.nn.functional.log_softmax',
'_refs.nn.functional.poisson_nll_loss',
'_refs.nn.functional.softmax',
'_refs.nn.functional.softmin',
'_refs.positive',
'_refs.ravel',
'_refs.reshape',
'_refs.softmax',
'_refs.special.expit',
'_refs.special.log_softmax',
'_refs.special.softmax',
'_refs.square',
'_refs.T',
'_refs.tensor_split',
'_refs.to',
'_refs.true_divide',
'_refs.trunc_divide',
'_refs.vsplit',
'_refs.vstack',
'_refs.linalg.matrix_norm',
'_refs.linalg.norm',
'_refs.linalg.svd',
'_refs.linalg.svdvals',
'_refs.unflatten',
'_refs.sum_to_size',
# ref implementation missing kwargs
'_refs.full_like', # missing "layout"
'_refs.round', # missing "decimals"
'_refs.scalar_tensor', # missing "layout"
# other
'_refs.expand_as',
'_refs.as_strided', # _prims._as_strided_meta: "reduce() of empty sequence with no initial value"
'_refs.copy_to', # torch._C._jit_get_operation: No such operator aten::copy_to
'_refs.equal', # 'bool' object has no attribute 'dtype'
'_refs.conj', # Calls _prims.conj
'_refs.real',
'_refs.imag',
}
@parametrize("op", ref_ops_names)
def test_refs_are_in_python_ref_db(self, op):
inplace = op[-1] == "_"
if op in self.skip_ref_ops:
raise unittest.SkipTest(f"{op} does not have an entry in python_ref_db")
elif inplace:
self.assertNotIn(op, self.ref_db_names, msg=f"{op} is an in-place operation and should not have an OpInfo")
else:
self.assertIn(op, self.ref_db_names)
@parametrize("op", ref_ops_names)
def test_refs_are_in_decomp_table(self, op):
path = op.split('.')
module_path = '.'.join(path[:-1])
op_name = path[-1]
op_impl = getattr(import_module(f"torch.{module_path}"), op_name)
if op in self.not_in_decomp_table:
self.assertNotIn(op_impl, torch._decomp.decomposition_table.values(),
f"Unexpectedly found {op} in torch._decomp.decomposition_table.values()")
else:
self.assertIn(op_impl, torch._decomp.decomposition_table.values(),
f"Did not find {op} in torch._decomp.decomposition_table.values()")
fake_skips = (
"aminmax", # failing input
"cholesky", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cholesky_inverse", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"_segment_reduce.lengths", # Could not run 'aten::segment_reduce' with arguments from the 'Meta' backend.
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
# TODO: investigate/fix
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
# some inputs invoke dynamic output shape operators, some do not
sometimes_dynamic_output_op_test = (
"__getitem__",
"index_select",
)
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = (
"histogramdd",
)
# tests which have inconsistent fake tensor stride propagation
# XXX: no new tests should be added to this list as a result of a
# decomp or prim, see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
fake_tensor_stride_failing_ops = {
"fft.fft2",
"fft.fft",
"fft.fftn",
"fft.hfft2",
"fft.hfft",
"fft.hfftn",
"fft.ifft2",
"fft.ifft",
"fft.ifftn",
"fft.ihfft2",
"fft.ihfft",
"fft.ihfftn",
"fft.irfft2",
"fft.irfft",
"fft.irfftn",
"fft.rfft2",
"fft.rfft",
"fft.rfftn",
"svd",
"linalg.svd",
}
fake_backward_xfails = fake_tensor_stride_failing_ops | {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
"cholesky",
}
fake_backward_xfails = {xfail(stride_skip) for stride_skip in fake_backward_xfails} | {
xfail("_segment_reduce", "lengths"),
xfail("norm", "nuc"),
xfail("linalg.norm", "subgradients_at_zero"), # can accept vector inputs
skip('nn.functional.ctc_loss'),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip('pinverse'),
}
class TestFakeTensor(TestCase):
def _test_fake_helper(self, device, dtype, op, context):
name = op.name
if op.variant_test_name:
name += "." + op.variant_test_name
if name in fake_skips or "sparse" in name or "jiterator" in name:
self.skipTest("Skip failing test")
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
try:
mode = FakeTensorMode()
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
res = op(sample.input, *sample.args, **sample.kwargs)
except Exception as e:
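# eager execution failed for this sample, so there is no reference output to compare the fake-tensor result against; skip it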
continue
with context():
with mode:
res_fake = op(input, *args, **kwargs)
for fake_out, real_out in zip(
tree_flatten(res_fake)[0], tree_flatten(res)[0]
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
check_strides = name not in fake_tensor_stride_failing_ops
# prims/decomps must correctly model strides,
# see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
prims.utils.compare_tensor_meta(fake_out, real_out, check_strides)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs((input, args, kwargs), res_fake)
real_aliasing = outputs_alias_inputs((sample.input, sample, args, sample.kwargs), res)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(name not in dynamic_output_op_tests and name not in data_dependent_op_tests)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
pass
except torch._subclasses.fake_tensor.DynamicOutputShapeException:
self.assertTrue(name in dynamic_output_op_tests or name in sometimes_dynamic_output_op_test)
except torch._subclasses.fake_tensor.DataDependentOutputException:
self.assertTrue(name in data_dependent_op_tests)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_pointwise_ops(self, device, dtype, op):
name = op.name
if op.variant_test_name:
name += "." + op.variant_test_name
if name in fake_skips or "sparse" in name or "jiterator" in name:
self.skipTest("Skip failing test")
test_self = self
class TestPointwiseMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
out = func(*args, **kwargs)
if torch.Tag.pointwise in func.tags:
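# ops tagged pointwise must produce outputs whose shape equals the broadcast of all tensor input shapes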
shapes = []
for inp in tree_flatten((args, kwargs)):
if isinstance(inp, torch.Tensor):
shapes.append(inp.shape)
out_shape = torch._refs._broadcast_shapes(*shapes)
for out_elem in tree_flatten(out):
if isinstance(out_elem, torch.Tensor):
test_self.assertEqual(out_elem.shape, out_shape)
return out
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
mode = FakeTensorMode()
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
op(input, *args, **kwargs)
except Exception as e:
continue
with TestPointwiseMode():
with mode:
op(input, *args, **kwargs)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake(self, device, dtype, op):
self._test_fake_helper(device, dtype, op, contextlib.nullcontext)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake_autocast(self, device, dtype, op):
if op.name in fake_autocast_device_skips[device]:
self.skipTest("Skip failing test")
context = torch.cuda.amp.autocast if device == "cuda" else torch.cpu.amp.autocast
self._test_fake_helper(device, dtype, op, context)
def _test_fake_crossref_helper(self, device, dtype, op, context):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for iter, sample in enumerate(samples):
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
# skip these to speed up tests
common_skip_ops = (
aten.detach.default,
aten.empty_strided.default,
aten.copy_.default,
aten.is_same_size.default,
)
# TODO: enable check_aliasing, batch norm fails
with torch._subclasses.CrossRefFakeMode(ignore_op_fn=lambda fn: fn in common_skip_ops, check_aliasing=True):
with warnings.catch_warnings(), context(), torch.autograd.set_multithreading_enabled(False):
composite_compliance.compute_expected_grads(
op.get_op(), args, kwargs,
sample.output_process_fn_grad,
op.gradcheck_wrapper)
@skipIfRocm
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
@skipOps('TestFakeTensor', 'test_fake_crossref_backward_no_amp', fake_backward_xfails)
def test_fake_crossref_backward_no_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, contextlib.nullcontext)
@skipIfRocm
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
@skipOps('TestFakeTensor', 'test_fake_crossref_backward_amp', fake_backward_xfails | fake_autocast_backward_xfails)
def test_fake_crossref_backward_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, torch.cuda.amp.autocast)
instantiate_device_type_tests(TestCommon, globals())
instantiate_device_type_tests(TestCompositeCompliance, globals())
instantiate_device_type_tests(TestMathBits, globals())
instantiate_device_type_tests(TestRefsOpsInfo, globals(), only_for="cpu")
instantiate_device_type_tests(TestFakeTensor, globals())
instantiate_device_type_tests(TestTags, globals())
if __name__ == "__main__":
run_tests()
|
def check_inplace_view(func, input, rs, input_size, input_strides):
if func is None:
return
# TODO: extend this test to test ops with multiple outputs and ops like native_batch_norm(_legit).out
# which may mutate an input other than the first one.
if isinstance(rs, torch.Tensor) and rs is input:
unequal_size = rs.size() != input_size
unequal_strides = rs.stride() != input_strides
# resize_ should probably have inplace_view tag. Not adding the tag since it
# breaks some codegen logic
if unequal_size or unequal_strides:
if isinstance(func, torch._ops.OpOverloadPacket):
func = func.default
# Reference: https://github.com/pytorch/pytorch/issues/78759
if func is not torch.ops.aten.resize_.default:
# TODO: use self.assertIn when we have separate tests for each tag
assert torch.Tag.inplace_view in func.tags
# A mode that when enabled runs correctness checks to ensure
# that operators have expected tags based on their input and
# output tensor properties
class TestTagsMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if isinstance(args[0], torch.Tensor):
old_size = args[0].size()
old_stride = args[0].stride()
rs = func(*args, **kwargs)
check_inplace_view(func, args[0], rs, old_size, old_stride)
else:
rs = func(*args, **kwargs)
return rs
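An illustrative, self-contained sketch (not part of the test file; mode and variable names are made up) of the TorchDispatchMode pattern that TestTagsMode builds on: while the mode is active, every ATen call is routed through __torch_dispatch__, so the operator object and its declared tags can be inspected before the result is returned.

import torch
from torch.utils._python_dispatch import TorchDispatchMode

class PrintOpsMode(TorchDispatchMode):
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        out = func(*args, **kwargs)
        # func is an OpOverload; func.tags lists the tags declared in tags.yaml
        print(func, [str(tag) for tag in func.tags])
        return out

with PrintOpsMode():
    t = torch.ones(2, 2)
    t.add_(1)  # each intercepted ATen op is printed together with its tags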
# Test to verify the correctness for tags in `tags.yaml`, also available for access through `torch.Tags`
@unMarkDynamoStrictTest
class TestTags(TestCase):
@onlyCPU
@ops(ops_and_refs, dtypes=OpDTypes.any_one)
def test_tags(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
# TODO: Test tags for ops that return a list of tensors
input = sample.input
if isinstance(input, torch.Tensor):
old_size = input.size()
old_stride = input.stride()
with TestTagsMode():
rs = op(input, *sample.args, **sample.kwargs)
# TODO: add test for aliases: https://github.com/pytorch/pytorch/issues/78761
aten_name = op.aten_name if op.aten_name is not None else op.name
opoverloadpacket = getattr(torch.ops.aten, aten_name, None)
check_inplace_view(opoverloadpacket, input, rs, old_size, old_stride)
class TestSelfKwarg(TestCase):
def test_self_kwargs(self):
"""Verify that we can call the aten ops with all kwargs even if the
argument's name is "self"
"""
torch.ops.aten.reshape.default(self=torch.rand(1, 2), shape=[2])
torch.ops.aten.min.default(self=torch.rand(100))
@unMarkDynamoStrictTest
class TestRefsOpsInfo(TestCase):
import_paths = [
"_refs",
"_refs.special",
"_refs.nn.functional",
"_refs.fft",
"_refs._conversions",
]
module_alls = [
(path, import_module(f"torch.{path}").__all__) for path in import_paths
]
ref_ops_names = tuple(
itertools.chain.from_iterable(
[f"{path}.{op}" for op in module_all] for path, module_all in module_alls
)
)
ref_db_names = {ref_op.name for ref_op in python_ref_db}
# TODO: References that do not have an entry in python_ref_db
skip_ref_ops = {
"_refs.alias",
"_refs.bitwise_right_shift",
"_refs.copy_to",
"_refs.empty_permuted",
"_refs.empty_strided",
"_refs.equal",
"_refs.full",
"_refs.full_like",
"_refs.is_complex",
"_refs.to",
"_refs.mvlgamma",
"_refs.ones",
"_refs.ones_like",
"_refs.special.expit",
"_refs.std_var",
"_refs.swap_axes",
"_refs.uniform",
"_refs.scalar_tensor",
"_refs.trunc_divide",
"_refs.zero",
"_refs.zeros",
"_refs.zeros_like",
"_refs.rfloordiv",
"_refs.rtruediv",
"_refs.rpow",
# These should be tested with their out-of-place counterparts
"_refs.index_add_",
"_refs.index_copy_",
"_refs.index_fill_",
"_refs.native_group_norm",
}
not_in_decomp_table = {
# duplicated in _decomp and _refs
"_refs.nn.functional.group_norm",
"_refs.nn.functional.mse_loss",
"_refs.floor_divide",
# duplicated as refs do not have decent support for advanced indexing
"_refs.index_copy",
"_refs.index_copy_",
"_refs.index_add",
"_refs.index_add_",
# these are not aten ops?
"_refs._conversions.bfloat16",
"_refs._conversions.bool",
"_refs._conversions.byte",
"_refs._conversions.char",
"_refs._conversions.double",
"_refs._conversions.float",
"_refs._conversions.half",
"_refs._conversions.int",
"_refs._conversions.long",
"_refs._conversions.short",
"_refs._conversions.chalf",
"_refs._conversions.cfloat",
"_refs._conversions.cdouble",
"_refs.broadcast_shapes",
"_refs.broadcast_tensors",
"_refs.mvlgamma",
"_refs.nn.functional.layer_norm",
"_refs.nn.functional.tanhshrink",
"_refs.nn.functional.triplet_margin_loss",
"_refs.rfloordiv",
"_refs.rtruediv",
"_refs.rpow",
# CompositeImplicitAutograd
"_refs.allclose",
"_refs.atleast_1d",
"_refs.atleast_2d",
"_refs.atleast_3d",
"_refs.broadcast_to",
"_refs.chunk",
"_refs.column_stack",
"_refs.contiguous",
"_refs.dsplit",
"_refs.dstack",
"_refs.fill",
"_refs.fill_",
"_refs.flatten",
"_refs.fliplr",
"_refs.flipud",
"_refs.float_power",
"_refs.hsplit",
"_refs.hstack",
"_refs.isclose",
"_refs.isfinite",
"_refs.isreal",
"_refs.istft",
"_refs.log_softmax",
"_refs.movedim",
"_refs.narrow",
"_refs.nn.functional.dropout",
"_refs.nn.functional.l1_loss",
"_refs.nn.functional.smooth_l1_loss",
"_refs.nn.functional.log_softmax",
"_refs.nn.functional.poisson_nll_loss",
"_refs.nn.functional.softmax",
"_refs.nn.functional.softmin",
"_refs.positive",
"_refs.ravel",
"_refs.reshape",
"_refs.softmax",
"_refs.special.expit",
"_refs.special.log_softmax",
"_refs.special.softmax",
"_refs.square",
"_refs.stft",
"_refs.T",
"_refs.take_along_dim",
"_refs.tensor_split",
"_refs.to",
"_refs.true_divide",
"_refs.trunc_divide",
"_refs.vsplit",
"_refs.vstack",
"_refs.linalg.matrix_norm",
"_refs.linalg.norm",
"_refs.linalg.svd",
"_refs.linalg.svdvals",
"_refs.unflatten",
"_refs.sum_to_size",
# ref implementation missing kwargs
"_refs.full_like", # missing "layout"
"_refs.scalar_tensor", # missing "layout"
# other
"_refs.block_diag", # only refs._block_diag_iterable is in decomposition table
"_refs.empty", # intentional; direct empty is faster and has less guards
"_refs.empty_permuted", # intentional; direct empty is faster and has less guards
"_refs.expand_as",
"_refs.as_strided", # _prims._as_strided_meta: "reduce() of empty sequence with no initial value"
"_refs.copy_to", # torch._C._jit_get_operation: No such operator aten::copy_to
"_refs.equal", # 'bool' object has no attribute 'dtype'
"_refs.conj", # Calls _prims.conj
"_refs.real",
"_refs.imag",
"_refs.reshape_as",
"_refs.view_as",
"_refs.view_as_complex", # TorchInductor does not support complex at the moment.
# the decompositions for these ops are slightly different
# because of out handling
"_refs.var_mean",
"_refs.std_mean",
"_refs.native_layer_norm",
}
@parametrize("op", ref_ops_names)
def test_refs_are_in_python_ref_db(self, op):
inplace = op[-1] == "_"
if op in self.skip_ref_ops:
raise unittest.SkipTest(f"{op} does not have an entry in python_ref_db")
elif inplace:
self.assertNotIn(
op,
self.ref_db_names,
msg=f"{op} is an in-place operation and should not have an OpInfo",
)
else:
# Intentionally don't use assertIn to avoid printing the
# (very large) container
self.assertTrue(op in self.ref_db_names, msg=f"{op} not in ref_db_names")
@parametrize("op", ref_ops_names)
def test_refs_are_in_decomp_table(self, op):
path = op.split(".")
module_path = ".".join(path[:-1])
op_name = path[-1]
op_impl = getattr(import_module(f"torch.{module_path}"), op_name)
if op in self.not_in_decomp_table:
self.assertNotIn(
op_impl,
torch._decomp.decomposition_table.values(),
f"Unexpectedly found {op} in torch._decomp.decomposition_table.values()",
)
else:
self.assertIn(
op_impl,
torch._decomp.decomposition_table.values(),
f"Did not find {op} in torch._decomp.decomposition_table.values()",
)
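A hedged, standalone sketch of the membership check performed above: a reference implementation from torch._refs either appears as a value of torch._decomp.decomposition_table or is expected to be listed in not_in_decomp_table. The specific op chosen below is only for illustration, and the printed result depends on the PyTorch build.

import torch
import torch._decomp
import torch._refs

# Print whether this _refs implementation is registered in the global
# decomposition table (the same membership test the assertions above use).
ref_impl = torch._refs.var_mean
print(ref_impl in torch._decomp.decomposition_table.values())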
fake_skips = (
"aminmax", # failing input
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::_to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
# TODO: investigate/fix
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
fake_autocast_device_skips["cuda"] = {"linalg.pinv", "pinverse"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
# Ops that have dynamic output shapes that we can handle when
# allow_dynamic_shape_ops is True in fake tensor shape environment.
supported_dynamic_output_op_tests = (
"nonzero",
"unique",
"repeat_interleave",
"masked_select",
)
# some inputs invoke dynamic output shape operators, some do not
sometimes_dynamic_output_op_test = ("__getitem__", "index_select")
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
}
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip("pinverse"),
}
@unMarkDynamoStrictTest
class TestFakeTensor(TestCase):
def setUp(self):
# Turn on FakeTensor caching and cross-checking for these tests:
cache_enabled = unittest.mock.patch(
"torch._dynamo.config.fake_tensor_cache_enabled", True
)
cache_enabled.start()
self.addCleanup(cache_enabled.stop)
cache_crosscheck = unittest.mock.patch(
"torch._dynamo.config.fake_tensor_cache_crosscheck_enabled", True
)
cache_crosscheck.start()
self.addCleanup(cache_crosscheck.stop)
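The setUp above uses a common pattern: unittest.mock.patch flips a configuration flag for the duration of a test, and addCleanup guarantees it is restored afterwards. A minimal sketch of the same pattern, using a hypothetical environment variable rather than the dynamo config flags:

import os
import unittest
from unittest import mock

class PatchInSetUpExample(unittest.TestCase):
    def setUp(self):
        # MY_FLAG is a made-up name used only for illustration
        patcher = mock.patch.dict(os.environ, {"MY_FLAG": "1"})
        patcher.start()
        self.addCleanup(patcher.stop)  # undone automatically after each test

    def test_flag_is_set(self):
        self.assertEqual(os.environ["MY_FLAG"], "1")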
def _test_fake_helper(self, device, dtype, op, context):
name = op.name
if op.variant_test_name:
name += "." + op.variant_test_name
if name in fake_skips or "sparse" in name or "jiterator" in name:
self.skipTest("Skip failing test")
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
mode = FakeTensorMode()
from torch.fx.experimental.symbolic_shapes import ShapeEnv
allow_dynamic_output_shape_shape_env = ShapeEnv(
allow_dynamic_output_shape_ops=True
)
allow_dynamic_output_shape_mode = FakeTensorMode(
shape_env=allow_dynamic_output_shape_shape_env
)
try:
with context():
res = op(sample.input, *sample.args, **sample.kwargs)
except Exception:
continue
def run_with_fake_mode_and_verify(fake_mode, match_results=True):
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return fake_mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
with fake_mode:
res_fake = op(input, *args, **kwargs)
if not match_results:
return
for fake_out, real_out in zip(
pytree.tree_leaves(res_fake), pytree.tree_leaves(res)
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
self.assertEqual(fake_out, real_out)
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
if op.op not in [
torch.ops.aten._efficient_attention_forward,
torch.ops.aten._flash_attention_forward,
]:
# prims/decomps must correctly model strides,
# see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
# note: the excluded ops have intentionally incorrect device;
# see "Note [Seed and Offset]" (_meta_registrations.py)
prims.utils.compare_tensor_meta(fake_out, real_out, True)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs(
(input, args, kwargs), res_fake
)
real_aliasing = outputs_alias_inputs(
(sample.input, sample, args, sample.kwargs), res
)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(
name not in dynamic_output_op_tests
and name not in data_dependent_op_tests
)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
pass
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
except torch._subclasses.fake_tensor.DynamicOutputShapeException:
self.assertTrue(
name in dynamic_output_op_tests
or name in sometimes_dynamic_output_op_test
)
self.assertTrue(
fake_mode.shape_env is None
or not fake_mode.shape_env.allow_dynamic_output_shape_ops
or name not in supported_dynamic_output_op_tests
)
except torch._subclasses.fake_tensor.DataDependentOutputException:
self.assertTrue(name in data_dependent_op_tests)
run_with_fake_mode_and_verify(mode)
if name in supported_dynamic_output_op_tests:
run_with_fake_mode_and_verify(
allow_dynamic_output_shape_mode, match_results=False
)
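A minimal, self-contained sketch (assuming CPU tensors and a simple elementwise op) of the round trip the helper above exercises: real inputs are converted to fake tensors with from_tensor, the op is run under the mode, and only metadata such as shape and dtype is compared against the real result.

import torch
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode

real = torch.randn(3, 4)
mode = FakeTensorMode()
fake = mode.from_tensor(real)  # same shape/dtype/device, but no real storage

with mode:
    fake_out = torch.sin(fake)

real_out = torch.sin(real)
assert isinstance(fake_out, FakeTensor)
assert fake_out.shape == real_out.shape and fake_out.dtype == real_out.dtype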
@ops(op_db, dtypes=OpDTypes.any_one)
def test_pointwise_ops(self, device, dtype, op):
name = op.name
if op.variant_test_name:
name += "." + op.variant_test_name
if name in fake_skips or "sparse" in name or "jiterator" in name:
self.skipTest("Skip failing test")
test_self = self
class TestPointwiseMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
out = func(*args, **kwargs)
if torch.Tag.pointwise in func.tags:
shapes = []
for inp in pytree.arg_tree_leaves(*args, **kwargs):
if isinstance(inp, torch.Tensor):
shapes.append(inp.shape)
out_shape = torch._refs._broadcast_shapes(*shapes)
for out_elem in pytree.tree_leaves(out):
if isinstance(out_elem, torch.Tensor):
test_self.assertEqual(out_elem.shape, out_shape)
return out
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
mode = FakeTensorMode()
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
op(input, *args, **kwargs)
except Exception as e:
continue
with TestPointwiseMode():
with mode:
op(input, *args, **kwargs)
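The pointwise check above relies on the broadcasting rule: the output shape of a pointwise op is the broadcast of its tensor inputs' shapes. The test uses the private helper torch._refs._broadcast_shapes; the sketch below uses the public torch.broadcast_shapes, which should agree.

import torch

a = torch.randn(3, 1)
b = torch.randn(1, 4)
expected = torch.broadcast_shapes(a.shape, b.shape)  # torch.Size([3, 4])
assert torch.add(a, b).shape == expected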
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake(self, device, dtype, op):
self._test_fake_helper(device, dtype, op, contextlib.nullcontext)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake_autocast(self, device, dtype, op):
device_type = torch.device(device).type
if op.name in fake_autocast_device_skips[device_type]:
self.skipTest("Skip failing test")
def context_fn():
return torch.amp.autocast(device_type)
self._test_fake_helper(device, dtype, op, context_fn)
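context_fn above wraps each op call in torch.amp.autocast for the sampled device. A small hedged sketch of that context on CPU; the exact reduced-precision dtype is backend dependent, with bfloat16 being the usual choice for CPU matmul.

import torch

x = torch.randn(8, 8)
with torch.amp.autocast("cpu"):
    y = x @ x  # eligible ops may run in reduced precision under autocast
print(y.dtype)  # typically torch.bfloat16 on CPU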
def _test_fake_crossref_helper(self, device, dtype, op, context):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for iter, sample in enumerate(samples):
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
# skip these to speed up tests
common_skip_ops = (
aten.detach.default,
aten.empty_strided.default,
aten.copy_.default,
aten.is_same_size.default,
)
# TODO: enable check_aliasing, batch norm fails
try:
with torch._subclasses.CrossRefFakeMode(
ignore_op_fn=lambda fn: fn in common_skip_ops, check_aliasing=True
):
with warnings.catch_warnings(), context(), torch.autograd.set_multithreading_enabled(
False
):
composite_compliance.compute_expected_grads(
op.get_op(),
args,
kwargs,
sample.output_process_fn_grad,
op.gradcheck_wrapper,
)
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
@skipOps(
"TestFakeTensor", "test_fake_crossref_backward_no_amp", fake_backward_xfails
)
def test_fake_crossref_backward_no_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, contextlib.nullcontext)
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
@skipOps(
"TestFakeTensor",
"test_fake_crossref_backward_amp",
fake_backward_xfails | fake_autocast_backward_xfails,
)
def test_fake_crossref_backward_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, torch.cuda.amp.autocast)
@ops([op for op in ops_and_refs if op.is_factory_function])
def test_strided_layout(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
for sample in samples:
kwargs = sample.kwargs.copy()
kwargs["layout"] = torch.strided
strided_result = op(sample.input, *sample.args, **kwargs)
self.assertEqual(strided_result.layout, torch.strided)
instantiate_device_type_tests(TestCommon, globals())
instantiate_device_type_tests(TestCompositeCompliance, globals())
instantiate_device_type_tests(TestMathBits, globals())
instantiate_device_type_tests(TestRefsOpsInfo, globals(), only_for="cpu")
instantiate_device_type_tests(TestFakeTensor, globals())
instantiate_device_type_tests(TestTags, globals())
if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_ops.py
|
setUp
|
def setUp(self):
# Turn on FakeTensor caching and cross-checking for these tests:
cache_enabled = unittest.mock.patch(
"torch._dynamo.config.fake_tensor_cache_enabled", True
)
cache_enabled.start()
self.addCleanup(cache_enabled.stop)
cache_crosscheck = unittest.mock.patch(
"torch._dynamo.config.fake_tensor_cache_crosscheck_enabled", True
)
cache_crosscheck.start()
self.addCleanup(cache_crosscheck.stop)
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
fake_skips = (
"aminmax", # failing input
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::_to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
fake_autocast_device_skips["cuda"] = {"linalg.pinv", "pinverse"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
supported_dynamic_output_op_tests = (
"nonzero",
"unique",
"repeat_interleave",
"masked_select",
)
sometimes_dynamic_output_op_test = ("__getitem__", "index_select")
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
}
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip("pinverse"),
}
@unMarkDynamoStrictTest
class TestFakeTensor(TestCase):
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_ops.py
|
map_to_fake
|
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
res = op(sample.input, *sample.args, **sample.kwargs)
except Exception as e:
continue
with context():
with mode:
res_fake = op(input, *args, **kwargs)
for fake_out, real_out in zip(
tree_flatten(res_fake)[0], tree_flatten(res)[0]
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
check_strides = name not in fake_tensor_stride_failing_ops
# prims/decomps must correctly model strides,
# see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
prims.utils.compare_tensor_meta(fake_out, real_out, check_strides)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs((input, args, kwargs), res_fake)
real_aliasing = outputs_alias_inputs((sample.input, sample, args, sample.kwargs), res)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(name not in dynamic_output_op_tests and name not in data_dependent_op_tests)
|
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return fake_mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
with fake_mode:
res_fake = op(input, *args, **kwargs)
if not match_results:
return
for fake_out, real_out in zip(
pytree.tree_leaves(res_fake), pytree.tree_leaves(res)
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
self.assertEqual(fake_out, real_out)
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
if op.op not in [
torch.ops.aten._efficient_attention_forward,
torch.ops.aten._flash_attention_forward,
]:
# prims/decomps must correctly model strides,
# see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
# note: the excluded ops have intentionally incorrect device;
# see "Note [Seed and Offset]" (_meta_registrations.py)
prims.utils.compare_tensor_meta(fake_out, real_out, True)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs(
(input, args, kwargs), res_fake
)
real_aliasing = outputs_alias_inputs(
(sample.input, sample, args, sample.kwargs), res
)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(
name not in dynamic_output_op_tests
and name not in data_dependent_op_tests
)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
pass
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
except torch._subclasses.fake_tensor.DynamicOutputShapeException:
self.assertTrue(
name in dynamic_output_op_tests
or name in sometimes_dynamic_output_op_test
)
self.assertTrue(
fake_mode.shape_env is None
or not fake_mode.shape_env.allow_dynamic_output_shape_ops
or name not in supported_dynamic_output_op_tests
)
except torch._subclasses.fake_tensor.DataDependentOutputException:
self.assertTrue(name in data_dependent_op_tests)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
fake_skips = (
"aminmax", # failing input
"cholesky", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cholesky_inverse", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"_segment_reduce.lengths", # Could not run 'aten::segment_reduce' with arguments from the 'Meta' backend.
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
sometimes_dynamic_output_op_test = (
"__getitem__",
"index_select",
)
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = (
"histogramdd",
)
fake_tensor_stride_failing_ops = {
"fft.fft2",
"fft.fft",
"fft.fftn",
"fft.hfft2",
"fft.hfft",
"fft.hfftn",
"fft.ifft2",
"fft.ifft",
"fft.ifftn",
"fft.ihfft2",
"fft.ihfft",
"fft.ihfftn",
"fft.irfft2",
"fft.irfft",
"fft.irfftn",
"fft.rfft2",
"fft.rfft",
"fft.rfftn",
"svd",
"linalg.svd",
}
fake_backward_xfails = fake_tensor_stride_failing_ops | {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
"cholesky",
}
fake_backward_xfails = {xfail(stride_skip) for stride_skip in fake_backward_xfails} | {
xfail("_segment_reduce", "lengths"),
xfail("norm", "nuc"),
xfail("linalg.norm", "subgradients_at_zero"), # can accept vector inputs
skip('nn.functional.ctc_loss'),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip('pinverse'),
}
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
fake_skips = (
"aminmax", # failing input
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::_to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
fake_autocast_device_skips["cuda"] = {"linalg.pinv", "pinverse"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
supported_dynamic_output_op_tests = (
"nonzero",
"unique",
"repeat_interleave",
"masked_select",
)
sometimes_dynamic_output_op_test = ("__getitem__", "index_select")
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
}
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip("pinverse"),
}
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
rosenbrock
|
def rosenbrock(tensor):
x, y = tensor
return (1 - x) ** 2 + 100 * (y - x**2) ** 2
|
def rosenbrock(tensor):
assert tensor.size() == torch.Size(
[2]
), f"Requires tensor with 2 scalars but got {tensor.size()}"
x, y = tensor
return (1 - x) ** 2 + 100 * (y - x**2) ** 2
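A quick numerical sanity check (illustrative only, not part of the test file): the Rosenbrock function above has its global minimum of 0 at (x, y) = (1, 1), which is the solution the sparse optimizer tests converge toward from the starting point (1.5, 1.5).

import torch

def rosenbrock(tensor):
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x**2) ** 2

print(rosenbrock(torch.tensor([1.0, 1.0])))  # tensor(0.)
print(rosenbrock(torch.tensor([1.5, 1.5])))  # positive value at the tests' start point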
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
drosenbrock
|
def drosenbrock(tensor):
x, y = tensor
return torch.tensor((-400 * x * (y - x**2) - 2 * (1 - x), 200 * (y - x**2)))
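drosenbrock is the hand-written gradient of the rosenbrock function defined earlier; the sketch below (illustrative only) cross-checks it against autograd at the tests' starting point.

import torch

def rosenbrock(tensor):
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x**2) ** 2

def drosenbrock(tensor):
    x, y = tensor
    return torch.tensor((-400 * x * (y - x**2) - 2 * (1 - x), 200 * (y - x**2)))

p = torch.tensor([1.5, 1.5], requires_grad=True)
rosenbrock(p).backward()
assert torch.allclose(p.grad, drosenbrock(p.detach()))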
class TestOptim(TestCase):
exact_dtype = True
def _test_rosenbrock_sparse(
self,
constructor,
scheduler_constructors=None,
sparse_only=False,
maximize=False,
):
if scheduler_constructors is None:
scheduler_constructors = []
params_t = torch.tensor([1.5, 1.5])
params = Parameter(params_t)
optimizer = constructor([params])
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
if not sparse_only:
params_c = Parameter(params_t.clone())
optimizer_c = constructor([params_c])
solution = torch.tensor([1, 1])
with torch.no_grad():
initial_dist = params.dist(solution)
def eval(params, sparse_grad, w):
# Depending on w, provide only the x or y gradient
optimizer.zero_grad()
loss = rosenbrock(params)
loss.backward()
grad = drosenbrock(params.data)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
if w:
i = torch.LongTensor([[0, 0]])
x = grad[0]
v = torch.tensor([x / 4.0, x - x / 4.0])
else:
i = torch.LongTensor([[1, 1]])
y = grad[1]
v = torch.tensor([y - y / 4.0, y / 4.0])
x = sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype)
with torch.no_grad():
if sparse_grad:
params.grad = x
else:
params.grad = x.to_dense()
return loss
for i in range(2000):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, True, w))
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params))
else:
scheduler.step()
if not sparse_only:
optimizer_c.step(functools.partial(eval, params_c, False, w))
self.assertEqual(params, params_c)
if not maximize:
self.assertLessEqual(params.data.dist(solution), initial_dist)
else:
self.assertGreaterEqual(rosenbrock(params), rosenbrock(params_t))
def _test_basic_cases_template(
self,
weight_tensor,
bias_tensor,
input_tensor,
constructor,
scheduler_constructors,
constructor_accepts_maximize=True,
constructor_accepts_foreach=False,
):
maximize_options = {False, constructor_accepts_maximize}
foreach_options = {False, constructor_accepts_foreach}
four_arg_constructor = constructor
if constructor_accepts_maximize and constructor_accepts_foreach:
pass
elif constructor_accepts_maximize:
def four_arg_constructor(weight, bias, maximize, foreach):
self.assertFalse(foreach)
return constructor(weight, bias, maximize)
elif constructor_accepts_foreach:
def four_arg_constructor(weight, bias, maximize, foreach):
self.assertFalse(maximize)
return constructor(weight, bias, foreach)
else:
def four_arg_constructor(weight, bias, maximize, foreach):
self.assertFalse(maximize or foreach)
return constructor(weight, bias)
for maximize, foreach in itertools.product(maximize_options, foreach_options):
with torch.no_grad():
weight = Parameter(weight_tensor.clone().detach())
bias = Parameter(bias_tensor.clone().detach())
input = input_tensor.clone().detach().requires_grad_()
optimizer = four_arg_constructor(weight, bias, maximize, foreach)
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
# to check if the optimizer can be printed as a string
optimizer.__repr__()
def fn():
optimizer.zero_grad()
y = weight.mv(input)
if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
y = y.cuda(bias.get_device())
loss = (y + bias).pow(2).sum()
loss.backward()
return loss
initial_value = fn().item()
for _ in range(200):
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
val_loss = fn()
scheduler.step(val_loss)
else:
scheduler.step()
optimizer.step(fn)
if maximize:
self.assertGreater(fn().item(), initial_value)
else:
self.assertLess(fn().item(), initial_value)
def _test_state_dict(self, weight, bias, input, constructor, atol=None, rtol=None):
weight = Parameter(weight)
bias = Parameter(bias)
with torch.no_grad():
input = input.clone().detach().requires_grad_()
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
i = input_cuda if weight.is_cuda else input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = constructor(weight, bias)
fn = functools.partial(fn_base, optimizer, weight, bias)
# Prime the optimizer
for _i in range(20):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
with torch.no_grad():
weight_c = Parameter(weight.clone().detach())
bias_c = Parameter(bias.clone().detach())
optimizer_c = constructor(weight_c, bias_c)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict_c)
# Run both optimizations in parallel
for _ in range(20):
optimizer.step(fn)
optimizer_c.step(fn_c)
self.assertEqual(weight, weight_c)
self.assertEqual(bias, bias_c)
# Make sure state dict wasn't modified
self.assertEqual(state_dict, state_dict_c)
# Make sure state dict is deterministic with equal but not identical parameters
self.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
# Make sure repeated parameters have identical representation in state dict
optimizer_c.param_groups.extend(optimizer_c.param_groups)
self.assertEqual(
optimizer.state_dict()["param_groups"][-1],
optimizer_c.state_dict()["param_groups"][-1],
)
# Make sure that optimizers that support maximize can load older models
old_state_dict = deepcopy(optimizer.state_dict())
state_dict_no_maximize = deepcopy(optimizer.state_dict())
if "maximize" in state_dict_no_maximize["param_groups"][0]:
for group in state_dict_no_maximize["param_groups"]:
del group["maximize"]
optimizer.load_state_dict(state_dict_no_maximize)
# Make sure we can still step
optimizer.step()
# Undo these changes before proceeding!
optimizer.load_state_dict(old_state_dict)
# Make sure that optimizers that support foreach can load older models
state_dict_no_foreach = deepcopy(optimizer.state_dict())
if "foreach" in state_dict_no_foreach["param_groups"][0]:
for group in state_dict_no_foreach["param_groups"]:
del group["foreach"]
optimizer.load_state_dict(state_dict_no_foreach)
# Make sure we can still step
optimizer.step()
# Undo these changes before proceeding!
optimizer.load_state_dict(old_state_dict)
# Make sure that loading optimizers with step not wrapped in tensor can work
state_dict = optimizer.state_dict()
if "step" in state_dict["state"][0] and torch.is_tensor(
state_dict["state"][0]["step"]
):
for state in state_dict["state"].values():
state["step"] = state["step"].item()
optimizer.load_state_dict(state_dict)
optimizer.step()
# Check that state dict can be loaded even when we cast parameters
# to a different type and move to a different device.
if not torch.cuda.is_available():
return
with torch.no_grad():
input_cuda = input.clone().detach().to(dtype=torch.float32, device="cuda")
weight_cuda = Parameter(
weight.clone().detach().to(dtype=torch.float32, device="cuda")
)
bias_cuda = Parameter(
bias.clone().detach().to(dtype=torch.float32, device="cuda")
)
optimizer_cuda = constructor(weight_cuda, bias_cuda)
fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda)
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_cuda.load_state_dict(state_dict_c)
# Make sure state dict wasn't modified
self.assertEqual(state_dict, state_dict_c)
# Make sure that device of state['step'] is still CPU
new_state_dict = optimizer_cuda.state_dict()
if "step" in state_dict["state"][0] and torch.is_tensor(
state_dict["state"][0]["step"]
):
for state in new_state_dict["state"].values():
self.assertEqual(state["step"].device.type, "cpu")
for _i in range(20):
optimizer.step(fn)
optimizer_cuda.step(fn_cuda)
self.assertEqual(weight, weight_cuda)
self.assertEqual(bias, bias_cuda, atol=atol, rtol=rtol)
# validate deepcopy() copies all public attributes
def getPublicAttr(obj):
return {k for k in obj.__dict__ if not k.startswith("_")}
self.assertEqual(getPublicAttr(optimizer), getPublicAttr(deepcopy(optimizer)))
def _test_basic_cases(
self,
constructor,
scheduler_constructors=None,
ignore_multidevice=False,
constructor_accepts_maximize=False,
constructor_accepts_foreach=False,
atol=None,
rtol=None,
):
if scheduler_constructors is None:
scheduler_constructors = []
def make_two_arg_constructor(
constructor, maximize: bool = False, foreach: bool = False
):
if constructor_accepts_maximize and constructor_accepts_foreach:
return lambda weight, bias: constructor(weight, bias, maximize, foreach)
if constructor_accepts_maximize:
return lambda weight, bias: constructor(weight, bias, maximize)
if constructor_accepts_foreach:
return lambda weight, bias: constructor(weight, bias, foreach)
return constructor
for maximize, foreach in itertools.product(
{False, constructor_accepts_maximize},
{False, constructor_accepts_foreach},
):
self._test_state_dict(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
make_two_arg_constructor(constructor, maximize, foreach),
atol=atol,
rtol=rtol,
)
self._test_basic_cases_template(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor,
scheduler_constructors,
constructor_accepts_maximize,
constructor_accepts_foreach,
)
# non-contiguous parameters
self._test_basic_cases_template(
torch.randn(10, 5, 2)[..., 0],
torch.randn(10, 2)[..., 0],
torch.randn(5),
constructor,
scheduler_constructors,
constructor_accepts_maximize,
constructor_accepts_foreach,
)
# CUDA
if not torch.cuda.is_available():
return
self._test_basic_cases_template(
torch.randn(10, 5).cuda(),
torch.randn(10).cuda(),
torch.randn(5).cuda(),
constructor,
scheduler_constructors,
constructor_accepts_maximize,
constructor_accepts_foreach,
)
# Multi-GPU
if not torch.cuda.device_count() > 1 or ignore_multidevice:
return
self._test_basic_cases_template(
torch.randn(10, 5).cuda(0),
torch.randn(10).cuda(1),
torch.randn(5).cuda(0),
constructor,
scheduler_constructors,
constructor_accepts_maximize,
constructor_accepts_foreach,
)
def _test_complex_optimizer(self, optimizer_constructor):
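# Step a complex parameter and its real view with matching gradients; after
# each step the complex parameter, viewed as real, must equal the real one.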
complex_param = torch.randn(5, 5, dtype=torch.complex64, requires_grad=True)
real_param = torch.view_as_real(complex_param).detach().clone().requires_grad_()
complex_opt = optimizer_constructor(complex_param)
real_opt = optimizer_constructor(real_param)
for _ in range(3):
complex_param.grad = torch.randn_like(complex_param)
real_param.grad = torch.view_as_real(complex_param.grad)
complex_opt.step()
real_opt.step()
self.assertEqual(torch.view_as_real(complex_param), real_param)
def _test_complex_2d(self, optimizer_constructor, f=None):
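# Optimize a complex 2-vector and the equivalent pair of real/imaginary
# tensors on the same objective; gradients and values must stay in agreement.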
if f is None:
f = rosenbrock
a1 = torch.randn(2, dtype=torch.complex64, requires_grad=True)
a1_real = a1.real.clone().detach()
a1_imag = a1.imag.clone().detach()
a1_real.requires_grad_()
a1_imag.requires_grad_()
optim1 = optimizer_constructor([a1])
optim2 = optimizer_constructor([a1_real, a1_imag])
for _ in range(10):
optim1.zero_grad()
optim2.zero_grad()
a2 = torch.complex(a1_real, a1_imag)
f(a1).backward()
f(a2).backward()
self.assertEqual(a1.grad.real, a1_real.grad)
self.assertEqual(a1.grad.imag, a1_imag.grad)
optim1.step()
optim2.step()
self.assertEqual(a1.real, a1_real)
self.assertEqual(a1.imag, a1_imag)
def _build_params_dict(self, weight, bias, **kwargs):
return [{"params": [weight]}, dict(params=[bias], **kwargs)]
def _build_params_dict_single(self, weight, bias, **kwargs):
return [dict(params=bias, **kwargs)]
def test_sgd(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict_single(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict_single(weight, bias, lr=1e-2),
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: StepLR(opt, gamma=0.9, step_size=10)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: LinearLR(
opt, start_factor=0.4, end_factor=0.8, total_iters=4
)
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: LinearLR(
opt, start_factor=0.4, end_factor=0.6, total_iters=4
),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.99, step_size=10),
lambda opt: ExponentialLR(opt, gamma=0.99),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
lr=1e-3,
momentum=0.5,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
lr=1e-3,
momentum=0.5,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
nesterov=True,
lr=1e-3,
momentum=0.5,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid momentum value: -0.5"):
optim.SGD(None, lr=1e-2, momentum=-0.5)
def test_sgd_sparse(self):
for foreach in (False, True):
self._test_rosenbrock_sparse(
lambda params: optim.SGD(params, lr=4.8e-3, foreach=foreach)
)
self._test_rosenbrock_sparse(
lambda params: optim.SGD(params, lr=0.0048, foreach=foreach),
[lambda opt: StepLR(opt, gamma=0.99999, step_size=300)],
)
def test_sgd_complex(self):
for foreach in (False, True):
self._test_complex_optimizer(
lambda param: optim.SGD([param], lr=0.001, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.SGD([param], lr=0.001, momentum=1, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.SGD(
[param], lr=0.001, momentum=1, weight_decay=1, foreach=foreach
)
)
self._test_complex_optimizer(
lambda param: optim.SGD(
[param],
lr=0.001,
nesterov=True,
momentum=1,
weight_decay=1,
foreach=foreach,
)
)
self._test_complex_optimizer(
lambda param: optim.SGD(
[param],
lr=0.001,
momentum=1,
dampening=0.5,
weight_decay=1,
foreach=foreach,
)
)
def _test_derived_optimizers_varying_tensors(self, optimizer_with_kwargs, kwarg):
if not torch.cuda.is_available():
return
assert kwarg in ("foreach", "fused")
# Specifically test that inputting params of different dtypes and devices
# is handled equivalently on the foreach and fused implementations as the
# single tensor implementations. We need multiple GPUs (vs just a CPU and
# GPU) because fused adam only works on GPUs. (Thus we only run the tests
# that call into this helper when TEST_MULTIGPU.)
params = [
torch.rand(2, 3, dtype=torch.float64, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.float64, device='cuda:1', requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device='cuda:1', requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device='cuda:1', requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device='cuda:1', requires_grad=True),
torch.randint(1024, (2, 3), dtype=torch.int64, device='cuda:1', requires_grad=False),
]
for p in params:
if p.requires_grad:
p.grad = torch.rand_like(p, device=p.device, dtype=p.dtype)
kIterations = 7 if kwarg == "foreach" else 1
for optimizer_constructor, kwargs in optimizer_with_kwargs:
res, state = [], []
for enabled in (False, True):
kwargs_clone = deepcopy(kwargs)
kwargs_clone[kwarg] = enabled
params_clone = []
for p in params:
p_clone = p.clone().detach()
if p.requires_grad:
p_clone.requires_grad = True
p_clone.grad = p.grad.clone().detach()
params_clone.append(p_clone)
optimizer = optimizer_constructor(params_clone, **kwargs_clone)
for _ in range(kIterations):
optimizer.step()
state.append(optimizer.state)
res.append(params_clone)
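# st_* is the single-tensor (flag off) run, mt_* the multi-tensor/fused
# (flag on) run; parameters and optimizer state must match exactly.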
st_state = state[0]
mt_state = state[1]
for st_p, mt_p in zip(res[0], res[1]):
self.assertEqual(st_p, mt_p)
# check that optimizer states are the same
st_p_state = st_state[st_p]
mt_p_state = mt_state[mt_p]
for k in st_p_state:
actual = mt_p_state[k]
# If `torch.optim.Adam` is `__init__`ed with either `fused=True` or `capturable=True`,
# `step` Tensor is 1D while usually it's 0D.
if (
k == "step"
and isinstance(actual, torch.Tensor)
and actual.ndim == 1
):
actual = actual[0]
self.assertEqual(st_p_state[k], actual)
def _test_derived_optimizers(self, optimizer_pairs_with_flags, flag):
if not torch.cuda.is_available():
return
assert flag in ("foreach", "fused")
# why 7? iteration 7 is where we start to see differences for RAdam
# params interacting with the small eps value, because that's right
# after rho_t becomes greater than 5 in step 6.
kIterations = 7
device = "cuda"
for optimizer_constructor, params in optimizer_pairs_with_flags:
res, state = [], []
for flag_value in (False, True):
input = torch.tensor(
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=torch.float64, device=device
).reshape(3, 2)
torch.manual_seed(1)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
)
model.to(dtype=torch.float64, device=device)
params_with_flags = deepcopy(params)
params_with_flags[flag] = flag_value
optimizer = optimizer_constructor(
model.parameters(), **params_with_flags
)
for i in range(kIterations):
optimizer.zero_grad()
output = model(input)
loss = output.sum()
loss.backward()
# Test that step behaves as expected (a no-op) when grads are set to None
if i == 0:
optimizer.zero_grad(set_to_none=True)
optimizer.step()
state.append(optimizer.state)
res.append(model.parameters())
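# st_* is the single-tensor (flag off) run, mt_* the multi-tensor/fused
# (flag on) run; parameters and optimizer state must match exactly.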
st_state = state[0]
mt_state = state[1]
for st_p, mt_p in zip(res[0], res[1]):
self.assertEqual(st_p, mt_p)
# check that optimizer states are the same
st_p_state = st_state[st_p]
mt_p_state = mt_state[mt_p]
for k in st_p_state:
actual = mt_p_state[k]
# If `torch.optim.Adam` is `__init__`ed with either `fused=True` or `capturable=True`,
# `step` Tensor is 1D while usually it's 0D.
if (
k == "step"
and isinstance(actual, torch.Tensor)
and actual.ndim == 1
):
actual = actual[0]
self.assertEqual(st_p_state[k], actual)
def test_multi_tensor_optimizers(self):
optimizer_pairs_with_flags = [
(optim.Adam, dict(weight_decay=1.0, amsgrad=True, fused=False)),
(optim.Adam, dict(weight_decay=1.0, amsgrad=False, fused=False)),
(optim.Adam, dict(weight_decay=0.0, amsgrad=True, fused=False)),
(optim.Adam, dict(weight_decay=0.0, amsgrad=False, fused=False)),
(optim.AdamW, dict(weight_decay=1.0, amsgrad=True)),
(optim.AdamW, dict(weight_decay=1.0, amsgrad=False)),
(optim.AdamW, dict(weight_decay=0.0, amsgrad=True)),
(optim.AdamW, dict(weight_decay=0.0, amsgrad=False)),
(optim.NAdam, dict(weight_decay=0.0, momentum_decay=6e-3)),
(optim.NAdam, dict(weight_decay=1.0, momentum_decay=6e-3)),
(optim.NAdam, dict(weight_decay=0.0, momentum_decay=4e-3)),
(optim.NAdam, dict(weight_decay=0.01, momentum_decay=4e-3)),
(
optim.SGD,
dict(lr=0.2, momentum=1, dampening=0, weight_decay=1, nesterov=True),
),
(
optim.SGD,
dict(lr=0.2, momentum=1, dampening=0.5, weight_decay=1, nesterov=False),
),
(optim.RAdam, dict(weight_decay=0, eps=1e-6)),
(optim.RAdam, dict(weight_decay=0)),
(optim.RAdam, dict(weight_decay=1, eps=1e-6)),
(optim.RAdam, dict(weight_decay=1)),
(optim.RMSprop, dict(weight_decay=1, momentum=1, centered=True)),
(optim.RMSprop, dict(weight_decay=1, momentum=0, centered=True)),
(optim.RMSprop, dict(weight_decay=1, momentum=1, centered=False)),
(optim.RMSprop, dict(weight_decay=0, momentum=1, centered=False)),
(optim.Rprop, dict(lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50))),
(optim.ASGD, dict(weight_decay=0)),
(optim.ASGD, dict(weight_decay=1)),
(optim.Adamax, dict(weight_decay=0)),
(optim.Adamax, dict(weight_decay=1)),
(optim.Adadelta, dict(weight_decay=0)),
(optim.Adadelta, dict(weight_decay=1)),
(optim.Adagrad, dict(weight_decay=0)),
(optim.Adagrad, dict(weight_decay=1)),
]
self._test_derived_optimizers(optimizer_pairs_with_flags, "foreach")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_multi_tensor_optimizers_with_varying_tensors(self):
optimizer_pairs_with_flags = [
(optim.Adam, dict(weight_decay=1.0, amsgrad=True, fused=False)),
(optim.Adam, dict(weight_decay=1.0, amsgrad=False, fused=False)),
(optim.Adam, dict(weight_decay=0.0, amsgrad=True, fused=False)),
(optim.Adam, dict(weight_decay=0.0, amsgrad=False, fused=False)),
(optim.AdamW, dict(weight_decay=1.0, amsgrad=True)),
(optim.AdamW, dict(weight_decay=1.0, amsgrad=False)),
(optim.AdamW, dict(weight_decay=0.0, amsgrad=True)),
(optim.AdamW, dict(weight_decay=0.0, amsgrad=False)),
(optim.NAdam, dict(weight_decay=0.0, momentum_decay=6e-3)),
(optim.NAdam, dict(weight_decay=1.0, momentum_decay=6e-3)),
(optim.NAdam, dict(weight_decay=0.0, momentum_decay=4e-3)),
(optim.NAdam, dict(weight_decay=0.01, momentum_decay=4e-3)),
(
optim.SGD,
dict(lr=0.2, momentum=1, dampening=0, weight_decay=1, nesterov=True),
),
(
optim.SGD,
dict(lr=0.2, momentum=1, dampening=0.5, weight_decay=1, nesterov=False),
),
(optim.RAdam, dict(weight_decay=0, eps=1e-6)),
(optim.RAdam, dict(weight_decay=0)),
(optim.RAdam, dict(weight_decay=1, eps=1e-6)),
(optim.RAdam, dict(weight_decay=1)),
(optim.RMSprop, dict(weight_decay=1, momentum=1, centered=True)),
(optim.RMSprop, dict(weight_decay=1, momentum=0, centered=True)),
(optim.RMSprop, dict(weight_decay=1, momentum=1, centered=False)),
(optim.RMSprop, dict(weight_decay=0, momentum=1, centered=False)),
(optim.Rprop, dict(lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50))),
(optim.ASGD, dict(weight_decay=0)),
(optim.ASGD, dict(weight_decay=1)),
(optim.Adamax, dict(weight_decay=0)),
(optim.Adamax, dict(weight_decay=1)),
(optim.Adadelta, dict(weight_decay=0)),
(optim.Adadelta, dict(weight_decay=1)),
(optim.Adagrad, dict(weight_decay=0)),
(optim.Adagrad, dict(weight_decay=1)),
]
self._test_derived_optimizers_varying_tensors(optimizer_pairs_with_flags, "foreach")
def test_fused_optimizers(self):
optimizer_pairs_with_flags = tuple(itertools.product(
(optim.Adam, optim.AdamW),
(
dict(weight_decay=1., amsgrad=False),
dict(weight_decay=1., amsgrad=True),
dict(weight_decay=0., amsgrad=False),
dict(weight_decay=0., amsgrad=True),
),
))
self._test_derived_optimizers(optimizer_pairs_with_flags, "fused")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_fused_optimizers_with_varying_tensors(self):
optimizer_pairs_with_flags = tuple(itertools.product(
(optim.Adam, optim.AdamW),
(
dict(weight_decay=1., amsgrad=False),
dict(weight_decay=1., amsgrad=True),
dict(weight_decay=0., amsgrad=False),
dict(weight_decay=0., amsgrad=True),
),
))
self._test_derived_optimizers_varying_tensors(optimizer_pairs_with_flags, "fused")
def test_adam(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
[weight, bias],
lr=1e-3,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
[weight, bias],
lr=1e-3,
weight_decay=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
[lambda opt: ExponentialLR(opt, gamma=0.9)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
[lambda opt: LinearLR(opt, start_factor=0.4, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
[lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
[weight, bias],
lr=1e-3,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
[
lambda opt: ConstantLR(opt, factor=0.4, total_iters=4),
lambda opt: ExponentialLR(opt, gamma=0.9),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
[weight, bias],
lr=1e-3,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
[
lambda opt: ExponentialLR(opt, gamma=0.9),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adam(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
[lambda opt: PolynomialLR(opt, total_iters=4, power=0.9)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(optim.Adam)
self._test_complex_2d(functools.partial(optim.Adam, foreach=True))
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 0: 1.0"
):
optim.Adam(None, lr=1e-2, betas=(1.0, 0.0))
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
optim.Adam(None, lr=1e-2, weight_decay=-1)
def test_adamw(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias],
lr=1e-3,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias],
lr=1e-3,
weight_decay=1,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(optim.AdamW)
self._test_complex_2d(functools.partial(optim.AdamW, foreach=True))
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
optim.AdamW(None, lr=1e-2, weight_decay=-1)
def test_sparse_adam(self):
self._test_rosenbrock_sparse(
lambda params: optim.SparseAdam(params, lr=4e-2), [], True
)
self._test_rosenbrock_sparse(
lambda params: optim.SparseAdam(params, lr=4e-2, maximize=True),
[],
True,
True,
)
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 0: 1.0"
):
optim.SparseAdam(None, lr=1e-2, betas=(1.0, 0.0))
with self.assertRaisesRegex(
ValueError, "SparseAdam requires dense parameter tensors"
):
optim.SparseAdam([torch.zeros(3, layout=torch.sparse_coo)])
with self.assertRaisesRegex(
ValueError, "SparseAdam requires dense parameter tensors"
):
optim.SparseAdam([{"params": [torch.zeros(3, layout=torch.sparse_coo)]}])
# ROCm precision is too low to pass this test
def test_adadelta(self):
# Handles https://github.com/pytorch/pytorch/issues/69698
self.rel_tol = 4e-3
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
[weight, bias], maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
self._build_params_dict(weight, bias, rho=0.95),
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
self._build_params_dict(weight, bias, rho=0.95),
maximize=maximize,
foreach=foreach,
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
[weight, bias], weight_decay=1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid rho value: 1.1"):
optim.Adadelta(None, lr=1e-2, rho=1.1)
def test_adadelta_complex(self):
# Handles https://github.com/pytorch/pytorch/issues/69698
self.rel_tol = 2e-2
for optimizer in [optim.Adadelta]:
self._test_complex_optimizer(lambda weight: optimizer([weight]))
self._test_complex_optimizer(lambda weight: optimizer([weight], rho=0.95))
self._test_complex_optimizer(
lambda weight: optimizer([weight], rho=0.95, weight_decay=1)
)
def test_nadam(self):
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
[weight, bias], lr=1e-3, foreach=foreach
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
self._build_params_dict(weight, bias, lr=1e-2), lr=1e-3, foreach=foreach
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
[weight, bias],
lr=1e-3,
weight_decay=0.1,
momentum_decay=6e-3,
foreach=foreach,
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
[weight, bias],
lr=1e-3,
weight_decay=0.1,
momentum_decay=6e-3,
foreach=foreach,
),
[lambda opt: ExponentialLR(opt, gamma=0.9)],
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 0: 1.0"
):
optim.NAdam(None, lr=1e-2, betas=(1.0, 0.0))
with self.assertRaisesRegex(ValueError, "Invalid momentum_decay value: -0.2"):
optim.NAdam(None, lr=1e-2, momentum_decay=-0.2)
def test_adagrad(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
[weight, bias], lr=1e-1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
[weight, bias],
lr=1e-1,
initial_accumulator_value=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
[lambda opt: ReduceLROnPlateau(opt)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
[
lambda opt: ReduceLROnPlateau(opt),
lambda opt: ExponentialLR(opt, gamma=0.99),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid lr_decay value: -0.5"):
optim.Adagrad(None, lr=1e-2, lr_decay=-0.5)
def test_adagrad_sparse(self):
for foreach in (False, True):
self._test_rosenbrock_sparse(
lambda params: optim.Adagrad(params, lr=1e-1, foreach=foreach)
)
self._test_rosenbrock_sparse(
lambda params: optim.Adagrad(params, lr=0.1, foreach=foreach),
[
lambda opt: StepLR(opt, gamma=1 - 1e-5, step_size=500),
lambda opt: ReduceLROnPlateau(opt, threshold=1e-4),
],
)
def test_adagrad_complex(self):
for foreach in (False, True):
self._test_complex_optimizer(
lambda param: optim.Adagrad([param], lr=1e-1, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.Adagrad(
[param],
lr=1e-1,
initial_accumulator_value=0.1,
foreach=foreach,
)
)
def test_adamax(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
[weight, bias], lr=1e-1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
[weight, bias],
lr=1e-1,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(optim.Adamax)
self._test_complex_2d(functools.partial(optim.Adamax, foreach=True))
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 1: 1.0"
):
optim.Adamax(None, lr=1e-2, betas=(0.0, 1.0))
def test_radam(self):
self._test_basic_cases(
lambda weight, bias, foreach: optim.RAdam(
[weight, bias], lr=1e-3, foreach=foreach
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.RAdam(
self._build_params_dict(weight, bias, lr=1e-2), lr=1e-3, foreach=foreach
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.RAdam(
[weight, bias], lr=1e-3, weight_decay=0.1, foreach=foreach
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.RAdam(
[weight, bias], lr=1e-3, foreach=foreach
),
[
lambda opt: ExponentialLR(opt, gamma=0.9),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 0: 1.0"
):
optim.RAdam(None, lr=1e-2, betas=(1.0, 0.0))
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
optim.RAdam(None, lr=1e-2, weight_decay=-1)
def test_rmsprop(self):
for foreach in (False, True):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
[weight, bias], lr=1e-2, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
centered=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
centered=True,
momentum=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
momentum=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
momentum=0.1,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(lambda param: optim.RMSprop(param, foreach=foreach))
self._test_complex_2d(
lambda param: optim.RMSprop(param, centered=True, foreach=foreach)
)
self._test_complex_2d(
lambda param: optim.RMSprop(param, momentum=0.1, foreach=foreach)
)
self._test_complex_2d(
lambda param: optim.RMSprop(param, maximize=True, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], centered=True, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], momentum=0.1, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], maximize=True, foreach=foreach)
)
with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
optim.RMSprop(None, lr=1e-2, momentum=-1.0, foreach=foreach)
def test_asgd(self):
for foreach in (False, True):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.ASGD(
[weight, bias], lr=1e-3, t0=100, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.ASGD(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
t0=100,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.ASGD(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
# Ref: https://github.com/pytorch/pytorch/issues/84560
# self._test_complex_2d(optimizer)
self._test_complex_optimizer(
lambda params: optim.ASGD([params], foreach=foreach)
)
self._test_complex_optimizer(
lambda params: optim.ASGD([params], maximize=True, foreach=foreach)
)
self._test_complex_optimizer(
lambda params: optim.ASGD(
[params], maximize=True, weight_decay=0.9, foreach=foreach
)
)
self._test_complex_optimizer(
lambda params: optim.ASGD(
[params], maximize=False, weight_decay=0.9, foreach=foreach
)
)
self._test_complex_optimizer(
lambda params: optim.ASGD([params], weight_decay=0.9, foreach=foreach)
)
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -0.5"):
optim.ASGD(None, lr=1e-2, weight_decay=-0.5, foreach=foreach)
@skipIfRocm
def test_rprop(self):
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(
0
) == (8, 6)
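# SM 8.6 GPUs produce slightly different numerics here, so looser tolerances
# are passed below when running on such a device.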
for foreach in (False, True):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Rprop(
[weight, bias], lr=2e-4, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Rprop(
self._build_params_dict(weight, bias, lr=1e-2),
lr=2e-4,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
atol=4e-5 if is_cuda_sm86 else None,
rtol=3e-5 if is_cuda_sm86 else None,
)
self._test_complex_2d(lambda param: optim.Rprop(param, foreach=foreach))
self._test_complex_optimizer(
lambda param: optim.Rprop([param], lr=0.001, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.Rprop(
[param], lr=0.001, maximize=True, foreach=foreach
)
)
with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
optim.Rprop(None, lr=1e-2, etas=(1.0, 0.5), foreach=foreach)
def test_lbfgs(self):
self._test_basic_cases(
lambda weight, bias: optim.LBFGS([weight, bias]), ignore_multidevice=True
)
self._test_basic_cases(
lambda weight, bias: optim.LBFGS(
[weight, bias], line_search_fn="strong_wolfe"
),
ignore_multidevice=True,
)
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_lbfgs_return_type(self):
params = [torch.randn(10, 5), torch.randn(10)]
opt1 = optim.LBFGS(params, 0.01, tolerance_grad=math.inf)
opt2 = optim.LBFGS(params, 0.01, tolerance_grad=-math.inf)
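# tolerance_grad=inf makes step() converge immediately, while -inf forces the
# full inner loop; both paths should return a result of the same type.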
def closure():
return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
def test_invalid_param_type(self):
with self.assertRaises(TypeError):
optim.SGD(Parameter(torch.randn(5, 5)), lr=3)
def test_duplicate_params_in_param_group(self):
param = Parameter(torch.randn(5, 5))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
optim.SGD([param, param], lr=0.1)
self.assertEqual(len(w), 1)
self.assertIn(
"a parameter group with duplicate parameters", str(w[0].message)
)
def test_no_grad_for_all_params(self):
params = [torch.randn(5, 5, requires_grad=False) for _ in range(2)]
optimizer_list = [
optim.Adadelta,
optim.AdamW,
optim.Adam,
optim.Adagrad,
optim.Adamax,
optim.RMSprop,
optim.SGD,
optim.SparseAdam,
optim.ASGD,
]
for optim_ctr in optimizer_list:
opt = optim_ctr(params, lr=0.1)
# make sure step can still run even if
# all params have no grad
opt.step()
# make sure that `state_steps` is not incremented when `found_inf` is set.
def test_functional_fused_optimizer_with_foundinf(self):
if not torch.cuda.is_available():
self.skipTest("CUDA is required.")
from torch.optim import adam, adamw
num_tensors = 5
for functional_optim, amsgrad in itertools.product((adam.adam, adamw.adamw), (False, True)):
params, grads, exp_avgs, exp_avg_sqs = [[torch.ones((1,), device="cuda") for _ in range(num_tensors)] for _ in range(4)]
max_exp_avg_sqs = [torch.ones((1,), device="cuda") for _ in range(num_tensors)] if amsgrad else []
state_steps = [torch.ones((1,), dtype=torch.float32, device="cuda") for _ in range(num_tensors)]
grad_scale = torch.ones((1,), dtype=torch.float32, device="cuda")
found_inf = torch.ones((1,), dtype=torch.float32, device="cuda")
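# found_inf is nonzero, which tells the fused kernel that inf/nan gradients
# were detected; state_steps should be left unchanged, as asserted below.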
functional_optim(
params,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
foreach=False,
capturable=False,
fused=True,
amsgrad=amsgrad,
beta1=0.9,
beta2=0.99,
lr=1e-2,
weight_decay=0.0,
eps=1e-8,
maximize=False,
grad_scale=grad_scale,
found_inf=found_inf,
)
self.assertEqual(
state_steps,
[
torch.ones((1,), dtype=torch.float32, device="cuda")
for _ in range(num_tensors)
],
)
def test_empty_grad(self):
optimizers = [
torch.optim.Adadelta,
torch.optim.Adagrad,
torch.optim.Adam,
torch.optim.AdamW,
torch.optim.Adamax,
torch.optim.ASGD,
torch.optim.NAdam,
torch.optim.RAdam,
torch.optim.RMSprop,
torch.optim.Rprop,
torch.optim.SGD,
torch.optim.SparseAdam,
]
for optimizer in optimizers:
net = torch.nn.Embedding(
5, 1, padding_idx=0, sparse=optimizer is torch.optim.SparseAdam
)
original_params = (param.detach().clone() for param in net.parameters())
# Simulate a batch that only indexes the embedding at padding_idx
x = torch.tensor([[0, 0]]).int()
y = torch.tensor([[[3.0], [4.0]]])
opt = optimizer(net.parameters(), lr=1e-5)
torch.nn.MSELoss()(net.forward(x), y).backward()
opt.step()
for original_param, param in zip(original_params, net.parameters()):
# assert that the parameters have not changed
self.assertEqual(original_param, param)
@skipIfTorchDynamo()
def test_post_hook(self):
def post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.Tensor([1, 1])]
opt = SGD(params, lr=0.001)
data = 2
hook_handle = opt.register_step_post_hook(post_hook)
opt.step()
opt.step()
# check if post hooks were registered
self.assertEqual(data, 6)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
opt.step()
self.assertEqual(data, 6)
@skipIfTorchDynamo()
def test_pre_hook(self):
def pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.Tensor([1, 1])]
opt = SGD(params, lr=0.001)
data = 5
hook_handle = opt.register_step_pre_hook(pre_hook)
opt.step()
opt.step()
# check if pre hooks were registered
self.assertEqual(data, 9)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
opt.step()
self.assertEqual(data, 9)
@skipIfTorchDynamo()
def test_pre_and_post_hook(self):
def global_pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(0)
def global_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(5)
def local_pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(1)
def local_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(2)
params = [torch.Tensor([1, 1])]
opt1 = SGD(params, lr=0.001)
opt2 = Adam(params, lr=0.01)
data = []
# register global hooks to both optimizers
global_pre_handle = register_optimizer_step_pre_hook(global_pre_hook)
global_post_handle = register_optimizer_step_post_hook(global_post_hook)
# register local hooks
first_pre_handle = opt1.register_step_pre_hook(local_pre_hook)
first_post_handle = opt1.register_step_post_hook(local_post_hook)
second_pre_handle = opt2.register_step_pre_hook(local_pre_hook)
second_post_handle = opt2.register_step_post_hook(local_post_hook)
opt1.step()
self.assertListEqual(data, [0, 1, 2, 5])
opt2.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5])
opt1.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
# remove all hooks
global_pre_handle.remove()
global_post_handle.remove()
first_pre_handle.remove()
first_post_handle.remove()
second_pre_handle.remove()
second_post_handle.remove()
opt1.step()
opt2.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
def test_fused_optimizer_raises(self):
if not torch.cuda.is_available():
self.skipTest("Requires CUDA devices")
for optimizer_ctor in (torch.optim.Adam, torch.optim.AdamW):
with self.assertRaisesRegex(RuntimeError, "`fused` and `foreach` cannot be `True` together."):
optimizer_ctor([torch.empty((), device="cuda")], foreach=True, fused=True)
with self.assertRaisesRegex(RuntimeError, "`fused` does not support `differentiable`"):
optimizer_ctor([torch.empty((), device="cuda")], differentiable=True, fused=True)
class SchedulerTestNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
class LambdaLRTestObject:
def __init__(self, value):
self.value = value
def __call__(self, epoch):
return self.value * epoch
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
class TestLRScheduler(TestCase):
exact_dtype = True
def setUp(self):
super().setUp()
self.net = SchedulerTestNet()
self.opt = SGD(
[
{"params": self.net.conv1.parameters()},
{"params": self.net.conv2.parameters(), "lr": 0.5},
],
lr=0.05,
)
def _check_warning_is_epoch_deprecation_warning(self, w, *, num_warnings: int = 1):
"""This function swallows the epoch deprecation warning which is produced when we
call `scheduler.step(epoch)` with a non-`None` value of `epoch`.
This usage is deprecated, and this function will need to be removed/updated when
the schedulers no longer accept the parameter at all.
"""
self.assertEqual(len(w), num_warnings)
for warning in w:
self.assertEqual(len(warning.message.args), 1)
self.assertEqual(warning.message.args[0], EPOCH_DEPRECATION_WARNING)
def test_error_when_getlr_has_epoch(self):
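# Defining get_lr() with an extra positional argument should cause the
# scheduler base class to raise a TypeError at construction time.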
class MultiStepLR(torch.optim.lr_scheduler.LRScheduler):
def __init__(self, optimizer, gamma, milestones, last_epoch=-1):
self.init_lr = [group["lr"] for group in optimizer.param_groups]
self.gamma = gamma
self.milestones = milestones
super().__init__(optimizer, last_epoch)
def get_lr(self, step):
global_step = self.last_epoch
gamma_power = (
[0]
+ [i + 1 for i, m in enumerate(self.milestones) if global_step >= m]
)[-1]
return [
init_lr * (self.gamma**gamma_power) for init_lr in self.init_lr
]
optimizer = torch.optim.SGD([torch.rand(1)], lr=1)
with self.assertRaises(TypeError):
scheduler = MultiStepLR(optimizer, gamma=1, milestones=[10, 20])
@skipIfTorchDynamo("Torchdynamo keeps references to optim in the guards and the stack of the graph break frames")
def test_no_cyclic_references(self):
import gc
param = Parameter(torch.empty(10))
optim = SGD([param], lr=0.5)
scheduler = LambdaLR(optim, lambda epoch: 1.0)
del scheduler
self.assertTrue(
len(gc.get_referrers(optim)) == 0,
"Optimizer should contain no cyclic references",
)
gc.collect()
del optim
self.assertEqual(
gc.collect(), 0, msg="Optimizer should be garbage-collected on __del__"
)
@skipIfTorchDynamo("Torchdynamo keeps references to optim in the guards and the stack of the graph break frames")
def test_no_cyclic_references_in_step(self):
import gc
import weakref
def run():
param = torch.empty(10, requires_grad=True)
optim = SGD(params=[param], lr=0.5)
scheduler = LambdaLR(optim, lambda epoch: 1.0)
param.sum().backward()
optim.step()
scheduler.step()
return weakref.ref(scheduler)
# To verify that the scheduler creates no reference cycles, we turn off the
# garbage collector; otherwise gc would automatically collect unreachable
# objects and mask any cycle.
gc.disable()
ref = run()
assert ref() is None
gc.enable() # restore
def test_old_pattern_warning(self):
epochs = 35
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self.assertTrue(len(ws) == 0, "No warning should be raised")
def old_pattern():
for _ in range(epochs):
scheduler.step()
self.opt.step()
self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern)
def test_old_pattern_warning_with_arg(self):
epochs = 35
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self.assertTrue(len(ws) == 0, "No warning should be raised")
def old_pattern2():
for _ in range(epochs):
scheduler.step()
self.opt.step()
self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
def test_old_pattern_warning_resuming(self):
epochs = 35
for i, group in enumerate(self.opt.param_groups):
group["initial_lr"] = 0.01
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
self.assertTrue(len(ws) == 0, "No warning should be raised")
def old_pattern():
for _ in range(epochs):
scheduler.step()
self.opt.step()
self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern)
def test_old_pattern_warning_resuming_with_arg(self):
epochs = 35
for i, group in enumerate(self.opt.param_groups):
group["initial_lr"] = 0.01
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
self.assertTrue(len(ws) == 0, "No warning should be raised")
def old_pattern2():
for _ in range(epochs):
scheduler.step()
self.opt.step()
self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
def test_old_pattern_warning_with_overridden_optim_step(self):
epochs = 35
for i, group in enumerate(self.opt.param_groups):
group["initial_lr"] = 0.01
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
self.assertTrue(len(ws) == 0, "No warning should be raised")
# emulate use-case with optimizer.step overridden
import types
old_step = self.opt.step
def new_step(o, *args, **kwargs):
retval = old_step(*args, **kwargs)
return retval
self.opt.step = types.MethodType(new_step, self.opt)
def old_pattern2():
for _ in range(epochs):
scheduler.step()
self.opt.step()
self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
def test_new_pattern_no_warning(self):
epochs = 35
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self.assertTrue(len(ws) == 0, "No warning should be raised")
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
for _ in range(epochs):
self.opt.step()
scheduler.step()
self.assertTrue(len(ws) == 0, "No warning should be raised")
def test_new_pattern_no_warning_with_arg(self):
epochs = 35
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self.assertTrue(len(ws) == 0, "No warning should be raised")
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
for _ in range(epochs):
self.opt.step()
scheduler.step()
self.assertTrue(len(ws) == 0, "No warning should be raised")
def test_new_pattern_no_warning_with_overridden_optim_step(self):
epochs = 35
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self.assertTrue(len(ws) == 0, "No warning should be raised")
# emulate use-case with optimizer.step overridden
import types
old_step = self.opt.step
def new_step(o, *args, **kwargs):
retval = old_step(*args, **kwargs)
return retval
self.opt.step = types.MethodType(new_step, self.opt)
def new_pattern():
for e in range(epochs):
self.opt.step()
scheduler.step()
self.assertWarnsRegex(
UserWarning, r"`optimizer.step\(\)` has been overridden", new_pattern
)
def _test_lr_is_constant_for_constant_epoch(self, scheduler):
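# Stepping repeatedly with the same epoch value must leave the LR unchanged.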
l = []
for _ in range(10):
scheduler.optimizer.step()
with warnings.catch_warnings(record=True) as w:
scheduler.step(2)
self._check_warning_is_epoch_deprecation_warning(w)
l.append(self.opt.param_groups[0]["lr"])
self.assertEqual(min(l), max(l))
def test_step_lr_is_constant_for_constant_epoch(self):
scheduler = StepLR(self.opt, 2)
self._test_lr_is_constant_for_constant_epoch(scheduler)
def test_exponential_lr_is_constant_for_constant_epoch(self):
scheduler = ExponentialLR(self.opt, gamma=0.9)
self._test_lr_is_constant_for_constant_epoch(scheduler)
def test_constantlr_is_constant_for_constant_epoch(self):
scheduler = ConstantLR(self.opt)
self._test_lr_is_constant_for_constant_epoch(scheduler)
def test_linear_linearlr_is_constant_for_constant_epoch(self):
scheduler = LinearLR(self.opt)
self._test_lr_is_constant_for_constant_epoch(scheduler)
def test_polynomial_lr_is_constant_for_constant_epoch(self):
scheduler = PolynomialLR(self.opt, power=0.9)
self._test_lr_is_constant_for_constant_epoch(scheduler)
def test_step_lr(self):
# lr = 0.05 if epoch < 3
# lr = 0.005 if 3 <= epoch < 6
# lr = 0.0005 if 6 <= epoch < 9
# lr = 0.00005 if epoch >= 9
epochs = 10
single_targets = [0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self._test(scheduler, targets, epochs)
def test_get_last_lr_step_lr(self):
from torch.nn import Parameter
epochs = 10
optimizer = torch.optim.SGD(
[Parameter(torch.randn(2, 2, requires_grad=True))], 0.1
)
targets = [[0.1] * 3 + [0.01] * 3 + [0.001] * 3 + [0.0001]]
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.1)
self._test_get_last_lr(scheduler, targets, epochs)
def test_get_last_lr_multi_step_lr(self):
# lr = 0.05 if epoch < 2
# lr = 0.005 if 2 <= epoch < 5
# lr = 0.0005 if 5 <= epoch < 9
# lr = 0.00005 if 9 <= epoch
epochs = 10
single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 1
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
self._test_get_last_lr(scheduler, targets, epochs)
def test_multi_step_lr(self):
# lr = 0.05 if epoch < 2
# lr = 0.005 if 2 <= epoch < 5
# lr = 0.0005 if 5 <= epoch < 9
# lr = 0.00005 if epoch >= 9
epochs = 10
single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
self._test(scheduler, targets, epochs)
def test_multi_step_lr_with_epoch(self):
# lr = 0.05 if epoch < 2
# lr = 0.005 if 2 <= epoch < 5
# lr = 0.0005 if 5 <= epoch < 9
# lr = 0.00005 if epoch >= 9
epochs = 10
single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
self._test_with_epoch(scheduler, targets, epochs)
def test_get_last_lr_constantlr(self):
# lr = 0.025 if epoch < 5
# lr = 0.05 if 5 <= epoch
epochs = 10
single_targets = [0.025] * 5 + [0.05] * 5
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
self._test_get_last_lr(scheduler, targets, epochs)
def test_get_last_lr_linearlr(self):
# lr = 0.0125 if epoch == 0
# lr = 0.016875 if epoch == 1
# lr = 0.02125 if epoch == 2
# lr = 0.025625 if epoch == 3
# lr = 0.03 if 4 <= epoch
epochs = 10
start_factor = 1.0 / 4
end_factor = 3.0 / 5
iters = 4
interpolation = [
start_factor + i * (end_factor - start_factor) / iters for i in range(iters)
]
single_targets = [x * 0.05 for x in interpolation] + [0.05 * end_factor] * (
epochs - iters
)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = LinearLR(
self.opt,
start_factor=start_factor,
end_factor=end_factor,
total_iters=iters,
)
self._test_get_last_lr(scheduler, targets, epochs)
def test_constantlr(self):
# lr = 0.025 if epoch < 5
# lr = 0.05 if 5 <= epoch
epochs = 10
single_targets = [0.025] * 5 + [0.05] * 5
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
self._test(scheduler, targets, epochs)
def test_linearlr(self):
# lr = 0.025 if epoch == 0
# lr = 0.03125 if epoch == 1
# lr = 0.0375 if epoch == 2
# lr = 0.04375 if epoch == 3
# lr = 0.05 if 4 <= epoch
epochs = 10
start_factor = 1.0 / 2
iters = 4
interpolation = [
start_factor + i * (1 - start_factor) / iters for i in range(iters)
]
single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
self._test(scheduler, targets, epochs)
def test_linearlr_start_factor_limits1(self):
start_factor = 0.0
iters = 4
with self.assertRaises(ValueError):
LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
def test_linearlr_start_factor_limits2(self):
start_factor = 1.1
iters = 4
with self.assertRaises(ValueError):
LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
def test_constantlr_with_epoch(self):
# lr = 0.025 if epoch < 5
# lr = 0.05 if 5 <= epoch
epochs = 10
single_targets = [0.025] * 5 + [0.05] * 5
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
self._test_with_epoch(scheduler, targets, epochs)
def test_linearlr_with_epoch(self):
# lr = 0.025 if epoch == 0
# lr = 0.03125 if epoch == 1
# lr = 0.0375 if epoch == 2
# lr = 0.04375 if epoch == 3
# lr = 0.05 if 4 <= epoch
epochs = 10
start_factor = 1.0 / 2
end_factor = 1.0
iters = 4
interpolation = [
start_factor + i * (end_factor - start_factor) / iters for i in range(iters)
]
single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
self._test_with_epoch(scheduler, targets, epochs)
def test_exp_lr(self):
epochs = 10
single_targets = [0.05 * (0.9**x) for x in range(epochs)]
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = ExponentialLR(self.opt, gamma=0.9)
self._test(scheduler, targets, epochs)
def test_poly_lr(self):
epochs = 10
power = 0.9
total_iters = 5
single_targets = [
(1.0 - x / total_iters) ** power * 0.05 for x in range(total_iters)
] + [0.0] * (epochs - total_iters)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = PolynomialLR(self.opt, power=power, total_iters=total_iters)
self._test(scheduler, targets, epochs)
def test_cos_anneal_lr(self):
epochs = 10
eta_min = 1e-10
single_targets = [
eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
for x in range(epochs)
]
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
self._test(scheduler, targets, epochs)
def test_closed_form_step_lr(self):
scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
closed_form_scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_linearlr(self):
scheduler = LinearLR(
self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4
)
closed_form_scheduler = LinearLR(
self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4
)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_constantlr(self):
scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4)
closed_form_scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_multi_step_lr(self):
scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
closed_form_scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_exp_lr(self):
scheduler = ExponentialLR(self.opt, gamma=0.9)
closed_form_scheduler = ExponentialLR(self.opt, gamma=0.9)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_poly_lr(self):
scheduler = PolynomialLR(self.opt, power=0.9)
closed_form_scheduler = PolynomialLR(self.opt, power=0.9)
self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
def test_closed_form_cos_anneal_lr(self):
eta_min = 1e-10
epochs = 20
T_max = 5
scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min)
closed_form_scheduler = CosineAnnealingLR(
self.opt, T_max=T_max, eta_min=eta_min
)
self._test_against_closed_form(scheduler, closed_form_scheduler, epochs)
def test_cos_anneal_lr_continue(self):
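# Constructing a new scheduler with last_epoch=0 should resume with the same
# LRs as the original scheduler after its first step.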
eta_min = 0.1
T_max = 5
scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min)
self.opt.step()
scheduler.step()
original_lrs = scheduler._last_lr
new_scheduler = CosineAnnealingLR(
self.opt, T_max=T_max, eta_min=eta_min, last_epoch=0
)
new_lrs = new_scheduler._last_lr
torch.testing.assert_close(original_lrs, new_lrs, rtol=1e-4, atol=1e-5)
def test_reduce_lr_on_plateau1(self):
epochs = 10
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * 20]
metrics = [10 - i * 0.0167 for i in range(20)]
scheduler = ReduceLROnPlateau(
self.opt,
threshold_mode="abs",
mode="min",
threshold=0.01,
patience=5,
cooldown=5,
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_reduce_lr_on_plateau2(self):
epochs = 22
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2]
metrics = [10 - i * 0.0165 for i in range(22)]
scheduler = ReduceLROnPlateau(
self.opt,
patience=5,
cooldown=0,
threshold_mode="abs",
mode="min",
threshold=0.1,
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_reduce_lr_on_plateau3(self):
epochs = 22
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4]
metrics = [-0.8] * 2 + [-0.234] * 20
scheduler = ReduceLROnPlateau(
self.opt, mode="max", patience=5, cooldown=5, threshold_mode="abs"
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_reduce_lr_on_plateau4(self):
epochs = 20
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * 20]
metrics = [1.5 * (1.025**i) for i in range(20)] # 1.025 > 1.1**0.25
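        # 1.025**4 ≈ 1.104 exceeds 1 + threshold, so a >10% relative improvement
        # arrives before patience (3 bad epochs) is exhausted and the lr never drops.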
scheduler = ReduceLROnPlateau(
self.opt, mode="max", patience=3, threshold_mode="rel", threshold=0.1
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_reduce_lr_on_plateau5(self):
epochs = 20
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4]
metrics = [1.5 * (1.005**i) for i in range(20)]
scheduler = ReduceLROnPlateau(
self.opt,
mode="max",
threshold_mode="rel",
threshold=0.1,
patience=5,
cooldown=5,
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_reduce_lr_on_plateau6(self):
epochs = 20
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * 20]
metrics = [1.5 * (0.85**i) for i in range(20)]
scheduler = ReduceLROnPlateau(
self.opt, mode="min", threshold_mode="rel", threshold=0.1
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_reduce_lr_on_plateau7(self):
epochs = 20
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4]
metrics = [1] * 7 + [0.6] + [0.5] * 12
scheduler = ReduceLROnPlateau(
self.opt,
mode="min",
threshold_mode="rel",
threshold=0.1,
patience=5,
cooldown=5,
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_reduce_lr_on_plateau8(self):
epochs = 20
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
targets = [[0.5] * 6 + [0.4] * 14, [0.5] * 6 + [0.3] * 14]
metrics = [1.5 * (1.005**i) for i in range(20)]
scheduler = ReduceLROnPlateau(
self.opt,
mode="max",
threshold_mode="rel",
min_lr=[0.4, 0.3],
threshold=0.1,
patience=5,
cooldown=5,
)
self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
def test_sequentiallr1(self):
epochs = 19
schedulers = [None] * 2
targets = [
[0.05, 0.04, 0.032]
+ [0.05 for x in range(4)]
+ [0.05 * 0.1 for x in range(4)]
+ [0.05 * 0.01 for x in range(4)]
+ [0.05 * 0.001 for x in range(4)]
]
milestones = [3]
schedulers[0] = ExponentialLR(self.opt, gamma=0.8)
schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=4)
scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
self._test(scheduler, targets, epochs)
def test_sequentiallr2(self):
epochs = 13
schedulers = [None] * 2
targets = [[0.005, 0.005, 0.005] + [0.05 * 0.9**x for x in range(10)]]
milestones = [3]
schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3)
schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
self._test(scheduler, targets, epochs)
def test_sequentiallr3(self):
epochs = 12
schedulers = [None] * 3
targets = [
[0.005, 0.005, 0.005]
+ [0.05, 0.04, 0.032]
+ [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005]
]
milestones = [3, 6]
schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3)
schedulers[1] = ExponentialLR(self.opt, gamma=0.8)
schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=2)
scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
self._test(scheduler, targets, epochs)
def test_sequentiallr4(self):
optimizer = torch.optim.SGD([torch.tensor(0.5)], lr=0.1)
prev_lr = optimizer.param_groups[0]["lr"]
schedulers = [
torch.optim.lr_scheduler.ConstantLR(optimizer, factor=1),
torch.optim.lr_scheduler.ConstantLR(optimizer, factor=0.1),
]
scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer, schedulers, milestones=[10]
)
new_lr = optimizer.param_groups[0]["lr"]
        # Ensure that multiple schedulers do not affect the initial learning rate
self.assertEqual(prev_lr, new_lr)
def test_get_last_lr_sequentiallr(self):
epochs = 12
milestones = [3, 6]
schedulers = [None] * 3
schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3)
schedulers[1] = ExponentialLR(self.opt, gamma=0.8)
schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=2)
scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
constant_lr_target = [0.005] * 3
exponential_lr_target = [0.05, 0.04, 0.032]
step_lr_target = [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005]
single_targets = constant_lr_target + exponential_lr_target + step_lr_target
targets = [single_targets, [x * 10 for x in single_targets]]
self._test_get_last_lr(scheduler, targets, epochs)
def test_chained_lr2_get_last_lr_before_step(self):
schedulers = [
LinearLR(self.opt, start_factor=0.4, total_iters=3),
MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1),
]
scheduler = ChainedScheduler(schedulers)
self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
def test_chained_lr1(self):
epochs = 10
schedulers = [None] * 1
targets = [[0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3]
schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
scheduler = ChainedScheduler(schedulers)
self._test([scheduler], targets, epochs)
self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
def test_chained_lr2(self):
epochs = 10
schedulers = [None] * 1
targets = [[0.02, 0.03, 0.04] + [0.05] * 9]
schedulers[0] = LinearLR(self.opt, start_factor=0.4, total_iters=3)
scheduler = ChainedScheduler(schedulers)
self._test([scheduler], targets, epochs)
self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
def test_chained_lr3(self):
epochs = 10
schedulers = [None] * 2
targets = [
[0.02, 0.03, 0.04, 0.05] + [0.005] * 4 + [0.0005] * 3 + [0.00005] * 3
]
schedulers[0] = LinearLR(self.opt, start_factor=0.4, total_iters=3)
schedulers[1] = MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1)
scheduler = ChainedScheduler(schedulers)
self._test([scheduler], targets, epochs)
self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
def test_chained_lr4(self):
epochs = 9
schedulers = [None] * 3
targets = [
[0.05 * 0.2 * 0.9**x for x in range(3)]
+ [0.05 * 0.2 * 0.9**3 * 0.1]
+ [0.05 * 0.9**x * 0.1 for x in range(4, 6)]
+ [0.05 * 0.9**x * 0.01 for x in range(6, 9)]
]
schedulers[0] = ExponentialLR(self.opt, gamma=0.9)
schedulers[1] = ConstantLR(self.opt, factor=0.2, total_iters=4)
schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=3)
scheduler = ChainedScheduler(schedulers)
self._test([scheduler], targets, epochs)
self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
def test_chained_lr5(self):
def poly_lr(lr: float):
return [
(lr * ((1.0 - x / total_iters) ** power)) for x in range(total_iters)
] + [0.0] * (epochs - total_iters)
schedulers = [None] * 2
epochs = 10
power = 0.9
total_iters = 5
const_factor = 0.1
single_targets = [x * const_factor for x in poly_lr(lr=0.05)]
targets = [single_targets, [x * const_factor for x in poly_lr(0.5)]]
schedulers[0] = PolynomialLR(self.opt, power=power, total_iters=total_iters)
schedulers[1] = ConstantLR(self.opt, factor=const_factor)
scheduler = ChainedScheduler(schedulers)
self._test(scheduler, targets, epochs)
self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
def test_compound_step_and_multistep_lr(self):
epochs = 10
schedulers = [None] * 2
schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
targets = [[0.05] * 2 + [0.005] * 1 + [5e-4] * 2 + [5e-5] + [5e-6] * 3 + [5e-8]]
self._test(schedulers, targets, epochs)
def test_compound_step_and_exp_lr(self):
epochs = 10
schedulers = [None] * 2
single_targets = [0.05 * (0.9**x) for x in range(3)]
single_targets += [0.005 * (0.9**x) for x in range(3, 6)]
single_targets += [0.0005 * (0.9**x) for x in range(6, 9)]
single_targets += [0.00005 * (0.9**x) for x in range(9, 12)]
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
self._test(schedulers, targets, epochs)
def test_compound_exp_and_multistep_lr(self):
epochs = 10
schedulers = [None] * 2
single_targets = [0.05 * (0.9**x) for x in range(2)]
single_targets += [0.005 * (0.9**x) for x in range(2, 5)]
single_targets += [0.0005 * (0.9**x) for x in range(5, 9)]
single_targets += [0.00005 * (0.9**x) for x in range(9, 11)]
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers[0] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
self._test(schedulers, targets, epochs)
def test_compound_exp_and_linearlr(self):
epochs = 10
iters = 4
start_factor = 0.4
end_factor = 0.9
schedulers = [None] * 2
single_targets = [0.05 * (0.9**x) for x in range(11)]
for i in range(iters):
single_targets[i] *= start_factor + i / iters * (end_factor - start_factor)
for i in range(iters, 11):
single_targets[i] *= end_factor
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers[0] = LinearLR(
self.opt,
start_factor=start_factor,
end_factor=end_factor,
total_iters=iters,
)
schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
self._test(schedulers, targets, epochs)
def test_compound_step_and_constantlr(self):
epochs = 10
iters = 4
factor = 0.4
schedulers = [None] * 2
single_targets = (
[0.05 * 0.4] * 3
+ [0.005 * 0.4]
+ [0.005] * 2
+ [0.0005] * 3
+ [0.00005] * 3
)
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
schedulers[1] = ConstantLR(self.opt, factor=0.4, total_iters=4)
self._test(schedulers, targets, epochs)
def test_compound_linearlr_and_multistep_lr(self):
epochs = 10
iters = 4
start_factor = 0.4
schedulers = [None] * 2
single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 2
for i in range(iters):
single_targets[i] *= start_factor + i / iters * (1 - start_factor)
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers[0] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
schedulers[1] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
self._test(schedulers, targets, epochs)
def test_compound_cosanneal_and_step_lr(self):
epochs = 10
eta_min = 1e-10
single_targets = [
eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
for x in range(epochs)
]
single_targets = [x * 0.1 ** (i // 3) for i, x in enumerate(single_targets)]
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers = [None] * 2
schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=3)
self._test(schedulers, targets, epochs)
def test_compound_cosanneal_and_multistep_lr(self):
epochs = 10
eta_min = 1e-10
single_targets = [
eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
for x in range(epochs)
]
multipliers = [1] * 2 + [0.1] * 3 + [0.01] * 4 + [0.001]
single_targets = [x * y for x, y in zip(single_targets, multipliers)]
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers = [None] * 2
schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
self._test(schedulers, targets, epochs)
def test_compound_cosanneal_and_linearlr(self):
epochs = 10
iters = 4
start_factor = 0.4
eta_min = 1e-10
schedulers = [None] * 2
single_targets = [
eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
for x in range(epochs)
]
for i in range(iters):
single_targets[i] *= start_factor + i / iters * (1 - start_factor)
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers[0] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
schedulers[1] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
self._test(schedulers, targets, epochs)
def test_compound_cosanneal_and_exp_lr(self):
epochs = 10
eta_min = 1e-10
single_targets = [
eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
for x in range(epochs)
]
multipliers = [0.1**i for i in range(epochs)]
single_targets = [x * y for x, y in zip(single_targets, multipliers)]
targets = [single_targets, [x * epochs for x in single_targets]]
schedulers = [None] * 2
schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
schedulers[1] = ExponentialLR(self.opt, gamma=0.1)
self._test(schedulers, targets, epochs)
def test_compound_reduce_lr_on_plateau1(self):
epochs = 10
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
single_targets = [0.5] * 20
multipliers = [0.1 ** (i // 3) for i in range(20)]
single_targets = [x * y for x, y in zip(multipliers, single_targets)]
targets = [single_targets]
targets = targets[1:] # test runs step before checking lr
metrics = [10 - i * 0.0167 for i in range(20)]
schedulers = [None, None]
schedulers[0] = ReduceLROnPlateau(
self.opt,
threshold_mode="abs",
mode="min",
threshold=0.01,
patience=5,
cooldown=5,
)
schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=3)
self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_compound_reduce_lr_on_plateau2(self):
epochs = 22
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
multipliers = [1] * 3 + [0.1] * 5 + [0.01] * 4 + [0.001] * 10
single_targets = [x * y for x, y in zip(single_targets, multipliers)]
targets = [single_targets]
targets = targets[1:] # test runs step before checking lr
metrics = [10 - i * 0.0165 for i in range(22)]
schedulers = [None] * 2
schedulers[0] = ReduceLROnPlateau(
self.opt,
patience=5,
cooldown=0,
threshold_mode="abs",
mode="min",
threshold=0.1,
)
schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[3, 8, 12])
self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_compound_reduce_lr_on_plateau3(self):
epochs = 22
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
single_targets = [0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4
multipliers = [0.1**i for i in range(epochs)]
single_targets = [x * y for x, y in zip(multipliers, single_targets)]
targets = [single_targets]
targets = targets[1:] # test runs step before checking lr
metrics = [-0.8] * 2 + [-0.234] * 20
schedulers = [None, None]
schedulers[0] = ReduceLROnPlateau(
self.opt, mode="max", patience=5, cooldown=5, threshold_mode="abs"
)
schedulers[1] = ExponentialLR(self.opt, gamma=0.1)
self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_compound_reduce_lr_on_plateau4(self):
epochs = 20
for param_group in self.opt.param_groups:
param_group["lr"] = 0.05
epochs = 10
eta_min = 1e-10
single_targets = [
eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
for x in range(epochs)
]
targets = [single_targets]
targets = targets[1:] # test runs step before checking lr
metrics = [1.5 * (1.025**i) for i in range(20)] # 1.025 > 1.1**0.25
schedulers = [None, None]
schedulers[0] = ReduceLROnPlateau(
self.opt, mode="max", patience=3, threshold_mode="rel", threshold=0.1
)
schedulers[1] = CosineAnnealingLR(self.opt, epochs, eta_min)
self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_compound_reduce_lr_on_plateau5(self):
iters = 4
start_factor = 0.4
epochs = 22
for param_group in self.opt.param_groups:
param_group["lr"] = 0.5
single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
multipliers = [1] * 22
for i in range(iters):
multipliers[i] *= start_factor + i / iters * (1 - start_factor)
single_targets = [x * y for x, y in zip(single_targets, multipliers)]
targets = [single_targets]
targets = targets[1:] # test runs step before checking lr
metrics = [10 - i * 0.0165 for i in range(22)]
schedulers = [None] * 2
schedulers[0] = ReduceLROnPlateau(
self.opt,
patience=5,
cooldown=0,
threshold_mode="abs",
mode="min",
threshold=0.1,
)
schedulers[1] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
def test_cycle_lr_invalid_mode(self):
with self.assertRaises(ValueError):
scheduler = CyclicLR(self.opt, base_lr=0, max_lr=0, mode="CATS")
def test_cycle_lr_triangular_mode_one_lr(self):
lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
momentum_target = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = CyclicLR(
self.opt,
base_lr=1,
max_lr=5,
step_size_up=4,
cycle_momentum=True,
base_momentum=1,
max_momentum=5,
mode="triangular",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
def test_cycle_lr_triangular_mode_one_lr_no_momentum(self):
lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
lr_targets = [lr_target, lr_target]
momentum_target = [self.opt.defaults["momentum"]] * len(lr_target)
momentum_targets = [momentum_target, momentum_target]
scheduler = CyclicLR(
self.opt,
base_lr=1,
max_lr=5,
step_size_up=4,
cycle_momentum=False,
mode="triangular",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
def test_cycle_lr_triangular2_mode_one_lr(self):
lr_target = [
1,
2,
3,
4,
5,
4,
3,
2,
1,
1.5,
2.0,
2.5,
3.0,
2.5,
2.0,
1.5,
1,
1.25,
1.50,
1.75,
2.00,
1.75,
]
momentum_target = [
5.0,
4.0,
3.0,
2.0,
1.0,
2.0,
3.0,
4.0,
5.0,
4.5,
4.0,
3.5,
3.0,
3.5,
4.0,
4.5,
5.0,
4.75,
4.5,
4.25,
4.0,
4.25,
]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = CyclicLR(
self.opt,
base_lr=1,
max_lr=5,
step_size_up=4,
cycle_momentum=True,
base_momentum=1,
max_momentum=5,
mode="triangular2",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
def test_cycle_lr_exp_range_mode_one_lr(self):
base_lr, max_lr = 1, 5
diff_lr = max_lr - base_lr
gamma = 0.9
xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = CyclicLR(
self.opt,
base_lr=base_lr,
max_lr=max_lr,
step_size_up=4,
cycle_momentum=True,
base_momentum=base_lr,
max_momentum=max_lr,
mode="exp_range",
gamma=gamma,
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
def test_cycle_lr_triangular_mode(self):
lr_target_1 = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
lr_target_2 = [x + 1 for x in lr_target_1]
lr_targets = [lr_target_1, lr_target_2]
momentum_target_1 = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
momentum_target_2 = [x + 1 for x in momentum_target_1]
momentum_targets = [momentum_target_1, momentum_target_2]
scheduler = CyclicLR(
self.opt,
base_lr=[1, 2],
max_lr=[5, 6],
step_size_up=4,
cycle_momentum=True,
base_momentum=[1, 2],
max_momentum=[5, 6],
mode="triangular",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))
def test_cycle_lr_triangular2_mode(self):
lr_target_1 = [
1,
2,
3,
4,
5,
4,
3,
2,
1,
1.5,
2.0,
2.5,
3.0,
2.5,
2.0,
1.5,
1,
1.25,
1.50,
1.75,
2.00,
1.75,
]
lr_target_2 = [x + 2 for x in lr_target_1]
lr_targets = [lr_target_1, lr_target_2]
momentum_target_1 = [
5.0,
4.0,
3.0,
2.0,
1.0,
2.0,
3.0,
4.0,
5.0,
4.5,
4.0,
3.5,
3.0,
3.5,
4.0,
4.5,
5.0,
4.75,
4.5,
4.25,
4.0,
4.25,
]
momentum_target_2 = [x + 2 for x in momentum_target_1]
momentum_targets = [momentum_target_1, momentum_target_2]
scheduler = CyclicLR(
self.opt,
base_lr=[1, 3],
max_lr=[5, 7],
step_size_up=4,
cycle_momentum=True,
base_momentum=[1, 3],
max_momentum=[5, 7],
mode="triangular2",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))
def test_cycle_lr_exp_range_mode(self):
base_lr_1, max_lr_1 = 1, 5
base_lr_2, max_lr_2 = 5, 12
diff_lr_1 = max_lr_1 - base_lr_1
diff_lr_2 = max_lr_2 - base_lr_2
gamma = 0.9
xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
lr_target_1 = [base_lr_1 + x * diff_lr_1 * gamma**i for i, x in enumerate(xs)]
lr_target_2 = [base_lr_2 + x * diff_lr_2 * gamma**i for i, x in enumerate(xs)]
lr_targets = [lr_target_1, lr_target_2]
momentum_target_1 = [
max_lr_1 - x * diff_lr_1 * gamma**i for i, x in enumerate(xs)
]
momentum_target_2 = [
max_lr_2 - x * diff_lr_2 * gamma**i for i, x in enumerate(xs)
]
momentum_targets = [momentum_target_1, momentum_target_2]
scheduler = CyclicLR(
self.opt,
base_lr=[base_lr_1, base_lr_2],
max_lr=[max_lr_1, max_lr_2],
step_size_up=4,
cycle_momentum=True,
base_momentum=[base_lr_1, base_lr_2],
max_momentum=[max_lr_1, max_lr_2],
mode="exp_range",
gamma=gamma,
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))
def test_cycle_lr_triangular_mode_step_size_up_down(self):
lr_target = [
1.0,
2.0,
3.0,
4.0,
5.0,
13.0 / 3,
11.0 / 3,
9.0 / 3,
7.0 / 3,
5.0 / 3,
1.0,
]
lr_targets = [lr_target, lr_target]
momentum_target = [
5.0,
4.0,
3.0,
2.0,
1.0,
5.0 / 3,
7.0 / 3,
3.0,
11.0 / 3,
13.0 / 3,
5.0,
]
momentum_targets = [momentum_target, momentum_target]
scheduler = CyclicLR(
self.opt,
base_lr=1,
max_lr=5,
step_size_up=4,
step_size_down=6,
cycle_momentum=True,
base_momentum=1,
max_momentum=5,
mode="triangular",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
def test_cycle_lr_triangular2_mode_step_size_up_down(self):
lr_base_target = [
1.0,
3.0,
5.0,
13.0 / 3,
11.0 / 3,
9.0 / 3,
7.0 / 3,
5.0 / 3,
1.0,
2.0,
3.0,
8.0 / 3,
7.0 / 3,
6.0 / 3,
5.0 / 3,
4.0 / 3,
1.0,
3.0 / 2,
2.0,
11.0 / 6,
10.0 / 6,
9.0 / 6,
8.0 / 6,
7.0 / 6,
]
momentum_base_target = [
5.0,
3.0,
1.0,
5.0 / 3,
7.0 / 3,
3.0,
11.0 / 3,
13.0 / 3,
5.0,
4.0,
3.0,
10.0 / 3,
11.0 / 3,
4.0,
13.0 / 3,
14.0 / 3,
5.0,
4.5,
4.0,
25.0 / 6,
13.0 / 3,
4.5,
14.0 / 3,
29.0 / 6,
]
deltas = [2 * i for i in range(0, 2)]
base_lrs = [1 + delta for delta in deltas]
max_lrs = [5 + delta for delta in deltas]
lr_targets = [[x + delta for x in lr_base_target] for delta in deltas]
momentum_targets = [
[x + delta for x in momentum_base_target] for delta in deltas
]
scheduler = CyclicLR(
self.opt,
base_lr=base_lrs,
max_lr=max_lrs,
step_size_up=2,
step_size_down=6,
cycle_momentum=True,
base_momentum=base_lrs,
max_momentum=max_lrs,
mode="triangular2",
)
self._test_cycle_lr(
scheduler, lr_targets, momentum_targets, len(lr_base_target)
)
def test_cycle_lr_exp_range_mode_step_size_up_down(self):
base_lr, max_lr = 1, 5
diff_lr = max_lr - base_lr
gamma = 0.9
xs = [
0.0,
0.5,
1.0,
5.0 / 6,
4.0 / 6,
3.0 / 6,
2.0 / 6,
1.0 / 6,
0.0,
0.5,
1.0,
5.0 / 6,
4.0 / 6,
]
lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
lr_targets = [lr_target, lr_target]
momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
momentum_targets = [momentum_target, momentum_target]
scheduler = CyclicLR(
self.opt,
base_lr=base_lr,
max_lr=max_lr,
step_size_up=2,
step_size_down=6,
cycle_momentum=True,
base_momentum=base_lr,
max_momentum=max_lr,
mode="exp_range",
gamma=gamma,
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
def test_cycle_lr_with_momentumless_optimizer(self):
# Note [Temporarily set optimizer to Adam]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The TestLRScheduler object carries around an SGD optimizer to avoid having to
        # instantiate one for every test. This gets in the way of our very specific case
# in which we need to use Adam (or really any optimizer that doesn't use momentum)
# in order to test that the momentum bug in CyclicLR is fixed (the bug is described
# in more detail in https://github.com/pytorch/pytorch/issues/19003 ).
old_opt = self.opt
self.opt = optim.Adam(
[
{"params": self.net.conv1.parameters()},
{"params": self.net.conv2.parameters(), "lr": 0.5},
],
lr=0.05,
)
lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
lr_targets = [lr_target, lr_target]
momentum_target = [None] * len(lr_target)
momentum_targets = [momentum_target, momentum_target]
scheduler = CyclicLR(
self.opt,
base_lr=1,
max_lr=5,
step_size_up=4,
cycle_momentum=False,
mode="triangular",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
self.opt = old_opt # set optimizer back to SGD
def test_cycle_lr_cycle_momentum_fail_with_momentumless_optimizer(self):
with self.assertRaises(ValueError):
adam_opt = optim.Adam(self.net.parameters())
scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=True)
def test_cycle_lr_removed_after_out_of_scope(self):
import gc
import weakref
gc.disable()
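        # With the cyclic garbage collector disabled, the scheduler can only be freed
        # by reference counting, so this check fails if CyclicLR keeps a reference
        # cycle with the optimizer alive.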
def test():
adam_opt = optim.Adam(self.net.parameters())
scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
return weakref.ref(scheduler)
ref = test()
        self.assertIsNone(ref())
gc.enable()
def test_cycle_lr_state_dict_picklable(self):
adam_opt = optim.Adam(self.net.parameters())
scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
self.assertIsInstance(scheduler._scale_fn_ref, weakref.WeakMethod)
state = scheduler.state_dict()
self.assertNotIn("_scale_fn_ref", state)
pickle.dumps(state)
def test_cycle_lr_scale_fn_restored_from_state_dict(self):
adam_opt = optim.Adam(self.net.parameters())
# Case 1: Built-in mode
scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, mode="triangular2")
restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
restored_scheduler.load_state_dict(scheduler.state_dict())
self.assertTrue(restored_scheduler.mode == scheduler.mode == "triangular2")
        self.assertIsNotNone(restored_scheduler._scale_fn_ref)
        self.assertIsNotNone(scheduler._scale_fn_ref)
self.assertIs(restored_scheduler._scale_fn_custom, None)
self.assertIs(scheduler._scale_fn_custom, None)
# Case 2: Custom `scale_fn`
def scale_fn(_):
return 0.5
scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
restored_scheduler.load_state_dict(scheduler.state_dict())
self.assertIs(scheduler._scale_fn_custom, scale_fn)
self.assertIs(restored_scheduler._scale_fn_custom, scale_fn)
def test_onecycle_lr_invalid_anneal_strategy(self):
with self.assertRaises(ValueError):
scheduler = OneCycleLR(
self.opt, max_lr=1e-3, total_steps=10, anneal_strategy="CATS"
)
def test_onecycle_lr_invalid_pct_start(self):
with self.assertRaises(ValueError):
scheduler = OneCycleLR(self.opt, max_lr=1e-3, total_steps=10, pct_start=1.1)
def test_onecycle_lr_cannot_calculate_total_steps(self):
with self.assertRaises(ValueError):
scheduler = OneCycleLR(self.opt, max_lr=1e-3)
def test_onecycle_lr_linear_annealing(self):
lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = OneCycleLR(
self.opt,
max_lr=25,
final_div_factor=2,
base_momentum=1,
max_momentum=22,
total_steps=10,
anneal_strategy="linear",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)
def test_onecycle_lr_linear_annealing_three_phases(self):
lr_target = [1, 9, 17, 25, 17, 9, 1, 0.75, 0.5, 0.25]
momentum_target = [22, 15, 8, 1, 8, 15, 22, 22, 22, 22]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = OneCycleLR(
self.opt,
max_lr=25,
div_factor=25,
base_momentum=1,
max_momentum=22,
total_steps=10,
anneal_strategy="linear",
pct_start=0.4,
final_div_factor=4,
three_phase=True,
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)
def test_onecycle_lr_cosine_annealing(self):
def annealing_cos(start, end, pct):
cos_out = math.cos(math.pi * pct) + 1
return end + (start - end) / 2.0 * cos_out
lr_target = [
1,
13,
25,
annealing_cos(25, 0.5, 1 / 7.0),
annealing_cos(25, 0.5, 2 / 7.0),
annealing_cos(25, 0.5, 3 / 7.0),
annealing_cos(25, 0.5, 4 / 7.0),
annealing_cos(25, 0.5, 5 / 7.0),
annealing_cos(25, 0.5, 6 / 7.0),
0.5,
]
momentum_target = [
22,
11.5,
1,
annealing_cos(1, 22, 1 / 7.0),
annealing_cos(1, 22, 2 / 7.0),
annealing_cos(1, 22, 3 / 7.0),
annealing_cos(1, 22, 4 / 7.0),
annealing_cos(1, 22, 5 / 7.0),
annealing_cos(1, 22, 6 / 7.0),
22,
]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = OneCycleLR(
self.opt,
max_lr=25,
final_div_factor=2,
base_momentum=1,
max_momentum=22,
total_steps=10,
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)
def test_cycle_lr_with_adam(self):
old_opt = self.opt
self.opt = optim.Adam(
[
{"params": self.net.conv1.parameters()},
{"params": self.net.conv2.parameters(), "lr": 0.5},
],
lr=0.05,
)
lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = OneCycleLR(
self.opt,
max_lr=25,
final_div_factor=2,
base_momentum=1,
max_momentum=22,
total_steps=10,
anneal_strategy="linear",
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10, use_beta1=True)
self.opt = old_opt # set optimizer back to SGD
def test_lambda_lr(self):
epochs = 10
self.opt.param_groups[0]["lr"] = 0.05
self.opt.param_groups[1]["lr"] = 0.4
targets = [
[0.05 * (0.9**x) for x in range(epochs)],
[0.4 * (0.8**x) for x in range(epochs)],
]
scheduler = LambdaLR(
self.opt, lr_lambda=[lambda x1: 0.9**x1, lambda x2: 0.8**x2]
)
self._test(scheduler, targets, epochs)
def test_multiplicative_lr(self):
epochs = 10
self.opt.param_groups[0]["lr"] = 0.05
self.opt.param_groups[1]["lr"] = 0.4
targets = [
[0.05 * (0.9**x) for x in range(epochs)],
[0.4 * (0.8**x) for x in range(epochs)],
]
scheduler = MultiplicativeLR(
self.opt, lr_lambda=[lambda x1: 0.9, lambda x2: 0.8]
)
self._test(scheduler, targets, epochs)
@parametrize("T_mult", [1, 2, 4])
def test_CosineAnnealingWarmRestarts_lr1(self, T_mult):
iters = 100
eta_min = 1e-10
T_i = 10
T_cur = 0
targets = [[0.05], [0.5]]
scheduler = CosineAnnealingWarmRestarts(
self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min
)
for _ in range(1, iters, 1):
T_cur += 1
if T_cur >= T_i:
T_cur = T_cur - T_i
T_i = int(T_mult) * T_i
targets[0] += [
eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
]
targets[1] += [
eta_min + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
]
self._test(scheduler, targets, iters)
def test_CosineAnnealingWarmRestarts_lr2(self):
iters = 30
eta_min = 1e-10
T_mults = [1, 2, 4]
for T_mult in T_mults:
T_i = 10
T_cur = 0
targets = [[0.05], [0.5]]
scheduler = CosineAnnealingWarmRestarts(
self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min
)
for _ in torch.arange(0.1, iters, 0.1):
T_cur = round(T_cur + 0.1, 1)
if T_cur >= T_i:
T_cur = T_cur - T_i
T_i = int(T_mult) * T_i
targets[0] += [
eta_min
+ (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
]
targets[1] += [
eta_min
+ (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
]
self._test_CosineAnnealingWarmRestarts(scheduler, targets, iters)
def test_CosineAnnealingWarmRestarts_lr3(self):
epochs_for_T_mults = [
[0, 1, 2, 3, 4, 5, 12, 27, 3, 4, 5, 6, 13],
[0, 1, 2, 3, 4, 5, 25, 32, 33, 34, 80, 81, 3],
[0, 0.1, 0.2, 0.3, 1.3, 2.3, 17.5, 18.5, 19.5, 29.5, 30.5, 31.5, 50],
]
T_curs_for_T_mults = [
[1, 2, 3, 4, 5, 2, 7, 3, 4, 5, 6, 3],
[1, 2, 3, 4, 5, 15, 2, 3, 4, 10, 11, 3],
[0.1, 0.2, 0.3, 1.3, 2.3, 7.5, 8.5, 9.5, 19.5, 20.5, 21.5, 10],
]
T_is_for_T_mults = [
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 20, 40, 40, 40, 80, 80, 10],
[10, 10, 10, 10, 10, 30, 30, 30, 30, 30, 30, 90],
]
eta_min = 1e-10
T_mults = [1, 2, 3]
for epochs, T_mult, T_curs, T_is in zip(
epochs_for_T_mults, T_mults, T_curs_for_T_mults, T_is_for_T_mults
):
targets = [[0.05], [0.5]]
scheduler = CosineAnnealingWarmRestarts(
self.opt, T_0=10, T_mult=T_mult, eta_min=eta_min
)
for T_cur, T_i in zip(T_curs, T_is):
targets[0] += [
eta_min
+ (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
]
targets[1] += [
eta_min
+ (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
]
self._test_interleaved_CosineAnnealingWarmRestarts(
scheduler, targets, epochs
)
def test_swalr_no_anneal(self):
epochs, swa_start, swa_lr = 10, 5, 0.01
initial_lrs = [group["lr"] for group in self.opt.param_groups]
targets = [
[lr] * (swa_start + 1) + [swa_lr] * (epochs - swa_start - 1)
for lr in initial_lrs
]
swa_scheduler = SWALR(self.opt, anneal_epochs=1, swa_lr=swa_lr)
self._test_swalr(swa_scheduler, None, targets, swa_start, epochs)
def test_swalr_cosine_anneal_after_multiplicative(self):
# same swa_lr for different param_groups
epochs, swa_start, swa_lr, anneal_epochs = 15, 5, 0.01, 5
mult_factor = 0.9
scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
swa_scheduler = SWALR(self.opt, anneal_epochs=anneal_epochs, swa_lr=swa_lr)
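        # anneal_coef(t) is the fraction of the pre-SWA lr that remains after t SWA
        # epochs; SWALR anneals the lr toward swa_lr along a cosine schedule over
        # anneal_epochs steps.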
def anneal_coef(t):
if t + 1 >= anneal_epochs:
return 0.0
return (1 + math.cos(math.pi * (t + 1) / anneal_epochs)) / 2
initial_lrs = [group["lr"] for group in self.opt.param_groups]
targets_before_swa = [
[lr * mult_factor**i for i in range(swa_start + 1)] for lr in initial_lrs
]
swa_epochs = epochs - swa_start - 1
targets = [
lrs
+ [
lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t))
for t in range(swa_epochs)
]
for lrs in targets_before_swa
]
self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)
def test_swalr_linear_anneal_after_multiplicative(self):
# separate swa_lr for different param_groups
epochs, swa_start, swa_lrs, anneal_epochs = 15, 5, [0.01, 0.02], 4
mult_factor = 0.9
scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
swa_scheduler = SWALR(
self.opt,
anneal_epochs=anneal_epochs,
anneal_strategy="linear",
swa_lr=swa_lrs,
)
def anneal_coef(t):
if t + 1 >= anneal_epochs:
return 0.0
return 1 - (t + 1) / anneal_epochs
initial_lrs = [group["lr"] for group in self.opt.param_groups]
targets_before_swa = [
[lr * mult_factor**i for i in range(swa_start + 1)] for lr in initial_lrs
]
swa_epochs = epochs - swa_start - 1
targets = [
lrs
+ [
lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t))
for t in range(swa_epochs)
]
for lrs, swa_lr in zip(targets_before_swa, swa_lrs)
]
self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)
def _test_swalr(self, swa_scheduler, scheduler, targets, swa_start, epochs):
for epoch in range(epochs):
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[epoch],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[epoch], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
if epoch >= swa_start:
self.opt.step()
swa_scheduler.step()
elif scheduler is not None:
self.opt.step()
scheduler.step()
def test_swalr_hypers(self):
# Test that SWALR raises errors for incorrect hyper-parameters
with self.assertRaisesRegex(ValueError, "anneal_strategy must"):
swa_scheduler = SWALR(self.opt, anneal_strategy="exponential", swa_lr=1.0)
with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
swa_scheduler = SWALR(self.opt, anneal_epochs=-1, swa_lr=1.0)
with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
swa_scheduler = SWALR(self.opt, anneal_epochs=1.7, swa_lr=1.0)
with self.assertRaisesRegex(ValueError, "swa_lr must"):
swa_scheduler = SWALR(self.opt, swa_lr=[1.0, 0.1, 0.01])
def test_step_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: StepLR(self.opt, gamma=0.1, step_size=3),
lambda: StepLR(self.opt, gamma=0.01 / 2, step_size=1),
)
def test_multi_step_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9]),
lambda: MultiStepLR(self.opt, gamma=0.01, milestones=[1, 4, 6]),
)
def test_exp_step_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: ExponentialLR(self.opt, gamma=0.1),
lambda: ExponentialLR(self.opt, gamma=0.01),
)
def test_cosine_lr_state_dict(self):
epochs = 10
eta_min = 1e-10
self._check_scheduler_state_dict(
lambda: CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
lambda: CosineAnnealingLR(self.opt, T_max=epochs // 2, eta_min=eta_min / 2),
epochs=epochs,
)
def test_reduce_lr_on_plateau_state_dict(self):
scheduler = ReduceLROnPlateau(self.opt, mode="min", factor=0.1, patience=2)
for score in [1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 3.0, 2.0, 1.0]:
scheduler.step(score)
scheduler_copy = ReduceLROnPlateau(
self.opt, mode="max", factor=0.5, patience=10
)
scheduler_copy.load_state_dict(scheduler.state_dict())
for key in scheduler.__dict__.keys():
if key not in {"optimizer", "is_better"}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
def test_lambda_lr_state_dict_fn(self):
scheduler = LambdaLR(self.opt, lr_lambda=lambda x: x)
state = scheduler.state_dict()
self.assertIsNone(state["lr_lambdas"][0])
scheduler_copy = LambdaLR(self.opt, lr_lambda=lambda x: x)
scheduler_copy.load_state_dict(state)
for key in scheduler.__dict__.keys():
if key not in {"optimizer", "lr_lambdas"}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
def test_lambda_lr_state_dict_obj(self):
scheduler = LambdaLR(self.opt, lr_lambda=LambdaLRTestObject(10))
state = scheduler.state_dict()
self.assertIsNotNone(state["lr_lambdas"][0])
scheduler_copy = LambdaLR(self.opt, lr_lambda=LambdaLRTestObject(-1))
scheduler_copy.load_state_dict(state)
for key in scheduler.__dict__.keys():
if key not in {"optimizer"}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
def test_CosineAnnealingWarmRestarts_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: CosineAnnealingWarmRestarts(self.opt, T_0=10, T_mult=2),
lambda: CosineAnnealingWarmRestarts(self.opt, T_0=100),
)
def test_swa_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: SWALR(self.opt, anneal_epochs=3, swa_lr=0.5),
lambda: SWALR(
self.opt, anneal_epochs=10, anneal_strategy="linear", swa_lr=5.0
),
)
def _check_scheduler_state_dict(self, constr, constr2, epochs=10):
scheduler = constr()
for _ in range(epochs):
scheduler.optimizer.step()
scheduler.step()
scheduler_copy = constr2()
scheduler_copy.load_state_dict(scheduler.state_dict())
for key in scheduler.__dict__.keys():
if key != "optimizer":
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
self.assertEqual(scheduler.get_last_lr(), scheduler_copy.get_last_lr())
def _test_get_last_lr(self, schedulers, targets, epochs=10):
if isinstance(schedulers, LRScheduler):
schedulers = [schedulers]
optimizers = {scheduler.optimizer for scheduler in schedulers}
for epoch in range(epochs):
result = [scheduler.get_last_lr() for scheduler in schedulers]
[optimizer.step() for optimizer in optimizers]
[scheduler.step() for scheduler in schedulers]
target = [[t[epoch] for t in targets]] * len(schedulers)
for t, r in zip(target, result):
self.assertEqual(
target,
result,
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, t, r
),
atol=1e-5,
rtol=0,
)
def _test_with_epoch(self, schedulers, targets, epochs=10):
if isinstance(schedulers, LRScheduler):
schedulers = [schedulers]
optimizers = {scheduler.optimizer for scheduler in schedulers}
for epoch in range(epochs):
[optimizer.step() for optimizer in optimizers]
with warnings.catch_warnings(record=True) as w:
[
scheduler.step(epoch) for scheduler in schedulers
] # step before assert: skip initial lr
self._check_warning_is_epoch_deprecation_warning(
w, num_warnings=len(schedulers)
)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[epoch],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[epoch], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
def _test(self, schedulers, targets, epochs=10):
if isinstance(schedulers, LRScheduler):
schedulers = [schedulers]
for epoch in range(epochs):
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[epoch],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[epoch], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
[scheduler.step() for scheduler in schedulers]
def _test_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs=10):
for index, epoch in enumerate(torch.arange(0, epochs, 0.1)):
epoch = round(epoch.item(), 1)
scheduler.step(epoch)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[index],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[index], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
def _test_interleaved_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs):
for index, epoch in enumerate(epochs):
scheduler.step(epoch)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[index],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[index], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
def _test_against_closed_form(self, scheduler, closed_form_scheduler, epochs=10):
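        # Record the lrs produced by epoch-indexed stepping of the closed-form
        # scheduler, then verify that plain step() calls on a fresh setup match them.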
self.setUp()
targets = []
for epoch in range(epochs):
closed_form_scheduler.optimizer.step()
with warnings.catch_warnings(record=True) as w:
closed_form_scheduler.step(epoch)
self._check_warning_is_epoch_deprecation_warning(w)
targets.append([group["lr"] for group in self.opt.param_groups])
self.setUp()
for epoch in range(epochs):
self.opt.step()
scheduler.step()
for i, param_group in enumerate(self.opt.param_groups):
self.assertEqual(
targets[epoch][i],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, targets[epoch][i], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
def _test_reduce_lr_on_plateau(
self, schedulers, targets, metrics, epochs=10, verbose=False
):
if isinstance(schedulers, (LRScheduler, ReduceLROnPlateau)):
schedulers = [schedulers]
for epoch in range(epochs):
self.opt.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(metrics[epoch])
else:
scheduler.step()
if verbose:
print("epoch{}:\tlr={}".format(epoch, self.opt.param_groups[0]["lr"]))
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[epoch],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[epoch], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
def _test_cycle_lr(
self,
scheduler,
lr_targets,
momentum_targets,
batch_iterations,
verbose=False,
use_beta1=False,
):
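        # Walk the scheduler batch by batch and compare the lr (and momentum, or
        # beta1 for Adam-style optimizers) of every param group against the targets.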
for batch_num in range(batch_iterations):
if verbose:
if "momentum" in self.opt.param_groups[0].keys():
print(
"batch{}:\tlr={},momentum={}".format(
batch_num,
self.opt.param_groups[0]["lr"],
self.opt.param_groups[0]["momentum"],
)
)
elif use_beta1 and "betas" in self.opt.param_groups[0].keys():
print(
"batch{}:\tlr={},beta1={}".format(
batch_num,
self.opt.param_groups[0]["lr"],
self.opt.param_groups[0]["betas"][0],
)
)
else:
print(
"batch{}:\tlr={}".format(
batch_num, self.opt.param_groups[0]["lr"]
)
)
for param_group, lr_target, momentum_target in zip(
self.opt.param_groups, lr_targets, momentum_targets
):
self.assertEqual(
lr_target[batch_num],
param_group["lr"],
msg="LR is wrong in batch_num {}: expected {}, got {}".format(
batch_num, lr_target[batch_num], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
if use_beta1 and "betas" in param_group.keys():
self.assertEqual(
momentum_target[batch_num],
param_group["betas"][0],
msg="Beta1 is wrong in batch_num {}: expected {}, got {}".format(
batch_num,
momentum_target[batch_num],
param_group["betas"][0],
),
atol=1e-5,
rtol=0,
)
elif "momentum" in param_group.keys():
self.assertEqual(
momentum_target[batch_num],
param_group["momentum"],
msg="Momentum is wrong in batch_num {}: expected {}, got {}".format(
batch_num,
momentum_target[batch_num],
param_group["momentum"],
),
atol=1e-5,
rtol=0,
)
self.opt.step()
scheduler.step()
def test_cosine_then_cyclic(self):
# https://github.com/pytorch/pytorch/issues/21965
max_lr = 0.3
base_lr = 0.1
optim_lr = 0.5
model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=optim_lr)
lr_scheduler_1 = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=20, eta_min=0.1
)
lr_scheduler_2 = torch.optim.lr_scheduler.CyclicLR(
optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=1, step_size_down=3
)
for i in range(40):
optimizer.step()
if i <= lr_scheduler_1.T_max:
lr_scheduler_1.step()
else:
lr_scheduler_2.step()
last_lr = optimizer.param_groups[0]["lr"]
self.assertLessEqual(last_lr, max_lr)
class SWATestDNN(torch.nn.Module):
def __init__(self, input_features):
super().__init__()
self.n_features = 100
self.fc1 = torch.nn.Linear(input_features, self.n_features)
self.bn = torch.nn.BatchNorm1d(self.n_features)
def compute_preactivation(self, x):
return self.fc1(x)
def forward(self, x):
x = self.fc1(x)
x = self.bn(x)
return x
class SWATestCNN(torch.nn.Module):
def __init__(self, input_channels):
super().__init__()
self.n_features = 10
self.conv1 = torch.nn.Conv2d(
input_channels, self.n_features, kernel_size=3, padding=1
)
self.bn = torch.nn.BatchNorm2d(self.n_features, momentum=0.3)
def compute_preactivation(self, x):
return self.conv1(x)
def forward(self, x):
x = self.conv1(x)
x = self.bn(x)
return x
class TestSWAUtils(TestCase):
def _test_averaged_model(self, net_device, swa_device):
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2),
torch.nn.BatchNorm2d(5, momentum=0.3),
torch.nn.Conv2d(5, 2, kernel_size=3),
torch.nn.ReLU(),
torch.nn.Linear(5, 5),
torch.nn.ReLU(),
torch.nn.Linear(5, 10),
).to(net_device)
averaged_dnn = AveragedModel(dnn, device=swa_device)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
p_avg += p.detach() / n_updates
averaged_dnn.update_parameters(dnn)
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
# Check that AveragedModel is on the correct device
self.assertTrue(p_swa.device == swa_device)
self.assertTrue(p.device == net_device)
self.assertTrue(averaged_dnn.n_averaged.device == swa_device)
def test_averaged_model_all_devices(self):
cpu = torch.device("cpu")
self._test_averaged_model(cpu, cpu)
if torch.cuda.is_available():
cuda = torch.device(0)
self._test_averaged_model(cuda, cpu)
self._test_averaged_model(cpu, cuda)
self._test_averaged_model(cuda, cuda)
def test_averaged_model_mixed_device(self):
if not torch.cuda.is_available():
return
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10)
)
dnn[0].cuda()
dnn[1].cpu()
averaged_dnn = AveragedModel(dnn)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
p_avg += p.detach() / n_updates
averaged_dnn.update_parameters(dnn)
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
# Check that AveragedModel is on the correct device
self.assertTrue(p_avg.device == p_swa.device)
def test_averaged_model_state_dict(self):
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10)
)
averaged_dnn = AveragedModel(dnn)
averaged_dnn2 = AveragedModel(dnn)
n_updates = 10
for i in range(n_updates):
for p in dnn.parameters():
p.detach().add_(torch.randn_like(p))
averaged_dnn.update_parameters(dnn)
averaged_dnn2.load_state_dict(averaged_dnn.state_dict())
for p_swa, p_swa2 in zip(averaged_dnn.parameters(), averaged_dnn2.parameters()):
self.assertEqual(p_swa, p_swa2)
self.assertTrue(averaged_dnn.n_averaged == averaged_dnn2.n_averaged)
def test_averaged_model_exponential(self):
# Test AveragedModel with EMA as avg_fn
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3),
torch.nn.BatchNorm2d(5, momentum=0.3),
torch.nn.Linear(5, 10),
)
alpha = 0.9
def avg_fn(p_avg, p, n_avg):
return alpha * p_avg + (1 - alpha) * p
averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
updated_averaged_params.append(
(p_avg * alpha + p * (1 - alpha)).clone()
)
for b in dnn.buffers():
if b.size() != torch.Size([]):
b.detach_().add_(torch.randn_like(b))
averaged_dnn.update_parameters(dnn)
averaged_params = updated_averaged_params
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
for b_avg, b_swa in zip(dnn.buffers(), averaged_dnn.module.buffers()):
self.assertEqual(b_avg, b_swa)
def test_averaged_model_exponential_buffers(self):
# Test AveragedModel with EMA as avg_fn and use_buffers as True.
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3),
torch.nn.BatchNorm2d(5, momentum=0.3),
torch.nn.Linear(5, 10),
)
alpha = 0.9
def avg_fn(p_avg, p, n_avg):
return alpha * p_avg + (1 - alpha) * p
averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn, use_buffers=True)
dnn_params = itertools.chain(dnn.parameters(), dnn.buffers())
averaged_params = [
torch.zeros_like(param)
for param in dnn_params
if param.size() != torch.Size([])
]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(dnn_params, averaged_params):
if p.size() == torch.Size([]):
continue
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
updated_averaged_params.append(
(p_avg * alpha + p * (1 - alpha)).clone()
)
averaged_dnn.update_parameters(dnn)
averaged_params = updated_averaged_params
for p_avg, p_swa in zip(
averaged_params,
itertools.chain(
averaged_dnn.module.parameters(), averaged_dnn.module.buffers()
),
):
self.assertEqual(p_avg, p_swa)
def _test_update_bn(self, dnn, dl_x, dl_xy, cuda):
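        # Compute the dataset-level mean and variance of the pre-BatchNorm
        # activations, then check that update_bn reproduces them in the BN running
        # statistics, including after resets and with both loader formats.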
preactivation_sum = torch.zeros(dnn.n_features)
preactivation_squared_sum = torch.zeros(dnn.n_features)
if cuda:
preactivation_sum = preactivation_sum.cuda()
preactivation_squared_sum = preactivation_squared_sum.cuda()
total_num = 0
for x in dl_x:
x = x[0]
if cuda:
x = x.cuda()
dnn.forward(x)
preactivations = dnn.compute_preactivation(x)
if len(preactivations.shape) == 4:
preactivations = preactivations.transpose(1, 3)
preactivations = preactivations.contiguous().view(-1, dnn.n_features)
total_num += preactivations.shape[0]
preactivation_sum += torch.sum(preactivations, dim=0)
preactivation_squared_sum += torch.sum(preactivations**2, dim=0)
preactivation_mean = preactivation_sum / total_num
preactivation_var = preactivation_squared_sum / total_num
preactivation_var = preactivation_var - preactivation_mean**2
update_bn(dl_xy, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
def _reset_bn(module):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
# reset batch norm and run update_bn again
dnn.apply(_reset_bn)
update_bn(dl_xy, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
# using the dl_x loader instead of dl_xy
dnn.apply(_reset_bn)
update_bn(dl_x, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
def test_update_bn_dnn(self):
# Test update_bn for a fully-connected network with BatchNorm1d
objects, input_features = 100, 5
x = torch.rand(objects, input_features)
y = torch.rand(objects)
ds_x = torch.utils.data.TensorDataset(x)
ds_xy = torch.utils.data.TensorDataset(x, y)
dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
dl_xy = torch.utils.data.DataLoader(ds_xy, batch_size=5, shuffle=True)
dnn = SWATestDNN(input_features=input_features)
dnn.train()
self._test_update_bn(dnn, dl_x, dl_xy, False)
if torch.cuda.is_available():
dnn = SWATestDNN(input_features=input_features)
dnn.train()
self._test_update_bn(dnn.cuda(), dl_x, dl_xy, True)
self.assertTrue(dnn.training)
def test_update_bn_cnn(self):
# Test update_bn for convolutional network and BatchNorm2d
objects = 100
input_channels = 3
height, width = 5, 5
x = torch.rand(objects, input_channels, height, width)
y = torch.rand(objects)
ds_x = torch.utils.data.TensorDataset(x)
ds_xy = torch.utils.data.TensorDataset(x, y)
dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
dl_xy = torch.utils.data.DataLoader(ds_xy, batch_size=5, shuffle=True)
dnn = SWATestCNN(input_channels=input_channels)
dnn.train()
self._test_update_bn(dnn, dl_x, dl_xy, False)
if torch.cuda.is_available():
dnn = SWATestCNN(input_channels=input_channels)
dnn.train()
self._test_update_bn(dnn.cuda(), dl_x, dl_xy, True)
self.assertTrue(dnn.training)
def test_bn_update_eval_momentum(self):
# check that update_bn preserves eval mode
objects = 100
input_channels = 3
height, width = 5, 5
x = torch.rand(objects, input_channels, height, width)
ds_x = torch.utils.data.TensorDataset(x)
dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
dnn = SWATestCNN(input_channels=input_channels)
dnn.eval()
update_bn(dl_x, dnn)
self.assertFalse(dnn.training)
# check that momentum is preserved
self.assertEqual(dnn.bn.momentum, 0.3)
instantiate_parametrized_tests(TestLRScheduler)
def drosenbrock(tensor):
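    # Analytic gradient of the 2-D Rosenbrock function
    # f(x, y) = (1 - x) ** 2 + 100 * (y - x ** 2) ** 2.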
assert tensor.size() == torch.Size(
[2]
), f"Requires tensor with 2 scalars but got {tensor.size()}"
x, y = tensor
return torch.stack((-400 * x * (y - x**2) - 2 * (1 - x), 200 * (y - x**2)))
@markDynamoStrictTest
class TestOptimRenewed(TestCase):
"""
    This test class validates the core optimizers and is structured around the correctness of:
- The update algorithms (forloop implementation)
* Every optimizer's algorithm is most readably implemented through a big for-loop
over all the parameters, which is what we refer to as the forloop or single tensor
implementation. These algorithms are manually validated by comparing to the paper
and systematically validated by assuring that the loss goes the right direction
when the optimizer has been applied.
* This implementation should compose with optimizer hyperparameters well, such as
supporting Tensor LRs, the capturable API, and sparse and complex parameters.
- Each varying implementation
* We then have implementations that improve upon the performance of the forloop
          implementation by leveraging fusion, namely our foreach (multi_tensor) and fused
implementations.
* These variations are validated numerically by comparing with the forloop version
of the optimizer. In fact, we test most variations this way--we see the forloop
implementation as the ground truth and expect that improvements to it in any way
should be just as correct.
* Both params and optimizer states should be validated numerically.
- state_dict APIs
* The optimizer instance should be serializable
* Calling save and load should be deterministic
* Moving between devices should be seamless
* BC - load_state_dict should be able to handle older optimizer states
- Hook APIs (everything should fire in the right order)
- LR Scheduler integration (composing should not error + should go the right direction)
- Parameter groups (should be equivalent to having multiple optimizers)
- Erroring (what should error should error)
We also cover different ways of generating parameters and grads:
- With parameters, we either generate them randomly given specific shapes or we take
them from a sample NN module.
* Variety is important here because NN modules have type Parameter and randomly
generated tensors have type Tensor.
* Parameters can be sparse for a subset of the optimizers (check out OptimizerInfo)
* Complex parameters should be handled using view_as_real
* Parameters can be spread across different devices and different dtypes for any
given optimizer
* Parameters can be contiguous and noncontiguous
- With grads, we follow suit from the parameters.
* Grads can also be None, empty, or zero-valued, and this should not disrupt training.
"""
@onlyCPU
@optims(optim_db)
def test_optim_infos_do_not_specify_global_cliquey_kwargs(
self, device, dtype, optim_info
):
global_cliquey_flags = ["foreach", "fused", "differentiable"]
for optim_input in optim_info.optim_inputs_func(device=device):
self.assertFalse(
any(f for f in global_cliquey_flags if f in optim_input.kwargs)
)
@optims([optim for optim in optim_db if optim.optim_error_inputs_func is not None])
def test_errors(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
error_inputs = optim_info.optim_error_inputs_func(device=device, dtype=dtype)
for error_input in error_inputs:
optim_input = error_input.optimizer_error_input
params, kwargs = optim_input.params, optim_input.kwargs
if error_input.error_on == OptimizerErrorEnum.CONSTRUCTION_ERROR:
if issubclass(error_input.error_type, Warning):
with self.assertWarnsRegex(
error_input.error_type, error_input.error_regex
):
optim_cls(params, **kwargs)
else:
with self.assertRaisesRegex(
error_input.error_type, error_input.error_regex
):
optim_cls(params, **kwargs)
elif error_input.error_on == OptimizerErrorEnum.STEP_ERROR:
optim = optim_cls(params, **kwargs)
if issubclass(error_input.error_type, Warning):
with self.assertWarnsRegex(
error_input.error_type, error_input.error_regex
):
optim.step()
else:
with self.assertRaisesRegex(
error_input.error_type, error_input.error_regex
):
optim.step()
else:
raise NotImplementedError(f"Unknown error type {error_input.error_on}")
@parametrize("contiguous", [True, False])
@parametrize("with_lrsched", [True, False])
@optims(optim_db, dtypes=[torch.float32])
def test_forloop_goes_right_direction(
self, device, dtype, optim_info, contiguous, with_lrsched
):
optim_cls = optim_info.optim_cls
schedulers_constructors = (
optim_info.scheduler_inputs if with_lrsched else [None]
)
for schedulers_constructor in schedulers_constructors:
            # With a tensor LR we need fresh inputs for each scheduler,
            # otherwise mutating it would carry state across iterations.
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
if "foreach" in optim_info.supported_impls:
optim_input.kwargs["foreach"] = False # force forloop
if contiguous:
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
bias = Parameter(torch.randn((10), device=device, dtype=dtype))
else:
weight = Parameter(
torch.randn((10, 5, 2), device=device, dtype=dtype)[..., 0]
)
bias = Parameter(
torch.randn((10, 2), device=device, dtype=dtype)[..., 0]
)
input = torch.randn(5, device=device, dtype=dtype)
optimizer = optim_cls([weight, bias], **optim_input.kwargs)
schedulers = [
s(optimizer)
for s in (schedulers_constructor if schedulers_constructor else [])
]
def closure():
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
if optim_info.step_requires_closure:
loss = optimizer.step(closure)
else:
loss = closure()
optimizer.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
@onlyCUDA
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@parametrize("with_lrsched", [True, False])
@optims(optim_db, dtypes=[torch.float32])
def test_forloop_goes_right_direction_multigpu(
self, device, dtype, optim_info, with_lrsched
):
optim_cls = optim_info.optim_cls
schedulers_constructors = (
optim_info.scheduler_inputs if with_lrsched else [None]
)
for schedulers_constructor in schedulers_constructors:
# We need a fresh set of inputs if we have a tensor LR
# to not carry mutations across iterations.
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
if "foreach" in optim_info.supported_impls:
optim_input.kwargs["foreach"] = False # force forloop
weight = Parameter(torch.randn((10, 5), device="cuda:0", dtype=dtype))
bias = Parameter(torch.randn((10), device="cuda:1", dtype=dtype))
inpt = torch.randn(5, device="cuda:0", dtype=dtype)
optimizer = optim_cls([weight, bias], **optim_input.kwargs)
schedulers = [
s(optimizer)
for s in (schedulers_constructor if schedulers_constructor else [])
]
def closure():
optimizer.zero_grad()
loss = (weight.mv(inpt).cuda(1) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
loss = optimizer.step(closure)
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
@optims(optim_db, dtypes=[torch.float32])
def test_param_group_with_lrscheduler_goes_right_direction(
self, device, dtype, optim_info
):
optim_cls = optim_info.optim_cls
for schedulers_c in optim_info.scheduler_inputs:
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
bias = Parameter(torch.randn((10), device=device, dtype=dtype))
inpt = torch.randn(5, device=device, dtype=dtype)
# avoid endless recompiles by wrapping LR in a tensor if we're compiling
lr = torch.tensor(0.01) if torch._utils.is_compiling() else 0.01
optimizer = optim_cls([{"params": [weight]}, {"params": [bias], "lr": lr}])
schedulers = [scheduler_c(optimizer) for scheduler_c in schedulers_c]
def closure():
optimizer.zero_grad()
loss = (weight.mv(inpt) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
loss = optimizer.step(closure)
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
self.assertLess(closure().item(), initial_value)
@optims(optim_db, dtypes=[torch.float32])
def test_tensor_lr(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
for optim_input in all_optim_inputs:
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
weight_c = weight.clone().detach().requires_grad_(True)
bias = Parameter(torch.randn((10), device=device, dtype=dtype))
bias_c = bias.clone().detach().requires_grad_(True)
inpt = torch.randn(5, device=device, dtype=dtype)
kwargs = optim_input.kwargs
if "lr" in kwargs:
del kwargs["lr"]
kwargs["lr"] = 1.0 if optim_info.step_requires_closure else 1e-3
optimizer_r = optim_cls([weight, bias], **kwargs)
try:
kwargs["lr"] = torch.tensor(kwargs["lr"])
optimizer = optim_cls([weight_c, bias_c], **kwargs)
except ValueError as e:
self.assertRegex(str(e), ".*lr as a Tensor is not supported.*")
continue
def closure(optim, w, b, i):
optim.zero_grad()
loss = (w.mv(i) + b).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
w.grad = w.grad.to_sparse()
b.grad = b.grad.to_sparse()
return loss
for _ in range(5):
if optim_info.step_requires_closure:
optimizer_r.step(
functools.partial(closure, optimizer_r, weight, bias, inpt)
)
optimizer.step(
functools.partial(closure, optimizer, weight_c, bias_c, inpt)
)
else:
closure(optimizer_r, weight, bias, inpt)
closure(optimizer, weight_c, bias_c, inpt)
self.assertEqual(weight, weight_c)
self.assertEqual(bias, bias_c)
@parametrize("with_lrsched", [True, False])
@optims(
[o for o in optim_db if o.supports_sparse or o.only_supports_sparse_grads],
dtypes=[torch.float64],
)
def test_rosenbrock_sparse(self, device, dtype, optim_info, with_lrsched):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# Fused impls do not support sparse gradients
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable", "fused")
)
kwarg_updates, schedulers_constructors = optim_info.metadata_for_sparse
if with_lrsched and len(schedulers_constructors) == 0:
return
supported_inputs = []
if len(kwarg_updates) != 0:
seen = set()
for i in all_optim_inputs:
for k in kwarg_updates:
if k in i.kwargs:
del i.kwargs[k]
hashable_kwargs = tuple(sorted(i.kwargs.items()))
if len(i.kwargs) > 0 and hashable_kwargs not in seen:
supported_inputs.append(i)
seen.add(hashable_kwargs)
if "lr" in kwarg_updates:
i.kwargs["lr"] = kwarg_updates["lr"]
else:
supported_inputs = all_optim_inputs
for optim_input in supported_inputs:
kwargs = optim_input.kwargs
multi_tensor = kwargs.get("foreach", False)
# For rosenbrock tests, it is mandated that the param is a tensor with 2 numbers
if multi_tensor:
params_t = [
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5], dtype=dtype),
]
else:
params_t = [torch.tensor([1.5, 1.5])]
params = [Parameter(param_t) for param_t in params_t]
optimizer = optim_cls(params, **kwargs)
schedulers = [
s(optimizer) for s in (schedulers_constructors if with_lrsched else [])
]
if not optim_info.only_supports_sparse_grads:
params_c = [Parameter(param_t.clone()) for param_t in params_t]
optimizer_c = optim_cls(params_c, **kwargs)
schedulers_c = [
s(optimizer_c)
for s in (schedulers_constructors if with_lrsched else [])
]
solution = torch.tensor([1, 1])
with torch.no_grad():
initial_dist = sum(param.dist(solution) for param in params)
def get_grad(param, sparse_grad, w):
grad = drosenbrock(param)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
# Depending on w, provide only the x or y gradient
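                # ("Uncoalesced" = duplicate indices whose values sum on coalesce; e.g.
                # [x / 4.0, x - x / 4.0] at the same index coalesces back to x.)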
if sparse_grad:
if w:
i = torch.tensor([[0, 0]], dtype=torch.int64)
x = grad[0]
v = torch.tensor([x / 4.0, x - x / 4.0])
else:
i = torch.tensor([[1, 1]], dtype=torch.int64)
y = grad[1]
v = torch.tensor([y - y / 4.0, y / 4.0])
grad_out = torch.sparse_coo_tensor(i, v, (2,), dtype=v.dtype)
else:
if w:
grad_out = torch.tensor([grad[0], 0], dtype=param.dtype)
else:
grad_out = torch.tensor([0, grad[1]], dtype=param.dtype)
return grad_out
def eval(params, sparse_grad, w):
optimizer.zero_grad()
if multi_tensor:
loss = sum(rosenbrock(param) for param in params)
else:
loss = rosenbrock(params[0])
loss.backward()
grads_out = [get_grad(param, sparse_grad, w) for param in params]
with torch.no_grad():
params[0].grad = grads_out[0]
if multi_tensor:
params[1].grad = grads_out[1].to(dtype=dtype)
return loss
for i in range(1800):
# Do cyclic coordinate descent
w = i % 2
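                # w alternates 0/1 with i, so each step feeds only the y-component
                # (w == 0) or the x-component (w == 1) of the gradient from get_grad.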
optimizer.step(functools.partial(eval, params, True, w))
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params[0]))
else:
scheduler.step()
if not optim_info.only_supports_sparse_grads:
optimizer_c.step(functools.partial(eval, params_c, False, w))
for scheduler in schedulers_c:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params_c[0]))
else:
scheduler.step()
                # Tolerance is increased due to floating point error from the different
                # code path for the dense case: x vs. x - x / 4.0 + x / 4.0
self.assertEqual(params, params_c, atol=5e-6, rtol=5e-6)
if not kwargs.get("maximize", False):
self.assertLessEqual(
sum(param.dist(solution) for param in params), initial_dist
)
else:
self.assertGreaterEqual(
sum(rosenbrock(param) for param in params),
sum(rosenbrock(param_t) for param_t in params_t),
)
@skipMPS
@optims([o for o in optim_db if o.supports_complex], dtypes=[torch.complex64])
def test_complex(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# Also skip fused, since our fused kernels do not support complex
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable", "fused")
)
for optim_input in all_optim_inputs:
# Last param is intentionally real to test that we can mix real and complex
complex_params = [
torch.randn(10, 5, device=device, dtype=dtype, requires_grad=True),
torch.randn(10, device=device, dtype=dtype, requires_grad=True),
torch.randn(
10, 5, device=device, dtype=torch.float32, requires_grad=True
),
]
real_params = [
(
torch.view_as_real(param).detach().clone().requires_grad_()
if param.is_complex()
else param.detach().clone().requires_grad_()
)
for param in complex_params
]
complex_optimizer = optim_cls(complex_params, **optim_input.kwargs)
real_optimizer = optim_cls(real_params, **optim_input.kwargs)
real_steps = []
complex_steps = []
grads_losses = []
def real_closure():
for param in real_params:
grad = torch.randn_like(param)
param.grad = grad
real_steps.append(param.detach().clone())
grads_losses.append(grad.clone())
loss = torch.randn(1)
grads_losses.append(loss.clone())
return loss
def complex_closure():
for param in complex_params:
if torch.is_complex(param):
grad = torch.view_as_complex(grads_losses.pop(0))
complex_steps.append(torch.view_as_real_copy(param.detach()))
else:
grad = grads_losses.pop(0)
complex_steps.append(param.detach().clone())
param.grad = grad
return grads_losses.pop(0)
for _ in range(3):
if optim_info.step_requires_closure:
# LBFGS, for example, requires closure and calls it internally
real_optimizer.step(real_closure)
complex_optimizer.step(complex_closure)
else:
# For other optimizers, we call closure explicitly to set the gradients
real_closure()
complex_closure()
real_optimizer.step()
complex_optimizer.step()
# Final Parameters should be the same
complex_params_asreal = [
torch.view_as_real(param) if param.is_complex() else param
for param in complex_params
]
self.assertEqual(real_params, complex_params_asreal)
# All intermediate steps should also be the same
# also checks steps taken within for example a line search
self.assertEqual(complex_steps, real_steps)
@skipMPS
@optims([o for o in optim_db if o.supports_complex], dtypes=[torch.complex64])
def test_complex_2d(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# Also skip fused, since our fused kernels do not support complex
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable", "fused")
)
for optim_input in all_optim_inputs:
if optim_info.step_requires_closure:
# Why? The way we implement complex is by turning complex params into view_as_real
                # alternatives. For example, a size (M, N) tensor will become (M, N, 2). In this test,
# we break apart a tensor into its real and imaginary parts, which would be 2x(M,N).
# For other pointwise optimizers, this distinction is trivial, but for LBFGS where
# there are reductions across all parameters (and all the grads get flattened into
# one long Tensor), this ordering matters. Why? Reductions are not deterministic
# because addition between floating point numbers is not associative, i.e.,
# a + b + c != a + c + b. Thus, we add a seed here to control the discrepancy that
# will happen with LBFGS. Note that in test_complex above, there is no need for a seed
# nor for increased tolerance, because results should be bitwise equivalent.
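                # (Concretely, in Python floats (0.1 + 0.2) + 0.3 != 0.1 + (0.2 + 0.3),
                # which is exactly the kind of discrepancy such reductions can hit.)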
torch.manual_seed(2024)
a1 = torch.randn(2, device=device, dtype=dtype, requires_grad=True)
a1_real = a1.real.clone().detach()
a1_imag = a1.imag.clone().detach()
a1_real.requires_grad_()
a1_imag.requires_grad_()
optim1 = optim_cls([a1], **optim_input.kwargs)
optim2 = optim_cls([a1_real, a1_imag], **optim_input.kwargs)
a1_reals = TensorTracker()
a1_imags = TensorTracker()
a1_grad_reals = TensorTracker()
a1_grad_imags = TensorTracker()
losses = TensorTracker()
def closure1():
optim1.zero_grad()
loss = rosenbrock(a1).abs()
loss.backward()
# Track clones to best test accuracy
a1_reals.add(a1.real)
a1_imags.add(a1.imag)
a1_grad_reals.add(a1.grad.real)
a1_grad_imags.add(a1.grad.imag)
losses.add(loss)
return loss
def closure2():
optim2.zero_grad()
a1_reals.pop_check_set(a1_real, self)
a1_imags.pop_check_set(a1_imag, self)
a2 = torch.complex(a1_real, a1_imag)
loss = rosenbrock(a2).abs()
losses.pop_check_set(loss, self)
loss.backward()
a1_grad_reals.pop_check_set(a1_real.grad, self)
a1_grad_imags.pop_check_set(a1_imag.grad, self)
return loss
for _ in range(3):
if optim_info.step_requires_closure:
# LBFGS, for example, requires closure and calls it internally
optim1.step(closure1)
optim2.step(closure2)
else:
closure1()
closure2()
optim1.step()
optim2.step()
self.assertEqual(a1.real, a1_real)
self.assertEqual(a1.imag, a1_imag)
self.assertTrue(a1_reals.all_popped())
self.assertTrue(a1_imags.all_popped())
self.assertTrue(a1_grad_reals.all_popped())
self.assertTrue(a1_grad_imags.all_popped())
self.assertTrue(losses.all_popped())
def _compare_between(
self, inputs, models, optimizers, assert_eq_kwargs=None, assert_step_dtype=None
):
# why 7? iteration 7 is where we start to see differences for RAdam
# params interacting with the small eps value, because that's right
# after rho_t becomes greater than 5 in step 6.
if assert_eq_kwargs is None:
assert_eq_kwargs = {}
kIterations = 7
tracker = TensorTracker(assert_eq_kwargs)
for i in range(kIterations):
state, updated_params = [], []
if not isinstance(inputs, list):
inputs = [inputs, inputs]
for input, model, optimizer in zip(inputs, models, optimizers):
optimizer.zero_grad()
if i == 3:
                    # Freeze a layer to test whether this layer's step in 'fused' or
                    # 'foreach' is the same as its step in 'forloop'.
model[2].requires_grad_(False)
if i == 5:
# Unfreeze the layer after 2 iters.
model[2].requires_grad_(True)
# Test that step behaves as expected (a no-op) when grads are set to None
if i != 2:
output = model(input)
loss = output.sum()
loss.backward()
optimizer.step()
state.append(optimizer.state)
updated_params.append(model.parameters())
og_state, new_state = state
for og_p, new_p in zip(updated_params[0], updated_params[1]):
tracker.add(og_p)
tracker.pop_check_set(new_p, self)
# check that optimizer states are the same
og_p_state = og_state[og_p]
new_p_state = new_state[new_p]
if assert_step_dtype is not None:
if torch.is_tensor(og_p_state.get("step", None)):
self.assertEqual(og_p_state["step"].dtype, assert_step_dtype)
if torch.is_tensor(new_p_state.get("step", None)):
self.assertEqual(new_p_state["step"].dtype, assert_step_dtype)
for k in og_p_state:
tracker.add(og_p_state[k])
tracker.pop_check_set(new_p_state[k], self)
self.assertTrue(tracker.all_popped())
def _test_derived_optimizers(
self,
device,
dtype,
optim_info,
flag,
reduced_precision=False,
assert_step_dtype=None,
):
"""
Given a flag 'fused' or 'foreach', test for parity of optimizer state
and updated parameters between when the flag is set to True and False
for provided optimizer configurations.
"""
assert flag in ("foreach", "fused")
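        # For orientation, a hedged sketch of how the forloop and foreach variants differ
        # for a plain SGD-style update (not the exact library code, just the shape of it):
        #
        #     # forloop / single tensor: one op per parameter
        #     for p in params:
        #         p.add_(p.grad, alpha=-lr)
        #
        #     # foreach / multi tensor: one horizontally fused op over the whole list
        #     torch._foreach_add_(params, [p.grad for p in params], alpha=-lr)
        #
        # Fused impls additionally fuse the whole update into a single kernel where
        # supported; this helper compares whichever flag is passed against the forloop
        # ground truth via _compare_between above.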
assert_eq_kwargs = {} if not reduced_precision else FP16_REDUCED_PRECISION
optim_inputs = optim_info.optim_inputs_func(device=device, dtype=dtype)
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
models, optimizers = [], []
kwargs = deepcopy(optim_input.kwargs)
if kwargs.get("capturable", False) and _get_device_type(device) == "cpu":
# capturable is not supported on CPU
continue
for flag_value in (False, True):
kwargs[flag] = flag_value
input = torch.tensor(
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=dtype, device=device
).reshape(3, 2)
torch.manual_seed(1)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
)
model.to(dtype=dtype, device=device)
                # foreach/fused optimizers should be tested with a
                # zero-size tensor as their last param.
# ref: https://github.com/pytorch/pytorch/issues/100701
empty_param = torch.empty(
(), device=device, dtype=dtype, requires_grad=True
)
empty_param.grad = torch.rand_like(empty_param)
params = list(model.parameters()) + [empty_param]
optimizer = optim_cls(params, **kwargs)
models.append(model)
optimizers.append(optimizer)
self._compare_between(
input, models, optimizers, assert_eq_kwargs, assert_step_dtype
)
@skipMPS # MPS doesn't support torch.float64, see https://github.com/pytorch/pytorch/issues/115350
@optims(
[optim for optim in optim_db if "foreach" in optim.supported_impls],
dtypes=[torch.float64],
)
def test_foreach_matches_forloop(self, device, dtype, optim_info):
self._test_derived_optimizers(device, dtype, optim_info, "foreach")
@onlyCUDA
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@parametrize("impl", ["foreach", "fused"])
@optims(
[
optim
for optim in optim_db
if "foreach" in optim.supported_impls or "fused" in optim.supported_impls
]
)
def test_mixed_device_dtype(self, device, dtype, optim_info, impl):
"""
Similar in essence to _test_derived_optimizers above. The main difference is that
_test_derived_optimizers uses model parameters whereas we randomly pass in
parameters of different dtypes and devices here. We need multiple GPUs (vs just a
CPU and GPU) because fused adam only works on GPUs. (Thus we only run the tests
that call into this helper when TEST_MULTIGPU.)
"""
assert impl in ("foreach", "fused")
if impl == "foreach" and "foreach" not in optim_info.supported_impls:
            self.skipTest(
                f"foreach not supported for {optim_info.optim_cls.__name__}"
            )
        elif impl == "fused" and "cuda" not in optim_info.supports_fused_on:
            self.skipTest(
                f"fused not supported for {optim_info.optim_cls.__name__} on cuda"
            )
params = [
torch.rand(2, 3, dtype=torch.float64, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float64, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device="cuda:1", requires_grad=True),
torch.randint(
1024, (2, 3), dtype=torch.int64, device="cuda:1", requires_grad=False
),
]
for p in params:
if p.requires_grad:
p.grad = torch.rand_like(p, device=p.device, dtype=p.dtype)
kIterations = 7 if impl == "foreach" else 1
optim_inputs = optim_info.optim_inputs_func(device=device)
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
updated_params, state = [], []
kwargs = deepcopy(optim_input.kwargs)
if kwargs.get("capturable", False) and _get_device_type(device) == "cpu":
# capturable is not supported on CPU
continue
for use_impl in (False, True):
kwargs[impl] = use_impl
params_clone = []
for p in params:
p_clone = p.clone().detach()
if p.requires_grad:
p_clone.requires_grad = True
p_clone.grad = p.grad.clone().detach()
params_clone.append(p_clone)
optimizer = optim_cls(params_clone, **kwargs)
for _ in range(kIterations):
optimizer.step()
state.append(optimizer.state)
updated_params.append(params_clone)
og_state, new_state = state
for og_p, new_p in zip(updated_params[0], updated_params[1]):
# Increasing the tolerance as we are collating lots of ops together for optimizers and
# the designated tolerances are for single op only.
single_rtol, single_atol = torch.testing._comparison.get_tolerances(
new_p.dtype, rtol=None, atol=None
)
rtol = 5 * single_rtol
atol = 5 * single_atol
self.assertEqual(og_p, new_p, rtol=rtol, atol=atol)
# check that optimizer states are the same
og_p_state = og_state[og_p]
new_p_state = new_state[new_p]
for k in og_p_state:
actual = new_p_state[k]
self.assertEqual(og_p_state[k], actual, rtol=rtol, atol=atol)
@onlyCUDA
@optims(
[optim for optim in optim_db if "foreach" in optim.supported_impls],
dtypes=[torch.float64],
)
def test_set_default_dtype_works_with_foreach(self, device, dtype, optim_info):
# https://github.com/pytorch/pytorch/issues/110940
# We coerce step to always be float32 unless the
# default dtype is higher prec float64
old_default_dtype = torch.get_default_dtype()
for default_dtype in [torch.float64, torch.float16]:
try:
torch.set_default_dtype(default_dtype)
self._test_derived_optimizers(
device,
dtype,
optim_info,
"foreach",
reduced_precision=default_dtype == torch.float16,
assert_step_dtype=(
torch.float64
if default_dtype == torch.float64
else torch.float32
),
)
finally:
torch.set_default_dtype(old_default_dtype)
@onlyCUDA
@largeTensorTest("72GB", "cuda")
@optims(
[optim for optim in optim_db if "foreach" in optim.supported_impls],
dtypes=[torch.float16],
)
def test_foreach_large_tensor(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
params = [torch.ones(2**32, device=device, dtype=dtype)]
params[0].grad = torch.zeros_like(params[0])
optimizer = optim_cls(params, foreach=True, **optim_input.kwargs)
optimizer.step()
@onlyCUDA
@optims(
[optim for optim in optim_db if "foreach" in optim.supported_impls],
dtypes=[torch.float32],
)
def test_peak_memory_foreach(self, device, dtype, optim_info):
nparams = 10
optim_inputs = optim_info.optim_inputs_func(device=device)
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
kwargs = deepcopy(optim_input.kwargs)
max_mems = []
for flag_value in (False, True):
kwargs["foreach"] = flag_value
                # The 16 * 8 = 128 element count is critical here! Our CUDACachingAllocator allocates
                # in blocks of 512 bytes, meaning any tensor that occupies <512 bytes of memory will
                # allocate a whole 512 bytes anyway. We use 128 elements (float32 is 4 bytes each) so
                # that each param is exactly 512 bytes, making our later calculations for
                # intermediate_size easy.
param = torch.rand(16, 8, device=device, dtype=dtype)
params = [torch.rand_like(param) for _ in range(nparams)]
optimizer = optim_cls(params, **kwargs)
for p in params:
p.grad = torch.rand_like(p)
optimizer.step()
import gc
gc.collect()
torch.cuda.reset_peak_memory_stats()
optimizer.step()
gc.collect()
max_mems.append(torch.cuda.max_memory_allocated())
st_max_mem, mt_max_mem = max_mems
intermediate_size = nparams * param.nelement() * param.element_size()
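            # For fp32 here: 10 params * 128 elements * 4 bytes = 5120 bytes, i.e. the cost
            # of materializing one param-sized intermediate across the whole param list.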
nintermediates = 1 # we expect a budget of 1 intermediate most of the time
# Check the param group directly to handle if the compiler set capturable
if optimizer.param_groups[0].get(
"capturable", False
) or optim_cls.__name__ in ["Adadelta", "ASGD", "RAdam"]:
# with capturable in Adam(W), we have 2 extra intermediates for the bias_corrections
# with Adadelta, we have 2 extra for (acc_delta + eps) and (square_avg + eps)
# ASGD allocates axs, 2x mus, 2x etas, and grads at the same time
nintermediates = 3
if optim_cls.__name__ == "NAdam":
# with capturable in NAdam, we have 3 extra intermediates for the
# bias_correction, mus, and mu_nexts
if TEST_WITH_TORCHDYNAMO:
# With dynamo, the eager/FX backend appears to hold memory longer than
# vanilla eager: https://github.com/pytorch/pytorch/issues/125511
nintermediates = 8
else:
nintermediates = 5
if optim_cls.__name__ == "RAdam":
# RAdam has four intermediates with capturable
# num, unrect_step_size, buffer, grouped_grads
if TEST_WITH_TORCHDYNAMO:
                    # With dynamo, the eager/FX backend appears to hold memory longer than
# vanilla eager: https://github.com/pytorch/pytorch/issues/125511
nintermediates = 6
else:
nintermediates = 4
elif optim_cls.__name__ in ["NAdam", "Adagrad", "RMSprop", "Adafactor"]:
# NAdam uses two intermediates at the same time (grads & exp_avg_sq_sqrt)
# Adagrad uses std and grads at the same time
# RMSprop uses avg and grads
# Adafactor uses row/col var and its mean
nintermediates = 2
if optim_cls.__name__ == "Adafactor" and kwargs.get("maximize", False):
# When maximize is True, Adafactor also tracks device_grad
nintermediates = 3
            # With Dynamo, the single tensor (ST) impl uses less memory than eager for
            # Adam/Adagrad/NAdam/RAdam, which makes the foreach memory check fail
if TEST_WITH_TORCHDYNAMO:
st_max_mem += 6000
expected_max_mem = st_max_mem + intermediate_size * nintermediates
# hipcc currently can't generate efficient code for the small buffer optimization
# code path (see Note [small buffer optimization] for details), thus we always
# dynamically allocate the tensor metadata for ROCM. Adjusting the expected max
# memory usage to account for this.
if TEST_WITH_ROCM:
expected_max_mem *= 1.02
self.assertLessEqual(mt_max_mem, expected_max_mem)
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=floating_types_and(
torch.bfloat16,
torch.float16,
),
)
def test_fused_matches_forloop(self, device, dtype, optim_info):
if _get_device_type(device) not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
if _get_device_type(device) == "mps" and dtype not in (
torch.float16,
torch.float32,
):
self.skipTest("MPS supports only torch.float16 and torch.float32")
self._test_derived_optimizers(device, dtype, optim_info, "fused")
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=(torch.float32,),
)
def test_fused_error_on_params_on_meta(self, device, dtype, optim_info):
if _get_device_type(device) not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
with torch.device("meta"):
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
).to(dtype)
optimizer = optim_info.optim_cls(model.parameters(), fused=True)
with torch.device("meta"):
for p in model.parameters():
p.grad = torch.rand_like(p)
with self.assertRaisesRegex(
RuntimeError,
"`fused=True` requires all the params to be floating point Tensors",
):
optimizer.step()
optimizer.zero_grad(set_to_none=True)
model.to_empty(device=device)
for p in model.parameters():
p.grad = torch.rand_like(p)
optimizer.step()
@onlyNativeDeviceTypes
@largeTensorTest("64GB")
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=[torch.float16],
)
def test_fused_large_tensor(self, device, dtype, optim_info):
if device not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device=device)
for optim_input in optim_inputs:
params = [torch.ones(2**32, device=device, dtype=dtype)]
params[0].grad = torch.zeros_like(params[0])
optimizer = optim_cls(params, fused=True, **optim_input.kwargs)
optimizer.step()
@onlyCUDA
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=[torch.float32],
)
def test_fused_does_not_step_if_foundinf(self, device, dtype, optim_info):
if device not in optim_info.supports_fused_on:
self.skipTest(
f"{device} is not supported for fused on {optim_info.optim_cls.__name__}"
)
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device=device)
num_params = 5
for optim_input in optim_inputs:
for no_grad_scale in (False, True):
params = [
torch.ones((1,), device=device, dtype=dtype)
for _ in range(num_params)
]
params_c = [param.clone().detach() for param in params]
for p in params:
p.grad = torch.ones_like(p)
optimizer = optim_cls(params, fused=True, **optim_input.kwargs)
optimizer.grad_scale = (
None
if no_grad_scale
else torch.ones((1,), dtype=dtype, device=device)
)
optimizer.found_inf = torch.ones((), dtype=dtype, device=device)
optimizer.step()
for p in params:
if "step" in optimizer.state[p]:
self.assertEqual(
torch.zeros((), dtype=dtype, device=device),
optimizer.state[p]["step"],
)
self.assertEqual(params, params_c)
@parametrize("impl", ["fused", "capturable"])
@optims(
[optim for optim in optim_db if "fused" in optim.supported_impls],
dtypes=[torch.float32],
)
def test_cpu_load_state_dict(self, device, dtype, impl, optim_info):
# NOTE: This SIMULATES a fused/capturable optimizer with state moved to CPU, issue 103256
        # How do we get there? Users typically train CUDA models with fused optimizers, save
        # checkpoints, and later load them onto CPU (since CUDA memory is limited) with
        # torch.load(..., map_location="cpu").
# Since this is a unit test, it is more expedient to simulate what the state_dict
# would look like, which is basically CPU tensors with fused/capturable flag = True.
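        # A hedged sketch of the user flow being simulated (paths are illustrative):
        #
        #     torch.save(cuda_optim.state_dict(), "ckpt.pt")
        #     sd = torch.load("ckpt.pt", map_location="cpu")
        #     new_cuda_optim.load_state_dict(sd)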
optim_cls = optim_info.optim_cls
opt_name = optim_cls.__name__
if opt_name in ("SGD", "Adagrad") and impl == "capturable":
# Capturable SGD/Adagrad does not exist
            self.skipTest(f"{opt_name} does not currently support capturable")
if _get_device_type(device) == "cpu":
self.skipTest("Test is only for non-cpu devices")
elif (
impl == "fused"
and _get_device_type(device) not in optim_info.supports_fused_on
):
self.skipTest(f"{device} is not supported for fused on {opt_name}")
elif impl == "capturable" and _get_device_type(device) == "mps":
self.skipTest("MPS does not support capturable")
cpu_optim_inputs = optim_info.optim_inputs_func(device="cpu")
for optim_input in cpu_optim_inputs:
param = torch.tensor([0.1, 0.2], dtype=dtype, device="cpu")
optimizer = optim_cls([param], **optim_input.kwargs)
param.grad = torch.rand_like(param)
optimizer.step()
optim_state_dict_cpu = deepcopy(optimizer.state_dict())
optim_state_dict_cpu["param_groups"][0][impl] = True
# load
optim_input.kwargs[impl] = True
param_device = param.clone().detach().to(device=device)
optimizer_device = optim_cls([param_device], **optim_input.kwargs)
optimizer_device.load_state_dict(optim_state_dict_cpu)
optimizer_device.zero_grad()
param_device.grad = torch.rand_like(param_device)
optimizer_device.step()
@optims(optim_db, dtypes=[torch.float32])
def test_param_groups_weight_decay(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
for optim_input in all_optim_inputs:
weight_kwargs = optim_input.kwargs
bias_kwargs = deepcopy(optim_input.kwargs)
bias_kwargs["weight_decay"] = 0.0
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
bias = Parameter(torch.randn((10), device=device, dtype=dtype))
input = torch.randn(5, device=device, dtype=dtype)
optimizer = optim_cls(
[
dict(params=[weight], **weight_kwargs),
dict(params=[bias], **bias_kwargs),
]
)
loss = (weight.mv(input) + bias).pow(2).sum()
initial_value = loss.item()
for _ in range(20):
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
optimizer.step()
# Test that the direction of loss moved appropriately
if optim_input.kwargs.get("maximize", False):
self.assertGreater(loss.item(), initial_value)
else:
self.assertLess(loss.item(), initial_value)
@optims(optim_db, dtypes=[torch.float32])
def test_param_groups_lr(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
for optim_input in all_optim_inputs:
# optim_input.kwargs will be the param group kwargs, which should have >0 lr
if "lr" not in optim_input.kwargs or optim_input.kwargs["lr"] == 0:
optim_input.kwargs["lr"] = 1e-3
outer_kwargs = {"lr": 1e-28}
if optim_cls.__name__ == "Rprop":
# Allow min step size to be 0
outer_kwargs["step_sizes"] = (0, 50)
weight = Parameter(torch.randn((10, 5), device=device, dtype=dtype))
bias = Parameter(torch.randn((10), device=device, dtype=dtype))
irrelevant = Parameter(torch.randn(2, device=device, dtype=dtype))
irrelevant_clone = irrelevant.clone()
input = torch.randn(5, device=device, dtype=dtype)
optimizer = optim_cls(
[
dict(params=[weight, bias], **optim_input.kwargs),
dict(params=[irrelevant]),
],
**outer_kwargs,
)
loss = (weight.mv(input) + bias).pow(2).sum()
initial_value = loss.item()
for _ in range(20):
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
irrelevant.grad = torch.rand_like(irrelevant)
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
irrelevant.grad = irrelevant.grad.to_sparse()
optimizer.step()
# Test that the direction of loss moved appropriately
if optim_input.kwargs.get("maximize", False):
self.assertGreater(loss.item(), initial_value)
else:
self.assertLess(loss.item(), initial_value)
# Test that irrelevant parameters were not updated since lr was almost 0
self.assertEqual(irrelevant, irrelevant_clone)
@optims(optim_db, dtypes=[torch.float32])
def test_step_is_noop_when_params_have_no_grad(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
params = [
torch.randn(2, 3, requires_grad=False, device=device, dtype=dtype)
for _ in range(2)
]
old_params = [p.clone().detach() for p in params]
def closure():
return torch.tensor([1], device=device, dtype=dtype)
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
optimizer.step(closure)
@optims(optim_db, dtypes=[torch.float32])
def test_step_is_noop_for_zero_grads(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
param = torch.randn((5, 1), device=device, dtype=dtype, requires_grad=True)
old_param = param.clone().detach()
def closure():
return torch.tensor([1], device=device, dtype=dtype)
for optim_input in all_optim_inputs:
kwargs = optim_input.kwargs
# params will decay even if grads are empty if weight_decay != 0,
# and capturable doesn't work for CPU tensors
if kwargs.get("weight_decay", 0) != 0:
continue
            # AdamW's default decoupled weight_decay (1e-2) updates params even with zero grads;
            # that update scales with lr, so make lr smaller
if optim_cls.__name__ == "AdamW":
kwargs["lr"] = (
torch.tensor(1e-5)
if isinstance(kwargs.get("lr", 1e-5), torch.Tensor)
else 1e-5
)
if kwargs.get("differentiable", False):
params = [param.clone()]
else:
params = [param]
optimizer = optim_cls(params, **kwargs)
if optim_info.only_supports_sparse_grads:
# Intentionally construct a multidimensional empty v for the sparse grad
# Single dim v passes the test while multidim correctly repros the issue
# https://github.com/pytorch/pytorch/issues/82486
i = torch.empty((1, 0), device=device, dtype=dtype)
v = torch.empty((0, 1), device=device, dtype=dtype)
params[0].grad = torch.sparse_coo_tensor(
i, v, (5, 1), device=device, dtype=dtype
)
else:
params[0].grad = torch.zeros_like(params[0])
optimizer.step(closure)
self.assertEqual(old_param, params[0])
@optims(optim_db, dtypes=[torch.float32])
def test_optimizer_can_be_printed(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
params = [
Parameter(torch.randn(2, 3, requires_grad=True, device=device, dtype=dtype))
for _ in range(2)
]
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
optimizer.__repr__()
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_deterministic(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
weight = Parameter(
torch.randn(2, 3, requires_grad=True, device=device, dtype=dtype)
)
bias = Parameter(torch.randn(2, requires_grad=True, device=device, dtype=dtype))
input = torch.randn(3, requires_grad=True, device=device, dtype=dtype)
params = [weight, bias]
def fwd_bwd(optim, w, b, i):
optim.zero_grad()
loss = (w.mv(i) + b).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
if w.grad is not None:
w.grad = w.grad.to_sparse()
if b.grad is not None:
b.grad = b.grad.to_sparse()
return loss
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
closure = functools.partial(fwd_bwd, optimizer, weight, bias, input)
# Prime the optimizer
for _ in range(10):
if optim_info.step_requires_closure:
optimizer.step(closure)
else:
closure()
optimizer.step()
# Clone the weights and construct a new optimizer for them
with torch.no_grad():
weight_c = Parameter(weight.clone())
bias_c = Parameter(bias.clone())
optimizer_c = optim_cls([weight_c, bias_c], **optim_input.kwargs)
closure_c = functools.partial(fwd_bwd, optimizer_c, weight_c, bias_c, input)
# Load the state dict from the original optimizer into the new one
optimizer_c.load_state_dict(deepcopy(optimizer.state_dict()))
# Run both optimizers in parallel
for _ in range(10):
if optim_info.step_requires_closure:
optimizer.step(closure)
optimizer_c.step(closure_c)
else:
closure()
closure_c()
optimizer.step()
optimizer_c.step()
self.assertEqual(weight, weight_c)
self.assertEqual(bias, bias_c)
# Make sure state dict is deterministic with equal (not identical) parameters
self.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
# Make sure repeated parameters have identical representation (see #36831)
optimizer_c.param_groups.extend(optimizer_c.param_groups)
self.assertEqual(
optimizer.state_dict()["param_groups"][-1],
optimizer_c.state_dict()["param_groups"][-1],
)
@optims(optim_db, dtypes=[torch.float32])
def test_can_load_older_state_dict(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
for optim_input in all_optim_inputs:
torch.manual_seed(1)
model = torch.nn.Sequential(
torch.nn.Conv2d(4, 2, 1, stride=2),
torch.nn.BatchNorm2d(2, eps=1e-05, momentum=0.1),
)
model.to(dtype=dtype, device=device)
input = torch.rand(1, 4, 16, 16, device=device, dtype=dtype)
optimizer = optim_cls(model.parameters(), **optim_input.kwargs)
def fwd_bwd(optim, mod, i):
optim.zero_grad()
loss = mod(i).sum()
loss.backward()
return loss
for _ in range(3):
if optim_info.step_requires_closure:
optimizer.step(functools.partial(fwd_bwd, optimizer, model, input))
else:
fwd_bwd(optimizer, model, input)
optimizer.step()
# old_state_dict has all new flags del'd
old_state_dict = deepcopy(optimizer.state_dict())
old_state_dict_pg = old_state_dict["param_groups"]
for group in old_state_dict_pg:
for flag in optim_info.not_og_supported_flags:
if flag in group:
del group[flag]
optimizer.load_state_dict(old_state_dict)
# Make sure we can still step
if optim_info.step_requires_closure:
optimizer.step(functools.partial(fwd_bwd, optimizer, model, input))
else:
fwd_bwd(optimizer, model, input)
optimizer.step()
@optims(optim_db, dtypes=[torch.float32])
def test_save_load_equality_with_weights_only(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
weight = Parameter(
torch.randn(2, 3, requires_grad=True, device=device, dtype=dtype)
)
bias = Parameter(torch.randn(2, requires_grad=True, device=device, dtype=dtype))
input = torch.randn(3, requires_grad=True, device=device, dtype=dtype)
params = [weight, bias]
def fwd_bwd(optim, w, b, i):
optim.zero_grad()
loss = (w.mv(i) + b).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
closure = functools.partial(fwd_bwd, optimizer, weight, bias, input)
# Prime the optimizer
for _ in range(3):
optimizer.step(closure)
sd = optimizer.state_dict()
# === Check saved/loaded state_dict are the same (including weights_only load). ===
with tempfile.TemporaryFile() as f:
torch.save(sd, f)
f.seek(0)
sd_copy = torch.load(f)
self.assertEqual(sd_copy, sd)
del sd_copy
f.seek(0)
sd_copy_wo = torch.load(f, weights_only=True)
self.assertEqual(sd_copy_wo, sd)
@optims(optim_db, dtypes=[torch.float32])
def test_load_nontensor_step(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
params = [
Parameter(torch.randn(2, 3, device=device, dtype=dtype)) for _ in range(2)
]
for p in params:
p.grad = torch.rand_like(p)
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
p.grad = p.grad.to_sparse()
# Needed for second order optims like LBFGS
closure_loss = torch.rand(1, device=device, dtype=dtype)
def closure():
return closure_loss if optim_info.step_requires_closure else None
for optim_input in all_optim_inputs:
kwargs = optim_input.kwargs
optimizer = optim_cls(params, **optim_input.kwargs)
for _ in range(3):
optimizer.step(closure)
state_dict = deepcopy(optimizer.state_dict())
for p_state in state_dict["state"].values():
if "step" in p_state and torch.is_tensor(p_state["step"]):
p_state["step"] = p_state["step"].item()
optimizer.load_state_dict(state_dict)
optimizer.step(closure)
@onlyCUDA
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_with_cuda_params(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# We limit our configs to CPU only, because we will be moving them to CUDA later
cpu_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
"cpu", dtype, optim_info, skip=("differentiable",)
)
# Needed for second order optims like LBFGS
closure_loss = torch.rand(1, device=device, dtype=dtype)
def closure():
return closure_loss if optim_info.step_requires_closure else None
for optim_input in cpu_optim_inputs:
if (
"fused" in optim_input.kwargs
and "cuda" not in optim_info.supports_fused_on
):
self.skipTest(
f"cuda is not supported for fused on {optim_cls.__name__}"
)
params = [
Parameter(torch.randn(2, 3, device="cpu", dtype=dtype))
for _ in range(2)
]
for p in params:
p.grad = torch.randn_like(p)
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
p.grad = p.grad.to_sparse()
optimizer = optim_cls(params, **optim_input.kwargs)
for _ in range(3):
optimizer.step(closure)
with torch.no_grad():
params_cuda = [p.to(device="cuda") for p in params]
for i, p in enumerate(params_cuda):
p.grad = params[i].grad.to(device="cuda")
optimizer_cuda = optim_cls(params_cuda, **optim_input.kwargs)
state_dict_cpu = deepcopy(optimizer.state_dict())
state_dict_cuda = deepcopy(optimizer.state_dict())
optimizer_cuda.load_state_dict(state_dict_cuda)
# Make sure state_dict_cuda isn't modified by merely calling load_state_dict
self.assertEqual(state_dict_cpu, state_dict_cuda)
# Make sure that device of state['step'] is still CPU _unless_ torch.compile() added a capturable!
capturable = state_dict_cpu["param_groups"][0].get("capturable", False)
fused = state_dict_cpu["param_groups"][0].get("fused", False)
new_state_dict = optimizer_cuda.state_dict()
for state_cpu, state_cuda in zip(
state_dict_cpu["state"].values(), new_state_dict["state"].values()
):
if "step" in state_cpu and torch.is_tensor(state_cpu["step"]):
self.assertEqual(
state_cuda["step"].device.type,
"cuda" if capturable or fused else "cpu",
)
for _ in range(5):
optimizer.step(closure)
optimizer_cuda.step(closure)
self.assertEqual(params, params_cuda)
self.assertEqual(optimizer.state_dict(), optimizer_cuda.state_dict())
@staticmethod
def _state_dict_pre_hook(optimizer: Optimizer) -> None:
optimizer.state["test"] = 1
@staticmethod
def _state_dict_post_hook(
optimizer: Optimizer, state_dict: Dict[str, Any]
) -> Dict[str, Any]:
if "test" in state_dict["state"]:
state_dict["state"].pop("test")
state_dict["ran_state_dict_pre_hook"] = True
else:
state_dict["ran_state_dict_pre_hook"] = False
return state_dict
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_pre_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_state_dict_pre_hook(self.__class__._state_dict_pre_hook)
state_dict = optim.state_dict()
self.assertEqual(state_dict["state"]["test"], 1)
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_state_dict_post_hook(self.__class__._state_dict_post_hook)
state_dict = optim.state_dict()
self.assertFalse(state_dict["ran_state_dict_pre_hook"])
@optims(optim_db, dtypes=[torch.float32])
def test_state_dict_pre_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_state_dict_pre_hook(self.__class__._state_dict_pre_hook)
optim.register_state_dict_post_hook(self.__class__._state_dict_post_hook)
state_dict = optim.state_dict()
self.assertFalse("test" in state_dict["state"])
self.assertTrue(state_dict["ran_state_dict_pre_hook"])
@staticmethod
def _load_state_dict_pre_hook1(
optimizer: Optimizer, state_dict: Dict[str, Any]
) -> None:
state_dict["param_groups"][0]["lr"] = 0.002
@staticmethod
def _load_state_dict_pre_hook2(
optimizer: Optimizer, state_dict: Dict[str, Any]
) -> Dict[str, Any]:
# The typical use case for returning a state dict is to drastically modify the state dict.
# I will simulate by simply making a deep copy and ensuring that my_state_dict still gets used
my_state_dict = deepcopy(state_dict)
my_state_dict["param_groups"][0]["lr"] = 0.003
return my_state_dict
@staticmethod
def _load_state_dict_post_hook(optimizer: Optimizer) -> None:
optimizer.state["ran_load_state_dict_pre_hook2"] = (
optimizer.param_groups[0]["lr"] == 0.003
)
optimizer.state["ran_load_state_dict_post_hook"] = True
@optims(optim_db, dtypes=[torch.float32])
def test_load_state_dict_pre_hook_and_prepend(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
state_dict = optim.state_dict()
            # usually one would load into a new optim instance here, but reusing the same one works for this test
optim.register_load_state_dict_pre_hook(
self.__class__._load_state_dict_pre_hook1
)
optim.load_state_dict(state_dict)
self.assertEqual(optim.param_groups[0]["lr"], 0.002)
optim.register_load_state_dict_pre_hook(
self.__class__._load_state_dict_pre_hook2, prepend=True
)
optim.load_state_dict(state_dict)
# If prepend were False would be 0.003 but since prepend is True, the other hook overrides
self.assertEqual(optim.param_groups[0]["lr"], 0.002)
@optims(optim_db, dtypes=[torch.float32])
def test_load_state_dict_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_load_state_dict_post_hook(
self.__class__._load_state_dict_post_hook
)
optim.load_state_dict(optim.state_dict())
self.assertFalse(optim.state["ran_load_state_dict_pre_hook2"])
self.assertTrue(optim.state["ran_load_state_dict_post_hook"])
@optims(optim_db, dtypes=[torch.float32])
def test_load_state_dict_pre_post_hook(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
param = torch.rand(2, 3, device=device, dtype=dtype, requires_grad=True)
optim = optim_cls([param], **optim_input.kwargs)
optim.register_load_state_dict_pre_hook(
self.__class__._load_state_dict_pre_hook2
)
optim.register_load_state_dict_post_hook(
self.__class__._load_state_dict_post_hook
)
optim.load_state_dict(optim.state_dict())
self.assertTrue(optim.state["ran_load_state_dict_pre_hook2"])
self.assertTrue(optim.state["ran_load_state_dict_post_hook"])
@optims(optim_db, dtypes=[torch.float32])
def test_step_post_hook(self, device, dtype, optim_info):
def post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
data = 2
hook_handle = optim.register_step_post_hook(post_hook)
optim.step(closure)
optim.step(closure)
# check if post hooks were registered
self.assertEqual(data, 6)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
optim.step(closure)
self.assertEqual(data, 6)
@optims(optim_db, dtypes=[torch.float32])
def test_step_pre_hook(self, device, dtype, optim_info):
def pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
data = 5
hook_handle = optim.register_step_pre_hook(pre_hook)
optim.step(closure)
optim.step(closure)
# check if pre hooks were registered
self.assertEqual(data, 9)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
optim.step(closure)
self.assertEqual(data, 9)
@optims(optim_db, dtypes=[torch.float32])
def test_step_all_hooks(self, device, dtype, optim_info):
def global_pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(0)
def global_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(5)
def local_pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(1)
def local_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(2)
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
optim2 = SGD(params)
data = []
# register global hooks to both optimizers
global_pre_handle = register_optimizer_step_pre_hook(global_pre_hook)
global_post_handle = register_optimizer_step_post_hook(global_post_hook)
# register local hooks
first_pre_handle = optim.register_step_pre_hook(local_pre_hook)
first_post_handle = optim.register_step_post_hook(local_post_hook)
second_pre_handle = optim2.register_step_pre_hook(local_pre_hook)
second_post_handle = optim2.register_step_post_hook(local_post_hook)
optim.step(closure)
self.assertListEqual(data, [0, 1, 2, 5])
optim2.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5])
optim.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
# remove all hooks
global_pre_handle.remove()
global_post_handle.remove()
first_pre_handle.remove()
first_post_handle.remove()
second_pre_handle.remove()
second_post_handle.remove()
optim.step(closure)
optim2.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
@optims(optim_db, dtypes=[torch.float32])
def test_deepcopy_copies_all_public_attrs(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable",)
)
params = [
Parameter(torch.randn(2, 3, device=device, dtype=dtype)) for _ in range(2)
]
for p in params:
p.grad = torch.rand_like(p)
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
p.grad = p.grad.to_sparse()
# Needed for second order optims like LBFGS
def closure():
return 1 if optim_info.step_requires_closure else None
def getPublicAttrs(obj):
return {k for k in obj.__dict__ if not k.startswith("_")}
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
# Make some state
for _ in range(3):
if optim_info.step_requires_closure:
optimizer.step(closure)
else:
closure()
optimizer.step()
self.assertEqual(
getPublicAttrs(optimizer), getPublicAttrs(deepcopy(optimizer))
)
@optims(
[optim for optim in optim_db if optim.step_requires_closure],
dtypes=[torch.float32],
)
def test_second_order_optims_return_consistent_types(
self, device, dtype, optim_info
):
# Motivated by #7586
optim_cls = optim_info.optim_cls
params = [
torch.randn(10, 5, device=device, dtype=dtype),
torch.randn(10, device=device, dtype=dtype),
]
def closure():
return torch.tensor([10], device=device, dtype=dtype)
for optim_input in optim_info.optim_inputs_func(device=device):
# Currently, the only second order optim is LBFGS, so we just go ahead and modify
# "tolerance_grad", but this may not scale if we add second order optims in the future
kwargs = optim_input.kwargs
kwargs["tolerance_grad"] = math.inf
optim_inf = optim_cls(params, **kwargs)
kwargs["tolerance_grad"] = -math.inf
optim_neg_inf = optim_cls(params, **kwargs)
res1 = optim_inf.step(closure)
res2 = optim_neg_inf.step(closure)
self.assertEqual(type(res1), type(res2))
@onlyCUDA
@optims(
[
optim
for optim in optim_db
if "cpu" in optim.supports_fused_on and "cuda" in optim.supports_fused_on
],
dtypes=floating_types_and(
torch.bfloat16,
torch.float16,
),
)
def test_fused_cpu_matches_cuda(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
optim_inputs = optim_info.optim_inputs_func(device="cpu")
for optim_input in optim_inputs:
inpts, models, optimizers = [], [], []
for dev in ("cpu", "cuda"):
kwargs = optim_input.kwargs
kwargs["fused"] = True
inpt = torch.tensor(
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=dtype, device=dev
).reshape(3, 2)
torch.manual_seed(1)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
)
model.to(dtype=dtype, device=dev)
# foreach/fused optimizers should be tested with a
# zero_size tensor as its last param.
# ref: https://github.com/pytorch/pytorch/issues/100701
empty_param = torch.empty(
(), device=dev, dtype=dtype, requires_grad=True
)
empty_param.grad = torch.rand_like(empty_param)
params = list(model.parameters()) + [empty_param]
optimizer = optim_cls(params, **kwargs)
inpts.append(inpt)
models.append(model)
optimizers.append(optimizer)
self._compare_between(inpts, models, optimizers)
@onlyCUDA
@optims(
[
o
for o in optim_db
if ("foreach" in o.supported_impls and o.optim_cls.__name__ != "Adafactor")
],
dtypes=[torch.float32],
)
def test_defaults_changed_to_foreach(self, device, dtype, optim_info):
# Test that the default implementations for optimizers are changed to foreach
# except Adafactor, which defaults to the single tensor impl for memory efficiency.
optim_cls = optim_info.optim_cls
model = torch.nn.Linear(5, 5)
model.to(dtype=dtype, device=device)
inpt = torch.rand(2, 5, dtype=dtype, device=device)
import inspect
module = inspect.getmodule(optim_cls)
for optim_input in optim_info.optim_inputs_func(device=device):
optim = optim_cls(model.parameters(), **optim_input.kwargs)
optim.zero_grad()
output = model(inpt)
loss = output.sum()
loss.backward()
with patch.object(
module, f"_multi_tensor_{optim_cls.__name__.lower()}"
) as mocked_foreach_impl:
optim.step()
self.assertTrue(mocked_foreach_impl.called)
@optims(optim_db, dtypes=[torch.float32])
def test_non_empty_state(self, device, dtype, optim_info):
# There are internal tests that check that the state is not empty
optim_cls = optim_info.optim_cls
model = torch.nn.Linear(5, 5)
model.to(dtype=dtype, device=device)
inpt = torch.rand(2, 5, dtype=dtype, device=device)
for optim_input in optim_info.optim_inputs_func(device=device):
optim = optim_cls(model.parameters(), **optim_input.kwargs)
optim.zero_grad()
output = model(inpt)
loss = output.sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
for param in model.parameters():
if param.grad is not None:
param.grad = param.grad.to_sparse()
if optim_info.step_requires_closure:
optim.step(lambda: 1.0)
else:
optim.step()
for state in optim.state.values():
self.assertGreater(len(state), 0)
instantiate_device_type_tests(TestOptimRenewed, globals(), allow_mps=True)
if __name__ == "__main__":
run_tests()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
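The hook tests above (test_step_pre_hook, test_step_post_hook, test_step_all_hooks) exercise both the module-level registration functions and the per-optimizer methods. The following is a minimal sketch of that pattern, assuming a plain SGD optimizer on CPU; the firing order mirrors the [0, 1, 2, 5] sequence asserted in test_step_all_hooks.

import torch
from torch.optim import SGD
from torch.optim.optimizer import (
    register_optimizer_step_post_hook,
    register_optimizer_step_pre_hook,
)

events = []
# Global hooks fire for every optimizer; local hooks only for the one they are attached to.
g_pre = register_optimizer_step_pre_hook(lambda opt, args, kwargs: events.append("global_pre"))
g_post = register_optimizer_step_post_hook(lambda opt, args, kwargs: events.append("global_post"))

param = torch.nn.Parameter(torch.zeros(2))
opt = SGD([param], lr=0.1)
l_pre = opt.register_step_pre_hook(lambda opt, args, kwargs: events.append("local_pre"))
l_post = opt.register_step_post_hook(lambda opt, args, kwargs: events.append("local_post"))

param.grad = torch.ones_like(param)
opt.step()
assert events == ["global_pre", "local_pre", "local_post", "global_post"]

# Removing the handles detaches the hooks, as the tests verify after hook_handle.remove().
for handle in (g_pre, g_post, l_pre, l_post):
    handle.remove()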
torch
|
test/test_ops.py
|
map_to_fake
|
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
res = op(sample.input, *sample.args, **sample.kwargs)
except Exception as e:
continue
with context():
with mode:
res_fake = op(input, *args, **kwargs)
for fake_out, real_out in zip(
tree_flatten(res_fake)[0], tree_flatten(res)[0]
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
check_strides = name not in fake_tensor_stride_failing_ops
# prims/decomps must correctly model strides,
# see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
prims.utils.compare_tensor_meta(fake_out, real_out, check_strides)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs((input, args, kwargs), res_fake)
real_aliasing = outputs_alias_inputs((sample.input, sample, args, sample.kwargs), res)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(name not in dynamic_output_op_tests and name not in data_dependent_op_tests)
|
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return fake_mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
with fake_mode:
res_fake = op(input, *args, **kwargs)
if not match_results:
return
for fake_out, real_out in zip(
pytree.tree_leaves(res_fake), pytree.tree_leaves(res)
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
self.assertEqual(fake_out, real_out)
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
if op.op not in [
torch.ops.aten._efficient_attention_forward,
torch.ops.aten._flash_attention_forward,
]:
# prims/decomps must correctly model strides,
# see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
# note: the excluded ops have intentionally incorrect device;
# see "Note [Seed and Offset]" (_meta_registrations.py)
prims.utils.compare_tensor_meta(fake_out, real_out, True)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs(
(input, args, kwargs), res_fake
)
real_aliasing = outputs_alias_inputs(
(sample.input, sample, args, sample.kwargs), res
)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(
name not in dynamic_output_op_tests
and name not in data_dependent_op_tests
)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
pass
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
except torch._subclasses.fake_tensor.DynamicOutputShapeException:
self.assertTrue(
name in dynamic_output_op_tests
or name in sometimes_dynamic_output_op_test
)
self.assertTrue(
fake_mode.shape_env is None
or not fake_mode.shape_env.allow_dynamic_output_shape_ops
or name not in supported_dynamic_output_op_tests
)
except torch._subclasses.fake_tensor.DataDependentOutputException:
self.assertTrue(name in data_dependent_op_tests)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
fake_skips = (
"aminmax", # failing input
"cholesky", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cholesky_inverse", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"_segment_reduce.lengths", # Could not run 'aten::segment_reduce' with arguments from the 'Meta' backend.
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
sometimes_dynamic_output_op_test = (
"__getitem__",
"index_select",
)
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = (
"histogramdd",
)
fake_tensor_stride_failing_ops = {
"fft.fft2",
"fft.fft",
"fft.fftn",
"fft.hfft2",
"fft.hfft",
"fft.hfftn",
"fft.ifft2",
"fft.ifft",
"fft.ifftn",
"fft.ihfft2",
"fft.ihfft",
"fft.ihfftn",
"fft.irfft2",
"fft.irfft",
"fft.irfftn",
"fft.rfft2",
"fft.rfft",
"fft.rfftn",
"svd",
"linalg.svd",
}
fake_backward_xfails = fake_tensor_stride_failing_ops | {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
"cholesky",
}
fake_backward_xfails = {xfail(stride_skip) for stride_skip in fake_backward_xfails} | {
xfail("_segment_reduce", "lengths"),
xfail("norm", "nuc"),
xfail("linalg.norm", "subgradients_at_zero"), # can accept vector inputs
skip('nn.functional.ctc_loss'),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip('pinverse'),
}
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
fake_skips = (
"aminmax", # failing input
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::_to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
fake_autocast_device_skips["cuda"] = {"linalg.pinv", "pinverse"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
supported_dynamic_output_op_tests = (
"nonzero",
"unique",
"repeat_interleave",
"masked_select",
)
sometimes_dynamic_output_op_test = ("__getitem__", "index_select")
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
}
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip("pinverse"),
}
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
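The map_to_fake helper above is the heart of the fake-tensor cross-check: real sample inputs are converted with fake_mode.from_tensor, the op is re-run under the mode, and only metadata (shape, dtype, device, strides) is compared against the real result. A minimal standalone sketch of that flow, assuming a CPU tensor and a simple elementwise op:

import torch
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode

fake_mode = FakeTensorMode()
real = torch.randn(3, 4)
fake = fake_mode.from_tensor(real)   # carries shape/dtype/device, holds no real storage

with fake_mode:
    fake_out = torch.relu(fake)

real_out = torch.relu(real)
assert isinstance(fake_out, FakeTensor)
# Only metadata is comparable; fake tensors contain no values.
assert fake_out.shape == real_out.shape and fake_out.dtype == real_out.dtype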
torch
|
test/test_ops.py
|
test_fake_autocast
|
def test_fake_autocast(self, device, dtype, op):
if op.name in fake_autocast_device_skips[device]:
self.skipTest("Skip failing test")
context = torch.cuda.amp.autocast if device == "cuda" else torch.cpu.amp.autocast
self._test_fake_helper(device, dtype, op, context)
|
def test_fake_autocast(self, device, dtype, op):
device_type = torch.device(device).type
if op.name in fake_autocast_device_skips[device_type]:
self.skipTest("Skip failing test")
def context_fn():
return torch.amp.autocast(device_type)
self._test_fake_helper(device, dtype, op, context_fn)
|
from collections.abc import Sequence
from functools import partial
import warnings
import unittest
import itertools
import torch
import contextlib
import re
import os
from collections import defaultdict
from importlib import import_module
from torch.utils._pytree import tree_map
from typing import Dict
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_and_complex_types_and,
all_types_and_complex_and,
)
from test_proxy_tensor import xfail, skip, skipOps
from torch.testing._internal.common_utils import (
TestCase,
is_iterable_of_tensors,
run_tests,
IS_SANDCASTLE,
clone_input_helper,
IS_CI,
set_default_dtype,
suppress_warnings,
noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_UBSAN,
skipIfRocm,
IS_WINDOWS,
IS_FBCODE,
first_sample,
parametrize,
skipIfTorchInductor,
slowTest,
)
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
ReductionPythonRefInfo,
SpectralFuncInfo,
ops_and_refs,
python_ref_db,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
ops,
onlyCUDA,
onlyCPU,
onlyNativeDeviceTypes,
OpDTypes,
skipMeta,
)
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
from torch._subclasses.fake_utils import outputs_alias_inputs
import torch._prims as prims
from torch._prims.context import TorchRefsMode
from torch.testing._internal import opinfo
from torch.testing._internal import composite_compliance
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import TorchDispatchMode
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs = op_db + python_ref_db
_ops_and_refs_with_no_numpy_ref = [op for op in _ops_and_refs if op.ref is None]
aten = torch.ops.aten
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
from torch._prims.executor import make_traced
from copy import copy
fake_skips = (
"aminmax", # failing input
"cholesky", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cholesky_inverse", # Could not run 'aten::cholesky' with arguments from the 'Meta' backend
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"_segment_reduce.lengths", # Could not run 'aten::segment_reduce' with arguments from the 'Meta' backend.
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
sometimes_dynamic_output_op_test = (
"__getitem__",
"index_select",
)
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = (
"histogramdd",
)
fake_tensor_stride_failing_ops = {
"fft.fft2",
"fft.fft",
"fft.fftn",
"fft.hfft2",
"fft.hfft",
"fft.hfftn",
"fft.ifft2",
"fft.ifft",
"fft.ifftn",
"fft.ihfft2",
"fft.ihfft",
"fft.ihfftn",
"fft.irfft2",
"fft.irfft",
"fft.irfftn",
"fft.rfft2",
"fft.rfft",
"fft.rfftn",
"svd",
"linalg.svd",
}
fake_backward_xfails = fake_tensor_stride_failing_ops | {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
"cholesky",
}
fake_backward_xfails = {xfail(stride_skip) for stride_skip in fake_backward_xfails} | {
xfail("_segment_reduce", "lengths"),
xfail("norm", "nuc"),
xfail("linalg.norm", "subgradients_at_zero"), # can accept vector inputs
skip('nn.functional.ctc_loss'),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip('pinverse'),
}
class TestFakeTensor(TestCase):
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
fake_skips = (
"aminmax", # failing input
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::_to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
fake_autocast_device_skips["cuda"] = {"linalg.pinv", "pinverse"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
supported_dynamic_output_op_tests = (
"nonzero",
"unique",
"repeat_interleave",
"masked_select",
)
sometimes_dynamic_output_op_test = ("__getitem__", "index_select")
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
}
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip("pinverse"),
}
@unMarkDynamoStrictTest
class TestFakeTensor(TestCase):
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
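test_fake_autocast above wraps the same fake-tensor check in the device-generic autocast context returned by context_fn. A minimal sketch of what that context does on its own, assuming a CPU-only run where autocast lowers matmuls to bfloat16:

import torch

device_type = "cpu"
with torch.amp.autocast(device_type):
    a = torch.randn(4, 4)
    b = torch.randn(4, 4)
    out = a @ b   # autocast-eligible op runs in the reduced-precision dtype
assert out.dtype == torch.bfloat16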
torch
|
test/test_ops.py
|
context_fn
|
def context_fn():
return torch.amp.autocast(device_type)
self._test_fake_helper(device, dtype, op, context_fn)
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
fake_skips = (
"aminmax", # failing input
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::_to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
fake_autocast_device_skips["cuda"] = {"linalg.pinv", "pinverse"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
supported_dynamic_output_op_tests = (
"nonzero",
"unique",
"repeat_interleave",
"masked_select",
)
sometimes_dynamic_output_op_test = ("__getitem__", "index_select")
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
}
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip("pinverse"),
}
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_ops.py
|
test_strided_layout
|
instantiate_device_type_tests(TestCommon, globals())
instantiate_device_type_tests(TestCompositeCompliance, globals())
instantiate_device_type_tests(TestMathBits, globals())
instantiate_device_type_tests(TestRefsOpsInfo, globals(), only_for="cpu")
instantiate_device_type_tests(TestFakeTensor, globals())
instantiate_device_type_tests(TestTags, globals())
if __name__ == "__main__":
run_tests()
|
def test_strided_layout(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
for sample in samples:
kwargs = sample.kwargs.copy()
kwargs["layout"] = torch.strided
strided_result = op(sample.input, *sample.args, **kwargs)
self.assertEqual(strided_result.layout, torch.strided)
|
import contextlib
import copy
import inspect
import itertools
import os
import re
import unittest
import warnings
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from importlib import import_module
from typing import Dict, List
import torch
import torch._prims as prims
import torch.utils._pytree as pytree
from torch._prims.context import TorchRefsMode
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
from torch.testing._internal import composite_compliance, opinfo
from torch.testing._internal.common_device_type import (
deviceCountAtLeast,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypesAnd,
OpDTypes,
ops,
skipMeta,
)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
floating_and_complex_types_and,
integral_types_and,
)
from torch.testing._internal.common_methods_invocations import (
BinaryUfuncInfo,
op_db,
ops_and_refs,
python_ref_db,
ReductionOpInfo,
ReductionPythonRefInfo,
skip,
skipOps,
SpectralFuncInfo,
UnaryUfuncInfo,
xfail,
)
from torch.testing._internal.common_utils import (
clone_input_helper,
first_sample,
IS_CI,
IS_FBCODE,
is_iterable_of_tensors,
IS_SANDCASTLE,
IS_WINDOWS,
noncontiguous_like,
parametrize,
run_tests,
set_default_dtype,
skipIfTorchInductor,
slowTest,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
TestCase,
unMarkDynamoStrictTest,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map
_variant_ops = partial(
ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
_ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
from copy import copy
from torch._prims.executor import make_traced
fake_skips = (
"aminmax", # failing input
"cov", # aweights cannot be negtaive
"istft", # window overlap add min: 0
"linalg.eigvals", # The tensor has a non-zero number of elements, but its data is not allocated yet
"linalg.eigvalsh", # aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.matrix_power", # Could not run 'aten::eye.m_out' with arguments from the 'Meta' backend
# "linalg.pinv", # Could not run 'aten::pinv.out' with arguments from the 'Meta' backen
"linalg.matrix_rank.hermitian", # Could not run 'aten::linalg_eigvalsh.out' with arguments from the 'Meta' backend
"linalg.pinv.hermitian", # tensor.mH is only supported on matrices or batches of matrices. Got 1-D tensor
"linalg.solve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta' backend
"linalg.tensorsolve", # Could not run 'aten::linalg_solve' with arguments from the 'Meta'
"lu_solve", # MALLOC ERROR: debug
"multinomial", # Could not run 'aten::multinomial' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_1", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_3", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"mvlgamma.mvlgamma_p_5", # Could not run 'aten::_local_scalar_dense' with arguments from the 'Meta' backend
"nanmean", # logical_not() got an unexpected keyword argument 'out'
"quantile", # quantile() q values must be in the range [0, 1]
"nanquantile", # quantile() q values must be in the range [0, 1]
"nn.functional.ctc_loss", # The tensor has a non-zero number of elements, but its data is not allocated yet
"nn.functional.embedding_bag", # sometimes errors
"nn.functional.nll_loss", # sometimes errors
"nn.functional.max_pool1d", # The tensor has a non-zero number of elements
"to_sparse", # Could not run 'aten::_to_sparse' with arguments from the 'Meta' backend
"tensor_split", # The tensor has a non-zero number of elements, but its data is not allocated yet
"repeat_interleave", # cannot repeat_interleave a meta tensor without output_size
"sparse.sampled.addmm", # sparsity not supported
# Can not infer total number of classes from meta. no way at present to throw DynamicOutputShapeException
"nn.functional.one_hot",
"narrow", # Fails only for one overload with DataDependentOutputException (hence skip).
)
fake_autocast_device_skips = defaultdict(dict)
fake_autocast_device_skips["cpu"] = {"linalg.pinv"}
fake_autocast_device_skips["cuda"] = {"linalg.pinv", "pinverse"}
dynamic_output_op_tests = (
"argwhere",
"bincount",
"combinations",
"linalg.lstsq",
"masked_select",
"nonzero",
"unique_consecutive",
"unique",
"linalg.lstsq.grad_oriented",
)
supported_dynamic_output_op_tests = (
"nonzero",
"unique",
"repeat_interleave",
"masked_select",
)
sometimes_dynamic_output_op_test = ("__getitem__", "index_select")
data_dependent_op_tests = (
"equal",
"corrcoef",
"nn.functional.gaussian_nll_loss",
"allclose",
)
aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
"linalg.matrix_norm",
"linalg.norm",
"linalg.svd",
"linalg.svdvals",
"pca_lowrank",
"roll",
"svd_lowrank",
"sgn",
}
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
skip("nn.functional.binary_cross_entropy"),
skip("sparse.sampled_addmm"),
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
skip("pinverse"),
}
@unMarkDynamoStrictTest
class TestFakeTensor(TestCase):
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_optim.py
|
eval
|
def eval(params, sparse_grad, w):
# Depending on w, provide only the x or y gradient
optimizer.zero_grad()
loss = rosenbrock(params)
loss.backward()
grad = drosenbrock(params.data)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
if w:
i = torch.LongTensor([[0, 0]])
x = grad[0]
v = torch.tensor([x / 4.0, x - x / 4.0])
else:
i = torch.LongTensor([[1, 1]])
y = grad[1]
v = torch.tensor([y - y / 4.0, y / 4.0])
x = sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype)
with torch.no_grad():
if sparse_grad:
params.grad = x
else:
params.grad = x.to_dense()
return loss
for i in range(2000):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, True, w))
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params))
else:
scheduler.step()
if not sparse_only:
optimizer_c.step(functools.partial(eval, params_c, False, w))
self.assertEqual(params, params_c)
if not maximize:
self.assertLessEqual(params.data.dist(solution), initial_dist)
else:
self.assertGreaterEqual(rosenbrock(params), rosenbrock(params_t))
|
def eval(params, sparse_grad, w):
optimizer.zero_grad()
if multi_tensor:
loss = sum(rosenbrock(param) for param in params)
else:
loss = rosenbrock(params[0])
loss.backward()
grads_out = [get_grad(param, sparse_grad, w) for param in params]
with torch.no_grad():
params[0].grad = grads_out[0]
if multi_tensor:
params[1].grad = grads_out[1].to(dtype=dtype)
return loss
for i in range(1800):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, True, w))
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params[0]))
else:
scheduler.step()
if not optim_info.only_supports_sparse_grads:
optimizer_c.step(functools.partial(eval, params_c, False, w))
for scheduler in schedulers_c:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params_c[0]))
else:
scheduler.step()
# Tolerance is increased due to floating point error from different
# code path for dense case: x v.s. x - x / 4.0 + x / 4.0
self.assertEqual(params, params_c, atol=5e-6, rtol=5e-6)
if not kwargs.get("maximize", False):
self.assertLessEqual(
sum(param.dist(solution) for param in params), initial_dist
)
else:
self.assertGreaterEqual(
sum(rosenbrock(param) for param in params),
sum(rosenbrock(param_t) for param_t in params_t),
)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
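The rewritten eval() above hands the optimizer an uncoalesced sparse gradient derived from the Rosenbrock derivative. A minimal sketch of that handoff, assuming a hypothetical 2-element parameter and SparseAdam as the sparse-capable optimizer:

import torch

param = torch.zeros(2, requires_grad=True)
opt = torch.optim.SparseAdam([param], lr=0.1)

i = torch.tensor([[0, 0]])                 # duplicate index -> uncoalesced values
v = torch.tensor([0.25, 0.75])
sparse_grad = torch.sparse_coo_tensor(i, v, (2,))
assert not sparse_grad.is_coalesced()

with torch.no_grad():
    param.grad = sparse_grad               # a dense optimizer would take sparse_grad.to_dense()
opt.step()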
torch
|
test/test_optim.py
|
closure
|
def closure():
return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
def closure():
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
if optim_info.step_requires_closure:
loss = optimizer.step(closure)
else:
loss = closure()
optimizer.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
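A note on the sparse-gradient conversion in the replacement closure above: Tensor.to_sparse() produces a sparse COO copy of a dense gradient, which is what optimizers that only support sparse grads expect. A minimal stand-alone illustration (the 2x2 gradient here is arbitrary):

import torch

g = torch.tensor([[0.0, 1.5], [0.0, 0.0]])   # dense gradient with one nonzero entry
gs = g.to_sparse()                           # sparse COO copy of the same values
print(gs.layout)                             # torch.sparse_coo
print(torch.equal(gs.to_dense(), g))         # True: round-trips back to the dense values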
torch
|
test/test_optim.py
|
four_arg_constructor
|
def four_arg_constructor(weight, bias, maximize, foreach):
self.assertFalse(foreach)
return constructor(weight, bias, maximize)
|
try:
kwargs["lr"] = torch.tensor(kwargs["lr"])
optimizer = optim_cls([weight_c, bias_c], **kwargs)
except ValueError as e:
self.assertRegex(str(e), ".*lr as a Tensor is not supported.*")
continue
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
four_arg_constructor
|
def four_arg_constructor(weight, bias, maximize, foreach):
self.assertFalse(foreach)
return constructor(weight, bias, maximize)
|
if with_lrsched and len(schedulers_constructors) == 0:
return
supported_inputs = []
if len(kwarg_updates) != 0:
seen = set()
for i in all_optim_inputs:
for k in kwarg_updates:
if k in i.kwargs:
del i.kwargs[k]
hashable_kwargs = tuple(sorted(i.kwargs.items()))
if len(i.kwargs) > 0 and hashable_kwargs not in seen:
supported_inputs.append(i)
seen.add(hashable_kwargs)
if "lr" in kwarg_updates:
i.kwargs["lr"] = kwarg_updates["lr"]
else:
supported_inputs = all_optim_inputs
for optim_input in supported_inputs:
kwargs = optim_input.kwargs
multi_tensor = kwargs.get("foreach", False)
# For rosenbrock tests, it is mandated that the param is a tensor with 2 numbers
if multi_tensor:
params_t = [
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5], dtype=dtype),
]
else:
params_t = [torch.tensor([1.5, 1.5])]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
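The replacement code in the record above de-duplicates optimizer inputs by turning each kwargs dict into a hashable, order-independent key. The same pattern in isolation (the example configs are made up):

configs = [{"lr": 0.1, "foreach": True}, {"foreach": True, "lr": 0.1}, {"lr": 0.2}]
seen, unique = set(), []
for cfg in configs:
    key = tuple(sorted(cfg.items()))   # order-independent, hashable fingerprint of the kwargs
    if key not in seen:
        seen.add(key)
        unique.append(cfg)
print(unique)   # the first two configs collapse into a single entry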
torch
|
test/test_optim.py
|
four_arg_constructor
|
def four_arg_constructor(weight, bias, maximize, foreach):
self.assertFalse(foreach)
return constructor(weight, bias, maximize)
|
params = [Parameter(param_t) for param_t in params_t]
optimizer = optim_cls(params, **kwargs)
schedulers = [
s(optimizer) for s in (schedulers_constructors if with_lrsched else [])
]
if not optim_info.only_supports_sparse_grads:
params_c = [Parameter(param_t.clone()) for param_t in params_t]
optimizer_c = optim_cls(params_c, **kwargs)
schedulers_c = [
s(optimizer_c)
for s in (schedulers_constructors if with_lrsched else [])
]
solution = torch.tensor([1, 1])
with torch.no_grad():
initial_dist = sum(param.dist(solution) for param in params)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
get_grad
|
def get_grad(param, sparse_grad, w):
grad = drosenbrock(param)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
# Depending on w, provide only the x or y gradient
if sparse_grad:
if w:
i = torch.tensor([[0, 0]], dtype=torch.int64)
x = grad[0]
v = torch.tensor([x / 4.0, x - x / 4.0])
else:
i = torch.tensor([[1, 1]], dtype=torch.int64)
y = grad[1]
v = torch.tensor([y - y / 4.0, y / 4.0])
grad_out = torch.sparse_coo_tensor(i, v, (2,), dtype=v.dtype)
else:
if w:
grad_out = torch.tensor([grad[0], 0], dtype=param.dtype)
else:
grad_out = torch.tensor([0, grad[1]], dtype=param.dtype)
return grad_out
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
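The added get_grad helper above, like several records below, assumes rosenbrock and drosenbrock helpers that are defined elsewhere in test_optim.py and not shown in these snippets. For reference only, a minimal sketch of such helpers for a 2-element parameter (the exact implementations in the test file may differ):

import torch

def rosenbrock(tensor):
    # f(x, y) = (1 - x)**2 + 100 * (y - x**2)**2, minimized at (1, 1)
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2

def drosenbrock(tensor):
    # Analytic gradient of the function above: (df/dx, df/dy)
    x, y = tensor
    return torch.stack((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))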
torch
|
test/test_optim.py
|
eval
|
def eval(params, sparse_grad, w):
# Depending on w, provide only the x or y gradient
optimizer.zero_grad()
loss = rosenbrock(params)
loss.backward()
grad = drosenbrock(params.data)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
if w:
i = torch.LongTensor([[0, 0]])
x = grad[0]
v = torch.tensor([x / 4.0, x - x / 4.0])
else:
i = torch.LongTensor([[1, 1]])
y = grad[1]
v = torch.tensor([y - y / 4.0, y / 4.0])
x = sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype)
with torch.no_grad():
if sparse_grad:
params.grad = x
else:
params.grad = x.to_dense()
return loss
for i in range(2000):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, True, w))
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params))
else:
scheduler.step()
if not sparse_only:
optimizer_c.step(functools.partial(eval, params_c, False, w))
self.assertEqual(params, params_c)
if not maximize:
self.assertLessEqual(params.data.dist(solution), initial_dist)
else:
self.assertGreaterEqual(rosenbrock(params), rosenbrock(params_t))
|
def eval(params, sparse_grad, w):
optimizer.zero_grad()
if multi_tensor:
loss = sum(rosenbrock(param) for param in params)
else:
loss = rosenbrock(params[0])
loss.backward()
grads_out = [get_grad(param, sparse_grad, w) for param in params]
with torch.no_grad():
params[0].grad = grads_out[0]
if multi_tensor:
params[1].grad = grads_out[1].to(dtype=dtype)
return loss
for i in range(1800):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, True, w))
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params[0]))
else:
scheduler.step()
if not optim_info.only_supports_sparse_grads:
optimizer_c.step(functools.partial(eval, params_c, False, w))
for scheduler in schedulers_c:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(rosenbrock(params_c[0]))
else:
scheduler.step()
# Tolerance is increased due to floating point error from different
# code path for dense case: x v.s. x - x / 4.0 + x / 4.0
self.assertEqual(params, params_c, atol=5e-6, rtol=5e-6)
if not kwargs.get("maximize", False):
self.assertLessEqual(
sum(param.dist(solution) for param in params), initial_dist
)
else:
self.assertGreaterEqual(
sum(rosenbrock(param) for param in params),
sum(rosenbrock(param_t) for param_t in params_t),
)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
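Both versions of eval above deliberately hand the optimizer an uncoalesced sparse gradient: the same index appears twice, and the two values only merge when the tensor is coalesced. A small stand-alone illustration with torch.sparse_coo_tensor (values chosen arbitrarily):

import torch

i = torch.tensor([[0, 0]], dtype=torch.int64)   # duplicate index 0 for a size-(2,) tensor
v = torch.tensor([0.75, 2.25])                  # the two entries sum to 3.0 once coalesced
g = torch.sparse_coo_tensor(i, v, (2,))
print(g.is_coalesced())                         # False
print(g.coalesce().to_dense())                  # tensor([3., 0.])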
torch
|
test/test_optim.py
|
_test_state_dict
|
def _test_state_dict(self, weight, bias, input, constructor, atol=None, rtol=None):
weight = Parameter(weight)
bias = Parameter(bias)
with torch.no_grad():
input = input.clone().detach().requires_grad_()
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
i = input_cuda if weight.is_cuda else input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = constructor(weight, bias)
fn = functools.partial(fn_base, optimizer, weight, bias)
# Prime the optimizer
for _i in range(20):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
with torch.no_grad():
weight_c = Parameter(weight.clone().detach())
bias_c = Parameter(bias.clone().detach())
optimizer_c = constructor(weight_c, bias_c)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict_c)
# Run both optimizations in parallel
for _ in range(20):
optimizer.step(fn)
optimizer_c.step(fn_c)
self.assertEqual(weight, weight_c)
self.assertEqual(bias, bias_c)
# Make sure state dict wasn't modified
self.assertEqual(state_dict, state_dict_c)
# Make sure state dict is deterministic with equal but not identical parameters
self.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
# Make sure repeated parameters have identical representation in state dict
optimizer_c.param_groups.extend(optimizer_c.param_groups)
self.assertEqual(
optimizer.state_dict()["param_groups"][-1],
optimizer_c.state_dict()["param_groups"][-1],
)
# Make sure that optimizers that support maximize can load older models
old_state_dict = deepcopy(optimizer.state_dict())
state_dict_no_maximize = deepcopy(optimizer.state_dict())
if "maximize" in state_dict_no_maximize["param_groups"][0]:
for group in state_dict_no_maximize["param_groups"]:
del group["maximize"]
optimizer.load_state_dict(state_dict_no_maximize)
# Make sure we can still step
optimizer.step()
# Undo these changes before proceeding!
optimizer.load_state_dict(old_state_dict)
# Make sure that optimizers that support foreach can load older models
state_dict_no_foreach = deepcopy(optimizer.state_dict())
if "foreach" in state_dict_no_foreach["param_groups"][0]:
for group in state_dict_no_foreach["param_groups"]:
del group["foreach"]
optimizer.load_state_dict(state_dict_no_foreach)
# Make sure we can still step
optimizer.step()
# Undo these changes before proceeding!
optimizer.load_state_dict(old_state_dict)
# Make sure that loading optimizers with step not wrapped in tensor can work
state_dict = optimizer.state_dict()
if "step" in state_dict["state"][0] and torch.is_tensor(
state_dict["state"][0]["step"]
):
for state in state_dict["state"].values():
state["step"] = state["step"].item()
optimizer.load_state_dict(state_dict)
optimizer.step()
# Check that state dict can be loaded even when we cast parameters
# to a different type and move to a different device.
if not torch.cuda.is_available():
return
with torch.no_grad():
input_cuda = input.clone().detach().to(dtype=torch.float32, device="cuda")
weight_cuda = Parameter(
weight.clone().detach().to(dtype=torch.float32, device="cuda")
)
bias_cuda = Parameter(
bias.clone().detach().to(dtype=torch.float32, device="cuda")
)
optimizer_cuda = constructor(weight_cuda, bias_cuda)
fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda)
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_cuda.load_state_dict(state_dict_c)
# Make sure state dict wasn't modified
self.assertEqual(state_dict, state_dict_c)
# Make sure that device of state['step'] is still CPU
new_state_dict = optimizer_cuda.state_dict()
if "step" in state_dict["state"][0] and torch.is_tensor(
state_dict["state"][0]["step"]
):
for state in new_state_dict["state"].values():
self.assertEqual(state["step"].device.type, "cpu")
for _i in range(20):
optimizer.step(fn)
optimizer_cuda.step(fn_cuda)
self.assertEqual(weight, weight_cuda)
self.assertEqual(bias, bias_cuda, atol=atol, rtol=rtol)
# validate deepcopy() copies all public attributes
def getPublicAttr(obj):
return {k for k in obj.__dict__ if not k.startswith("_")}
self.assertEqual(getPublicAttr(optimizer), getPublicAttr(deepcopy(optimizer)))
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
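The deleted _test_state_dict helper above exercises the standard optimizer state_dict round trip. A compact sketch of that pattern (parameter shape and hyperparameters here are arbitrary):

import torch
from copy import deepcopy

w = torch.nn.Parameter(torch.randn(3))
opt = torch.optim.SGD([w], lr=0.1, momentum=0.9)
w.grad = torch.randn(3)
opt.step()                                   # populate the momentum buffer

w_c = torch.nn.Parameter(w.detach().clone())
opt_c = torch.optim.SGD([w_c], lr=0.1, momentum=0.9)
opt_c.load_state_dict(deepcopy(opt.state_dict()))

w_c.grad = w.grad.clone()                    # same gradient for both copies
opt.step()
opt_c.step()
assert torch.equal(w, w_c)                   # restored state makes the updates identical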
torch
|
test/test_optim.py
|
real_closure
|
# Make sure that loading optimizers with step not wrapped in tensor can work
state_dict = optimizer.state_dict()
if "step" in state_dict["state"][0] and torch.is_tensor(
state_dict["state"][0]["step"]
):
for state in state_dict["state"].values():
state["step"] = state["step"].item()
optimizer.load_state_dict(state_dict)
optimizer.step()
|
def real_closure():
for param in real_params:
grad = torch.randn_like(param)
param.grad = grad
real_steps.append(param.detach().clone())
grads_losses.append(grad.clone())
loss = torch.randn(1)
grads_losses.append(loss.clone())
return loss
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_optim.py
|
complex_closure
|
def complex_closure():
for param in complex_params:
if torch.is_complex(param):
grad = torch.view_as_complex(grads_losses.pop(0))
complex_steps.append(torch.view_as_real_copy(param.detach()))
else:
grad = grads_losses.pop(0)
complex_steps.append(param.detach().clone())
param.grad = grad
return grads_losses.pop(0)
for _ in range(3):
if optim_info.step_requires_closure:
# LBFGS, for example, requires closure and calls it internally
real_optimizer.step(real_closure)
complex_optimizer.step(complex_closure)
else:
# For other optimizers, we call closure explicitly to set the gradients
real_closure()
complex_closure()
real_optimizer.step()
complex_optimizer.step()
# Final Parameters should be the same
complex_params_asreal = [
torch.view_as_real(param) if param.is_complex() else param
for param in complex_params
]
self.assertEqual(real_params, complex_params_asreal)
# All intermediate steps should also be the same
# also checks steps taken within for example a line search
self.assertEqual(complex_steps, real_steps)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_optim.py
|
test_complex_2d
|
def test_complex_2d(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# Also skip fused, since our fused kernels do not support complex
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info, skip=("differentiable", "fused")
)
for optim_input in all_optim_inputs:
if optim_info.step_requires_closure:
# Why? The way we implement complex is by turning complex params into view_as_real
# alternatives. For example, a size (M,N) tensor will become (M,N,2). In this test,
# we break apart a tensor into its real and imaginary parts, which would be 2x(M,N).
# For other pointwise optimizers, this distinction is trivial, but for LBFGS where
# there are reductions across all parameters (and all the grads get flattened into
# one long Tensor), this ordering matters. Why? Reductions are not deterministic
# because addition between floating point numbers is not associative, i.e.,
# a + b + c != a + c + b. Thus, we add a seed here to control the discrepancy that
# will happen with LBFGS. Note that in test_complex above, there is no need for a seed
# nor for increased tolerance, because results should be bitwise equivalent.
torch.manual_seed(2024)
a1 = torch.randn(2, device=device, dtype=dtype, requires_grad=True)
a1_real = a1.real.clone().detach()
a1_imag = a1.imag.clone().detach()
a1_real.requires_grad_()
a1_imag.requires_grad_()
optim1 = optim_cls([a1], **optim_input.kwargs)
optim2 = optim_cls([a1_real, a1_imag], **optim_input.kwargs)
a1_reals = TensorTracker()
a1_imags = TensorTracker()
a1_grad_reals = TensorTracker()
a1_grad_imags = TensorTracker()
losses = TensorTracker()
def closure1():
optim1.zero_grad()
loss = rosenbrock(a1).abs()
loss.backward()
# Track clones to best test accuracy
a1_reals.add(a1.real)
a1_imags.add(a1.imag)
a1_grad_reals.add(a1.grad.real)
a1_grad_imags.add(a1.grad.imag)
losses.add(loss)
return loss
def closure2():
optim2.zero_grad()
a1_reals.pop_check_set(a1_real, self)
a1_imags.pop_check_set(a1_imag, self)
a2 = torch.complex(a1_real, a1_imag)
loss = rosenbrock(a2).abs()
losses.pop_check_set(loss, self)
loss.backward()
a1_grad_reals.pop_check_set(a1_real.grad, self)
a1_grad_imags.pop_check_set(a1_imag.grad, self)
return loss
for _ in range(3):
if optim_info.step_requires_closure:
# LBFGS, for example, requires closure and calls it internally
optim1.step(closure1)
optim2.step(closure2)
else:
closure1()
closure2()
optim1.step()
optim2.step()
self.assertEqual(a1.real, a1_real)
self.assertEqual(a1.imag, a1_imag)
self.assertTrue(a1_reals.all_popped())
self.assertTrue(a1_imags.all_popped())
self.assertTrue(a1_grad_reals.all_popped())
self.assertTrue(a1_grad_imags.all_popped())
self.assertTrue(losses.all_popped())
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
@markDynamoStrictTest
class TestOptimRenewed(TestCase):
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
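The long comment in test_complex_2d above turns on the fact that floating-point addition is not associative, so the order in which LBFGS reduces over parameters changes the result slightly. A two-line illustration in plain Python:

a, b, c = 0.1, 1e16, -1e16
print((a + b) + c)   # 0.0 -- the small term is absorbed by the large intermediate sum
print(a + (b + c))   # 0.1 -- summing the large terms first preserves it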
torch
|
test/test_optim.py
|
closure1
|
# Check that state dict can be loaded even when we cast parameters
# to a different type and move to a different device.
if not torch.cuda.is_available():
return
with torch.no_grad():
input_cuda = input.clone().detach().to(dtype=torch.float32, device="cuda")
weight_cuda = Parameter(
weight.clone().detach().to(dtype=torch.float32, device="cuda")
)
bias_cuda = Parameter(
bias.clone().detach().to(dtype=torch.float32, device="cuda")
)
optimizer_cuda = constructor(weight_cuda, bias_cuda)
fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda)
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_cuda.load_state_dict(state_dict_c)
# Make sure state dict wasn't modified
self.assertEqual(state_dict, state_dict_c)
|
def closure1():
optim1.zero_grad()
loss = rosenbrock(a1).abs()
loss.backward()
# Track clones to best test accuracy
a1_reals.add(a1.real)
a1_imags.add(a1.imag)
a1_grad_reals.add(a1.grad.real)
a1_grad_imags.add(a1.grad.imag)
losses.add(loss)
return loss
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_optim.py
|
closure2
|
# Make sure that device of state['step'] is still CPU
new_state_dict = optimizer_cuda.state_dict()
if "step" in state_dict["state"][0] and torch.is_tensor(
state_dict["state"][0]["step"]
):
for state in new_state_dict["state"].values():
self.assertEqual(state["step"].device.type, "cpu")
for _i in range(20):
optimizer.step(fn)
optimizer_cuda.step(fn_cuda)
self.assertEqual(weight, weight_cuda)
self.assertEqual(bias, bias_cuda, atol=atol, rtol=rtol)
# validate deepcopy() copies all public attributes
|
def closure2():
optim2.zero_grad()
a1_reals.pop_check_set(a1_real, self)
a1_imags.pop_check_set(a1_imag, self)
a2 = torch.complex(a1_real, a1_imag)
loss = rosenbrock(a2).abs()
losses.pop_check_set(loss, self)
loss.backward()
a1_grad_reals.pop_check_set(a1_real.grad, self)
a1_grad_imags.pop_check_set(a1_imag.grad, self)
return loss
for _ in range(3):
if optim_info.step_requires_closure:
# LBFGS, for example, requires closure and calls it internally
optim1.step(closure1)
optim2.step(closure2)
else:
closure1()
closure2()
optim1.step()
optim2.step()
self.assertEqual(a1.real, a1_real)
self.assertEqual(a1.imag, a1_imag)
self.assertTrue(a1_reals.all_popped())
self.assertTrue(a1_imags.all_popped())
self.assertTrue(a1_grad_reals.all_popped())
self.assertTrue(a1_grad_imags.all_popped())
self.assertTrue(losses.all_popped())
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
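The closure1/closure2 pair above uses TensorTracker (imported from common_optimizers) as a FIFO of cloned tensors: add() records values from the first run, pop_check_set() compares the second run against them, and all_popped() confirms nothing was left unchecked. The actual implementation is not shown in these records; the following is only a rough, assumed sketch of that interface, not the real helper:

import torch

class FifoTensorTracker:
    # Illustrative stand-in for TensorTracker; the real helper may differ.
    def __init__(self):
        self._queue = []

    def add(self, t):
        self._queue.append(t.detach().clone())

    def pop_check_set(self, t, test_case):
        ref = self._queue.pop(0)
        test_case.assertEqual(ref, t)        # compare against the recorded value
        with torch.no_grad():
            t.copy_(ref)                     # keep the two runs in lockstep

    def all_popped(self):
        return len(self._queue) == 0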
torch
|
test/test_optim.py
|
_test_complex_optimizer
|
def _test_complex_optimizer(self, optimizer_constructor):
complex_param = torch.randn(5, 5, dtype=torch.complex64, requires_grad=True)
real_param = torch.view_as_real(complex_param).detach().clone().requires_grad_()
complex_opt = optimizer_constructor(complex_param)
real_opt = optimizer_constructor(real_param)
for _ in range(3):
complex_param.grad = torch.randn_like(complex_param)
real_param.grad = torch.view_as_real(complex_param.grad)
complex_opt.step()
real_opt.step()
self.assertEqual(torch.view_as_real(complex_param), real_param)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
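Both the deleted _test_complex_optimizer helper above and its device-generic replacement rely on torch.view_as_real / torch.view_as_complex exposing a complex tensor as a real (..., 2) tensor, so a pointwise update applied through either view produces the same numbers. A minimal sketch with SGD (chosen arbitrarily):

import torch

c = torch.randn(5, dtype=torch.complex64, requires_grad=True)
r = torch.view_as_real(c).detach().clone().requires_grad_()

opt_c = torch.optim.SGD([c], lr=0.1)
opt_r = torch.optim.SGD([r], lr=0.1)

c.grad = torch.randn_like(c)                  # complex gradient
r.grad = torch.view_as_real(c.grad)           # the same gradient, viewed as real pairs
opt_c.step()
opt_r.step()
assert torch.allclose(torch.view_as_real(c), r)   # the two parameterizations stay in sync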
torch
|
test/test_optim.py
|
_test_complex_2d
|
def _test_complex_2d(self, optimizer_constructor, f=None):
if f is None:
f = rosenbrock
a1 = torch.randn(2, dtype=torch.complex64, requires_grad=True)
a1_real = a1.real.clone().detach()
a1_imag = a1.imag.clone().detach()
a1_real.requires_grad_()
a1_imag.requires_grad_()
optim1 = optimizer_constructor([a1])
optim2 = optimizer_constructor([a1_real, a1_imag])
for _ in range(10):
optim1.zero_grad()
optim2.zero_grad()
a2 = torch.complex(a1_real, a1_imag)
f(a1).backward()
f(a2).backward()
self.assertEqual(a1.grad.real, a1_real.grad)
self.assertEqual(a1.grad.imag, a1_imag.grad)
optim1.step()
optim2.step()
self.assertEqual(a1.real, a1_real)
self.assertEqual(a1.imag, a1_imag)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
_build_params_dict
|
def _build_params_dict(self, weight, bias, **kwargs):
return [{"params": [weight]}, dict(params=[bias], **kwargs)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
_build_params_dict_single
|
def _build_params_dict_single(self, weight, bias, **kwargs):
return [dict(params=bias, **kwargs)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_sgd
|
def test_sgd(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict_single(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict_single(weight, bias, lr=1e-2),
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: StepLR(opt, gamma=0.9, step_size=10)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: LinearLR(
opt, start_factor=0.4, end_factor=0.8, total_iters=4
)
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: LinearLR(
opt, start_factor=0.4, end_factor=0.6, total_iters=4
),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.99, step_size=10),
lambda opt: ExponentialLR(opt, gamma=0.99),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
lr=1e-3,
momentum=0.5,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
lr=1e-3,
momentum=0.5,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
nesterov=True,
lr=1e-3,
momentum=0.5,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid momentum value: -0.5"):
optim.SGD(None, lr=1e-2, momentum=-0.5)
|
# foreach/fused optimizers should be tested with a
# zero-size tensor as the last param.
# ref: https://github.com/pytorch/pytorch/issues/100701
empty_param = torch.empty(
(), device=device, dtype=dtype, requires_grad=True
)
empty_param.grad = torch.rand_like(empty_param)
params = list(model.parameters()) + [empty_param]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
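A minimal sketch of the edge case the snippet above describes, assuming only the public SGD API: the foreach path should also accept a zero-size tensor appended as the last parameter.

import torch

# Minimal sketch: the foreach SGD path should cope with a 0-dim ("zero size")
# parameter appended after the regular ones.
weight = torch.randn(3, 2, requires_grad=True)
weight.grad = torch.randn_like(weight)
empty_param = torch.empty((), requires_grad=True)
empty_param.grad = torch.rand_like(empty_param)

opt = torch.optim.SGD([weight, empty_param], lr=1e-3, momentum=0.5, foreach=True)
opt.step()  # should run without error and update both tensors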
torch
|
test/test_optim.py
|
test_sgd_sparse
|
def test_sgd_sparse(self):
for foreach in (False, True):
self._test_rosenbrock_sparse(
lambda params: optim.SGD(params, lr=4.8e-3, foreach=foreach)
)
self._test_rosenbrock_sparse(
lambda params: optim.SGD(params, lr=0.0048, foreach=foreach),
[lambda opt: StepLR(opt, gamma=0.99999, step_size=300)],
)
|
optimizer = optim_cls(params, **kwargs)
models.append(model)
optimizers.append(optimizer)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
_test_derived_optimizers_varying_tensors
|
def _test_derived_optimizers_varying_tensors(self, optimizer_with_kwargs, kwarg):
if not torch.cuda.is_available():
return
assert kwarg in ("foreach", "fused")
# Specifically test that passing params of different dtypes and devices
# is handled by the foreach and fused implementations equivalently to the
# single-tensor implementations. We need multiple GPUs (vs. just a CPU and
# a GPU) because fused adam only works on GPUs. (Thus we only run the tests
# that call into this helper when TEST_MULTIGPU.)
params = [
torch.rand(2, 3, dtype=torch.float64, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device='cuda:0', requires_grad=True),
torch.rand(2, 3, dtype=torch.float64, device='cuda:1', requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device='cuda:1', requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device='cuda:1', requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device='cuda:1', requires_grad=True),
torch.randint(1024, (2, 3), dtype=torch.int64, device='cuda:1', requires_grad=False),
]
for p in params:
if p.requires_grad:
p.grad = torch.rand_like(p, device=p.device, dtype=p.dtype)
kIterations = 7 if kwarg == "foreach" else 1
for optimizer_constructor, kwargs in optimizer_with_kwargs:
res, state = [], []
for enabled in (False, True):
kwargs_clone = deepcopy(kwargs)
kwargs_clone[kwarg] = enabled
params_clone = []
for p in params:
p_clone = p.clone().detach()
if p.requires_grad:
p_clone.requires_grad = True
p_clone.grad = p.grad.clone().detach()
params_clone.append(p_clone)
optimizer = optimizer_constructor(params_clone, **kwargs_clone)
for _ in range(kIterations):
optimizer.step()
state.append(optimizer.state)
res.append(params_clone)
st_state = state[0]
mt_state = state[1]
for st_p, mt_p in zip(res[0], res[1]):
self.assertEqual(st_p, mt_p)
# check that optimizer states are the same
st_p_state = st_state[st_p]
mt_p_state = mt_state[mt_p]
for k in st_p_state:
actual = mt_p_state[k]
# If `torch.optim.Adam` is `__init__`ed with either `fused=True` or `capturable=True`,
# `step` Tensor is 1D while usually it's 0D.
if (
k == "step"
and isinstance(actual, torch.Tensor)
and actual.ndim == 1
):
actual = actual[0]
self.assertEqual(st_p_state[k], actual)
|
params = [
torch.rand(2, 3, dtype=torch.float64, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device="cuda:0", requires_grad=True),
torch.rand(2, 3, dtype=torch.float64, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.float32, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.float16, device="cuda:1", requires_grad=True),
torch.rand(2, 3, dtype=torch.bfloat16, device="cuda:1", requires_grad=True),
torch.randint(
1024, (2, 3), dtype=torch.int64, device="cuda:1", requires_grad=False
),
]
for p in params:
if p.requires_grad:
p.grad = torch.rand_like(p, device=p.device, dtype=p.dtype)
kIterations = 7 if impl == "foreach" else 1
optim_inputs = optim_info.optim_inputs_func(device=device)
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
updated_params, state = [], []
kwargs = deepcopy(optim_input.kwargs)
if kwargs.get("capturable", False) and _get_device_type(device) == "cpu":
# capturable is not supported on CPU
continue
for use_impl in (False, True):
kwargs[impl] = use_impl
params_clone = []
for p in params:
p_clone = p.clone().detach()
if p.requires_grad:
p_clone.requires_grad = True
p_clone.grad = p.grad.clone().detach()
params_clone.append(p_clone)
optimizer = optim_cls(params_clone, **kwargs)
for _ in range(kIterations):
optimizer.step()
state.append(optimizer.state)
updated_params.append(params_clone)
og_state, new_state = state
for og_p, new_p in zip(updated_params[0], updated_params[1]):
# Increasing the tolerance as we are collating lots of ops together for optimizers and
# the designated tolerances are for single op only.
single_rtol, single_atol = torch.testing._comparison.get_tolerances(
new_p.dtype, rtol=None, atol=None
)
rtol = 5 * single_rtol
atol = 5 * single_atol
self.assertEqual(og_p, new_p, rtol=rtol, atol=atol)
# check that optimizer states are the same
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
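A minimal CPU-only sketch of what `_test_derived_optimizers_varying_tensors` asserts, limited to the foreach path and mixed dtypes (no multi-GPU or fused requirement):

import torch

# Minimal sketch: run the same parameters through the single-tensor and the
# foreach Adam implementations and check the resulting updates agree.
base = [
    torch.randn(2, 3, dtype=torch.float64, requires_grad=True),
    torch.randn(2, 3, dtype=torch.float32, requires_grad=True),
]
for p in base:
    p.grad = torch.rand_like(p)

results = []
for use_foreach in (False, True):
    clones = []
    for p in base:
        c = p.clone().detach().requires_grad_(True)
        c.grad = p.grad.clone()
        clones.append(c)
    opt = torch.optim.Adam(clones, lr=1e-3, foreach=use_foreach)
    for _ in range(7):
        opt.step()
    results.append(clones)

for single, multi in zip(results[0], results[1]):
    torch.testing.assert_close(single, multi)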
torch
|
test/test_optim.py
|
test_multi_tensor_optimizers_with_varying_tensors
|
def test_multi_tensor_optimizers_with_varying_tensors(self):
optimizer_pairs_with_flags = [
(optim.Adam, dict(weight_decay=1.0, amsgrad=True, fused=False)),
(optim.Adam, dict(weight_decay=1.0, amsgrad=False, fused=False)),
(optim.Adam, dict(weight_decay=0.0, amsgrad=True, fused=False)),
(optim.Adam, dict(weight_decay=0.0, amsgrad=False, fused=False)),
(optim.AdamW, dict(weight_decay=1.0, amsgrad=True)),
(optim.AdamW, dict(weight_decay=1.0, amsgrad=False)),
(optim.AdamW, dict(weight_decay=0.0, amsgrad=True)),
(optim.AdamW, dict(weight_decay=0.0, amsgrad=False)),
(optim.NAdam, dict(weight_decay=0.0, momentum_decay=6e-3)),
(optim.NAdam, dict(weight_decay=1.0, momentum_decay=6e-3)),
(optim.NAdam, dict(weight_decay=0.0, momentum_decay=4e-3)),
(optim.NAdam, dict(weight_decay=0.01, momentum_decay=4e-3)),
(
optim.SGD,
dict(lr=0.2, momentum=1, dampening=0, weight_decay=1, nesterov=True),
),
(
optim.SGD,
dict(lr=0.2, momentum=1, dampening=0.5, weight_decay=1, nesterov=False),
),
(optim.RAdam, dict(weight_decay=0, eps=1e-6)),
(optim.RAdam, dict(weight_decay=0)),
(optim.RAdam, dict(weight_decay=1, eps=1e-6)),
(optim.RAdam, dict(weight_decay=1)),
(optim.RMSprop, dict(weight_decay=1, momentum=1, centered=True)),
(optim.RMSprop, dict(weight_decay=1, momentum=0, centered=True)),
(optim.RMSprop, dict(weight_decay=1, momentum=1, centered=False)),
(optim.RMSprop, dict(weight_decay=0, momentum=1, centered=False)),
(optim.Rprop, dict(lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50))),
(optim.ASGD, dict(weight_decay=0)),
(optim.ASGD, dict(weight_decay=1)),
(optim.Adamax, dict(weight_decay=0)),
(optim.Adamax, dict(weight_decay=1)),
(optim.Adadelta, dict(weight_decay=0)),
(optim.Adadelta, dict(weight_decay=1)),
(optim.Adagrad, dict(weight_decay=0)),
(optim.Adagrad, dict(weight_decay=1)),
]
self._test_derived_optimizers_varying_tensors(optimizer_pairs_with_flags, "foreach")
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_rmsprop
|
def test_rmsprop(self):
for foreach in (False, True):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
[weight, bias], lr=1e-2, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
centered=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
centered=True,
momentum=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
momentum=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
momentum=0.1,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(lambda param: optim.RMSprop(param, foreach=foreach))
self._test_complex_2d(
lambda param: optim.RMSprop(param, centered=True, foreach=foreach)
)
self._test_complex_2d(
lambda param: optim.RMSprop(param, momentum=0.1, foreach=foreach)
)
self._test_complex_2d(
lambda param: optim.RMSprop(param, maximize=True, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], centered=True, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], momentum=0.1, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], maximize=True, foreach=foreach)
)
with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
optim.RMSprop(None, lr=1e-2, momentum=-1.0, foreach=foreach)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_rprop
|
def test_rprop(self):
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(
0
) == (8, 6)
for foreach in (False, True):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Rprop(
[weight, bias], lr=2e-4, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Rprop(
self._build_params_dict(weight, bias, lr=1e-2),
lr=2e-4,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
atol=4e-5 if is_cuda_sm86 else None,
rtol=3e-5 if is_cuda_sm86 else None,
)
self._test_complex_2d(lambda param: optim.Rprop(param, foreach=foreach))
self._test_complex_optimizer(
lambda param: optim.Rprop([param], lr=0.001, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.Rprop(
[param], lr=0.001, maximize=True, foreach=foreach
)
)
with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
optim.Rprop(None, lr=1e-2, etas=(1.0, 0.5), foreach=foreach)
|
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
optimizer.step(closure)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_lbfgs_return_type
|
def test_lbfgs_return_type(self):
params = [torch.randn(10, 5), torch.randn(10)]
opt1 = optim.LBFGS(params, 0.01, tolerance_grad=math.inf)
opt2 = optim.LBFGS(params, 0.01, tolerance_grad=-math.inf)
def closure():
return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
param = torch.randn((5, 1), device=device, dtype=dtype, requires_grad=True)
old_param = param.clone().detach()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
closure
|
def closure():
return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
def closure():
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
if optim_info.step_requires_closure:
loss = optimizer.step(closure)
else:
loss = closure()
optimizer.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
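A minimal sketch of the closure pattern used in the snippet above, on a small least-squares problem; LBFGS is picked here because its step() requires a closure:

import torch

# Minimal sketch: the closure pattern required by optimizers such as LBFGS,
# whose step() re-evaluates the model to get a fresh loss.
weight = torch.randn(5, 3, requires_grad=True)
bias = torch.randn(5, requires_grad=True)
x = torch.randn(3)

opt = torch.optim.LBFGS([weight, bias], lr=0.1)

def closure():
    opt.zero_grad()
    loss = (weight.mv(x) + bias).pow(2).sum()
    loss.backward()
    return loss

initial_value = closure().item()
for _ in range(5):
    opt.step(closure)
assert closure().item() < initial_value  # convex problem, loss should drop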
torch
|
test/test_optim.py
|
test_invalid_param_type
|
def test_invalid_param_type(self):
with self.assertRaises(TypeError):
optim.SGD(Parameter(torch.randn(5, 5)), lr=3)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
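A minimal sketch of the contract `test_invalid_param_type` pins down: the constructor wants an iterable of tensors or param-group dicts, so a bare Parameter has to be wrapped in a list.

import torch
from torch.nn import Parameter

# Minimal sketch: passing a single Parameter (a Tensor) raises TypeError;
# wrapping it in a list is the supported form.
p = Parameter(torch.randn(5, 5))
try:
    torch.optim.SGD(p, lr=0.1)
except TypeError:
    pass  # expected: params should be an iterable of Tensors or dicts
opt = torch.optim.SGD([p], lr=0.1)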
torch
|
test/test_optim.py
|
test_duplicate_params_in_param_group
|
def test_duplicate_params_in_param_group(self):
param = Parameter(torch.randn(5, 5))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
optim.SGD([param, param], lr=0.1)
self.assertEqual(len(w), 1)
self.assertIn(
"a parameter group with duplicate parameters", str(w[0].message)
)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_no_grad_for_all_params
|
def test_no_grad_for_all_params(self):
params = [torch.randn(5, 5, requires_grad=False) for _ in range(2)]
optimizer_list = [
optim.Adadelta,
optim.AdamW,
optim.Adam,
optim.Adagrad,
optim.Adamax,
optim.RMSprop,
optim.SGD,
optim.SparseAdam,
optim.ASGD,
]
for optim_ctr in optimizer_list:
opt = optim_ctr(params, lr=0.1)
# make sure step can still run even if
# all params have no grad
opt.step()
# make sure that `state_steps` is correctly either updated or not updated when `found_inf`.
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
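A minimal sketch of what `test_no_grad_for_all_params` covers, restricted to two dense optimizers: step() with no gradients anywhere is a no-op rather than an error.

import torch

# Minimal sketch: step() with no gradients anywhere is a harmless no-op
# (two dense optimizers shown; the deleted test looped over many more).
params = [torch.randn(5, 5, requires_grad=False) for _ in range(2)]
before = [p.clone() for p in params]
for ctor in (torch.optim.SGD, torch.optim.Adam):
    opt = ctor(params, lr=0.1)
    opt.step()
for p, q in zip(params, before):
    torch.testing.assert_close(p, q)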
torch
|
test/test_optim.py
|
test_adagrad
|
def test_adagrad(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
[weight, bias], lr=1e-1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
[weight, bias],
lr=1e-1,
initial_accumulator_value=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
[lambda opt: ReduceLROnPlateau(opt)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
[
lambda opt: ReduceLROnPlateau(opt),
lambda opt: ExponentialLR(opt, gamma=0.99),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid lr_decay value: -0.5"):
optim.Adagrad(None, lr=1e-2, lr_decay=-0.5)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_adagrad_complex
|
def test_adagrad_complex(self):
for foreach in (False, True):
self._test_complex_optimizer(
lambda param: optim.Adagrad([param], lr=1e-1, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.Adagrad(
[param],
lr=1e-1,
initial_accumulator_value=0.1,
foreach=foreach,
)
)
|
loss = (weight.mv(input) + bias).pow(2).sum()
initial_value = loss.item()
for _ in range(20):
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
irrelevant.grad = torch.rand_like(irrelevant)
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
irrelevant.grad = irrelevant.grad.to_sparse()
optimizer.step()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
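A minimal sketch of what the shared complex-optimizer helper behind `test_adagrad_complex` asserts, on the assumption that complex parameters are stepped as their view_as_real counterparts:

import torch

# Minimal sketch: a complex parameter should be updated exactly as its
# view_as_real counterpart (Adagrad shown; the helper checks this per optimizer).
complex_param = torch.randn(3, 2, dtype=torch.complex64, requires_grad=True)
real_param = torch.view_as_real(complex_param.detach()).clone().requires_grad_(True)

grad_c = torch.randn_like(complex_param)
complex_param.grad = grad_c
real_param.grad = torch.view_as_real(grad_c)

opt_c = torch.optim.Adagrad([complex_param], lr=1e-1)
opt_r = torch.optim.Adagrad([real_param], lr=1e-1)
opt_c.step()
opt_r.step()
torch.testing.assert_close(torch.view_as_real(complex_param), real_param)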
torch
|
test/test_optim.py
|
test_adamax
|
def test_adamax(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
[weight, bias], lr=1e-1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
[weight, bias],
lr=1e-1,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(optim.Adamax)
self._test_complex_2d(functools.partial(optim.Adamax, foreach=True))
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 1: 1.0"
):
optim.Adamax(None, lr=1e-2, betas=(0.0, 1.0))
|
# Test that the direction of loss moved appropriately
if optim_input.kwargs.get("maximize", False):
self.assertGreater(loss.item(), initial_value)
else:
self.assertLess(loss.item(), initial_value)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
closure
|
def closure():
return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
def closure():
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
if optim_info.step_requires_closure:
loss = optimizer.step(closure)
else:
loss = closure()
optimizer.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
test_fused_optimizers_with_varying_tensors
|
def test_fused_optimizers_with_varying_tensors(self):
optimizer_pairs_with_flags = tuple(itertools.product(
(optim.Adam, optim.AdamW),
(
dict(weight_decay=1., amsgrad=False),
dict(weight_decay=1., amsgrad=True),
dict(weight_decay=0., amsgrad=False),
dict(weight_decay=0., amsgrad=True),
),
))
self._test_derived_optimizers_varying_tensors(optimizer_pairs_with_flags, "fused")
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_adamw
|
def test_adamw(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias],
lr=1e-3,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias],
lr=1e-3,
weight_decay=1,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(optim.AdamW)
self._test_complex_2d(functools.partial(optim.AdamW, foreach=True))
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
optim.AdamW(None, lr=1e-2, weight_decay=-1)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
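A minimal sketch of the `maximize` flag threaded through the cases above, with AdamW as one example: with maximize=True the optimizer ascends the objective instead of descending it.

import torch

# Minimal sketch: maximize=True ascends the objective instead of descending it.
w = torch.zeros(3, requires_grad=True)
opt = torch.optim.AdamW([w], lr=1e-1, maximize=True)
for _ in range(50):
    opt.zero_grad()
    objective = -(w - 1.0).pow(2).sum()  # maximized at w == 1
    objective.backward()
    opt.step()
print(w)  # entries should end up close to 1.0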
torch
|
test/test_optim.py
|
test_sparse_adam
|
def test_sparse_adam(self):
self._test_rosenbrock_sparse(
lambda params: optim.SparseAdam(params, lr=4e-2), [], True
)
self._test_rosenbrock_sparse(
lambda params: optim.SparseAdam(params, lr=4e-2, maximize=True),
[],
True,
True,
)
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 0: 1.0"
):
optim.SparseAdam(None, lr=1e-2, betas=(1.0, 0.0))
with self.assertRaisesRegex(
ValueError, "SparseAdam requires dense parameter tensors"
):
optim.SparseAdam([torch.zeros(3, layout=torch.sparse_coo)])
with self.assertRaisesRegex(
ValueError, "SparseAdam requires dense parameter tensors"
):
optim.SparseAdam([{"params": [torch.zeros(3, layout=torch.sparse_coo)]}])
# ROCm precision is too low to pass this test
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
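A minimal sketch of the usage `test_sparse_adam` protects: SparseAdam takes dense parameters but expects sparse gradients, for example from an embedding layer built with sparse=True.

import torch

# Minimal sketch: dense parameters, sparse gradients via nn.Embedding(sparse=True).
emb = torch.nn.Embedding(10, 4, sparse=True)
opt = torch.optim.SparseAdam(emb.parameters(), lr=4e-2)

idx = torch.tensor([1, 3, 3])
loss = emb(idx).sum()
loss.backward()  # emb.weight.grad is a sparse COO tensor
opt.step()
opt.zero_grad()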
torch
|
test/test_optim.py
|
test_adadelta
|
def test_adadelta(self):
# Handles https://github.com/pytorch/pytorch/issues/69698
self.rel_tol = 4e-3
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
[weight, bias], maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
self._build_params_dict(weight, bias, rho=0.95),
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
self._build_params_dict(weight, bias, rho=0.95),
maximize=maximize,
foreach=foreach,
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
[weight, bias], weight_decay=1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid rho value: 1.1"):
optim.Adadelta(None, lr=1e-2, rho=1.1)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_functional_fused_optimizer_with_foundinf
|
def test_functional_fused_optimizer_with_foundinf(self):
    if not torch.cuda.is_available():
        self.skipTest("CUDA is required.")

    from torch.optim import adam, adamw

    num_tensors = 5
    for functional_optim, amsgrad in itertools.product((adam.adam, adamw.adamw), (False, True)):
        params, grads, exp_avgs, exp_avg_sqs = [[torch.ones((1,), device="cuda") for _ in range(num_tensors)] for _ in range(4)]
        max_exp_avg_sqs = [torch.ones((1,), device="cuda") for _ in range(num_tensors)] if amsgrad else []
        state_steps = [torch.ones((1,), dtype=torch.float32, device="cuda") for _ in range(num_tensors)]
        grad_scale = torch.ones((1,), dtype=torch.float32, device="cuda")
        found_inf = torch.ones((1,), dtype=torch.float32, device="cuda")

        functional_optim(
            params,
            grads,
            exp_avgs,
            exp_avg_sqs,
            max_exp_avg_sqs,
            state_steps,
            foreach=False,
            capturable=False,
            fused=True,
            amsgrad=amsgrad,
            beta1=0.9,
            beta2=0.99,
            lr=1e-2,
            weight_decay=0.0,
            eps=1e-8,
            maximize=False,
            grad_scale=grad_scale,
            found_inf=found_inf,
        )

        self.assertEqual(
            state_steps,
            [
                torch.ones((1,), dtype=torch.float32, device="cuda")
                for _ in range(num_tensors)
            ],
        )
|
        return torch.tensor([1], device=device, dtype=dtype)

    for optim_input in all_optim_inputs:
        kwargs = optim_input.kwargs

        # params will decay even if grads are empty if weight_decay != 0,
        # and capturable doesn't work for CPU tensors
        if kwargs.get("weight_decay", 0) != 0:
            continue

        # AdamW params will be updated regardless of grads due to lr, so make lr smaller
        if optim_cls.__name__ == "AdamW":
            kwargs["lr"] = (
                torch.tensor(1e-5)
                if isinstance(kwargs.get("lr", 1e-5), torch.Tensor)
                else 1e-5
            )

        if kwargs.get("differentiable", False):
            params = [param.clone()]
        else:
            params = [param]

        optimizer = optim_cls(params, **kwargs)

        if optim_info.only_supports_sparse_grads:
            # Intentionally construct a multidimensional empty v for the sparse grad
            # Single dim v passes the test while multidim correctly repros the issue
            # https://github.com/pytorch/pytorch/issues/82486
            i = torch.empty((1, 0), device=device, dtype=dtype)
            v = torch.empty((0, 1), device=device, dtype=dtype)
            params[0].grad = torch.sparse_coo_tensor(
                i, v, (5, 1), device=device, dtype=dtype
            )
        else:
            params[0].grad = torch.zeros_like(params[0])

        optimizer.step(closure)
        self.assertEqual(old_param, params[0])
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
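The deleted test above feeds a non-zero found_inf into the functional fused Adam/AdamW kernels and checks that the step counters stay untouched. At the user level that plumbing surfaces through the gradient scaler, which suppresses the parameter update when infs are detected. A rough, CUDA-only sketch, with the inf injected by hand purely for illustration:

import torch

if torch.cuda.is_available():
    w = torch.nn.Parameter(torch.ones(1, device="cuda"))
    opt = torch.optim.Adam([w], lr=1e-2, fused=True)
    scaler = torch.cuda.amp.GradScaler()

    loss = (w * float("inf")).sum()  # forces an inf gradient
    scaler.scale(loss).backward()
    scaler.step(opt)   # inf detected, so the update is suppressed
    scaler.update()
    print(w)           # parameter is unchanged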
torch
|
test/test_optim.py
|
post_hook
|
def post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
    nonlocal data
    data += 2

params = [torch.Tensor([1, 1])]
opt = SGD(params, lr=0.001)
data = 2
hook_handle = opt.register_step_post_hook(post_hook)

opt.step()
opt.step()
# check if post hooks were registered
self.assertEqual(data, 6)

# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
opt.step()
self.assertEqual(data, 6)
|
def post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
    nonlocal data
    data += 2

params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
pre_hook
|
def pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
    nonlocal data
    data += 2

params = [torch.Tensor([1, 1])]
opt = SGD(params, lr=0.001)
data = 5
hook_handle = opt.register_step_pre_hook(pre_hook)

opt.step()
opt.step()
# check if pre hooks were registered
self.assertEqual(data, 9)

# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
opt.step()
self.assertEqual(data, 9)
|
def pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
    nonlocal data
    data += 2

params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
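Both hook tests above follow the same pattern: register a hook, step twice, check the counter, remove the handle, and verify that further steps leave it alone. A condensed sketch of that flow with one pre- and one post-hook (the list-based counter is illustrative):

import torch
from torch.optim import SGD

param = torch.nn.Parameter(torch.tensor([1.0, 1.0]))
opt = SGD([param], lr=0.001)
calls = []

pre_handle = opt.register_step_pre_hook(lambda optimizer, args, kwargs: calls.append("pre"))
post_handle = opt.register_step_post_hook(lambda optimizer, args, kwargs: calls.append("post"))

opt.step()
assert calls == ["pre", "post"]

pre_handle.remove()
post_handle.remove()
opt.step()
assert calls == ["pre", "post"]  # removed hooks no longer fire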
torch
|
test/test_optim.py
|
fwd_bwd
|
def fwd_bwd(optim, w, b, i):
    optim.zero_grad()
    loss = (w.mv(i) + b).pow(2).sum()
    loss.backward()
    if optim_info.only_supports_sparse_grads:
        if w.grad is not None:
            w.grad = w.grad.to_sparse()
        if b.grad is not None:
            b.grad = b.grad.to_sparse()
    return loss

for optim_input in all_optim_inputs:
    optimizer = optim_cls(params, **optim_input.kwargs)
    closure = functools.partial(fwd_bwd, optimizer, weight, bias, input)

    # Prime the optimizer
    for _ in range(10):
        if optim_info.step_requires_closure:
            optimizer.step(closure)
        else:
            closure()
            optimizer.step()

    # Clone the weights and construct a new optimizer for them
    with torch.no_grad():
        weight_c = Parameter(weight.clone())
        bias_c = Parameter(bias.clone())
    optimizer_c = optim_cls([weight_c, bias_c], **optim_input.kwargs)
    closure_c = functools.partial(fwd_bwd, optimizer_c, weight_c, bias_c, input)

    # Load the state dict from the original optimizer into the new one
    optimizer_c.load_state_dict(deepcopy(optimizer.state_dict()))

    # Run both optimizers in parallel
    for _ in range(10):
        if optim_info.step_requires_closure:
            optimizer.step(closure)
            optimizer_c.step(closure_c)
        else:
            closure()
            closure_c()
            optimizer.step()
            optimizer_c.step()

        self.assertEqual(weight, weight_c)
        self.assertEqual(bias, bias_c)

    # Make sure state dict is deterministic with equal (not identical) parameters
    self.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())

    # Make sure repeated parameters have identical representation (see #36831)
    optimizer_c.param_groups.extend(optimizer_c.param_groups)
    self.assertEqual(
        optimizer.state_dict()["param_groups"][-1],
        optimizer_c.state_dict()["param_groups"][-1],
    )
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
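The added fwd_bwd test drives two optimizers in lockstep after a state_dict round trip. The core API it exercises can be sketched independently of the test harness; SGD with momentum is an arbitrary choice here:

import torch
from copy import deepcopy
from torch.nn import Parameter
from torch.optim import SGD

w = Parameter(torch.randn(3))
opt = SGD([w], lr=0.1, momentum=0.9)
w.sum().backward()
opt.step()  # populates a momentum buffer in opt.state

# A second optimizer over a cloned parameter picks up the same state.
w_c = Parameter(w.detach().clone())
opt_c = SGD([w_c], lr=0.1, momentum=0.9)
opt_c.load_state_dict(deepcopy(opt.state_dict()))

assert torch.equal(opt.state[w]["momentum_buffer"],
                   opt_c.state[w_c]["momentum_buffer"])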
torch
|
test/test_optim.py
|
local_post_hook
|
def local_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
    nonlocal data
    data.append(2)

params = [torch.Tensor([1, 1])]
opt1 = SGD(params, lr=0.001)
opt2 = Adam(params, lr=0.01)
data = []

# register global hooks to both optimizers
global_pre_handle = register_optimizer_step_pre_hook(global_pre_hook)
global_post_handle = register_optimizer_step_post_hook(global_post_hook)

# register local hooks
first_pre_handle = opt1.register_step_pre_hook(local_pre_hook)
first_post_handle = opt1.register_step_post_hook(local_post_hook)
second_pre_handle = opt2.register_step_pre_hook(local_pre_hook)
second_post_handle = opt2.register_step_post_hook(local_post_hook)

opt1.step()
self.assertListEqual(data, [0, 1, 2, 5])
opt2.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5])
opt1.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])

# remove all hooks
global_pre_handle.remove()
global_post_handle.remove()
first_pre_handle.remove()
first_post_handle.remove()
second_pre_handle.remove()
second_post_handle.remove()

opt1.step()
opt2.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
|
def local_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
    nonlocal data
    data.append(2)

params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
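The global variants used in this test hook every optimizer instance rather than a single one. A small sketch of that behaviour (the recorded names are illustrative):

import torch
from torch.optim import SGD, Adam
from torch.optim.optimizer import register_optimizer_step_pre_hook

params = [torch.nn.Parameter(torch.tensor([1.0, 1.0]))]
opt1 = SGD(params, lr=0.001)
opt2 = Adam(params, lr=0.01)

calls = []
handle = register_optimizer_step_pre_hook(lambda opt, args, kwargs: calls.append(type(opt).__name__))

opt1.step()
opt2.step()
assert calls == ["SGD", "Adam"]  # the global hook saw both optimizers

handle.remove()
opt1.step()
assert calls == ["SGD", "Adam"]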
torch
|
test/test_optim.py
|
test_fused_optimizer_raises
|
def test_fused_optimizer_raises(self):
    if not torch.cuda.is_available():
        self.skipTest("Requires CUDA devices")
    for optimizer_ctor in (torch.optim.Adam, torch.optim.AdamW):
        with self.assertRaisesRegex(RuntimeError, "`fused` and `foreach` cannot be `True` together."):
            optimizer_ctor([torch.empty((), device="cuda")], foreach=True, fused=True)
        with self.assertRaisesRegex(RuntimeError, "`fused` does not support `differentiable`"):
            optimizer_ctor([torch.empty((), device="cuda")], differentiable=True, fused=True)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
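The guard exercised by this deleted test sits in the Adam/AdamW constructors; since fused optimizers require CUDA parameters, the sketch below mirrors the test's device gate:

import torch

if torch.cuda.is_available():
    p = torch.nn.Parameter(torch.zeros(1, device="cuda"))
    try:
        torch.optim.Adam([p], foreach=True, fused=True)
    except RuntimeError as err:
        print(err)  # e.g. `fused` and `foreach` cannot be `True` together.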