Dataset columns (Hugging Face viewer schema):

column           type            values / lengths
library          stringclasses   1 value
test_file        stringclasses   785 values
test_function    stringlengths   1 to 295 characters
before           stringlengths   0 to 448k characters
after            stringlengths   0 to 487k characters
context_before   stringclasses   947 values
context_after    stringlengths   0 to 16.3k characters
commit_before    stringclasses   1 value
commit_after     stringclasses   1 value
change_type      stringclasses   3 values
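A minimal sketch of reading rows with this schema via the Hugging Face datasets library; the repo id "user/dataset" is a hypothetical placeholder for the actual dataset path, which is not given here.

from datasets import load_dataset

# Hypothetical repo id; substitute the real dataset path.
ds = load_dataset("user/dataset", split="train")
row = ds[0]
# `before`/`after` hold the test source before and after the change;
# `change_type` takes one of three values (e.g. "added" or "modified").
print(row["library"], row["test_file"], row["test_function"], row["change_type"])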
torch
test/test_xpu.py
test_stream_event_repr
def test_stream_event_repr(self):
    s = torch.xpu.current_stream()
    self.assertTrue("torch.xpu.Stream" in str(s))
    e = torch.xpu.Event()
    self.assertTrue("torch.xpu.Event(uninitialized)" in str(e))
    s.record_event(e)
    self.assertTrue("torch.xpu.Event" in str(e))
import subprocess
import sys
import tempfile
import unittest

import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    onlyXPU,
    OpDTypes,
    ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
    NoTest,
    run_tests,
    suppress_warnings,
    TEST_WITH_UBSAN,
    TEST_XPU,
    TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential

TEST_MULTIXPU = torch.xpu.device_count() > 1

cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")

any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one

_xpu_computation_op_list = [
    "fill",
    "zeros",
    "zeros_like",
    "clone",
    "view_as_real",
    "view_as_complex",
    "view",
    "resize_",
    "resize_as_",
    "add",
    "sub",
    "mul",
    "div",
    "abs",
]
_xpu_tensor_factory_op_list = [
    "as_strided",
    "empty",
    "empty_strided",
]
_xpu_not_test_dtype_op_list = [
    "resize_",  # Skipped by CPU
    "resize_as_",  # Skipped by CPU
    "abs",  # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
    op for op in ops_and_refs if op.name in _xpu_computation_op_list
]

class TestXpu(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_events
def test_events(self):
    stream = torch.xpu.current_stream()
    event = torch.xpu.Event()
    self.assertTrue(event.query())
    stream.record_event(event)
    event.synchronize()
    self.assertTrue(event.query())
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_generic_stream_event
def test_generic_stream_event(self):
    stream = torch.Stream("xpu")
    self.assertEqual(stream.device_index, torch.xpu.current_device())
    xpu_stream = torch.xpu.Stream(
        stream_id=stream.stream_id,
        device_index=stream.device_index,
        device_type=stream.device_type,
    )
    self.assertEqual(stream.stream_id, xpu_stream.stream_id)
    self.assertNotEqual(stream.stream_id, torch.xpu.current_stream().stream_id)

    event1 = torch.Event("xpu")
    event2 = torch.Event("xpu")
    self.assertEqual(event1.event_id, 0)
    a = torch.randn(1000)
    b = torch.randn(1000)
    with torch.xpu.stream(xpu_stream):
        a_xpu = a.to("xpu", non_blocking=True)
        b_xpu = b.to("xpu", non_blocking=True)
        self.assertEqual(stream.stream_id, torch.xpu.current_stream().stream_id)
    event1.record(stream)
    event1.synchronize()
    self.assertTrue(event1.query())
    c_xpu = a_xpu + b_xpu
    event2.record()
    event2.synchronize()
    self.assertTrue(event2.query())
    self.assertNotEqual(event1.event_id, event2.event_id)
    self.assertEqual(c_xpu.cpu(), a + b)
    with self.assertRaisesRegex(
        NotImplementedError, "elapsedTime is not supported by XPU backend."
    ):
        event1.elapsed_time(event2)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
to_cpu
def to_cpu(arg):
    if isinstance(arg, torch.Tensor):
        return arg.to(device="cpu")
    return arg

samples = op.reference_inputs(device, dtype)

for sample in samples:
    cpu_sample = sample.transform(to_cpu)
    xpu_results = op(sample.input, *sample.args, **sample.kwargs)
    cpu_results = op(cpu_sample.input, *cpu_sample.args, **cpu_sample.kwargs)

    xpu_results = sample.output_process_fn_grad(xpu_results)
    cpu_results = cpu_sample.output_process_fn_grad(cpu_results)

    # Lower tolerance because we are running this as a `@slowTest`
    # Don't want the periodic tests to fail frequently
    self.assertEqual(xpu_results, cpu_results, atol=1e-4, rtol=1e-4)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but stopping at the _xpu_computation_ops list, with no trailing class declaration)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_non_standard_bool_values
def test_non_standard_bool_values(self, device, dtype, op):
    # Test boolean values other than 0x00 and 0x01 (gh-54789)
    def convert_boolean_tensors(x):
        if not isinstance(x, torch.Tensor) or x.dtype != torch.bool:
            return x

        # Map False -> 0 and True -> Random value in [2, 255]
        true_vals = torch.randint(
            2, 255, x.shape, dtype=torch.uint8, device=x.device
        )
        false_vals = torch.zeros((), dtype=torch.uint8, device=x.device)
        x_int = torch.where(x, true_vals, false_vals)

        ret = x_int.view(torch.bool)
        self.assertEqual(ret, x)
        return ret

    for sample in op.sample_inputs(device, dtype):
        expect = op(sample.input, *sample.args, **sample.kwargs)

        transformed = sample.transform(convert_boolean_tensors)
        actual = op(transformed.input, *transformed.args, **transformed.kwargs)

        self.assertEqual(expect, actual)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
convert_boolean_tensors
def convert_boolean_tensors(x):
    if not isinstance(x, torch.Tensor) or x.dtype != torch.bool:
        return x

    # Map False -> 0 and True -> Random value in [2, 255]
    true_vals = torch.randint(
        2, 255, x.shape, dtype=torch.uint8, device=x.device
    )
    false_vals = torch.zeros((), dtype=torch.uint8, device=x.device)
    x_int = torch.where(x, true_vals, false_vals)

    ret = x_int.view(torch.bool)
    self.assertEqual(ret, x)
    return ret

for sample in op.sample_inputs(device, dtype):
    expect = op(sample.input, *sample.args, **sample.kwargs)

    transformed = sample.transform(convert_boolean_tensors)
    actual = op(transformed.input, *transformed.args, **transformed.kwargs)

    self.assertEqual(expect, actual)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but stopping at the _xpu_computation_ops list, with no trailing class declaration)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_serialization_array_with_storage
def test_serialization_array_with_storage(self):
    x = torch.randn(5, 5).xpu()
    y = torch.zeros(2, 5, dtype=torch.int, device="xpu")
    q = [x, y, x, y.storage()]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(q, f)
        f.seek(0)
        q_copy = torch.load(f)
    self.assertEqual(q_copy, q, atol=0, rtol=0)
    q_copy[0].fill_(5)
    self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
    self.assertEqual(q_copy[0].dtype, torch.float)
    self.assertEqual(q_copy[1].dtype, torch.int)
    self.assertEqual(q_copy[2].dtype, torch.float)
    self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
    self.assertTrue(isinstance(q_copy[3]._untyped_storage, torch.UntypedStorage))
    q_copy[1].fill_(10)
    y.fill_(10)
    self.assertEqual(q_copy[3], y.storage())
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_serialization_array_with_empty
def test_serialization_array_with_empty(self):
    x = [
        torch.randn(4, 4).xpu(),
        torch.tensor([], dtype=torch.float, device=torch.device("xpu")),
    ]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(x, f)
        f.seek(0)
        x_copy = torch.load(f)
    for original, copy in zip(x, x_copy):
        self.assertEqual(copy, original)
        self.assertIs(type(copy), type(original))
        self.assertEqual(copy.get_device(), original.get_device())
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_multi_device_behavior
def test_multi_device_behavior(self):
    current_device = torch.xpu.current_device()
    target_device = (current_device + 1) % torch.xpu.device_count()

    with torch.xpu.device(target_device):
        self.assertEqual(target_device, torch.xpu.current_device())
    self.assertEqual(current_device, torch.xpu.current_device())

    with torch.xpu._DeviceGuard(target_device):
        self.assertEqual(target_device, torch.xpu.current_device())
    self.assertEqual(current_device, torch.xpu.current_device())
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_get_device_properties
def test_get_device_properties(self):
    current_device = torch.xpu.current_device()
    device_properties = torch.xpu.get_device_properties(current_device)
    self.assertEqual(device_properties, torch.xpu.get_device_properties(None))
    self.assertEqual(device_properties, torch.xpu.get_device_properties())

    device_name = torch.xpu.get_device_name(current_device)
    self.assertEqual(device_name, torch.xpu.get_device_name(None))
    self.assertEqual(device_name, torch.xpu.get_device_name())

    device_capability = torch.xpu.get_device_capability(current_device)
    self.assertTrue(device_capability["max_work_group_size"] > 0)
    self.assertTrue(device_capability["max_num_sub_groups"] > 0)
    self.assertEqual(
        device_properties.driver_version, device_capability["driver_version"]
    )
    self.assertEqual(device_properties.has_fp16, device_capability["has_fp16"])
    self.assertEqual(device_properties.has_fp64, device_capability["has_fp64"])
    self.assertEqual(
        device_properties.has_atomic64, device_capability["has_atomic64"]
    )
    self.assertEqual(
        device_properties.has_bfloat16_conversions,
        device_capability["has_bfloat16_conversions"],
    )
    self.assertEqual(
        device_properties.has_subgroup_matrix_multiply_accumulate,
        device_capability["has_subgroup_matrix_multiply_accumulate"],
    )
    self.assertEqual(
        device_properties.has_subgroup_matrix_multiply_accumulate_tensor_float32,
        device_capability["has_subgroup_matrix_multiply_accumulate_tensor_float32"],
    )
    self.assertEqual(
        device_properties.has_subgroup_2d_block_io,
        device_capability["has_subgroup_2d_block_io"],
    )
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
run_model
def run_model(model, input):
    input_xpu = input.clone().to('xpu')
    model_xpu = copy.deepcopy(model).to('xpu')
    loss_xpu = model_xpu(input_xpu).sum()
    loss = model(input).sum()
    torch.testing.assert_close(loss_xpu.cpu(), loss)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_streams
def test_streams(self):
    s0 = torch.xpu.Stream()
    torch.xpu.set_stream(s0)
    s1 = torch.xpu.current_stream()
    self.assertEqual(s0, s1)
    s2 = torch.xpu.Stream()
    self.assertFalse(s0 == s2)
    torch.xpu.set_stream(s2)
    with torch.xpu.stream(s0):
        self.assertEqual(s0, torch.xpu.current_stream())
    self.assertEqual(s2, torch.xpu.current_stream())
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, ending with "class TestXpu(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xnnpack_integration.py
test_decomposed_linear
def test_decomposed_linear(self):
    data_shape = [2, 32]
    weight_output_dim = 24
    weight_shape = (weight_output_dim, data_shape[-1])

    class DecomposedLinearAddmm(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)

        def forward(self, x):
            weight_t = self.weight.t()
            return torch.addmm(self.bias, x, weight_t)

    class DecomposedLinearMatmulAdd(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)

        def forward(self, x):
            weight_t = self.weight.t()
            y = torch.matmul(x, weight_t)
            res = y.add_(self.bias)
            return res

    class DecomposedLinearMatmul(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
            self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)

        def forward(self, x):
            weight_t = self.weight.t()
            res = torch.matmul(x, weight_t)
            return res

    # Linear with bias pattern.
    pattern_count_map = {"Tensor = prim::CallFunction": -1,
                         "prepacked::linear_clamp_prepack": 1,
                         "prepacked::linear_clamp_run": 1}
    TestXNNPACKRewritePass.validate_transformed_module(DecomposedLinearAddmm(), pattern_count_map, data_shape)
    TestXNNPACKRewritePass.validate_transformed_module(DecomposedLinearMatmulAdd(), pattern_count_map, data_shape)
    TestXNNPACKRewritePass.validate_transformed_module(DecomposedLinearMatmul(), pattern_count_map, data_shape)
def test_decomposed_linear(self):
    data_shape = [2, 32]
    weight_output_dim = 24
    weight_shape = (weight_output_dim, data_shape[-1])

    class DecomposedLinearAddmm(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.weight = torch.nn.Parameter(
                torch.rand(weight_shape), requires_grad=False
            )
            self.bias = torch.nn.Parameter(
                torch.rand(weight_output_dim), requires_grad=False
            )

        def forward(self, x):
            weight_t = self.weight.t()
            return torch.addmm(self.bias, x, weight_t)

    class DecomposedLinearMatmulAdd(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.weight = torch.nn.Parameter(
                torch.rand(weight_shape), requires_grad=False
            )
            self.bias = torch.nn.Parameter(
                torch.rand(weight_output_dim), requires_grad=False
            )

        def forward(self, x):
            weight_t = self.weight.t()
            y = torch.matmul(x, weight_t)
            res = y.add_(self.bias)
            return res

    class DecomposedLinearMatmul(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.weight = torch.nn.Parameter(
                torch.rand(weight_shape), requires_grad=False
            )
            self.bias = torch.nn.Parameter(
                torch.rand(weight_output_dim), requires_grad=False
            )

        def forward(self, x):
            weight_t = self.weight.t()
            res = torch.matmul(x, weight_t)
            return res

    # Linear with bias pattern.
    pattern_count_map = {
        "Tensor = prim::CallFunction": -1,
        "prepacked::linear_clamp_prepack": 1,
        "prepacked::linear_clamp_run": 1,
    }
    TestXNNPACKRewritePass.validate_transformed_module(
        DecomposedLinearAddmm(), pattern_count_map, data_shape
    )
    TestXNNPACKRewritePass.validate_transformed_module(
        DecomposedLinearMatmulAdd(), pattern_count_map, data_shape
    )
    TestXNNPACKRewritePass.validate_transformed_module(
        DecomposedLinearMatmul(), pattern_count_map, data_shape
    )
import unittest

import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools

from torch.testing._internal.common_utils import IS_FBCODE, TEST_WITH_TSAN

@unittest.skipUnless(torch.backends.xnnpack.enabled,
                     " XNNPACK must be enabled for these tests."
                     " Please build with USE_XNNPACK=1.")
@unittest.skipIf(TEST_WITH_TSAN, "TSAN fails with XNNPACK. Does not seem to have a good reason for failures.")
class TestXNNPACKRewritePass(TestCase):
import io
import itertools
import unittest

from hypothesis import assume, given, strategies as st

import torch
import torch.backends.xnnpack
import torch.testing._internal.hypothesis_utils as hu
from torch.nn import functional as F
from torch.testing import FileCheck
from torch.testing._internal.common_utils import (
    IS_FBCODE,
    run_tests,
    slowTest,
    TEST_WITH_TSAN,
    TestCase,
)
from torch.utils.mobile_optimizer import optimize_for_mobile


@unittest.skipUnless(
    torch.backends.xnnpack.enabled,
    " XNNPACK must be enabled for these tests."
    " Please build with USE_XNNPACK=1.",
)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "TSAN fails with XNNPACK. Does not seem to have a good reason for failures.",
)
class TestXNNPACKRewritePass(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_xpu.py
test_autocast_checkpointing
def test_autocast_checkpointing(self):
    model = torch.nn.Sequential(
        torch.nn.Linear(8, 8), torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)
    ).xpu()
    input = torch.rand(
        (8, 8), device="xpu", dtype=torch.float16, requires_grad=True
    )
    for reentrant in (True, False):
        with torch.autocast("xpu"):
            output = checkpoint_sequential(model, 2, input, use_reentrant=reentrant)
        self.assertTrue(output.requires_grad)
        self.assertTrue(output.dtype is torch.float16)
        output.sum().backward()
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuAutocast(TestAutocast):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_xpu_autocast_dtype
def test_xpu_autocast_dtype(self):
    dtype = torch.get_autocast_dtype("xpu")
    self.assertEqual(dtype, torch.float16)
    mat0_fp32 = torch.randn((10, 10), dtype=torch.float32, device="xpu")
    mat1_fp32 = torch.randn((10, 10), dtype=torch.float32, device="xpu")
    with torch.amp.autocast("xpu"):
        result = torch.mm(mat0_fp32, mat1_fp32)
        self.assertEqual(result.dtype, torch.float16)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuAutocast(TestAutocast):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
setUp
def setUp(self):
    super().setUp()
    self.autocast_lists = AutocastTestLists(torch.device("xpu"))
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuAutocast(TestAutocast):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_event_creation_callback
def test_event_creation_callback(self):
    gpu_trace.register_callback_for_event_creation(self.mock)

    event = torch.xpu.Event()
    event.record()
    self.mock.assert_called_once_with(event._as_parameter_.value)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuTrace(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_event_deletion_callback
def test_event_deletion_callback(self):
    gpu_trace.register_callback_for_event_deletion(self.mock)

    event = torch.xpu.Event()
    event.record()
    event_id = event._as_parameter_.value
    del event
    self.mock.assert_called_once_with(event_id)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuTrace(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_event_record_callback
def test_event_record_callback(self):
    gpu_trace.register_callback_for_event_record(self.mock)

    event = torch.xpu.Event()
    event.record()
    self.mock.assert_called_once_with(
        event._as_parameter_.value, torch.xpu.current_stream().sycl_queue
    )
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuTrace(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_event_wait_callback
def test_event_wait_callback(self):
    gpu_trace.register_callback_for_event_wait(self.mock)

    event = torch.xpu.Event()
    event.record()
    event.wait()
    self.mock.assert_called_once_with(
        event._as_parameter_.value, torch.xpu.current_stream().sycl_queue
    )
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuTrace(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_device_synchronization_callback
def test_device_synchronization_callback(self):
    gpu_trace.register_callback_for_device_synchronization(self.mock)

    torch.xpu.synchronize()
    self.mock.assert_called()
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuTrace(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_stream_synchronization_callback
def test_stream_synchronization_callback(self):
    gpu_trace.register_callback_for_stream_synchronization(self.mock)

    stream = torch.xpu.Stream()
    stream.synchronize()
    self.mock.assert_called_once_with(stream.sycl_queue)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuTrace(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_xpu.py
test_event_synchronization_callback
def test_event_synchronization_callback(self):
    gpu_trace.register_callback_for_event_synchronization(self.mock)

    event = torch.xpu.Event()
    event.record()
    event.synchronize()
    self.mock.assert_called_once_with(event._as_parameter_.value)
(context_before identical to the shared test/test_xpu.py module header reproduced in the test_stream_event_repr row above, but ending with "class TestXpuTrace(TestCase):")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/check_tests_conform.py
check
def check(path):
    """Check a test file for common issues with pytest->pytorch conversion."""
    print(path.name)
    print("=" * len(path.name), "\n")

    src = path.read_text().split("\n")
    for num, line in enumerate(src):
        if is_comment(line):
            continue

        # module level test functions
        if line.startswith("def test"):
            report_violation(line, num, header="Module-level test function")

        # test classes must inherit from TestCase
        if line.startswith("class Test") and "TestCase" not in line:
            report_violation(
                line, num, header="Test class does not inherit from TestCase"
            )

        # last vestiges of pytest-specific stuff
        if "pytest.mark" in line:
            report_violation(line, num, header="pytest.mark.something")

        for part in ["pytest.xfail", "pytest.skip", "pytest.param"]:
            if part in line:
                report_violation(line, num, header=f"stray {part}")

        if textwrap.dedent(line).startswith("@parametrize"):
            # backtrack to check
            nn = num
            for nn in range(num, -1, -1):
                ln = src[nn]
                if "class Test" in ln:
                    # hack: large indent => likely an inner class
                    if len(ln) - len(ln.lstrip()) < 8:
                        break
            else:
                report_violation(line, num, "off-class parametrize")

            if not src[nn - 1].startswith("@instantiate_parametrized_tests"):
                report_violation(
                    line, num, f"missing instantiation of parametrized tests in {ln}?"
                )
import sys
import textwrap
from pathlib import Path
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/check_tests_conform.py
is_comment
def is_comment(line):
    return textwrap.dedent(line).startswith("#")
import sys
import textwrap
from pathlib import Path
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/check_tests_conform.py
report_violation
def report_violation(line, lineno, header):
    print(f">>>> line {lineno} : {header}\n {line}\n")


if __name__ == "__main__":
    argv = sys.argv
    if len(argv) != 2:
        raise ValueError("Usage : python check_tests_conform path/to/file/or/dir")

    path = Path(argv[1])
    if path.is_dir():
        # run for all files in the directory (no subdirs)
        for this_path in path.glob("test*.py"):
            # breakpoint()
            check(this_path)
    else:
        check(path)
import sys
import textwrap
from pathlib import Path
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/conftest.py
pytest_configure
def pytest_configure(config):
    config.addinivalue_line("markers", "slow: very slow tests")
import sys

import pytest

import torch._numpy as tnp
import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/conftest.py
pytest_addoption
def pytest_addoption(parser):
    parser.addoption("--runslow", action="store_true", help="run slow tests")
    parser.addoption("--nonp", action="store_true", help="error when NumPy is accessed")


class Inaccessible:
    def __getattribute__(self, attr):
        raise RuntimeError(f"Using --nonp but accessed np.{attr}")
import sys

import pytest

import torch._numpy as tnp
import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/conftest.py
__getattribute__
def __getattribute__(self, attr):
    raise RuntimeError(f"Using --nonp but accessed np.{attr}")
import sys

import pytest

import torch._numpy as tnp
import numpy as np

class Inaccessible:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/conftest.py
pytest_sessionstart
def pytest_sessionstart(session):
    if session.config.getoption("--nonp"):
        sys.modules["numpy"] = Inaccessible()
import sys

import pytest

import torch._numpy as tnp
import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
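A self-contained sketch of how the --nonp guard behaves: pytest_sessionstart (above) installs an Inaccessible instance in sys.modules, and any attribute access on the stand-in raises. The class body is copied from the rows above; the driver code around it is illustrative.

class Inaccessible:
    def __getattribute__(self, attr):
        raise RuntimeError(f"Using --nonp but accessed np.{attr}")

# Under --nonp, pytest_sessionstart effectively does:
#     sys.modules["numpy"] = Inaccessible()
# so any code that got hold of the stand-in fails loudly on first use:
np = Inaccessible()
try:
    np.array([42])
except RuntimeError as err:
    print(err)  # -> Using --nonp but accessed np.array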
torch
test/torch_np/conftest.py
pytest_generate_tests
def pytest_generate_tests(metafunc):
    """
    Hook to parametrize test cases
    See https://docs.pytest.org/en/6.2.x/parametrize.html#pytest-generate-tests

    The logic here allows us to test with both NumPy-proper and torch._numpy.
    Normally we'd just test torch._numpy, e.g.

        import torch._numpy as np
        ...
        def test_foo():
            np.array([42])
            ...

    but this hook allows us to test NumPy-proper as well, e.g.

        def test_foo(np):
            np.array([42])
            ...

    np is a pytest parameter, which is either NumPy-proper or torch._numpy.
    This allows us to sanity check our own tests, so that tested behaviour is
    consistent with NumPy-proper.

    pytest will have test names respective to the library being tested, e.g.

        $ pytest --collect-only
        test_foo[torch._numpy]
        test_foo[numpy]

    """
    np_params = [tnp]

    try:
        import numpy as np
    except ImportError:
        pass
    else:
        if not isinstance(np, Inaccessible):  # i.e. --nonp was used
            np_params.append(np)

    if "np" in metafunc.fixturenames:
        metafunc.parametrize("np", np_params)
import sys

import pytest

import torch._numpy as tnp
import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
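To make the hook concrete, here is a minimal test written against it, following the pattern from the docstring above; the assertion is an illustrative addition, not part of the original example.

def test_foo(np):
    # `np` is injected by pytest_generate_tests: NumPy-proper on one run,
    # torch._numpy on the other, so the behaviour is checked against both.
    a = np.array([42])
    assert a.shape == (1,)

# pytest reports one test id per backend:
#   test_foo[torch._numpy]
#   test_foo[numpy]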
torch
test/torch_np/conftest.py
test_foo
def test_foo():
    np.array([42])
    ...
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/conftest.py
test_foo
def test_foo():
    np.array([42])
    ...
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dlpack.py
test_readonly
def test_readonly(self):
    x = np.arange(5)
    x.flags.writeable = False
    with pytest.raises(BufferError):
        x.__dlpack__()
import functools
import sys
import unittest
from unittest import skipIf as skipif

import numpy

import pytest

import torch

from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
    xpassIfTorchDynamo,
)

if TEST_WITH_TORCHDYNAMO:
    import numpy as np
    from numpy.testing import assert_array_equal
else:
    import torch._numpy as np
    from torch._numpy.testing import assert_array_equal


skip = functools.partial(skipif, True)

IS_PYPY = False


@skipif(numpy.__version__ < "1.24", reason="numpy.dlpack is new in numpy 1.23")
@instantiate_parametrized_tests
class TestDLPack(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dlpack.py
test_ndim0
def test_ndim0(self):
    x = np.array(1.0)
    y = np.from_dlpack(x)
    assert_array_equal(x, y)
(context_before identical to the test/torch_np/numpy_tests/core/test_dlpack.py module header reproduced in the test_readonly row above)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dlpack.py
test_from_torch
def test_from_torch(self):
    t = torch.arange(4)
    a = np.from_dlpack(t)
    assert_array_equal(a, np.asarray(t))
(context_before identical to the test/torch_np/numpy_tests/core/test_dlpack.py module header reproduced in the test_readonly row above)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dlpack.py
test_to_torch
def test_to_torch(self):
    a = np.arange(4)
    t = torch.from_dlpack(a)
    assert_array_equal(np.asarray(t), a)
(context_before identical to the test/torch_np/numpy_tests/core/test_dlpack.py module header reproduced in the test_readonly row above)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
assert_dtype_equal
def assert_dtype_equal(a, b):
    assert_equal(a, b)
    assert_equal(
        hash(a), hash(b), "two equivalent types do not hash to the same value !"
    )
import functools
import operator
import pickle
import sys
import types
from itertools import permutations
from typing import Any
from unittest import skipIf as skipif

import pytest
from pytest import raises as assert_raises

from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
    xfailIfTorchDynamo,
    xpassIfTorchDynamo,
)

skip = functools.partial(skipif, True)

if TEST_WITH_TORCHDYNAMO:
    import numpy as np
    from numpy.testing import assert_, assert_equal
else:
    import torch._numpy as np
    from torch._numpy.testing import assert_, assert_equal

import numpy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
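Not part of the dataset records; a minimal sketch of the invariant assert_dtype_equal (above) enforces, using plain NumPy:

import numpy as np

# Equivalent dtype spellings must compare equal and hash equal,
# which is exactly what assert_dtype_equal checks.
assert np.dtype("f8") == np.dtype(np.float64) == np.dtype(float)
assert hash(np.dtype("f8")) == hash(np.dtype(np.float64))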
torch
test/torch_np/numpy_tests/core/test_dtype.py
assert_dtype_not_equal
def assert_dtype_not_equal(a, b): assert_(a != b) assert_(hash(a) != hash(b), "two different types hash to the same value !") @instantiate_parametrized_tests class TestBuiltin(TestCase): @parametrize("t", [int, float, complex, np.int32]) def test_run(self, t): """Only test hash runs at all.""" dt = np.dtype(t) hash(dt) def test_equivalent_dtype_hashing(self): # Make sure equivalent dtypes with different type num hash equal intp = np.dtype(np.intp) if intp.itemsize == 4: left = intp right = np.dtype(np.int32) else: left = intp right = np.dtype(np.int64) assert_(left == right) assert_(hash(left) == hash(right)) @xfailIfTorchDynamo # TypeError -> InternalTorchDynamoError def test_invalid_types(self): # Make sure invalid type strings raise an error assert_raises(TypeError, np.dtype, "O3") assert_raises(TypeError, np.dtype, "O5") assert_raises(TypeError, np.dtype, "O7") assert_raises(TypeError, np.dtype, "b3") assert_raises(TypeError, np.dtype, "h4") assert_raises(TypeError, np.dtype, "I5") assert_raises(TypeError, np.dtype, "e3") assert_raises(TypeError, np.dtype, "f5") if np.dtype("l").itemsize == 8: assert_raises(TypeError, np.dtype, "l4") assert_raises(TypeError, np.dtype, "L4") else: assert_raises(TypeError, np.dtype, "l8") assert_raises(TypeError, np.dtype, "L8") # XXX: what is 'q'? on my 64-bit ubuntu machine it's int64, same as 'l' # if np.dtype('q').itemsize == 8: # assert_raises(TypeError, np.dtype, 'q4') # assert_raises(TypeError, np.dtype, 'Q4') # else: # assert_raises(TypeError, np.dtype, 'q8') # assert_raises(TypeError, np.dtype, 'Q8') def test_richcompare_invalid_dtype_equality(self): # Make sure objects that cannot be converted to valid # dtypes result in False/True when compared to valid dtypes. # Here 7 cannot be converted to dtype. No exceptions should be raised assert not np.dtype(np.int32) == 7, "dtype richcompare failed for ==" assert np.dtype(np.int32) != 7, "dtype richcompare failed for !=" @parametrize("operation", [operator.le, operator.lt, operator.ge, operator.gt]) def test_richcompare_invalid_dtype_comparison(self, operation): # Make sure TypeError is raised for comparison operators # for invalid dtypes. Here 7 is an invalid dtype. with pytest.raises(TypeError): operation(np.dtype(np.int32), 7) @skipif( numpy.__version__ < "1.24", reason="older numpies emit DeprecationWarnings instead", ) @parametrize( "dtype", [ "Bool", "Bytes0", "Complex32", "Complex64", "Datetime64", "Float16", "Float32", "Float64", "Int8", "Int16", "Int32", "Int64", "Object0", "Str0", "Timedelta64", "UInt8", "UInt16", "Uint32", "UInt32", "Uint64", "UInt64", "Void0", "Float128", "Complex128", ], ) def test_numeric_style_types_are_invalid(self, dtype): with assert_raises(TypeError): np.dtype(dtype) @skip(reason="dtype attributes not yet implemented") class TestDtypeAttributeDeletion(TestCase): def test_dtype_non_writable_attributes_deletion(self): dt = np.dtype(np.double) attr = [ "subdtype", "descr", "str", "name", "base", "shape", "isbuiltin", "isnative", "isalignedstruct", "fields", "metadata", "hasobject", ] for s in attr: assert_raises(AttributeError, delattr, dt, s) def test_dtype_writable_attributes_deletion(self): dt = np.dtype(np.double) attr = ["names"] for s in attr: assert_raises(AttributeError, delattr, dt, s) @instantiate_parametrized_tests class TestPickling(TestCase): def check_pickling(self, dtype): for proto in range(pickle.HIGHEST_PROTOCOL + 1): buf = pickle.dumps(dtype, proto) # The dtype pickling itself pickles `np.dtype` if it is pickled # as a singleton `dtype` should be stored in the buffer: assert b"_DType_reconstruct" not in buf assert b"dtype" in buf pickled = pickle.loads(buf) assert_equal(pickled, dtype) # XXX: our dtypes do not have .descr # assert_equal(pickled.descr, dtype.descr) # if dtype.metadata is not None: # assert_equal(pickled.metadata, dtype.metadata) # Check the reconstructed dtype is functional x = np.zeros(3, dtype=dtype) y = np.zeros(3, dtype=pickled) assert_equal(x, y) assert_equal(x[0], y[0]) @parametrize("t", [int, float, complex, np.int32, bool]) def test_builtin(self, t): self.check_pickling(np.dtype(t)) @parametrize( "DType", [ subtest(type(np.dtype(t)), name=f"{np.dtype(t).name}_{i}") for i, t in enumerate(np.typecodes["All"]) ] + [np.dtype], ) def test_pickle_types(self, DType): # Check that DTypes (the classes/types) roundtrip when pickling for proto in range(pickle.HIGHEST_PROTOCOL + 1): roundtrip_DType = pickle.loads(pickle.dumps(DType, proto)) assert roundtrip_DType is DType @skip(reason="XXX: value-based promotions, we don't have.") @instantiate_parametrized_tests class TestPromotion(TestCase): """Test cases related to more complex DType promotions. Further promotion tests are defined in `test_numeric.py` """ @parametrize( "other, expected, expected_weak", [ (2**16 - 1, np.complex64, None), (2**32 - 1, np.complex128, np.complex64), subtest((np.float16(2), np.complex64, None), name="float16_complex64_None"), subtest((np.float32(2), np.complex64, None), name="float32_complex64_None"), # repeat for complex scalars: subtest( (np.complex64(2), np.complex64, None), name="complex64_complex64_None" ), ], ) def test_complex_other_value_based( self, weak_promotion, other, expected, expected_weak ): if weak_promotion and expected_weak is not None: expected = expected_weak # This would change if we modify the value based promotion min_complex = np.dtype(np.complex64) res = np.result_type(other, min_complex) assert res == expected # Check the same for a simple ufunc call that uses the same logic: res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype assert res == expected @parametrize( "other, expected", [ (np.bool_, np.complex128), (np.int64, np.complex128), (np.float16, np.complex64), (np.float32, np.complex64), (np.float64, np.complex128), (np.complex64, np.complex64), (np.complex128, np.complex128), ], ) def test_complex_scalar_value_based(self, other, expected): # This would change if we modify the value based promotion complex_scalar = 1j res = np.result_type(other, complex_scalar) assert res == expected # Check the same for a simple ufunc call that uses the same logic: res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype assert res == expected @parametrize("val", [2, 2**32, 2**63, 2**64, 2**100]) def test_python_integer_promotion(self, val): # If we only pass scalars (mainly python ones!), the result must take # into account that the integer may be considered int32, int64, uint64, # or object depending on the input value. So test those paths! expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype) assert np.result_type(val, 0) == expected_dtype # For completeness sake, also check with a NumPy scalar as second arg: assert np.result_type(val, np.int8(0)) == expected_dtype @parametrize( "dtypes, expected", [ # These promotions are not associative/commutative: ([np.int16, np.float16], np.float32), ([np.int8, np.float16], np.float32), ([np.uint8, np.int16, np.float16], np.float32), # The following promotions are not ambiguous, but cover code # paths of abstract promotion (no particular logic being tested) ([1, 1, np.float64], np.float64), ([1, 1.0, np.complex128], np.complex128), ([1, 1j, np.float64], np.complex128), ([1.0, 1.0, np.int64], np.float64), ([1.0, 1j, np.float64], np.complex128), ([1j, 1j, np.float64], np.complex128), ([1, True, np.bool_], np.int_), ], ) def test_permutations_do_not_influence_result(self, dtypes, expected): # Tests that most permutations do not influence the result. In the # above some uint and int combinations promote to a larger integer # type, which would then promote to a larger than necessary float. for perm in permutations(dtypes): assert np.result_type(*perm) == expected class TestMisc(TestCase): def test_dtypes_are_true(self): # test for gh-6294 assert bool(np.dtype("f8")) assert bool(np.dtype("i8")) @xpassIfTorchDynamo # (reason="No keyword arg for dtype ctor.") def test_keyword_argument(self): # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971 assert np.dtype(dtype=np.float64) == np.dtype(np.float64) @skipif(sys.version_info >= (3, 9), reason="Requires python 3.9") def test_class_getitem_38(self) -> None: match = "Type subscription requires python >= 3.9" with pytest.raises(TypeError): # , match=match): np.dtype[Any] class TestFromDTypeAttribute(TestCase): def test_simple(self): class dt: dtype = np.dtype("f8") assert np.dtype(dt) == np.float64 assert np.dtype(dt()) == np.float64 @skip( reason="We simply require the .name attribute, so this " "fails with an AttributeError." ) def test_recursion(self): class dt: pass dt.dtype = dt with pytest.raises(RecursionError): np.dtype(dt) dt_instance = dt() dt_instance.dtype = dt with pytest.raises(RecursionError): np.dtype(dt_instance) @skip(reason="Parametric dtypes, our stuff is simpler.") @skipif(sys.version_info < (3, 9), reason="Requires python 3.9") @instantiate_parametrized_tests class TestClassGetItem(TestCase): def test_dtype(self) -> None: alias = np.dtype[Any] assert isinstance(alias, types.GenericAlias) assert alias.__origin__ is np.dtype @parametrize("code", np.typecodes["All"]) def test_dtype_subclass(self, code: str) -> None: cls = type(np.dtype(code)) alias = cls[Any] assert isinstance(alias, types.GenericAlias) assert alias.__origin__ is cls @parametrize("arg_len", range(4)) def test_subscript_tuple(self, arg_len: int) -> None: arg_tup = (Any,) * arg_len if arg_len == 1: assert np.dtype[arg_tup] else: with pytest.raises(TypeError): np.dtype[arg_tup] def test_subscript_scalar(self) -> None: assert np.dtype[Any] if __name__ == "__main__": run_tests()
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
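Not from the records; a minimal sketch of the round trip that check_pickling (in the blob above) performs, across all pickle protocols:

import pickle
import numpy as np

dt = np.dtype(np.float64)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
    # a pickled dtype must reconstruct to an equal dtype
    assert pickle.loads(pickle.dumps(dt, proto)) == dt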
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_run
def test_run(self, t): """Only test hash runs at all.""" dt = np.dtype(t) hash(dt)
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy @instantiate_parametrized_tests class TestBuiltin(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_invalid_types
def test_invalid_types(self): # Make sure invalid type strings raise an error assert_raises(TypeError, np.dtype, "O3") assert_raises(TypeError, np.dtype, "O5") assert_raises(TypeError, np.dtype, "O7") assert_raises(TypeError, np.dtype, "b3") assert_raises(TypeError, np.dtype, "h4") assert_raises(TypeError, np.dtype, "I5") assert_raises(TypeError, np.dtype, "e3") assert_raises(TypeError, np.dtype, "f5") if np.dtype("l").itemsize == 8: assert_raises(TypeError, np.dtype, "l4") assert_raises(TypeError, np.dtype, "L4") else: assert_raises(TypeError, np.dtype, "l8") assert_raises(TypeError, np.dtype, "L8") # XXX: what is 'q'? on my 64-bit ubuntu machine it's int64, same as 'l' # if np.dtype('q').itemsize == 8: # assert_raises(TypeError, np.dtype, 'q4') # assert_raises(TypeError, np.dtype, 'Q4') # else: # assert_raises(TypeError, np.dtype, 'q8') # assert_raises(TypeError, np.dtype, 'Q8')
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy @instantiate_parametrized_tests class TestBuiltin(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_numeric_style_types_are_invalid
def test_numeric_style_types_are_invalid(self, dtype): with assert_raises(TypeError): np.dtype(dtype)
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy @instantiate_parametrized_tests class TestBuiltin(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_dtype_non_writable_attributes_deletion
def test_dtype_non_writable_attributes_deletion(self): dt = np.dtype(np.double) attr = [ "subdtype", "descr", "str", "name", "base", "shape", "isbuiltin", "isnative", "isalignedstruct", "fields", "metadata", "hasobject", ] for s in attr: assert_raises(AttributeError, delattr, dt, s)
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy @skip(reason="dtype attributes not yet implemented") class TestDtypeAttributeDeletion(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_dtype_writable_attributes_deletion
def test_dtype_writable_attributes_deletion(self): dt = np.dtype(np.double) attr = ["names"] for s in attr: assert_raises(AttributeError, delattr, dt, s)
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy @skip(reason="dtype attributes not yet implemented") class TestDtypeAttributeDeletion(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_pickle_types
def test_pickle_types(self, DType): # Check that DTypes (the classes/types) roundtrip when pickling for proto in range(pickle.HIGHEST_PROTOCOL + 1): roundtrip_DType = pickle.loads(pickle.dumps(DType, proto)) assert roundtrip_DType is DType
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy @instantiate_parametrized_tests class TestPickling(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_permutations_do_not_influence_result
def test_permutations_do_not_influence_result(self, dtypes, expected): # Tests that most permutations do not influence the result. In the # above some uint and int combinations promote to a larger integer # type, which would then promote to a larger than necessary float. for perm in permutations(dtypes): assert np.result_type(*perm) == expected
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy @skip(reason="XXX: value-based promotions, we don't have.") @instantiate_parametrized_tests class TestPromotion(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/conftest.py
pytest_collection_modifyitems
def pytest_collection_modifyitems(config, items): if not config.getoption("--runslow"): skip_slow = pytest.mark.skip(reason="slow test, use --runslow to run") for item in items: if "slow" in item.keywords: item.add_marker(skip_slow)
import sys import pytest import torch._numpy as tnp import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
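pytest_collection_modifyitems above only consumes --runslow; a conftest like this one normally also registers the flag. A hypothetical companion hook (standard pytest pattern, not shown in the record):

def pytest_addoption(parser):
    # register --runslow so the option exists for the hook above
    parser.addoption(
        "--runslow",
        action="store_true",
        default=False,
        help="run tests marked as slow",
    )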
torch
test/torch_np/numpy_tests/core/test_dlpack.py
test_dtype_passthrough
def test_dtype_passthrough(self, dtype): x = np.arange(5, dtype=dtype) y = np.from_dlpack(x) assert y.dtype == x.dtype assert_array_equal(x, y)
import functools import sys import unittest from unittest import skipIf as skipif import numpy import pytest import torch from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy.testing import assert_array_equal import torch._numpy as np from torch._numpy.testing import assert_array_equal skip = functools.partial(skipif, True) IS_PYPY = False @skipif(numpy.__version__ < "1.24", reason="numpy.dlpack is new in numpy 1.23") @instantiate_parametrized_tests class TestDLPack(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dlpack.py
test_higher_dims
def test_higher_dims(self, ndim): shape = (1,) * ndim x = np.zeros(shape, dtype=np.float64) assert shape == np.from_dlpack(x).shape
import functools import sys import unittest from unittest import skipIf as skipif import numpy import pytest import torch from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy.testing import assert_array_equal import torch._numpy as np from torch._numpy.testing import assert_array_equal skip = functools.partial(skipif, True) IS_PYPY = False @skipif(numpy.__version__ < "1.24", reason="numpy.dlpack is new in numpy 1.23") @instantiate_parametrized_tests class TestDLPack(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dlpack.py
test_dlpack_device
def test_dlpack_device(self): x = np.arange(5) assert x.__dlpack_device__() == (1, 0) y = np.from_dlpack(x) assert y.__dlpack_device__() == (1, 0) z = y[::2] assert z.__dlpack_device__() == (1, 0)
import functools import sys import unittest from unittest import skipIf as skipif import numpy import pytest import torch from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy.testing import assert_array_equal import torch._numpy as np from torch._numpy.testing import assert_array_equal skip = functools.partial(skipif, True) IS_PYPY = False @skipif(numpy.__version__ < "1.24", reason="numpy.dlpack is new in numpy 1.23") @instantiate_parametrized_tests class TestDLPack(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
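A minimal sketch (assuming NumPy >= 1.23 and a recent PyTorch) of the zero-copy exchange and the (1, 0) device tuple, i.e. (kDLCPU, device_id), that test_dlpack_device asserts:

import numpy
import torch

t = torch.arange(4)
a = numpy.from_dlpack(t)                 # zero-copy view over t's storage
assert a.__dlpack_device__() == (1, 0)   # (kDLCPU, device 0)
a[0] = 99
assert t[0].item() == 99                 # mutation is visible on both sides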
torch
test/torch_np/numpy_tests/core/test_dlpack.py
dlpack_deleter_exception
def dlpack_deleter_exception(self): x = np.arange(5) _ = x.__dlpack__() raise RuntimeError
import functools import sys import unittest from unittest import skipIf as skipif import numpy import pytest import torch from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy.testing import assert_array_equal import torch._numpy as np from torch._numpy.testing import assert_array_equal skip = functools.partial(skipif, True) IS_PYPY = False @skipif(numpy.__version__ < "1.24", reason="numpy.dlpack is new in numpy 1.23") @instantiate_parametrized_tests class TestDLPack(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
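The helper above acquires a DLPack capsule and then raises; the record does not show its caller, but a plausible driver (an assumption, not from the dataset) simply asserts the RuntimeError propagates while the capsule's deleter still runs:

import pytest

# inside TestDLPack (hypothetical driver for dlpack_deleter_exception):
def test_dlpack_destructor_exception(self):
    with pytest.raises(RuntimeError):
        self.dlpack_deleter_exception()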
torch
test/test_xnnpack_integration.py
test_conv1d_with_relu_fc
def test_conv1d_with_relu_fc(self): batch_size_list = range(1, 3) input_channels_per_group_list = range(10, 12) width_list = range(10, 12) output_channels_per_group_list = range(10, 12) groups_list = range(1, 3) kernel_list = range(1, 4) stride_list = range(1, 3) padding_list = range(0, 3) dilation_list = range(1, 3) output_features_list = range(1, 3) for hparams in itertools.product(batch_size_list, input_channels_per_group_list, width_list, output_channels_per_group_list, groups_list, kernel_list, stride_list, padding_list, dilation_list, output_features_list): batch_size, input_channels_per_group, width, output_channels_per_group, \ groups, kernel, stride, padding, dilation, output_features = hparams input_channels = input_channels_per_group * groups output_channels = output_channels_per_group * groups conv_weight_shape = (output_channels, input_channels_per_group, kernel) conv_bias_shape = (output_channels) conv_output_width = int((width + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1 fc_weight_shape = (output_features, output_channels * conv_output_width) fc_bias_shape = (output_features) class Net(torch.nn.Module): def __init__(self): super().__init__() self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False) self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False) self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups self.fc_weight = torch.nn.Parameter(torch.rand(fc_weight_shape), requires_grad=False) self.fc_bias = torch.nn.Parameter(torch.rand(fc_bias_shape), requires_grad=False) def forward(self, x): x = F.conv1d(x, self.conv_weight, self.conv_bias, self.stride, self.padding, self.dilation, self.groups) x = F.relu(x) x = x.view(x.size(0), -1) x = F.linear(x, self.fc_weight, self.fc_bias) return x data_shape = (batch_size, input_channels, width) pattern_count_transformed_map = {"Tensor = aten::conv1d": -1, "Tensor = aten::conv2d": 1} pattern_count_optimized_map = {"Tensor = aten::conv1d": -1, "Tensor = aten::conv2d": -1, "prepacked::conv2d_clamp_prepack" : -1, "prepacked::conv2d_clamp_run": 1} TestXNNPACKConv1dTransformPass.validate_transform_conv1d_to_conv2d(Net(), pattern_count_transformed_map, pattern_count_optimized_map, data_shape)
def test_conv1d_with_relu_fc(self): batch_size_list = range(1, 3) input_channels_per_group_list = range(10, 12) width_list = range(10, 12) output_channels_per_group_list = range(10, 12) groups_list = range(1, 3) kernel_list = range(1, 4) stride_list = range(1, 3) padding_list = range(0, 3) dilation_list = range(1, 3) output_features_list = range(1, 3) for hparams in itertools.product( batch_size_list, input_channels_per_group_list, width_list, output_channels_per_group_list, groups_list, kernel_list, stride_list, padding_list, dilation_list, output_features_list, ): ( batch_size, input_channels_per_group, width, output_channels_per_group, groups, kernel, stride, padding, dilation, output_features, ) = hparams input_channels = input_channels_per_group * groups output_channels = output_channels_per_group * groups conv_weight_shape = (output_channels, input_channels_per_group, kernel) conv_bias_shape = output_channels conv_output_width = ( int((width + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1 ) fc_weight_shape = (output_features, output_channels * conv_output_width) fc_bias_shape = output_features class Net(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv_weight = torch.nn.Parameter( torch.rand(conv_weight_shape), requires_grad=False ) self.conv_bias = torch.nn.Parameter( torch.rand(conv_bias_shape), requires_grad=False ) self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups self.fc_weight = torch.nn.Parameter( torch.rand(fc_weight_shape), requires_grad=False ) self.fc_bias = torch.nn.Parameter( torch.rand(fc_bias_shape), requires_grad=False ) def forward(self, x): x = F.conv1d( x, self.conv_weight, self.conv_bias, self.stride, self.padding, self.dilation, self.groups, ) x = F.relu(x) x = x.view(x.size(0), -1) x = F.linear(x, self.fc_weight, self.fc_bias) return x data_shape = (batch_size, input_channels, width) pattern_count_transformed_map = { "Tensor = aten::conv1d": -1, "Tensor = aten::conv2d": 1, } pattern_count_optimized_map = { "Tensor = aten::conv1d": -1, "Tensor = aten::conv2d": -1, "prepacked::conv2d_clamp_prepack": -1, "prepacked::conv2d_clamp_run": 1, } TestXNNPACKConv1dTransformPass.validate_transform_conv1d_to_conv2d( Net(), pattern_count_transformed_map, pattern_count_optimized_map, data_shape, )
import unittest import torch import torch.backends.xnnpack from torch.nn import functional as F from torch.utils.mobile_optimizer import optimize_for_mobile from torch.testing import FileCheck import torch.testing._internal.hypothesis_utils as hu from torch.testing._internal.common_utils import TestCase, run_tests, slowTest from hypothesis import given, assume from hypothesis import strategies as st import io import itertools from torch.testing._internal.common_utils import IS_FBCODE, TEST_WITH_TSAN @unittest.skipUnless(torch.backends.xnnpack.enabled, " XNNPACK must be enabled for these tests." " Please build with USE_XNNPACK=1.") @unittest.skipIf(TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment") class TestXNNPACKConv1dTransformPass(TestCase):
import io import itertools import unittest from hypothesis import assume, given, strategies as st import torch import torch.backends.xnnpack import torch.testing._internal.hypothesis_utils as hu from torch.nn import functional as F from torch.testing import FileCheck from torch.testing._internal.common_utils import ( IS_FBCODE, run_tests, slowTest, TEST_WITH_TSAN, TestCase, ) from torch.utils.mobile_optimizer import optimize_for_mobile @unittest.skipUnless( torch.backends.xnnpack.enabled, " XNNPACK must be enabled for these tests." " Please build with USE_XNNPACK=1.", ) @unittest.skipIf( TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment", ) class TestXNNPACKConv1dTransformPass(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_sums_int32
def test_einsum_sums_int32(self): self.check_einsum_sums("i4") self.check_einsum_sums("i4", True)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_sums_float16
def test_einsum_sums_float16(self): self.check_einsum_sums("f2")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_sums_float32
def test_einsum_sums_float32(self): self.check_einsum_sums("f4")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_sums_float64
def test_einsum_sums_float64(self): self.check_einsum_sums("f8") self.check_einsum_sums("f8", True)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_sums_cfloat64
def test_einsum_sums_cfloat64(self): self.check_einsum_sums("c8") self.check_einsum_sums("c8", True)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_sums_cfloat128
def test_einsum_sums_cfloat128(self): self.check_einsum_sums("c16")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_misc
def test_einsum_misc(self): # This call used to crash because of a bug in # PyArray_AssignZero a = np.ones((1, 2)) b = np.ones((2, 2, 1)) assert_equal(np.einsum("ij...,j...->i...", a, b), [[[2], [2]]]) assert_equal(np.einsum("ij...,j...->i...", a, b, optimize=True), [[[2], [2]]]) # Regression test for issue #10369 (test unicode inputs with Python 2) assert_equal(np.einsum("ij...,j...->i...", a, b), [[[2], [2]]]) assert_equal(np.einsum("...i,...i", [1, 2, 3], [2, 3, 4]), 20) assert_equal( np.einsum("...i,...i", [1, 2, 3], [2, 3, 4], optimize="greedy"), 20 ) # The iterator had an issue with buffering this reduction a = np.ones((5, 12, 4, 2, 3), np.int64) b = np.ones((5, 12, 11), np.int64) assert_equal( np.einsum("ijklm,ijn,ijn->", a, b, b), np.einsum("ijklm,ijn->", a, b) ) assert_equal( np.einsum("ijklm,ijn,ijn->", a, b, b, optimize=True), np.einsum("ijklm,ijn->", a, b, optimize=True), ) # Issue #2027, was a problem in the contiguous 3-argument # inner loop implementation a = np.arange(1, 3) b = np.arange(1, 5).reshape(2, 2) c = np.arange(1, 9).reshape(4, 2) assert_equal( np.einsum("x,yx,zx->xzy", a, b, c), [ [[1, 3], [3, 9], [5, 15], [7, 21]], [[8, 16], [16, 32], [24, 48], [32, 64]], ], ) assert_equal( np.einsum("x,yx,zx->xzy", a, b, c, optimize=True), [ [[1, 3], [3, 9], [5, 15], [7, 21]], [[8, 16], [16, 32], [24, 48], [32, 64]], ], ) # Ensure explicitly setting out=None does not cause an error # see issue gh-15776 and issue gh-15256 assert_equal(np.einsum("i,j", [1], [2], out=None), [[2]])
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_subscript_range
def test_subscript_range(self): # Issue #7741, make sure that all letters of the Latin alphabet (both uppercase & lowercase) can be used # when creating a subscript from arrays a = np.ones((2, 3)) b = np.ones((3, 4)) np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) assert_raises( ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False), ) assert_raises( ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False), )
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
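A minimal illustration (not from the records) of the integer-subscript form exercised by test_subscript_range, where indices 0-25 map to 'a'-'z' and 26-51 to 'A'-'Z':

import numpy as np

a = np.ones((2, 3))
b = np.ones((3, 4))
out = np.einsum(a, [0, 1], b, [1, 2], [0, 2])  # same contraction as "ij,jk->ik"
assert out.shape == (2, 4)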
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_broadcast
def test_einsum_broadcast(self): # Issue #2455 change in handling ellipsis # remove the 'middle broadcast' error # only use the 'RIGHT' iteration in prepare_op_axes # adds auto broadcast on left where it belongs # broadcast on right has to be explicit # We need to test the optimized parsing as well A = np.arange(2 * 3 * 4).reshape(2, 3, 4) B = np.arange(3) ref = np.einsum("ijk,j->ijk", A, B, optimize=False) for opt in [True, False]: assert_equal(np.einsum("ij...,j...->ij...", A, B, optimize=opt), ref) assert_equal(np.einsum("ij...,...j->ij...", A, B, optimize=opt), ref) assert_equal( np.einsum("ij...,j->ij...", A, B, optimize=opt), ref ) # used to raise error A = np.arange(12).reshape((4, 3)) B = np.arange(6).reshape((3, 2)) ref = np.einsum("ik,kj->ij", A, B, optimize=False) for opt in [True, False]: assert_equal(np.einsum("ik...,k...->i...", A, B, optimize=opt), ref) assert_equal(np.einsum("ik...,...kj->i...j", A, B, optimize=opt), ref) assert_equal( np.einsum("...k,kj", A, B, optimize=opt), ref ) # used to raise error assert_equal( np.einsum("ik,k...->i...", A, B, optimize=opt), ref ) # used to raise error dims = [2, 3, 4, 5] a = np.arange(np.prod(dims)).reshape(dims) v = np.arange(dims[2]) ref = np.einsum("ijkl,k->ijl", a, v, optimize=False) for opt in [True, False]: assert_equal(np.einsum("ijkl,k", a, v, optimize=opt), ref) assert_equal( np.einsum("...kl,k", a, v, optimize=opt), ref ) # used to raise error assert_equal(np.einsum("...kl,k...", a, v, optimize=opt), ref) J, K, M = 160, 160, 120 A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) B = np.arange(J * K * M * 3).reshape(J, K, M, 3) ref = np.einsum("...lmn,...lmno->...o", A, B, optimize=False) for opt in [True, False]: assert_equal( np.einsum("...lmn,lmno->...o", A, B, optimize=opt), ref ) # used to raise error
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_fixedstridebug
def test_einsum_fixedstridebug(self): # Issue #4485 obscure einsum bug # This case revealed a bug in nditer where it reported a stride # as 'fixed' (0) when it was in fact not fixed during processing # (0 or 4). The reason for the bug was that the check for a fixed # stride was using the information from the 2D inner loop reuse # to restrict the iteration dimensions it had to validate to be # the same, but that 2D inner loop reuse logic is only triggered # during the buffer copying step, and hence it was invalid to # rely on those values. The fix is to check all the dimensions # of the stride in question, which in the test case reveals that # the stride is not fixed. # # NOTE: This test is triggered by the fact that the default buffersize, # used by einsum, is 8192, and 3*2731 = 8193, is larger than that # and results in a mismatch between the buffering and the # striding for operand A. A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) es = np.einsum("cl, cpx->lpx", A, B) tp = np.tensordot(A, B, axes=(0, 0)) assert_equal(es, tp) # The following is the original test case from the bug report, # made repeatable by changing random arrays to aranges. A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) es = np.einsum("cl, cpxy->lpxy", A, B) tp = np.tensordot(A, B, axes=(0, 0)) assert_equal(es, tp)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_fixed_collapsingbug
def test_einsum_fixed_collapsingbug(self): # Issue #5147. # The bug only occurred when the output argument of einsum was used. x = np.random.normal(0, 1, (5, 5, 5, 5)) y1 = np.zeros((5, 5)) np.einsum("aabb->ab", x, out=y1) idx = np.arange(5) y2 = x[idx[:, None], idx[:, None], idx, idx] assert_equal(y1, y2)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_different_paths
def test_different_paths(self, dtype): # Test originally added to cover broken float16 path: gh-20305 # Likely most are covered elsewhere, at least partially. dtype = np.dtype(dtype) # Simple test, designed to exercise most specialized code paths, # note the +0.5 for floats. This makes sure we use a float value # where the results must be exact. arr = (np.arange(7) + 0.5).astype(dtype) scalar = np.array(2, dtype=dtype) # contig -> scalar: res = np.einsum("i->", arr) assert res == arr.sum() # contig, contig -> contig: res = np.einsum("i,i->i", arr, arr) assert_array_equal(res, arr * arr) # noncontig, noncontig -> contig: res = np.einsum("i,i->i", arr.repeat(2)[::2], arr.repeat(2)[::2]) assert_array_equal(res, arr * arr) # contig + contig -> scalar assert np.einsum("i,i->", arr, arr) == (arr * arr).sum() # contig + scalar -> contig (with out) out = np.ones(7, dtype=dtype) res = np.einsum("i,->i", arr, dtype.type(2), out=out) assert_array_equal(res, arr * dtype.type(2)) # scalar + contig -> contig (with out) res = np.einsum(",i->i", scalar, arr) assert_array_equal(res, arr * dtype.type(2)) # scalar + contig -> scalar res = np.einsum(",i->", scalar, arr) # Use einsum to compare to not have difference due to sum round-offs: assert res == np.einsum("i->", scalar * arr) # contig + scalar -> scalar res = np.einsum("i,->", arr, scalar) # Use einsum to compare to not have difference due to sum round-offs: assert res == np.einsum("i->", scalar * arr) # contig + contig + contig -> scalar if dtype in ["e", "B", "b"]: # FIXME make xfail raise SkipTest("overflow differs in pytorch and numpy") arr = np.array([0.5, 0.5, 0.25, 4.5, 3.0], dtype=dtype) res = np.einsum("i,i,i->", arr, arr, arr) assert_array_equal(res, (arr * arr * arr).sum()) # four arrays: res = np.einsum("i,i,i,i->", arr, arr, arr, arr) assert_array_equal(res, (arr * arr * arr * arr).sum())
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_small_boolean_arrays
def test_small_boolean_arrays(self): # See gh-5946. # Use array of True embedded in False. a = np.zeros((16, 1, 1), dtype=np.bool_)[:2] a[...] = True out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] tgt = np.ones((2, 1, 1), dtype=np.bool_) res = np.einsum("...ij,...jk->...ik", a, a, out=out) assert_equal(res, tgt)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_out_is_res
def test_out_is_res(self): a = np.arange(9).reshape(3, 3) res = np.einsum("...ij,...jk->...ik", a, a, out=a) assert res is a
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
optimize_compare
def optimize_compare(self, subscripts, operands=None): # Tests all paths of the optimization function against # conventional einsum if operands is None: args = [subscripts] terms = subscripts.split("->")[0].split(",") for term in terms: dims = [global_size_dict[x] for x in term] args.append(np.random.rand(*dims)) else: args = [subscripts] + operands noopt = np.einsum(*args, optimize=False) opt = np.einsum(*args, optimize="greedy") assert_almost_equal(opt, noopt) opt = np.einsum(*args, optimize="optimal") assert_almost_equal(opt, noopt)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
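optimize_compare contracts each expression with optimize=False, "greedy", and "optimal"; a small sketch (not from the records, plain NumPy) of inspecting what those strategies choose via np.einsum_path:

import numpy as np

a = np.random.rand(2, 2)
b = np.random.rand(2, 5)
c = np.random.rand(5, 2)
# returns the chosen contraction order plus a human-readable report
path, info = np.einsum_path("ij,jk,kl->il", a, b, c, optimize="greedy")
print(path)  # e.g. ['einsum_path', (1, 2), (0, 1)]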
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_hadamard_like_products
def test_hadamard_like_products(self): # Hadamard outer products self.optimize_compare("a,ab,abc->abc") self.optimize_compare("a,b,ab->ab")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_index_transformations
def test_index_transformations(self): # Simple index transformation cases self.optimize_compare("ea,fb,gc,hd,abcd->efgh") self.optimize_compare("ea,fb,abcd,gc,hd->efgh") self.optimize_compare("abcd,ea,fb,gc,hd->efgh")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_dtype.py
test_recursion
def test_recursion(self): class dt: pass dt.dtype = dt with pytest.raises(RecursionError): np.dtype(dt) dt_instance = dt() dt_instance.dtype = dt with pytest.raises(RecursionError): np.dtype(dt_instance)
import functools import operator import pickle import sys import types from itertools import permutations from typing import Any from unittest import skipIf as skipif import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xfailIfTorchDynamo, xpassIfTorchDynamo, ) skip = functools.partial(skipif, True) import numpy as np from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy.testing import assert_, assert_equal import numpy class TestFromDTypeAttribute(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_views
def test_einsum_views(self): # pass-through for do_opt in [True, False]: a = np.arange(6) a = a.reshape(2, 3) b = np.einsum("...", a, optimize=do_opt) assert_(b.tensor._base is a.tensor) b = np.einsum(a, [Ellipsis], optimize=do_opt) assert_(b.base is a) b = np.einsum("ij", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) assert_(b.base is a) assert_equal(b, a) # output is writeable whenever input is writeable b = np.einsum("...", a, optimize=do_opt) assert_(b.flags["WRITEABLE"]) a.flags["WRITEABLE"] = False b = np.einsum("...", a, optimize=do_opt) assert_(not b.flags["WRITEABLE"]) # transpose a = np.arange(6) a.shape = (2, 3) b = np.einsum("ji", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, a.T) # diagonal a = np.arange(9) a.shape = (3, 3) b = np.einsum("ii->i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension a = np.arange(27) a.shape = (3, 3, 3) b = np.einsum("...ii->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal a = np.arange(27) a.shape = (3, 3, 3) b = np.einsum("iii->i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes a = np.arange(24) a.shape = (2, 3, 4) b = np.einsum("ijk->jik", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) assert_(b.base is a) assert_equal(b, a.swapaxes(0, 1)) # @np._no_nep50_warning()
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
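The property exercised above is view semantics: for pass-through, transpose, and diagonal subscripts, einsum hands back a strided view of its input rather than a copy. A minimal standalone sketch against stock NumPy (the torch._numpy wrapper under test additionally exposes a .tensor attribute, which is why the first assertion above reads b.tensor._base):

import numpy as np

a = np.arange(9).reshape(3, 3)
diag = np.einsum("ii->i", a)  # diagonal extracted as a view, no copy
assert diag.base is a and np.shares_memory(diag, a)
assert list(diag) == [0, 4, 8]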
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_einsum_sums_int16
def test_einsum_sums_int16(self):
    self.check_einsum_sums("i2")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_complex
def test_complex(self):
    # Long test cases
    self.optimize_compare("acdf,jbje,gihb,hfac,gfac,gifabc,hfac")
    self.optimize_compare("acdf,jbje,gihb,hfac,gfac,gifabc,hfac")
    self.optimize_compare("cd,bdhe,aidb,hgca,gc,hgibcd,hgac")
    self.optimize_compare("abhe,hidj,jgba,hiab,gab")
    self.optimize_compare("bde,cdh,agdb,hica,ibd,hgicd,hiac")
    self.optimize_compare("chd,bde,agbc,hiad,hgc,hgi,hiad")
    self.optimize_compare("chd,bde,agbc,hiad,bdi,cgh,agdb")
    self.optimize_compare("bdhe,acad,hiab,agac,hibd")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
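The optimize_compare helper these cases lean on is defined elsewhere in the file and not captured in this record. A hypothetical reconstruction of its contract, assuming it builds random operands from the module-level per-letter size table and treats the unoptimized contraction as the reference:

import numpy as np

size_dict = dict(zip("abcdefghij", [2, 3, 4, 5, 4, 3, 2, 6, 5, 4]))

def optimize_compare(subscripts):
    # sketch only: every optimized strategy must reproduce optimize=False
    terms = subscripts.split("->")[0].split(",")
    operands = [np.random.rand(*[size_dict[c] for c in t]) for t in terms]
    noopt = np.einsum(subscripts, *operands, optimize=False)
    for mode in (True, "greedy", "optimal"):
        assert np.allclose(np.einsum(subscripts, *operands, optimize=mode), noopt)

optimize_compare("bdhe,acad,hiab,agac,hibd")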
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_collapse
def test_collapse(self):
    # Inner products
    self.optimize_compare("ab,ab,c->")
    self.optimize_compare("ab,ab,c->c")
    self.optimize_compare("ab,ab,cd,cd->")
    self.optimize_compare("ab,ab,cd,cd->ac")
    self.optimize_compare("ab,ab,cd,cd->cd")
    self.optimize_compare("ab,ab,cd,cd,ef,ef->")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
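To make the first subscript above concrete: a full collapse factorizes into independent sums over each operand group. A quick sanity check in stock NumPy, with arbitrary illustrative shapes:

import numpy as np

a = np.random.rand(2, 3)
b = np.random.rand(2, 3)
c = np.random.rand(4)
# "ab,ab,c->" sums over every index: equals sum(a * b) * sum(c)
assert np.allclose(np.einsum("ab,ab,c->", a, b, c), (a * b).sum() * c.sum())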
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_expand
def test_expand(self):
    # Outer products
    self.optimize_compare("ab,cd,ef->abcdef")
    self.optimize_compare("ab,cd,ef->acdf")
    self.optimize_compare("ab,cd,de->abcde")
    self.optimize_compare("ab,cd,de->be")
    self.optimize_compare("ab,bcd,cd->abcd")
    self.optimize_compare("ab,bcd,cd->abd")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
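The outer-product subscripts above have a simple broadcasting equivalent, which is what makes them a good optimizer stress test (no index is ever summed away in the first case). A standalone check in stock NumPy:

import numpy as np

a = np.random.rand(2, 3)
b = np.random.rand(4, 5)
out = np.einsum("ab,cd->abcd", a, b)  # pure outer product, shape (2, 3, 4, 5)
assert np.allclose(out, a[:, :, None, None] * b[None, None, :, :])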
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_edge_cases
def test_edge_cases(self):
    # Difficult edge cases for optimization
    self.optimize_compare("eb,cb,fb->cef")
    self.optimize_compare("dd,fb,be,cdb->cef")
    self.optimize_compare("bca,cdb,dbf,afc->")
    self.optimize_compare("dcc,fce,ea,dbf->ab")
    self.optimize_compare("fdf,cdd,ccd,afe->ae")
    self.optimize_compare("abcd,ad")
    self.optimize_compare("ed,fcd,ff,bcf->be")
    self.optimize_compare("baa,dcf,af,cde->be")
    self.optimize_compare("bd,db,eac->ace")
    self.optimize_compare("fff,fae,bef,def->abd")
    self.optimize_compare("efc,dbc,acf,fd->abe")
    self.optimize_compare("ba,ac,da->bcd")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_inner_product
def test_inner_product(self):
    # Inner products
    self.optimize_compare("ab,ab")
    self.optimize_compare("ab,ba")
    self.optimize_compare("abc,abc")
    self.optimize_compare("abc,bac")
    self.optimize_compare("abc,cba")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_random_cases
def test_random_cases(self):
    # Randomly built test cases
    self.optimize_compare("aab,fa,df,ecc->bde")
    self.optimize_compare("ecb,fef,bad,ed->ac")
    self.optimize_compare("bcf,bbb,fbf,fc->")
    self.optimize_compare("bb,ff,be->e")
    self.optimize_compare("bcb,bb,fc,fff->")
    self.optimize_compare("fbb,dfd,fc,fc->")
    self.optimize_compare("afd,ba,cc,dc->bf")
    self.optimize_compare("adb,bc,fa,cfc->d")
    self.optimize_compare("bbd,bda,fc,db->acf")
    self.optimize_compare("dba,ead,cad->bce")
    self.optimize_compare("aef,fbc,dca->bde")
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_combined_views_mapping
def test_combined_views_mapping(self):
    # gh-10792
    a = np.arange(9).reshape(1, 1, 3, 1, 3)
    b = np.einsum("bbcdc->d", a)
    assert_equal(b, [12])
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_output_order
def test_output_order(self):
    # Ensure output order is respected for optimize cases; the below
    # contraction should yield a reshaped tensor view
    # gh-16415

    a = np.ones((2, 3, 5), order="F")
    b = np.ones((4, 3), order="F")

    for opt in [True, False]:
        tmp = np.einsum("...ft,mf->...mt", a, b, order="a", optimize=opt)
        assert_(tmp.flags.f_contiguous)

        tmp = np.einsum("...ft,mf->...mt", a, b, order="f", optimize=opt)
        assert_(tmp.flags.f_contiguous)

        tmp = np.einsum("...ft,mf->...mt", a, b, order="c", optimize=opt)
        assert_(tmp.flags.c_contiguous)

        tmp = np.einsum("...ft,mf->...mt", a, b, order="k", optimize=opt)
        assert_(tmp.flags.c_contiguous is False)
        assert_(tmp.flags.f_contiguous is False)

        tmp = np.einsum("...ft,mf->...mt", a, b, optimize=opt)
        assert_(tmp.flags.c_contiguous is False)
        assert_(tmp.flags.f_contiguous is False)

    c = np.ones((4, 3), order="C")
    for opt in [True, False]:
        tmp = np.einsum("...ft,mf->...mt", a, c, order="a", optimize=opt)
        assert_(tmp.flags.c_contiguous)

    d = np.ones((2, 3, 5), order="C")
    for opt in [True, False]:
        tmp = np.einsum("...ft,mf->...mt", d, c, order="a", optimize=opt)
        assert_(tmp.flags.c_contiguous)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @instantiate_parametrized_tests class TestEinsum(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
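The order= keyword follows the usual NumPy layout convention: "c" and "f" force C or Fortran order, "a" means Fortran when every input is Fortran-contiguous and C otherwise, and "k" keeps a layout close to the inputs'. A standalone check of the all-Fortran case in stock NumPy:

import numpy as np

a = np.ones((2, 3, 5), order="F")
b = np.ones((4, 3), order="F")
t = np.einsum("...ft,mf->...mt", a, b, order="a")
assert t.flags.f_contiguous  # all-Fortran inputs with order="a" stay Fortran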
torch
test/torch_np/numpy_tests/core/test_einsum.py
build_operands
def build_operands(self, string, size_dict=global_size_dict):
    # Builds views based off initial operands
    operands = [string]
    terms = string.split("->")[0].split(",")
    for term in terms:
        dims = [size_dict[x] for x in term]
        operands.append(np.random.rand(*dims))

    return operands
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
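For orientation, the helper returns the subscript string followed by random operands whose shapes are read off the per-letter size table, ready to be splatted into np.einsum_path. An inlined equivalent (sizes assumed to mirror the module-level global_size_dict):

import numpy as np

size_dict = dict(zip("abcdefghij", [2, 3, 4, 5, 4, 3, 2, 6, 5, 4]))

string = "ab,bc->ac"
operands = [string]
for term in string.split("->")[0].split(","):
    operands.append(np.random.rand(*[size_dict[x] for x in term]))
# operands == ["ab,bc->ac", <2x3 array>, <3x4 array>]
path, desc = np.einsum_path(*operands, optimize="greedy")
print(path)  # ['einsum_path', (0, 1)] for a two-operand contraction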
torch
test/torch_np/numpy_tests/core/test_einsum.py
assert_path_equal
def assert_path_equal(self, comp, benchmark):
    # Checks if lists of tuples are equivalent
    ret = len(comp) == len(benchmark)
    assert_(ret)
    for pos in range(len(comp) - 1):
        ret &= isinstance(comp[pos + 1], tuple)
        ret &= comp[pos + 1] == benchmark[pos + 1]
    assert_(ret)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_memory_contraints
def test_memory_contraints(self):
    # Ensure memory constraints are satisfied
    outer_test = self.build_operands("a,b,c->abc")

    path, path_str = np.einsum_path(*outer_test, optimize=("greedy", 0))
    self.assert_path_equal(path, ["einsum_path", (0, 1, 2)])

    path, path_str = np.einsum_path(*outer_test, optimize=("optimal", 0))
    self.assert_path_equal(path, ["einsum_path", (0, 1, 2)])

    long_test = self.build_operands("acdf,jbje,gihb,hfac")

    path, path_str = np.einsum_path(*long_test, optimize=("greedy", 0))
    self.assert_path_equal(path, ["einsum_path", (0, 1, 2, 3)])

    path, path_str = np.einsum_path(*long_test, optimize=("optimal", 0))
    self.assert_path_equal(path, ["einsum_path", (0, 1, 2, 3)])
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
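The tuple form optimize=(strategy, limit) caps the size of any intermediate array the planner may create; with a limit of 0 no pairwise intermediate is admissible, so the path collapses to a single all-at-once contraction, which is exactly what the assertions above expect. A standalone check:

import numpy as np

x, y, z = (np.random.rand(4) for _ in range(3))
path, desc = np.einsum_path("a,b,c->abc", x, y, z, optimize=("greedy", 0))
assert path == ["einsum_path", (0, 1, 2)]  # one ternary step, no intermediates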
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_long_paths
def test_long_paths(self):
    # Long complex cases

    # Long test 1
    long_test1 = self.build_operands("acdf,jbje,gihb,hfac,gfac,gifabc,hfac")
    path, path_str = np.einsum_path(*long_test1, optimize="greedy")
    self.assert_path_equal(
        path, ["einsum_path", (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]
    )
    path, path_str = np.einsum_path(*long_test1, optimize="optimal")
    self.assert_path_equal(
        path, ["einsum_path", (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]
    )

    # Long test 2
    long_test2 = self.build_operands("chd,bde,agbc,hiad,bdi,cgh,agdb")
    path, path_str = np.einsum_path(*long_test2, optimize="greedy")
    self.assert_path_equal(
        path, ["einsum_path", (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]
    )
    path, path_str = np.einsum_path(*long_test2, optimize="optimal")
    self.assert_path_equal(
        path, ["einsum_path", (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]
    )
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_edge_paths
def test_edge_paths(self):
    # Difficult edge cases

    # Edge test1
    edge_test1 = self.build_operands("eb,cb,fb->cef")
    path, path_str = np.einsum_path(*edge_test1, optimize="greedy")
    self.assert_path_equal(path, ["einsum_path", (0, 2), (0, 1)])
    path, path_str = np.einsum_path(*edge_test1, optimize="optimal")
    self.assert_path_equal(path, ["einsum_path", (0, 2), (0, 1)])

    # Edge test2
    edge_test2 = self.build_operands("dd,fb,be,cdb->cef")
    path, path_str = np.einsum_path(*edge_test2, optimize="greedy")
    self.assert_path_equal(path, ["einsum_path", (0, 3), (0, 1), (0, 1)])
    path, path_str = np.einsum_path(*edge_test2, optimize="optimal")
    self.assert_path_equal(path, ["einsum_path", (0, 3), (0, 1), (0, 1)])

    # Edge test3
    edge_test3 = self.build_operands("bca,cdb,dbf,afc->")
    path, path_str = np.einsum_path(*edge_test3, optimize="greedy")
    self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 2), (0, 1)])
    path, path_str = np.einsum_path(*edge_test3, optimize="optimal")
    self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 2), (0, 1)])

    # Edge test4
    edge_test4 = self.build_operands("dcc,fce,ea,dbf->ab")
    path, path_str = np.einsum_path(*edge_test4, optimize="greedy")
    self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 1), (0, 1)])
    path, path_str = np.einsum_path(*edge_test4, optimize="optimal")
    self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 2), (0, 1)])

    # Edge test5
    edge_test4 = self.build_operands(
        "a,ac,ab,ad,cd,bd,bc->", size_dict={"a": 20, "b": 20, "c": 20, "d": 20}
    )
    path, path_str = np.einsum_path(*edge_test4, optimize="greedy")
    self.assert_path_equal(path, ["einsum_path", (0, 1), (0, 1, 2, 3, 4, 5)])
    path, path_str = np.einsum_path(*edge_test4, optimize="optimal")
    self.assert_path_equal(path, ["einsum_path", (0, 1), (0, 1, 2, 3, 4, 5)])
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_path_type_input
def test_path_type_input(self):
    # Test explicit path handling
    path_test = self.build_operands("dcc,fce,ea,dbf->ab")

    path, path_str = np.einsum_path(*path_test, optimize=False)
    self.assert_path_equal(path, ["einsum_path", (0, 1, 2, 3)])

    path, path_str = np.einsum_path(*path_test, optimize=True)
    self.assert_path_equal(path, ["einsum_path", (1, 2), (0, 1), (0, 1)])

    exp_path = ["einsum_path", (0, 2), (0, 2), (0, 1)]
    path, path_str = np.einsum_path(*path_test, optimize=exp_path)
    self.assert_path_equal(path, exp_path)

    # Double check einsum works on the input path
    noopt = np.einsum(*path_test, optimize=False)
    opt = np.einsum(*path_test, optimize=exp_path)
    assert_almost_equal(noopt, opt)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
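When optimize receives an explicit ["einsum_path", ...] list, planning is skipped and the pairs are contracted in exactly that order; each tuple indexes into the current operand list and the intermediate result is appended at the end. A sketch with hypothetical shapes:

import numpy as np

x = np.random.rand(2, 3)
y = np.random.rand(3, 4)
z = np.random.rand(4, 5)
exp_path = ["einsum_path", (0, 1), (0, 1)]  # contract x with y, then with z
res = np.einsum("ab,bc,cd->ad", x, y, z, optimize=exp_path)
assert np.allclose(res, x @ y @ z)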
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_path_type_input_internal_trace
def test_path_type_input_internal_trace(self):
    # gh-20962
    path_test = self.build_operands("cab,cdd->ab")
    exp_path = ["einsum_path", (1,), (0, 1)]

    path, path_str = np.einsum_path(*path_test, optimize=exp_path)
    self.assert_path_equal(path, exp_path)

    # Double check einsum works on the input path
    noopt = np.einsum(*path_test, optimize=False)
    opt = np.einsum(*path_test, optimize=exp_path)
    assert_almost_equal(noopt, opt)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_path_type_input_invalid
def test_path_type_input_invalid(self):
    path_test = self.build_operands("ab,bc,cd,de->ae")
    exp_path = ["einsum_path", (2, 3), (0, 1)]
    assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
    assert_raises(RuntimeError, np.einsum_path, *path_test, optimize=exp_path)

    path_test = self.build_operands("a,a,a->a")
    exp_path = ["einsum_path", (1,), (0, 1)]
    assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
    assert_raises(RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_spaces
def test_spaces(self):
    # gh-10794
    arr = np.array([[1]])
    for sp in itertools.product(["", " "], repeat=4):
        # no error for any spacing
        np.einsum("{}...a{}->{}...a{}".format(*sp), arr)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) @skip(reason="no pytorch analog") class TestEinsumPath(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_einsum.py
test_overlap
def test_overlap(self):
    a = np.arange(9, dtype=int).reshape(3, 3)
    b = np.arange(9, dtype=int).reshape(3, 3)
    d = np.dot(a, b)

    # sanity check
    c = np.einsum("ij,jk->ik", a, b)
    assert_equal(c, d)

    # gh-10080, out overlaps one of the operands
    c = np.einsum("ij,jk->ik", a, b, out=b)
    assert_equal(c, d)
import functools import itertools from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest from pytest import raises as assert_raises import torch._numpy as np from torch._numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_array_equal, assert_equal, suppress_warnings, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) skip = functools.partial(skipif, True) chars = "abcdefghij" sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) class TestMisc(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
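The gh-10080 behavior checked above requires einsum to buffer internally when out aliases an operand, rather than clobbering an input mid-contraction. A standalone check in stock NumPy:

import numpy as np

a = np.arange(9).reshape(3, 3)
b = np.arange(9).reshape(3, 3)
expected = a @ b
c = np.einsum("ij,jk->ik", a, b, out=b)  # out aliases the second operand
assert c is b and np.array_equal(c, expected)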
torch
test/torch_np/numpy_tests/core/test_getlimits.py
test_singleton
def test_singleton(self):
    ftype = finfo(float)
    ftype2 = finfo(float)
    assert_equal(id(ftype), id(ftype2))
import functools import warnings from unittest import expectedFailure as xfail, skipIf import numpy from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import double, finfo, half, iinfo, single from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy import double, finfo, half, iinfo, single from torch._numpy.testing import assert_, assert_equal skip = functools.partial(skipIf, True) @skip(reason="torch.finfo is not a singleton. Why demanding it is?") class TestPythonFloat(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
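The skip reason in the surrounding context explains this record: stock NumPy memoizes finfo per dtype, so repeated calls return the very same object, while torch.finfo builds a fresh object on each call. The NumPy side in one line:

import numpy as np

assert np.finfo(np.float64) is np.finfo(np.float64)  # cached per dtype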
torch
test/torch_np/numpy_tests/core/test_getlimits.py
test_basic_missing
def test_basic_missing(self):
    dt = np.float32
    for attr in [
        "epsneg",
        "iexp",
        "machep",
        "maxexp",
        "minexp",
        "negep",
        "nexp",
        "nmant",
        "precision",
        "smallest_subnormal",
    ]:
        getattr(finfo(dt), attr)
import functools import warnings from unittest import expectedFailure as xfail, skipIf import numpy from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import double, finfo, half, iinfo, single from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy import double, finfo, half, iinfo, single from torch._numpy.testing import assert_, assert_equal skip = functools.partial(skipIf, True) class TestFinfo(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
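For concreteness, these attributes describe the IEEE 754 layout of the floating type; the standard single-precision values are:

import numpy as np

info = np.finfo(np.float32)
assert info.nmant == 23     # explicit mantissa (fraction) bits
assert info.minexp == -126  # most negative normal exponent
assert info.maxexp == 128   # one past the largest exponent
assert info.precision == 6  # approximate decimal digits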
torch
test/torch_np/numpy_tests/core/test_getlimits.py
test_unsigned_max
def test_unsigned_max(self, T):
    max_calculated = T(0) - T(1)
    assert_equal(iinfo(T).max, max_calculated)
import functools import warnings from unittest import expectedFailure as xfail, skipIf import numpy from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import double, finfo, half, iinfo, single from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy import double, finfo, half, iinfo, single from torch._numpy.testing import assert_, assert_equal skip = functools.partial(skipIf, True) @instantiate_parametrized_tests class TestIinfo(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
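The T(0) - T(1) trick relies on modular wraparound: for an unsigned type the subtraction wraps to the largest representable value, equivalently 2**bits - 1:

import numpy as np

for T in (np.uint8, np.uint16, np.uint32, np.uint64):
    bits = 8 * np.dtype(T).itemsize
    assert np.iinfo(T).max == 2**bits - 1  # e.g. 255 for uint8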
torch
test/torch_np/numpy_tests/core/test_getlimits.py
test_finfo_repr
def test_finfo_repr(self):
    repr_f32 = repr(np.finfo(np.float32))
    assert "finfo(resolution=1e-06, min=-3.40282e+38," in repr_f32
    assert "dtype=float32" in repr_f32
import functools import warnings from unittest import expectedFailure as xfail, skipIf import numpy from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import double, finfo, half, iinfo, single from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy import double, finfo, half, iinfo, single from torch._numpy.testing import assert_, assert_equal skip = functools.partial(skipIf, True) class TestRepr(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_getlimits.py
assert_ma_equal
def assert_ma_equal(discovered, ma_like):
    # Check MachAr-like objects same as calculated MachAr instances
    for key, value in discovered.__dict__.items():
        assert_equal(value, getattr(ma_like, key))
        if hasattr(value, "shape"):
            assert_equal(value.shape, getattr(ma_like, key).shape)
            assert_equal(value.dtype, getattr(ma_like, key).dtype)


class TestMisc(TestCase):
    @skip(reason="Instantiate {i,f}info from dtypes.")
    def test_instances(self):
        iinfo(10)
        finfo(3.0)

    @skip(reason="MachAr not implemented (does it need to be?)")
    def test_known_types(self):
        # Test we are correctly compiling parameters for known types
        for ftype, ma_like in (
            (np.float16, _float_ma[16]),
            (np.float32, _float_ma[32]),
            (np.float64, _float_ma[64]),
        ):
            assert_ma_equal(_discovered_machar(ftype), ma_like)
        # Suppress warning for broken discovery of double double on PPC
        ld_ma = _discovered_machar(np.longdouble)
        bytes = np.dtype(np.longdouble).itemsize
        if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
            # 80-bit extended precision
            assert_ma_equal(ld_ma, _float_ma[80])
        elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
            # IEEE 754 128-bit
            assert_ma_equal(ld_ma, _float_ma[128])

    @skip(reason="MachAr not implemented (does it need to be?)")
    def test_subnormal_warning(self):
        """Test that the subnormal-is-zero warning is not being raised."""
        ld_ma = _discovered_machar(np.longdouble)
        bytes = np.dtype(np.longdouble).itemsize
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
                # 80-bit extended precision
                ld_ma.smallest_subnormal
                assert len(w) == 0
            elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
                # IEEE 754 128-bit
                ld_ma.smallest_subnormal
                assert len(w) == 0
            else:
                # Double double
                ld_ma.smallest_subnormal
                # This test may fail on some platforms
                assert len(w) == 0

    @xpassIfTorchDynamo  # (reason="None of nmant, minexp, maxexp is implemented.")
    def test_plausible_finfo(self):
        # Assert that finfo returns reasonable results for all types
        for ftype in (
            [np.float16, np.float32, np.float64, np.longdouble]
            + [
                np.complex64,
                np.complex128,
            ]  # no complex256 in torch._numpy
            + ([np.clongdouble] if hasattr(np, "clongdouble") else [])
        ):
            info = np.finfo(ftype)
            assert_(info.nmant > 1)
            assert_(info.minexp < -1)
            assert_(info.maxexp > 1)


if __name__ == "__main__":
    run_tests()
import functools import warnings from unittest import expectedFailure as xfail, skipIf import numpy from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import double, finfo, half, iinfo, single from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy import double, finfo, half, iinfo, single from torch._numpy.testing import assert_, assert_equal skip = functools.partial(skipIf, True)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/torch_np/numpy_tests/core/test_getlimits.py
test_plausible_finfo
def test_plausible_finfo(self):
    # Assert that finfo returns reasonable results for all types
    for ftype in (
        [np.float16, np.float32, np.float64, np.longdouble]
        + [
            np.complex64,
            np.complex128,
        ]  # no complex256 in torch._numpy
        + ([np.clongdouble] if hasattr(np, "clongdouble") else [])
    ):
        info = np.finfo(ftype)
        assert_(info.nmant > 1)
        assert_(info.minexp < -1)
        assert_(info.maxexp > 1)
import functools import warnings from unittest import expectedFailure as xfail, skipIf import numpy from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy import double, finfo, half, iinfo, single from numpy.testing import assert_, assert_equal import torch._numpy as np from torch._numpy import double, finfo, half, iinfo, single from torch._numpy.testing import assert_, assert_equal skip = functools.partial(skipIf, True) class TestMisc(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
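As a concrete reference for the inequalities asserted above, standard double precision sits well clear of the bounds:

import numpy as np

info = np.finfo(np.float64)
assert (info.nmant, info.minexp, info.maxexp) == (52, -1022, 1024)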
torch
test/torch_np/numpy_tests/core/test_indexing.py
test_index_no_floats
def test_index_no_floats(self):
    a = np.array([[[5]]])

    assert_raises(IndexError, lambda: a[0.0])
    assert_raises(IndexError, lambda: a[0, 0.0])
    assert_raises(IndexError, lambda: a[0.0, 0])
    assert_raises(IndexError, lambda: a[0.0, :])
    assert_raises(IndexError, lambda: a[:, 0.0])
    assert_raises(IndexError, lambda: a[:, 0.0, :])
    assert_raises(IndexError, lambda: a[0.0, :, :])
    assert_raises(IndexError, lambda: a[0, 0, 0.0])
    assert_raises(IndexError, lambda: a[0.0, 0, 0])
    assert_raises(IndexError, lambda: a[0, 0.0, 0])
    assert_raises(IndexError, lambda: a[-1.4])
    assert_raises(IndexError, lambda: a[0, -1.4])
    assert_raises(IndexError, lambda: a[-1.4, 0])
    assert_raises(IndexError, lambda: a[-1.4, :])
    assert_raises(IndexError, lambda: a[:, -1.4])
    assert_raises(IndexError, lambda: a[:, -1.4, :])
    assert_raises(IndexError, lambda: a[-1.4, :, :])
    assert_raises(IndexError, lambda: a[0, 0, -1.4])
    assert_raises(IndexError, lambda: a[-1.4, 0, 0])
    assert_raises(IndexError, lambda: a[0, -1.4, 0])

    # Note torch validates index arguments "depth-first", so will prioritise
    # raising TypeError over IndexError, e.g.
    #
    # >>> a = np.array([[[5]]])
    # >>> a[0.0:, 0.0]
    # IndexError: only integers, slices (`:`), ellipsis (`...`),
    # numpy.newaxis (`None`) and integer or boolean arrays are
    # valid indices
    # >>> t = torch.as_tensor([[[5]]])  # identical to a
    # >>> t[0.0:, 0.0]
    # TypeError: slice indices must be integers or None or have an
    # __index__ method
    #
    assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0])
    assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0, :])
import functools import operator import re import sys import warnings from itertools import product from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest import pytest from pytest import raises as assert_raises from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO, TestCase, xpassIfTorchDynamo, ) import numpy as np from numpy.testing import ( assert_, assert_array_equal, assert_equal, assert_warns, HAS_REFCOUNT, ) import torch._numpy as np from torch._numpy.testing import ( assert_, assert_array_equal, assert_equal, assert_warns, HAS_REFCOUNT, ) skip = functools.partial(skipif, True) @instantiate_parametrized_tests class TestIndexing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
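Stock NumPy rejects float indices with IndexError across the board, while torch validates arguments depth-first and can surface TypeError for float slice bounds, which is why the mixed slice cases above accept either exception. A quick standalone check:

import numpy as np

a = np.array([[[5]]])
for bad_index in (lambda: a[0.0], lambda: a[0, -1.4], lambda: a[0.0:, 0.0]):
    try:
        bad_index()
        raise AssertionError("float index unexpectedly accepted")
    except (IndexError, TypeError):
        pass  # rejected, matching the assertions above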