# Owner(s): ["oncall: jit"]
import io
import os
import sys
import copy
import unittest
import torch
from typing import Optional
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
find_library_location,
)
from torch.testing import FileCheck
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestTorchbind(JitTestCase):
def setUp(self):
if IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE:
raise unittest.SkipTest("non-portable load_library call used in test")
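# libtorchbind_test.so registers the torch.classes._TorchScriptTesting.* custom
# classes and the torch.ops._TorchScriptTesting.* custom ops used throughout
# these tests.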
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def test_torchbind(self):
def test_equality(f, cmp_key):
obj1 = f()
obj2 = torch.jit.script(f)()
return (cmp_key(obj1), cmp_key(obj2))
def f():
val = torch.classes._TorchScriptTesting._Foo(5, 3)
val.increment(1)
return val
test_equality(f, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "Expected a value of type 'int'"):
val = torch.classes._TorchScriptTesting._Foo(5, 3)
val.increment('foo')
def f():
ss = torch.classes._TorchScriptTesting._StackString(["asdf", "bruh"])
return ss.pop()
test_equality(f, lambda x: x)
def f():
ss1 = torch.classes._TorchScriptTesting._StackString(["asdf", "bruh"])
ss2 = torch.classes._TorchScriptTesting._StackString(["111", "222"])
ss1.push(ss2.pop())
return ss1.pop() + ss2.pop()
test_equality(f, lambda x: x)
# test nn module with prepare_scriptable function
class NonJitableClass(object):
def __init__(self, int1, int2):
self.int1 = int1
self.int2 = int2
def return_vals(self):
return self.int1, self.int2
class CustomWrapper(torch.nn.Module):
def __init__(self, foo):
super(CustomWrapper, self).__init__()
self.foo = foo
def forward(self) -> None:
self.foo.increment(1)
return
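# __prepare_scriptable__ is the hook torch.jit.script invokes before compiling
# a module; returning a fresh CustomWrapper here swaps the non-scriptable
# NonJitableClass for the TorchBind _Foo so the wrapper can be scripted.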
def __prepare_scriptable__(self):
int1, int2 = self.foo.return_vals()
foo = torch.classes._TorchScriptTesting._Foo(int1, int2)
return CustomWrapper(foo)
foo = CustomWrapper(NonJitableClass(1, 2))
jit_foo = torch.jit.script(foo)
def test_torchbind_take_as_arg(self):
global StackString # see [local resolution in python]
StackString = torch.classes._TorchScriptTesting._StackString
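# StackString needs to live at module scope so the name used in the `# type:`
# comment below can be resolved when foo is compiled.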
def foo(stackstring):
# type: (StackString) -> StackString
stackstring.push("lel")
return stackstring
script_input = torch.classes._TorchScriptTesting._StackString([])
scripted = torch.jit.script(foo)
script_output = scripted(script_input)
self.assertEqual(script_output.pop(), "lel")
def test_torchbind_return_instance(self):
def foo():
ss = torch.classes._TorchScriptTesting._StackString(["hi", "mom"])
return ss
scripted = torch.jit.script(foo)
# Ensure we are creating the object and calling __init__
# rather than calling the __init__ wrapper nonsense
fc = FileCheck().check('prim::CreateObject()')\
.check('prim::CallMethod[name="__init__"]')
fc.run(str(scripted.graph))
out = scripted()
self.assertEqual(out.pop(), "mom")
self.assertEqual(out.pop(), "hi")
def test_torchbind_return_instance_from_method(self):
def foo():
ss = torch.classes._TorchScriptTesting._StackString(["hi", "mom"])
clone = ss.clone()
ss.pop()
return ss, clone
scripted = torch.jit.script(foo)
out = scripted()
self.assertEqual(out[0].pop(), "hi")
self.assertEqual(out[1].pop(), "mom")
self.assertEqual(out[1].pop(), "hi")
def test_torchbind_def_property_getter_setter(self):
def foo_getter_setter_full():
fooGetterSetter = torch.classes._TorchScriptTesting._FooGetterSetter(5, 6)
# getX method intentionally adds 2 to x
old = fooGetterSetter.x
# setX method intentionally adds 2 to x
fooGetterSetter.x = old + 4
new = fooGetterSetter.x
return old, new
self.checkScript(foo_getter_setter_full, ())
def foo_getter_setter_lambda():
foo = torch.classes._TorchScriptTesting._FooGetterSetterLambda(5)
old = foo.x
foo.x = old + 4
new = foo.x
return old, new
self.checkScript(foo_getter_setter_lambda, ())
def test_torchbind_def_property_just_getter(self):
def foo_just_getter():
fooGetterSetter = torch.classes._TorchScriptTesting._FooGetterSetter(5, 6)
# getY method intentionally adds 4 to x
return fooGetterSetter, fooGetterSetter.y
scripted = torch.jit.script(foo_just_getter)
out, result = scripted()
self.assertEqual(result, 10)
with self.assertRaisesRegex(RuntimeError, 'can\'t set attribute'):
out.y = 5
def foo_not_setter():
fooGetterSetter = torch.classes._TorchScriptTesting._FooGetterSetter(5, 6)
old = fooGetterSetter.y
fooGetterSetter.y = old + 4
# getY method intentionally adds 4 to x
return fooGetterSetter.y
with self.assertRaisesRegexWithHighlight(RuntimeError,
'Tried to set read-only attribute: y',
'fooGetterSetter.y = old + 4'):
scripted = torch.jit.script(foo_not_setter)
def test_torchbind_def_property_readwrite(self):
def foo_readwrite():
fooReadWrite = torch.classes._TorchScriptTesting._FooReadWrite(5, 6)
old = fooReadWrite.x
fooReadWrite.x = old + 4
return fooReadWrite.x, fooReadWrite.y
self.checkScript(foo_readwrite, ())
def foo_readwrite_error():
fooReadWrite = torch.classes._TorchScriptTesting._FooReadWrite(5, 6)
fooReadWrite.y = 5
return fooReadWrite
with self.assertRaisesRegexWithHighlight(RuntimeError,
'Tried to set read-only attribute: y',
'fooReadWrite.y = 5'):
scripted = torch.jit.script(foo_readwrite_error)
def test_torchbind_take_instance_as_method_arg(self):
def foo():
ss = torch.classes._TorchScriptTesting._StackString(["mom"])
ss2 = torch.classes._TorchScriptTesting._StackString(["hi"])
ss.merge(ss2)
return ss
scripted = torch.jit.script(foo)
out = scripted()
self.assertEqual(out.pop(), "hi")
self.assertEqual(out.pop(), "mom")
def test_torchbind_return_tuple(self):
def f():
val = torch.classes._TorchScriptTesting._StackString(["3", "5"])
return val.return_a_tuple()
scripted = torch.jit.script(f)
tup = scripted()
self.assertEqual(tup, (1337.0, 123))
def test_torchbind_save_load(self):
def foo():
ss = torch.classes._TorchScriptTesting._StackString(["mom"])
ss2 = torch.classes._TorchScriptTesting._StackString(["hi"])
ss.merge(ss2)
return ss
scripted = torch.jit.script(foo)
self.getExportImportCopy(scripted)
def test_torchbind_lambda_method(self):
def foo():
ss = torch.classes._TorchScriptTesting._StackString(["mom"])
return ss.top()
scripted = torch.jit.script(foo)
self.assertEqual(scripted(), "mom")
def test_torchbind_class_attr_recursive(self):
class FooBar(torch.nn.Module):
def __init__(self, foo_model):
super(FooBar, self).__init__()
self.foo_mod = foo_model
def forward(self) -> int:
return self.foo_mod.info()
def to_ivalue(self):
torchbind_model = torch.classes._TorchScriptTesting._Foo(self.foo_mod.info(), 1)
return FooBar(torchbind_model)
inst = FooBar(torch.classes._TorchScriptTesting._Foo(2, 3))
scripted = torch.jit.script(inst.to_ivalue())
self.assertEqual(scripted(), 6)
def test_torchbind_class_attribute(self):
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
inst = FooBar1234()
scripted = torch.jit.script(inst)
eic = self.getExportImportCopy(scripted)
assert eic() == "deserialized"
for expected in ["deserialized", "was", "i"]:
assert eic.f.pop() == expected
def test_torchbind_getstate(self):
class FooBar4321(torch.nn.Module):
def __init__(self):
super(FooBar4321, self).__init__()
self.f = torch.classes._TorchScriptTesting._PickleTester([3, 4])
def forward(self):
return self.f.top()
inst = FooBar4321()
scripted = torch.jit.script(inst)
eic = self.getExportImportCopy(scripted)
# NB: we expect the values {7, 3, 3, 1} as __getstate__ is defined to
# return {1, 3, 3, 7}. I tried to make this actually depend on the
# values at instantiation in the test with some transformation, but
# because it seems we serialize/deserialize multiple times, that
# transformation doesn't come out the way you would expect it to.
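# (The deserialized stack is [1, 3, 3, 7] and pop() takes elements from the
# end, which is why the values come back as 7, 3, 3, 1 below.)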
assert eic() == 7
for expected in [7, 3, 3, 1]:
assert eic.f.pop() == expected
def test_torchbind_deepcopy(self):
class FooBar4321(torch.nn.Module):
def __init__(self):
super(FooBar4321, self).__init__()
self.f = torch.classes._TorchScriptTesting._PickleTester([3, 4])
def forward(self):
return self.f.top()
inst = FooBar4321()
scripted = torch.jit.script(inst)
copied = copy.deepcopy(scripted)
assert copied.forward() == 7
for expected in [7, 3, 3, 1]:
assert copied.f.pop() == expected
def test_torchbind_python_deepcopy(self):
class FooBar4321(torch.nn.Module):
def __init__(self):
super(FooBar4321, self).__init__()
self.f = torch.classes._TorchScriptTesting._PickleTester([3, 4])
def forward(self):
return self.f.top()
inst = FooBar4321()
copied = copy.deepcopy(inst)
assert copied() == 7
for expected in [7, 3, 3, 1]:
assert copied.f.pop() == expected
def test_torchbind_tracing(self):
class TryTracing(torch.nn.Module):
def __init__(self):
super(TryTracing, self).__init__()
self.f = torch.classes._TorchScriptTesting._PickleTester([3, 4])
def forward(self):
return torch.ops._TorchScriptTesting.take_an_instance(self.f)
traced = torch.jit.trace(TryTracing(), ())
self.assertEqual(torch.zeros(4, 4), traced())
def test_torchbind_pass_wrong_type(self):
with self.assertRaisesRegex(RuntimeError, 'but instead found type \'Tensor\''):
torch.ops._TorchScriptTesting.take_an_instance(torch.rand(3, 4))
def test_torchbind_tracing_nested(self):
class TryTracingNest(torch.nn.Module):
def __init__(self):
super(TryTracingNest, self).__init__()
self.f = torch.classes._TorchScriptTesting._PickleTester([3, 4])
class TryTracing123(torch.nn.Module):
def __init__(self):
super(TryTracing123, self).__init__()
self.nest = TryTracingNest()
def forward(self):
return torch.ops._TorchScriptTesting.take_an_instance(self.nest.f)
traced = torch.jit.trace(TryTracing123(), ())
self.assertEqual(torch.zeros(4, 4), traced())
def test_torchbind_pickle_serialization(self):
nt = torch.classes._TorchScriptTesting._PickleTester([3, 4])
b = io.BytesIO()
torch.save(nt, b)
b.seek(0)
nt_loaded = torch.load(b)
for exp in [7, 3, 3, 1]:
self.assertEqual(nt_loaded.pop(), exp)
def test_torchbind_instantiate_missing_class(self):
with self.assertRaisesRegex(RuntimeError, 'Tried to instantiate class \'foo.IDontExist\', but it does not exist!'):
torch.classes.foo.IDontExist(3, 4, 5)
def test_torchbind_optional_explicit_attr(self):
class TorchBindOptionalExplicitAttr(torch.nn.Module):
foo : Optional[torch.classes._TorchScriptTesting._StackString]
def __init__(self):
super().__init__()
self.foo = torch.classes._TorchScriptTesting._StackString(["test"])
def forward(self) -> str:
foo_obj = self.foo
if foo_obj is not None:
return foo_obj.pop()
else:
return '<None>'
mod = TorchBindOptionalExplicitAttr()
scripted = torch.jit.script(mod)
def test_torchbind_no_init(self):
with self.assertRaisesRegex(RuntimeError, 'torch::init'):
x = torch.classes._TorchScriptTesting._NoInit()
def test_profiler_custom_op(self):
inst = torch.classes._TorchScriptTesting._PickleTester([3, 4])
with torch.autograd.profiler.profile() as prof:
torch.ops._TorchScriptTesting.take_an_instance(inst)
found_event = False
for e in prof.function_events:
if e.name == '_TorchScriptTesting::take_an_instance':
found_event = True
self.assertTrue(found_event)
def test_torchbind_getattr(self):
foo = torch.classes._TorchScriptTesting._StackString(["test"])
self.assertEqual(None, getattr(foo, 'bar', None))
def test_torchbind_attr_exception(self):
foo = torch.classes._TorchScriptTesting._StackString(["test"])
with self.assertRaisesRegex(AttributeError, 'does not have a field'):
foo.bar
def test_lambda_as_constructor(self):
obj_no_swap = torch.classes._TorchScriptTesting._LambdaInit(4, 3, False)
self.assertEqual(obj_no_swap.diff(), 1)
obj_swap = torch.classes._TorchScriptTesting._LambdaInit(4, 3, True)
self.assertEqual(obj_swap.diff(), -1)
def test_staticmethod(self):
def fn(inp: int) -> int:
return torch.classes._TorchScriptTesting._StaticMethod.staticMethod(inp)
self.checkScript(fn, (1,))
def test_default_args(self):
def fn() -> int:
obj = torch.classes._TorchScriptTesting._DefaultArgs()
obj.increment(5)
obj.decrement()
obj.decrement(2)
obj.divide()
obj.scale_add(5)
obj.scale_add(3, 2)
obj.divide(3)
return obj.increment()
self.checkScript(fn, ())
def gn() -> int:
obj = torch.classes._TorchScriptTesting._DefaultArgs(5)
obj.increment(3)
obj.increment()
obj.decrement(2)
obj.divide()
obj.scale_add(3)
obj.scale_add(3, 2)
obj.divide(2)
return obj.decrement()
self.checkScript(gn, ())
# repo_name: pytorch-master | file_path: test/jit/test_torchbind.py
# Owner(s): ["oncall: jit"]
import os
import sys
import inspect
import unittest
from typing import Any, Dict, List, NamedTuple, Optional, Tuple
from textwrap import dedent
from collections import OrderedDict
from torch import Tensor
import torch
import torch.nn as nn
import types
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfTorchDynamo
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestList(JitTestCase):
def test_list_bool_conversion(self):
def if_predicate(l: List[int]):
if l:
s = 0
for n in l:
s += n
return s
else:
return -1
self.checkScript(if_predicate, ([1, 2, 3],))
self.checkScript(if_predicate, ([],))
def while_predicate(l: List[int]):
s = 0
while l:
s += l.pop()
self.checkScript(while_predicate, ([1, 2, 3],))
self.checkScript(while_predicate, ([],))
def ternary_predicate(l: List[int]):
return "non-empty" if l else "empty"
self.checkScript(ternary_predicate, ([1, 2, 3],))
self.checkScript(ternary_predicate, ([],))
def test_in_check(self):
def int_in(x: List[int]) -> bool:
return 2 in x
self.checkScript(int_in, ([1, 2, 3],))
self.checkScript(int_in, ([1, 3, 3],))
def float_in(x: List[float]) -> bool:
return 2. in x
self.checkScript(float_in, ([1., 2., 3.],))
self.checkScript(float_in, ([1., 3., 3.],))
def str_in(x: List[str]) -> bool:
return 'hi' in x
self.checkScript(str_in, (['not', 'here'],))
self.checkScript(str_in, (['hi', 'bye'],))
self.checkScript(str_in, ([],))
def test_list_literal(self):
def reassign():
x = [1]
if 1 == 1:
x = [2, 3]
return
self.checkScript(reassign, (), optimize=False)
def reassign_arity_change():
x = [1]
if 1 == 1:
x = [1, 2, 3]
return
self.checkScript(reassign_arity_change, (), optimize=False)
def reassign_from_empty_literal():
x = []
if 1 == 1:
x = [1, 2, 3]
return
with self.assertRaisesRegexWithHighlight(RuntimeError, r"previously had type List\[Tensor\]", "x"):
self.checkScript(reassign_from_empty_literal, (), optimize=False)
def reassign_from_empty_builtin():
x = torch.jit.annotate(List[int], [])
if 1 == 1:
x = [1, 2, 3]
y = torch.jit.annotate(List[float], [])
if 1 == 1:
y = [1.0, 2.0, 3.0]
z = []
if 1 == 1:
z = [torch.randn([1])]
return
self.checkScript(reassign_from_empty_builtin, (), optimize=False)
def reassign_bad_type():
x = [1]
if 1 == 1:
x = [1.0]
return
with self.assertRaisesRegexWithHighlight(RuntimeError, "previously had type", "x"):
self.checkScript(reassign_bad_type, (), optimize=False)
def reassign_nested():
x = torch.jit.annotate(List[int], [])
if 1 == 1:
x = [1, 2, 3]
if 1 == 1:
x = [1.0]
return
with self.assertRaisesRegexWithHighlight(RuntimeError, "previously had type", "x"):
self.checkScript(reassign_nested, (), optimize=False)
def test_list_variance(self):
"""
`List[T1]` is not a subtype of `List[T2]`, even if `T1` is a
subtype of `T2`. However, if we have a temporary list object
(that is, a list comprehension or a list literal) on the rhs of
an assignment statement, we want to ignore the inferred type of
the rhs if we can prove that: 1) both the lhs and the rhs are
lists, and 2) the inner type of the rhs list is a subtype of the
inner type of the lhs list.
# This should pass
x: List[Optional[int]] = [None, None, None]
# This should fail
y: List[None] = [None, None, None]
x: List[Optional[int]] = y
"""
def test_listliteral_is_typed_from_annotation():
x: List[Optional[int]] = [None, None, None]
return x
self.checkScript(test_listliteral_is_typed_from_annotation, ())
def test_listcomprehension_is_typed_from_annotation():
x: List[Optional[int]] = [None for _ in range(3)]
return x
self.checkScript(test_listcomprehension_is_typed_from_annotation, ())
def test_lists_with_different_internal_types_are_invariant(self):
x: List[int] = [1, 2, 3]
y: List[Optional[int]] = x
return x
with self.assertRaisesRegex(RuntimeError, "Variable 'y' is "
"annotated with type "
r"List\[Optional\[int\]\] but is "
"being assigned to a value of type "
r"List\[int\]"):
torch.jit.script(test_lists_with_different_internal_types_are_invariant)
def test_lists_with_different_internal_types_are_invariant_recursive(self):
x: List[List[int]] = [[1, 2], [3]]
y: List[List[Optional[int]]] = x
return x
with self.assertRaisesRegex(RuntimeError, "Variable 'y' is "
"annotated with type "
r"List\[List\[Optional\[int\]\]\] "
"but is being assigned to a value "
r"of type List\[List\[int\]\]"):
torch.jit.script(test_lists_with_different_internal_types_are_invariant_recursive)
def test_del(self):
def inputs():
return [1, 2, 3, 4]
def fn(x: List[int]) -> List[int]:
del x[1]
return x
python_out = fn(inputs())
# checkScript reuses the same object, but here it's being mutated so do
# it manually
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn)))
self.assertEqual(cu.fn(inputs()), python_out)
self.assertEqual(torch.jit.script(fn)(inputs()), python_out)
@torch.jit.script
def fn2(x: List[int]) -> List[int]:
del x[100]
return x
with self.assertRaisesRegexWithHighlight(RuntimeError, "out of range", "x[100]"):
fn2([])
with self.assertRaisesRegexWithHighlight(RuntimeError, "deletion at a single index", "x[1:3]"):
@torch.jit.script
def fn(x: List[int]) -> List[int]:
del x[1:3]
return x
def test_list_keyword(self):
def foo():
return list([1, 2, 3]), list(("a", "b")), list(range(5)), list("abcdefg") # noqa: C410
self.checkScript(foo, ())
def foo2():
x: List[int] = list()
x.append(1)
return x,
self.checkScript(foo2, ())
def foo3():
return list(list("abc"))
self.checkScript(foo3, ())
FileCheck().check_count("aten::list", 2, exactly=True).run(torch.jit.script(foo3).graph)
def test_dict_keyword_with_kwargs(self):
def fn():
return dict(foo=1, bar=2, baz=3)
self.checkScript(fn, ())
def test_dict_keyword_with_kwargs_using_container_values(self):
def fn():
return dict(foo=[1, 2, 3], bar=[4, 5, 6], baz=[7, 8, 9])
self.checkScript(fn, ())
def test_dict_keyword_with_iterable(self):
def fn():
return dict([("foo", 1), ("bar", 2), ("baz", 3)]) # noqa: C406
self.checkScript(fn, ())
def test_dict_keyword_with_empty_iterable(self):
def fn():
return dict([]) # noqa: C406
self.checkScript(fn, ())
def test_dict_keyword_with_internal_aggregate_function(self):
def fn():
return dict(zip(["foo", "baz", "bar"], [1, 2, 3]))
self.checkScript(fn, ())
def test_dict_keyword_with_mapping(self):
def fn():
return dict({"foo" : 1, "bar" : 2, "baz" : 3})
self.checkScript(fn, ())
def test_dict_keyword_with_mapping_and_kwargs(self):
def fn():
return dict({"foo" : 1, "bar" : 2}, baz=3)
self.checkScript(fn, ())
def test_dict_keyword_with_dict_comprehension(self):
def fn():
return dict({i: chr(i + 65) for i in range(4)})
self.checkScript(fn, ())
def test_dict_keyword_with_dict_comprehension_and_kwargs(self):
def fn():
return dict({chr(65 + i) : i for i in range(4)}, foo=2)
self.checkScript(fn, ())
def test_dict_keyword_with_empty_dict_comprehension(self):
def fn():
return dict({})
self.checkScript(fn, ())
def test_dict_keyword_is_correctly_typed(self):
def fn():
x: Dict[str, int] = dict()
x["foo"] = 1
return x
self.checkScript(fn, ())
def test_dict_keyword_with_mismatched_annotations(self):
err_msg = r"Dict type annotation `Dict\[int, str\]` did not " \
"match the type of an actual key type `str`"
with self.assertRaisesRegex(RuntimeError, err_msg):
@torch.jit.script
def fn():
x: Dict[int, str] = dict([("foo", 1), ("bar", 2), ("baz", 3)]) # noqa: C406
return x
def test_dict_keyword_with_nested_call(self):
def fn():
return dict(dict(foo=1, bar=2, baz=3))
self.checkScript(fn, ())
def test_dict_keyword_with_previously_declared_variable(self):
def fn():
d = {"foo" : 1, "bar" : 2}
return dict(d)
self.checkScript(fn, ())
def test_dict_keyword_with_previously_declared_variable_and_kwargs(self):
def fn():
d = {"foo" : 1, "bar" : 2}
return dict(d, baz=3)
self.checkScript(fn, ())
def test_min_bool_list(self):
def jit_min_list(a: List[bool], b: List[bool]) -> List[bool]:
return min(a, b)
self.checkScript(jit_min_list, ([True, False], [False, True]))
def test_min_max_list(self):
def jit_min_list(a: List[int], b: List[int]) -> List[int]:
return min(a, b)
def jit_min_list_float(a: List[float], b: List[float]) -> List[float]:
return min(a, b)
def jit_min_list_bool(a: List[bool], b: List[bool]) -> List[bool]:
return min(a, b)
def run_tests(func, a, b):
for t in zip(a, b):
self.checkScript(func, t)
args_left_int = [[1, 8, 8], [2, 1, 1], [], [2], [1], [1, 2, 3]]
args_right_int = [[2, 1, 1], [1, 8, 8], [], [1], [], [1, 2]]
run_tests(jit_min_list, args_left_int, args_right_int)
args_left_float = [[1., 8., 8.], [2., 1., 1.], [], [2.], [1.], [1., 2., 3.]]
args_right_float = [[2., 1., 1.], [1., 8., 8.], [], [1.], [], [1., 2.]]
run_tests(jit_min_list_float, args_left_float, args_right_float)
args_left_bool = [[], [], [], [False], [True], [False, True], [True, True],
[False, False, False], [False, False, True]]
args_right_bool = [[], [False], [True], [True], [False], [True, True],
[False, True], [False, False, True], [False, False, False]]
run_tests(jit_min_list_bool, args_left_bool, args_right_bool)
def jit_max_list(a: List[int], b: List[int]) -> List[int]:
return max(a, b)
def jit_max_list_float(a: List[float], b: List[float]) -> List[float]:
return max(a, b)
def jit_max_list_bool(a: List[bool], b: List[bool]) -> List[bool]:
return max(a, b)
args_left_int = [[1, 8, 8], [8, 1, 1], [], [1], [], [1, 2]]
args_right_int = [[8, 1, 1], [1, 8, 8], [], [2], [1], [1, 2, 3]]
run_tests(jit_max_list, args_left_int, args_right_int)
args_left_float = [[1., 8., 8.], [8., 1., 1.], [], [1.], [], [1., 2.]]
args_right_float = [[8., 1., 1.], [1., 8., 8.], [], [2.], [1.], [1., 2., 3.]]
run_tests(jit_max_list_float, args_left_float, args_right_float)
run_tests(jit_max_list_bool, args_left_bool, args_right_bool)
def test_list_gather(self):
def index():
a = [1, 2, 3]
return a[1]
self.checkScript(index, ())
def negative_index():
a = [1, 2, 3]
return a[-1]
self.checkScript(negative_index, ())
def bad_index():
a = [1, 2, 3]
return a[4]
self.checkScriptRaisesRegex(bad_index, (), Exception,
"list index out of range")
def bad_negative_index():
a = [1, 2, 3]
return a[-5]
self.checkScriptRaisesRegex(bad_negative_index, (), Exception,
"list index out of range")
def test_list_len(self):
def func():
a = [1, 2, 3]
return len(a) == 3
self.checkScript(func, ())
def func2():
a = []
return len(a) == 0
self.checkScript(func2, ())
def test_list_ops(self):
def test_equality():
a = [1, 2, 3]
b = [1, 2, 3]
return a == b
self.checkScript(test_equality, (), optimize=True)
def test_equality_str():
a = ["foo", "bar"]
b = ["foo", "bar"]
return a == b
self.checkScript(test_equality_str, (), optimize=True)
def test_inequality():
a = [1, 2, 3]
b = [1, 2, 3]
return a != b
self.checkScript(test_inequality, (), optimize=True)
def test_inequality_str():
a = ["foo", "bar"]
b = ["foo", "bar", "food"]
return a != b
self.checkScript(test_inequality_str, (), optimize=True)
def test_non_equality():
a = [1, 2, 3]
b = [3]
return a == b
self.checkScript(test_non_equality, (), optimize=True)
def test_non_inequality():
a = [1, 2, 3]
b = [3]
return a != b
self.checkScript(test_non_inequality, (), optimize=True)
def test_list_equality_as_cond():
a = [1, 2, 3]
b = [3]
if a == b:
c = 1
else:
c = 2
return c
self.checkScript(test_list_equality_as_cond, (), optimize=True)
def test_list_add():
a = [1, 2, 3]
b = [2]
c = a + b
return c == [1, 2, 3, 2]
self.checkScript(test_list_add, (), optimize=True)
def test_list_add_empty():
a = [1, 2, 3]
b = torch.jit.annotate(List[int], [])
c = a + b
return c == [1, 2, 3]
self.checkScript(test_list_add_empty, (), optimize=True)
def test_tensor_list_equality():
t1 = torch.ones([1, 1])
t2 = torch.ones([1, 1])
x = [t1, t2]
y = [t2, t1]
return x == y
self.checkScript(test_tensor_list_equality, (), optimize=True)
def test_invalid_list_equality():
t1 = torch.ones([2, 2])
t2 = torch.ones([2, 2])
x = [t1, t2]
y = [t2, t1]
# will throw since the tensors have more than one element
return x == y
self.checkScriptRaisesRegex(
test_invalid_list_equality,
(),
RuntimeError,
"Boolean value of Tensor")
def test_list_sort(self):
template = dedent('''
def func():
li_1 = {list_create}
li_2 = {list_create}
li_3 = {list_create}
li_1.sort()
li_2.sort(reverse=True)
li_4 = sorted(li_3)
return li_1, li_2, li_3, li_4
''')
lists = ["[]", "[1, 3, 2]", "[True, False, True]", "[1.2, .2, 3.2]",
"[torch.tensor(1.0), torch.tensor(0.2), torch.tensor(0.5)]",
"[torch.tensor(5), torch.tensor(-2), torch.tensor(4)]"]
for li in lists:
code = template.format(list_create=li)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
t1 = cu.func()
t2 = scope['func']()
self.assertEqual(t1, t2)
def test_fail(x: List[Tensor]) -> List[Tensor]:
x.sort()
return x
self.checkScriptRaisesRegex(test_fail, (([torch.zeros([2]), torch.zeros([2])],)), Exception,
"Boolean value of Tensor with more than one value")
@torch.jit.script
def test_mutation():
a = [1, 2, 3]
a.sort()
return a
test_mutation()
FileCheck().check("aten::sort").run(test_mutation.graph_for())
def test_sorted_copy():
a = [torch.tensor(2), torch.tensor(0), torch.tensor(1)]
b = sorted(a)
a[0] = torch.tensor(10)
return a, b
self.checkScript(test_sorted_copy, ())
def test_list_slice(self):
def test_regular_slice():
a = [0, 1, 2, 3, 4]
return a[2:3] == [2]
self.checkScript(test_regular_slice, ())
def test_open_ended_slice():
a = [0, 1, 2, 3, 4]
return a[2:] == [2, 3, 4]
self.checkScript(test_open_ended_slice, ())
def test_open_ended_slice2():
a = [0, 1, 2, 3, 4]
return a[:2] == [0, 1]
self.checkScript(test_open_ended_slice2, ())
def test_negative_slice():
a = [0, 1, 2, 3, 4]
return a[:-1] == [0, 1, 2, 3]
self.checkScript(test_negative_slice, ())
def test_negative_slice2():
a = [0, 1, 2, 3, 4]
return a[-3:-1] == [2, 3]
self.checkScript(test_negative_slice2, ())
def test_backward_slice():
a = [0, 1, 2, 3, 4]
return a[3:2] == torch.jit.annotate(List[int], [])
self.checkScript(test_backward_slice, ())
def test_over_slice():
a = [0, 1, 2, 3, 4]
return a[3:10] == [3, 4]
self.checkScript(test_over_slice, ())
def test_slice_index(self):
a = torch.tensor(
[
[[1, 11], [2, 22]],
[[3, 33], [4, 44]],
[[5, 55], [6, 66]],
]
)
def test_index_slice1(x):
x = x[:, :, [0, 1]]
return x
self.checkScript(test_index_slice1, (a,))
def test_index_slice2(x):
x = x[[2, 1, 0], :, :]
return x
self.checkScript(test_index_slice2, (a,))
def test_index_slice3(x):
x = x[[0, 1], :, [1]]
return x
self.checkScript(test_index_slice3, (a,))
def test_index_slice_empty_list(x):
empty_list: List[int] = []
x = x[empty_list, :, :]
return x
self.checkScript(test_index_slice_empty_list, (a,))
def test_index_slice_out_of_bounds_index(x):
x = x[[4], :, :]
return x
with self.assertRaisesRegexWithHighlight(RuntimeError, "index 4 is out of bounds for dimension 0 with size 3",
"x[[4], :, :]"):
self.checkScript(test_index_slice_out_of_bounds_index, (a,))
def test_mutable_list_append(self):
def test_append():
a = [0, 1]
a.append(2)
a.append(3)
return a == [0, 1, 2, 3]
self.checkScript(test_append, ())
def test_comprehensions_basic(self):
def comp(l: List[int]) -> List[int]:
n = [x * 3 for x in l]
return n
comp([1, 2, 3])
self.checkScript(comp, ([1, 2, 3],))
def test_comprehensions_basic_float(self):
def comp(l: List[float]) -> List[float]:
n = [x * 3 for x in l]
return n
self.checkScript(comp, ([1.0, 2.0, 3.0],))
def test_comprehensions_two_comps(self):
@torch.jit.script
def comp(l1: List[int], l2: List[int]) -> List[int]:
n = [x * 3 for x in l1]
n2 = [x + 2 for x in l2]
return n + n2
self.assertEqual(comp([1, 2, 3], [4, 5]), [3, 6, 9, 6, 7])
def test_comprehension_out_type_not_in_type(self):
def list_cast() -> int:
li = [int(i) for i in [torch.tensor(0), torch.tensor(1), torch.tensor(2)]]
return li[0] + li[1] + li[2]
self.checkScript(list_cast, ())
def test_comprehension_iterable(self):
def test_func(fn, inputs):
self.assertEqual(fn(*inputs), torch.jit.script(fn)(*inputs))
def foo(names: List[int], results: List[int]) -> List[Tuple[int, int]]:
return [(k + 5, v - 2) for k, v in zip(names, results)]
test_func(foo, ([1, 2, 4], [4, 7, 9]))
test_func(foo, ([5], [4, 7, 9]))
def fn(x: int) -> List[int]:
return [i for i in range(x)] # noqa: C416
test_func(fn, (9,))
test_func(fn, (0,))
test_func(fn, (-1,))
def changes_type():
a = [float(i) for i in range(5)]
b = [float(i) for i in [1, 2, 3, 4]]
c = [(float(i), j) for i, j in enumerate([1, 2, 3, 8])]
return a, b, c
test_func(changes_type, ())
def test_zero_iter():
return [str(i) for i, j in zip("", "")]
test_func(test_zero_iter, ())
def test_mutable_list_append_2(self):
def test_append_2():
a = [0, 1]
a.append(2)
a = [1]
a.append(4)
return a == [1, 4]
self.checkScript(test_append_2, ())
def test_mutable_list_append_if(self):
def test_append_if():
a = [1]
if 1 == 1:
a.append(4)
return a == [1, 4]
self.checkScript(test_append_if, ())
def test_mutable_list_append_if_else(self):
def test_append_if_else():
a = [1]
if 1 == 2:
a.append(4)
else:
a.append(10)
return a == [1, 10]
self.checkScript(test_append_if_else, ())
def test_mutable_list_append_loop(self):
def test_append_loop():
a = torch.jit.annotate(List[int], [])
for i in range(5):
a.append(i)
return a == [0, 1, 2, 3, 4]
self.checkScript(test_append_loop, ())
def test_mutable_list_append_loop_if(self):
def test_append_loop_if():
a = torch.jit.annotate(List[int], [])
for i in range(5):
if i > 3:
a.append(i)
else:
a.append(0)
return a == [0, 0, 0, 0, 4]
self.checkScript(test_append_loop_if, ())
def test_mutable_list_nested_loop(self):
def test_nested_loop():
a = torch.jit.annotate(List[int], [])
for i in range(2):
for j in range(2):
a.append(i + j)
return a == [0, 1, 1, 2]
self.checkScript(test_nested_loop, ())
def test_mutable_list_function_inline(self):
@torch.jit.script
def bar(y: List[int]) -> None:
y.append(4)
@torch.jit.script
def foo():
x = [1, 2, 3]
bar(x)
return x
self.assertEqual(foo(), [1, 2, 3, 4])
def test_mutable_list_reverse_empty(self):
def test_reverse_empty():
a = []
a.reverse()
return a == []
self.checkScript(test_reverse_empty, ())
def test_mutable_list_reverse(self):
def test_reverse():
a = [1, 2, 3, 4]
a.reverse()
return a == [4, 3, 2, 1]
self.checkScript(test_reverse, ())
def test_mutable_tensor_list_reverse(self):
def test_tensor_reverse():
a = [torch.tensor(1), torch.tensor(2)]
a.reverse()
return a == [torch.tensor(2), torch.tensor(1)]
self.checkScript(test_tensor_reverse, ())
def test_mutable_list_pop_empty(self):
@torch.jit.script
def test_pop_empty():
a = torch.jit.annotate(List[int], [])
return a.pop()
with self.assertRaisesRegexWithHighlight(RuntimeError, "pop from empty list", "a.pop"):
test_pop_empty()
def test_mutable_list_pop(self):
def test_pop():
a = [1, 2, 3, 4]
b = a.pop()
return b == 4
self.checkScript(test_pop, ())
def test_mutable_list_pop2(self):
def test_pop2():
a = [1, 2, 3, 4]
b = a.pop()
return len(a) == 3
self.checkScript(test_pop2, ())
def test_mutable_list_pop_at(self):
def test_pop_at():
a = [1, 2, 3, 4]
b = a.pop(1)
return b == 2
self.checkScript(test_pop_at, ())
def test_mutable_list_pop_at2(self):
def test_pop_at2():
a = [1, 2, 3, 4]
b = a.pop(1)
return len(a) == 3
self.checkScript(test_pop_at2, ())
def test_mutable_list_pop_at_negative(self):
def test_pop_at_negative():
a = [1, 2, 3, 4]
b = a.pop(-2)
return b == 3
self.checkScript(test_pop_at_negative, ())
def test_mutable_list_pop_at_negative2(self):
def test_pop_at_negative2():
a = [1, 2, 3, 4]
b = a.pop(-2)
return len(a) == 3
self.checkScript(test_pop_at_negative2, ())
def test_mutable_list_pop_slice(self):
def test_pop_slice():
a = [1, 2, 3, 4]
b = [1, 2, 3, 4]
a.pop()
b = b[:-1]
return a == b
self.checkScript(test_pop_slice, ())
def test_mutable_list_clear_empty(self):
def test_clear_empty():
a = torch.jit.annotate(List[int], [])
a.clear()
return len(a) == 0
self.checkScript(test_clear_empty, ())
def test_mutable_list_clear(self):
def test_clear():
a = [1, 2, 3, 4]
a.clear()
return len(a) == 0
self.checkScript(test_clear, ())
def test_mutable_list_insert(self):
def test_list_insert():
a = [1, 2, 3, 4]
a.insert(2, 5)
return a == [1, 2, 5, 3, 4]
self.checkScript(test_list_insert, ())
def test_mutable_list_insert_negative(self):
def test_list_insert_negative():
a = [1, 2, 3, 4]
a.insert(-1, 5)
return a == [1, 2, 3, 5, 4]
self.checkScript(test_list_insert_negative, ())
def test_mutable_list_insert_neg_out_of_bounds(self):
def test_list_insert_neg_out_of_bounds():
a = [1, 2, 3, 4]
a.insert(-10, 5)
return a == [5, 1, 2, 3, 4]
self.checkScript(test_list_insert_neg_out_of_bounds, ())
def test_mutable_list_insert_out_of_bounds(self):
def test_list_insert_out_of_bounds():
a = [1, 2, 3, 4]
a.insert(10, 5)
return a == [1, 2, 3, 4, 5]
self.checkScript(test_list_insert_out_of_bounds, ())
def test_mutable_list_remove_not_existing(self):
@torch.jit.script
def test_list_remove_not_existing():
a = [1, 2, 3, 4]
a.remove(5)
return a
with self.assertRaisesRegexWithHighlight(RuntimeError, "x not in list", "a.remove"):
test_list_remove_not_existing()
def test_mutable_list_remove(self):
def test_list_remove():
a = [1, 2, 3, 4]
a.remove(3)
return a == [1, 2, 4]
self.checkScript(test_list_remove, ())
def test_str_list_remove():
a = ["foo", "bar"]
a.remove("foo")
return a == ["bar"]
self.checkScript(test_str_list_remove, ())
def test_list_index_not_existing(self):
@torch.jit.script
def list_index_not_existing():
a = [4, 1, 3, 2]
i = a.index(5)
return i
with self.assertRaisesRegexWithHighlight(RuntimeError, "'5' is not in list", "a.index"):
list_index_not_existing()
def test_list_index(self):
def list_index():
a = [4, 1, 3, 2]
i = a.index(3)
return i == 2
self.checkScript(list_index, ())
def list_str_index():
a = ["foo", "bar"]
i = a.index("bar")
return i == 1
self.checkScript(list_str_index, ())
def test_tensor_list_index(self):
def tensor_list_index():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(3), torch.tensor(2)]
i = a.index(torch.tensor(3))
return i == 2
self.checkScript(tensor_list_index, ())
def test_tensor_list_index_not_existing(self):
@torch.jit.script
def tensor_list_index_not_existing():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(3), torch.tensor(2)]
i = a.index(torch.tensor(5))
return i
with self.assertRaisesRegexWithHighlight(RuntimeError, "is not in list", "a.index"):
tensor_list_index_not_existing()
def test_list_count(self):
def list_count():
a = [4, 1, 4, 2, 4]
i = a.count(4)
return i == 3
self.checkScript(list_count, ())
def list_str_count():
a = ["foo", "bar", "foo"]
i = a.count("foo")
return i == 2
self.checkScript(list_str_count, ())
def test_list_count_not_existing(self):
def list_count_not_existing():
a = [4, 1, 4, 2, 4]
i = a.count(5)
return i == 0
self.checkScript(list_count_not_existing, ())
def test_tensor_list_count(self):
def tensor_list_count():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(4), torch.tensor(4)]
i = a.count(torch.tensor(4))
return i == 3
self.checkScript(tensor_list_count, ())
def test_tensor_list_count_not_existing(self):
def tensor_list_count_not_existing():
a = [torch.tensor(4), torch.tensor(1), torch.tensor(4), torch.tensor(4)]
i = a.count(torch.tensor(5))
return i == 0
self.checkScript(tensor_list_count_not_existing, ())
def test_mutable_list_remove_tensor(self):
def test_list_remove_tensor():
a = [torch.ones(1), torch.zeros(1), torch.ones(2)]
a.remove(torch.zeros(1))
return len(a) == 2
self.checkScript(test_list_remove_tensor, ())
def test_mutable_list_remove2(self):
def test_list_remove2():
a = [1]
a.remove(1)
return len(a) == 0
self.checkScript(test_list_remove2, ())
def test_extend_list_mutable(self):
@torch.jit.script
def extend_list(a: List[Tensor], b: List[Tensor]) -> List[Tensor]:
a.extend(b)
return a
for l in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
for r in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
self.assertEqual(extend_list(l, r), l + r)
def test_extend_list_immutable(self):
@torch.jit.script
def extend_list(a: List[int], b: List[int]) -> List[int]:
a.extend(b)
return a
for l in [[], [1], [1, 2, 3]]:
for r in [[], [1], [1, 2, 3]]:
self.assertEqual(extend_list(l, r), l + r)
def test_copy_list_mutable(self):
@torch.jit.script
def copy_list(a: List[Tensor]) -> List[Tensor]:
return a.copy()
for l in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
self.assertEqual(copy_list(l), l)
def test_copy_list_immutable(self):
@torch.jit.script
def copy_list(a: List[int]) -> List[int]:
return a.copy()
for l in [[], [1], [1, 2, 3]]:
self.assertEqual(copy_list(l), l)
def test_min_max_single_list(self):
def min_intlist(li: List[int]) -> int:
return min(li)
def max_intlist(li: List[int]) -> int:
return max(li)
def min_boollist(li: List[bool]) -> bool:
return min(li)
def max_boollist(li: List[bool]) -> bool:
return max(li)
def min_floatlist(li: List[float]) -> float:
return min(li)
def max_floatlist(li: List[float]) -> float:
return max(li)
int_lists = [1], [2, 1, 2], [-3, 4, 2], [-2, -7, 1, 4], [2, 1, 0, 4], []
def check_list(fn, li):
if len(li) == 0:
self.checkScriptRaisesRegex(fn, (li,), Exception, "arg is an empty sequence")
else:
self.checkScript(fn, (li,))
for int_list in int_lists:
check_list(min_intlist, int_list)
check_list(max_intlist, int_list)
bool_li = [bool(x) for x in int_list]
check_list(min_boollist, bool_li)
check_list(max_boollist, bool_li)
float_li = [float(x) for x in int_list]
check_list(min_floatlist, float_li)
check_list(max_floatlist, float_li)
def test_to_list(self):
"""Unit tests for Tensor.tolist() function."""
"""
Boolean dtype unit tests.
"""
def to_list_bool_0D(x: torch.Tensor) -> bool:
li = torch.jit.annotate(bool, x.tolist())
return li
def to_list_bool_1D(x: torch.Tensor) -> List[bool]:
li = torch.jit.annotate(List[bool], x.tolist())
return li
def to_list_bool_2D(x: torch.Tensor) -> List[List[bool]]:
li = torch.jit.annotate(List[List[bool]], x.tolist())
return li
def to_list_bool_3D(x: torch.Tensor) -> List[List[List[bool]]]:
li = torch.jit.annotate(List[List[List[bool]]], x.tolist())
return li
self.checkScript(to_list_bool_0D, (torch.tensor(False, dtype=torch.bool),))
bool_input_1D = torch.tensor([True, False, True, False], dtype=torch.bool)
self.checkScript(to_list_bool_1D, (bool_input_1D,))
bool_input_2D = torch.tensor(
[[True, True, False], [False, True, False]], dtype=torch.bool
)
self.checkScript(to_list_bool_2D, (bool_input_2D,))
bool_input_3D = torch.tensor(
[[[True, False], [False, True]], [[True, False], [False, False]]],
dtype=torch.bool,
)
self.checkScript(to_list_bool_3D, (bool_input_3D,))
bool_input_noncontiguous = torch.tensor(
[[[True, False], [False, True]], [[True, False], [False, False]]],
dtype=torch.bool,
).transpose(0, 1)
self.checkScript(to_list_bool_3D, (bool_input_noncontiguous,))
"""
Int dtype unit tests.
"""
def to_list_int_0D(x: torch.Tensor) -> int:
li = torch.jit.annotate(int, x.tolist())
return li
def to_list_int_1D(x: torch.Tensor) -> List[int]:
li = torch.jit.annotate(List[int], x.tolist())
return li
def to_list_int_2D(x: torch.Tensor) -> List[List[int]]:
li = torch.jit.annotate(List[List[int]], x.tolist())
return li
def to_list_int_3D(x: torch.Tensor) -> List[List[List[int]]]:
li = torch.jit.annotate(List[List[List[int]]], x.tolist())
return li
self.checkScript(to_list_int_0D, (torch.tensor(1, dtype=torch.long),))
int_input_1D = torch.tensor([1, 2, 3, 4], dtype=torch.long)
self.checkScript(to_list_int_1D, (int_input_1D,))
int_input_2D = torch.tensor([[1, 2, 3], [3, 4, 5]], dtype=torch.long)
self.checkScript(to_list_int_2D, (int_input_2D,))
int_input_3D = torch.tensor(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=torch.long
)
self.checkScript(to_list_int_3D, (int_input_3D,))
int_input_noncontiguous = torch.tensor(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=torch.long
).transpose(0, 1)
self.checkScript(to_list_int_3D, (int_input_noncontiguous,))
"""
Float dtype unit tests.
"""
def to_list_float_0D(x: torch.Tensor) -> float:
li = torch.jit.annotate(float, x.tolist())
return li
def to_list_float_1D(x: torch.Tensor) -> List[float]:
li = torch.jit.annotate(List[float], x.tolist())
return li
def to_list_float_2D(x: torch.Tensor) -> List[List[float]]:
li = torch.jit.annotate(List[List[float]], x.tolist())
return li
def to_list_float_3D(x: torch.Tensor) -> List[List[List[float]]]:
li = torch.jit.annotate(List[List[List[float]]], x.tolist())
return li
# Test with torch.float dtype Tensors to check that they are converted to double automatically.
self.checkScript(to_list_float_0D, (torch.randn(5, dtype=torch.float)[0],))
self.checkScript(to_list_float_1D, (torch.randn(5, dtype=torch.float),))
self.checkScript(to_list_float_2D, (torch.randn(5, 6, dtype=torch.float),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.float),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.float).transpose(0, 1),))
self.checkScript(to_list_float_0D, (torch.randn(5, dtype=torch.double)[0],))
self.checkScript(to_list_float_1D, (torch.randn(5, dtype=torch.double),))
self.checkScript(to_list_float_2D, (torch.randn(5, 6, dtype=torch.double),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.double),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.double).transpose(0, 1),))
"""
Complex dtype unit tests.
"""
def to_list_complex_0D(x: torch.Tensor) -> complex:
li = torch.jit.annotate(complex, x.tolist())
return li
def to_list_complex_1D(x: torch.Tensor) -> List[complex]:
li = torch.jit.annotate(List[complex], x.tolist())
return li
def to_list_complex_2D(x: torch.Tensor) -> List[List[complex]]:
li = torch.jit.annotate(List[List[complex]], x.tolist())
return li
def to_list_complex_3D(x: torch.Tensor) -> List[List[List[complex]]]:
li = torch.jit.annotate(List[List[List[complex]]], x.tolist())
return li
# Test with torch.complex dtype Tensors to check that they are converted to double automatically.
self.checkScript(to_list_complex_0D, (torch.randn(5, dtype=torch.cfloat)[0],))
self.checkScript(to_list_complex_1D, (torch.randn(5, dtype=torch.cfloat),))
self.checkScript(to_list_complex_2D, (torch.randn(5, 6, dtype=torch.cfloat),))
self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cfloat),))
self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cfloat).transpose(0, 1),))
self.checkScript(to_list_complex_0D, (torch.randn(5, dtype=torch.cdouble)[0],))
self.checkScript(to_list_complex_1D, (torch.randn(5, dtype=torch.cdouble),))
self.checkScript(to_list_complex_2D, (torch.randn(5, 6, dtype=torch.cdouble),))
self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cdouble),))
self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cdouble).transpose(0, 1),))
"""
Non-happy path tests:
- missing type annotation
- mismatch between type annotation and input
- type annotation with unsupported type
- type annotation with the wrong dimension
- type annotation with scalar type that doesn't match the input scalar type
"""
def to_list_missing_type_annotation(x: torch.Tensor) -> List[float]:
li = x.tolist()
return li
def to_list_incorrect_type_annotation(x: torch.Tensor) -> List[float]:
li = torch.jit.annotate(float, x.tolist())
return li
def to_list_unsupported_type_annotation(x: torch.Tensor) -> List[float]:
li = torch.jit.annotate(List[str], x.tolist())
return li
def to_list_type_annotation_wrong_dim(x: torch.Tensor) -> List[List[float]]:
li = torch.jit.annotate(List[List[float]], x.tolist())
return li
def to_list_type_annotation_incorrect_scalar_type(x: torch.Tensor) -> List[float]:
li = torch.jit.annotate(List[float], x.tolist())
return li
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"Expected type hint for result of tolist()",
"x.tolist("
):
self.checkScript(to_list_missing_type_annotation, (torch.randn(5),))
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"Return value was annotated as having type List\[float\] but is actually of type float",
"return li"
):
self.checkScript(to_list_incorrect_type_annotation, (torch.randn(5),))
with self.assertRaisesRegex(
RuntimeError, r"str is not one of the supported element types for tolist"
):
self.checkScript(to_list_unsupported_type_annotation, (torch.randn(5),))
with self.assertRaisesRegex(
RuntimeError,
r"Output annotation list dimension and runtime tensor dimension must match",
):
self.checkScript(to_list_type_annotation_wrong_dim, (torch.randn(5, dtype=torch.double),))
with self.assertRaisesRegex(
RuntimeError,
r"Output annotation element type and runtime tensor element type must match",
):
self.checkScript(
to_list_type_annotation_incorrect_scalar_type,
(torch.ones(5, dtype=torch.long),),
)
def test_to_list_gpu(self):
"""GPU tests for Tensor.tolist() function."""
if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
self.skipTest("CUDA is not available")
def to_list_bool_1D(x: torch.Tensor) -> List[bool]:
li = torch.jit.annotate(List[bool], x.tolist())
return li
def to_list_int_1D(x: torch.Tensor) -> List[int]:
li = torch.jit.annotate(List[int], x.tolist())
return li
def to_list_float_1D(x: torch.Tensor) -> List[float]:
li = torch.jit.annotate(List[float], x.tolist())
return li
self.checkScript(to_list_bool_1D, (torch.tensor(
[True, False, True, False], dtype=torch.bool).cuda(),))
self.checkScript(to_list_int_1D, (torch.tensor(
[1, 2, 3, 4], dtype=torch.long).cuda(),))
self.checkScript(to_list_float_1D, (torch.randn(
5, dtype=torch.double).cuda(),))
def test_no_element_type_annotation(self):
def fn_with_comment(x: torch.Tensor) -> List:
a: List = x.tolist()
return a
def annotated_fn(x: torch.Tensor) -> List:
a: List = x.tolist()
return a
with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
torch.jit.script(fn_with_comment)
with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
torch.jit.script(annotated_fn)
def test_list_none(self):
with self.assertRaisesRegex(RuntimeError, "Can not create ListType with None type"):
x = torch._C.ListType(None)
def test_list_unification_hint(self):
with self.assertRaisesRegex(RuntimeError, "Expected an annotation of type List"):
@torch.jit.script
def x():
b : int = [2, 3]
return b
class TestDict(JitTestCase):
def dict(self):
return {u'a': torch.ones(1), u'b': torch.ones(1) + 1, u'c': torch.ones(1) + 2}
def dict2(self):
return {'x': torch.ones(1) + 100, 'y': torch.ones(1) + 101, 'z': torch.ones(1) + 102}
def dict_bool(self):
return {True: 1}
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_dict_bool_conversion(self):
def if_predicate(d: Dict[int, int]):
if d:
s, t = 0, 0
for k, v in d.items():
s += k
t += v
return s, t
else:
return -1, -1
self.checkScript(if_predicate, ({1: 2, 3: 5},))
self.checkScript(if_predicate, ({},))
def while_predicate(d: Dict[int, int]):
while d:
d.clear()
self.checkScript(while_predicate, ({1: 2, 3: 5},))
self.checkScript(while_predicate, ({},))
def ternary_predicate(d: Dict[int, int]):
return "non-empty" if d else "empty"
self.checkScript(ternary_predicate, ({1: 2, 3: 5},))
self.checkScript(ternary_predicate, ({},))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_del(self):
def inputs():
return {'hi': 2, 'bye': 3}
def fn(x: Dict[str, int]) -> Dict[str, int]:
del x['hi']
return x
python_out = fn(inputs())
# checkScript reuses the same object, but here it's being mutated so do
# it manually
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn)))
self.assertEqual(cu.fn(inputs()), python_out)
self.assertEqual(torch.jit.script(fn)(inputs()), python_out)
with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", "x['hi']"):
self.checkScript(fn, [{}])
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_dict_variance(self):
"""
`Dict[T1, _]` is not a subtype of `Dict[T2, _]`, even if `T1` is
a subtype of `T2`; similarly `Dict[_, T1]` would not be a
subtype of `Dict[_, T2]`.
However, if we have a temporary dict object (that is, a dict
comprehension or a dict literal) on the rhs of an assignment
statement, we want to ignore the inferred type of the rhs if we
can prove that: 1) both the lhs and the rhs are dicts with the
same key types (TorchScript has a restricted set of allowed key
types, so we don't need to worry about subtyping relationships
here), and 2) the value type of the rhs dict is a subtype of the
value type of the lhs dict.
"""
def test_dictliteral_is_typed_from_annotation():
x: Dict[str, Optional[int]] = {"foo": None, "bar": None, "baz": None}
return x
self.checkScript(test_dictliteral_is_typed_from_annotation, ())
def test_dictcomprehension_is_typed_from_annotation():
metasyntactics = ["foo", "bar", "baz"]
x: Dict[str, Optional[int]] = {word: None for word in metasyntactics}
return x
self.checkScript(test_dictcomprehension_is_typed_from_annotation, ())
def test_dicts_with_different_value_types_are_invariant(self):
x: Dict[str, int] = {"foo": 1, "bar": 2, "baz": 3}
y: Dict[str, Optional[int]] = x
return x
with self.assertRaisesRegex(RuntimeError, "Variable 'y' is "
"annotated with type "
r"Dict\[str, Optional\[int\]\] but "
"is being assigned to a value of "
r"type Dict\[str, int\]"):
torch.jit.script(test_dicts_with_different_value_types_are_invariant)
def test_dicts_with_different_value_types_are_invariant_recursive(self):
x: Dict[str, int] = {"foo": 1, "bar": 2, "baz": 3}
y: Dict[str, Dict[str, int]] = {"foo": x, "bar": x, "baz": x}
z: Dict[str, Dict[str, Optional[int]]] = y
return x
with self.assertRaisesRegex(RuntimeError, "Variable 'z' is "
"annotated with type "
r"Dict\[str, Dict\[str, Optional"
r"\[int\]\]\] but is being assigned"
r" to a value of type Dict\[str, "
r"Dict\[str, int\]\]"):
torch.jit.script(test_dicts_with_different_value_types_are_invariant_recursive)
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_keys(self):
@torch.jit.script
def keys(x: Dict[str, Tensor]) -> List[str]:
return list(x.keys())
self.assertEqual(set(keys(self.dict())), set(self.dict().keys()))
@torch.jit.script
def specialized_list():
li = {1: 1, 2: 2}.keys()
li.append(3)
return li
self.assertTrue(set(specialized_list()) == set([1, 2, 3]))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_values(self):
@torch.jit.script
def values(x: Dict[str, Tensor]) -> List[Tensor]:
return list(x.values())
the_dict = self.dict()
self.assertEqual(set(values(the_dict)), set(the_dict.values()))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_len(self):
def length(x: Dict[str, Tensor]) -> int:
return len(x)
self.checkScript(length, (self.dict(),))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_copy(self):
def func(x: Dict[str, Tensor]) -> Dict[str, Tensor]:
return x.copy()
self.checkScript(func, (self.dict(),))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_items(self):
def func(x: Dict[str, Tensor]) -> List[Tuple[str, Tensor]]:
return x.items()
# The value returned by Python is in arbitrary order, so we can't use
# checkScript
scripted_func = torch.jit.script(func)
eager_out = (func(self.dict()))
script_out = (scripted_func(self.dict()))
self.assertEqual(len(eager_out), len(script_out))
for item in eager_out:
self.assertTrue(item in script_out)
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_pop(self):
def pop(x: Dict[str, Tensor], key: str) -> Tuple[Tensor, Dict[str, Tensor]]:
return x.pop(key), x
# checkScript doesn't copy the inputs, so we can't use it since this mutates
# the dict
def tester(fn, *args):
eager_out = fn(self.dict(), *args)
script_out = torch.jit.script(fn)(self.dict(), *args)
self.assertEqual(eager_out, script_out)
tester(pop, 'a')
with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", "x.pop"):
torch.jit.script(pop)(self.dict(), 'x')
def default_pop(x: Dict[str, Tensor], key: str, default: Tensor) -> Tuple[Tensor, Dict[str, Tensor]]:
return x.pop(key, default), x
tester(default_pop, 'a', torch.randn(2, 2))
tester(default_pop, 'x', torch.randn(2, 2))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_setdefault(self):
def setdefault(x: Dict[str, Tensor], key: str, default: Tensor) -> Dict[str, Tensor]:
x.setdefault(key, default)
return x
self.checkScript(setdefault, (self.dict(), 'a', torch.randn(2, 2)))
self.checkScript(setdefault, (self.dict(), 'nonexistent', torch.randn(2, 2)))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_update(self):
def update(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> Tuple[Dict[str, Tensor], Dict[str, Tensor]]:
a.update(b)
return a, b
self.checkScript(update, (self.dict(), self.dict()))
self.checkScript(update, (self.dict(), self.dict2()))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_update_existing_key(self):
def foo() -> Dict[str, int]:
a: Dict[str, int] = {}
for i in range(3):
a.update({'a': i})
return a
self.checkScript(foo, ())
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_aug_assign(self):
def aug_assign_dict_tensor(a: Dict[str, Tensor]) -> Dict[str, Tensor]:
a['a'] += 1
a['b'] -= 12
a['c'] *= 122
a['c'] /= 2
a['c'] %= 2
return a
def aug_assign_dict_prim(a: Dict[str, float]) -> Dict[str, float]:
a['a'] += 3.4
a['b'] -= 2.4
a['c'] *= 3.0
a['c'] /= 2.0
a['c'] %= 2.0
return a
self.checkScript(aug_assign_dict_tensor, (self.dict(),))
self.checkScript(aug_assign_dict_prim, ({'a': 3.0, 'b': 2.0, 'c': 4.0},))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_popitem(self):
@torch.jit.script
def popitem(x: Dict[str, Tensor]) -> Tuple[Tuple[str, Tensor], Dict[str, Tensor]]:
item = x.popitem()
return item, x
# The value returned by Python is arbitrary, so we can't use checkScript
eager_in = self.dict()
eager_out = (eager_in.popitem(), eager_in)
script_out = popitem(self.dict())
# Check that an item was removed
self.assertEqual(len(eager_out[1]), len(script_out[1]))
# Check that the item is the correct types
self.assertTrue(isinstance(script_out[0][0], str))
self.assertTrue(isinstance(script_out[0][1], torch.Tensor))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_clear(self):
def clear(x: Dict[str, Tensor]) -> Dict[str, Tensor]:
x.clear()
return x
self.checkScript(clear, (self.dict(),))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_get(self):
def get(x: Dict[str, Tensor], key: str) -> Optional[Tensor]:
return x.get(key)
self.checkScript(get, (self.dict(), 'a'))
self.checkScript(get, (self.dict(), "doesn't exist"))
def get_default(x: Dict[str, Tensor], key: str) -> Optional[Tensor]:
return x.get(key, torch.randn(2, 2))
self.checkScript(get, (self.dict(), 'a'))
self.checkScript(get, (self.dict(), "doesn't exist"))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_get_boolkey(self):
def get(x: Dict[bool, int], key: bool) -> Optional[int]:
return x.get(key)
self.checkScript(get, (self.dict_bool(), True))
self.checkScript(get, (self.dict_bool(), False))
def get_default(x: Dict[bool, int], key: bool) -> int:
return x.get(key, 42)
self.checkScript(get_default, (self.dict_bool(), True))
self.checkScript(get_default, (self.dict_bool(), False))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_basic(self):
def simple(x: Dict[str, int]) -> Dict[str, int]:
return x
self.checkScript(simple, ({'item': 20, 'other_item': 120},))
def index(x: Dict[str, int]) -> int:
return x['item']
self.checkScript(index, ({'item': 20, 'other_item': 120},))
def type_default() -> Dict[str, Tensor]:
return {}
self.checkScript(type_default, ())
@torch.jit.script
def missing_index(x: Dict[str, int]) -> int:
return x['dne']
with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", "x['dne'"):
missing_index({'item': 20, 'other_item': 120})
code = dedent('''
def literal1():
return torch.jit.annotate(Dict[int, float], {})
def literal2():
return torch.jit.annotate(Dict[int, float], {10: 1.2})
''')
cu = torch.jit.CompilationUnit(code)
self.assertEqual({}, cu.literal1())
self.assertEqual({10: 1.2}, cu.literal2())
cu = torch.jit.CompilationUnit(dedent('''
def literal3():
return torch.jit.annotate(Dict[int, float], {10: 1.2, 11: 1.3})
'''))
self.assertEqual({10: 1.2, 11: 1.3}, cu.literal3())
def list_of_dicts() -> List[Dict[str, Tensor]]:
return [{'word': torch.ones(2) + 3}, {'other word': torch.ones(1) + 2}]
self.checkScript(list_of_dicts, ())
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_mutability(self):
@torch.jit.script
def fn() -> Dict[str, int]:
a = torch.jit.annotate(Dict[str, int], {})
a['ok'] = 10
return a
self.assertEqual(fn(), {'ok': 10})
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_key_type(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, "but instead found type", "a[None]"):
@torch.jit.script
def fn(a: Dict[str, int]) -> int:
return a[None]
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_loop(self):
@torch.jit.script
def fn(x: int) -> Dict[str, int]:
a = torch.jit.annotate(Dict[str, int], {})
for i in range(x):
a['ok'] = i
return a
self.assertEqual(fn(10), {'ok': 9})
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_view(self):
def fn(x, y):
l = {"a": x}
x_view = l["a"]
a = x + x
x_view.add_(y)
b = x + x
return a == b
self.checkScript(fn, (torch.rand(2, 3), torch.rand(2, 3)))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_membership(self):
def fn(x: Dict[int, int], y: int) -> int:
return x.get(y, 3)
d = {1: 2, 3: 4}
self.checkScript(fn, (d, 3))
self.checkScript(fn, (d, 2))
def optional(x: Dict[int, int], y: int) -> bool:
res = x.get(y)
return res is None
self.checkScript(optional, (d, 3))
self.checkScript(optional, (d, 2))
with self.assertRaisesRegexWithHighlight(RuntimeError, "is actually of type Optional", "return x.get(y"):
@torch.jit.script
def bad_types(x: Dict[int, int], y: int) -> int:
return x.get(y) # noqa: T484
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_dict_to_python(self):
@torch.jit.ignore
def python_lookup(my_dict: Dict[str, int], keys: List[str]) -> List[int]:
return [my_dict[k] for k in keys]
def fn(my_dict: Dict[str, int], keys: List[str]) -> List[int]:
return python_lookup(my_dict, keys)
a_dict = {'a': torch.ones(1), 'b': torch.ones(1) + 1, 'c': torch.ones(1) + 2}
self.checkScript(fn, (a_dict, ('a', 'c')))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_ordered_dict(self):
def test_func(fn, inputs):
self.assertEqual(fn(*inputs), torch.jit.script(fn)(*inputs))
def repeated_key():
return OrderedDict([(1, 2), (2, 3), (1, 4)])
test_func(repeated_key, ())
def no_args():
a = OrderedDict()
a["one"] = torch.tensor(1)
a["two"] = torch.tensor(2)
test_func(no_args, ())
def test_dict_constructor():
a = dict()
a["one"] = torch.tensor(1)
return a, dict([(1, 2), (2, 3), (1, 4)]) # noqa: C406
test_func(test_dict_constructor, ())
def test_dict_initializer_list():
a = {"1": torch.tensor(1), "2": torch.tensor(2)}
output_order = []
for key in a:
output_order.append(a[key])
return output_order
test_func(test_dict_initializer_list, ())
def test_dict_error():
a = dict()
a[1] = 2
return a
with self.assertRaisesRegexWithHighlight(Exception, "Arguments for call are not", "a[1] = 2"):
torch.jit.script(test_dict_error)
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_type_annotation_missing_contained_type(self):
"""
Test that the use of a Dict type annotation without contained
key and value types produces an error.
"""
# This function uses a type comment.
def fn_with_comment(input):
# type: (Dict) -> Any
return input
# This function uses Python3 style type annotations.
def annotated_fn(input: Dict) -> Any:
return input
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
m = torch.jit.script(fn_with_comment)
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
m = torch.jit.script(annotated_fn)
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_dict_preserves_order(self):
def dict_ordering():
a : Dict[int, int] = {}
for i in range(1000):
a[i] = i + 1
return a
self.checkScript(dict_ordering, ())
di = torch.jit.script(dict_ordering)()
res = list(di.items())
for i in range(1000):
key, value = res[i]
self.assertTrue(key == i and value == i + 1)
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_optional_dict_construct(self):
class M(torch.nn.Module):
def use(self, buffer: Dict[str, Optional[torch.Tensor]]):
return buffer["prev_key"]
def forward(self, x):
prev_key = torch.rand(2, 3)
next_key = torch.rand(2, 3)
saved_state: Dict[str, Optional[torch.Tensor]] = {
"prev_key": prev_key,
"next_key": next_key,
}
return self.use(saved_state)
self.checkModule(M(), (torch.rand(2, 2),))
class TestNamedTuple(JitTestCase):
def test_namedtuple(self):
class FeatureVector(NamedTuple):
float_features: float
sequence_features: List[float]
time_since_first: float
@torch.jit.script
def foo(x) -> float:
fv = FeatureVector(3.0, [3.0], 3.0)
rv = fv.float_features
for val in fv.sequence_features:
rv += val
rv *= fv.time_since_first
return rv
self.assertEqual(foo(torch.rand(3, 4)), 18.0)
def test_namedtuple_constant(self):
class Tup(NamedTuple):
a: int
b: int
@torch.jit.script
def foo():
return Tup(1, 2)
self.assertEqual(foo(), Tup(1, 2))
def test_return_named_tuple(self):
class FeatureVector(NamedTuple):
float_features: float
sequence_features: List[float]
time_since_first: float
@torch.jit.script
def foo(x):
fv = FeatureVector(3.0, [3.0], 3.0)
return fv
out = foo(torch.rand(3, 4))
out = foo(torch.rand(3, 4))
self.assertEqual(out.float_features, 3.0)
self.assertEqual(out.sequence_features, [3.0])
self.assertEqual(out.time_since_first, 3.0)
def test_namedtuple_as_attr(self):
class Config(NamedTuple):
size: int
class MyMod(nn.Module):
configs: Dict[int, Config]
def __init__(self, configs):
super().__init__()
self.configs = configs
def forward(self, x):
for _id, config in self.configs.items():
x += config.size
return x
s = torch.jit.script(MyMod({0: Config(size=16)}))
def test_namedtuple_resolution(self):
class TheType(NamedTuple):
t: int
class MyModule(types.ModuleType):
def __init__(self):
super(MyModule, self).__init__('MyModule')
def __getattr__(self, attr):
return TheType
some_module = MyModule()
def fn() -> some_module.Type:
return some_module.Type(1)
self.checkScript(fn, [])
def test_namedtuple_slice_unpack(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(a : int, b : float, c : List[int]):
tup = MyCoolNamedTuple(a, b, c)
my_a, my_b, my_c = tup
return tup[:1], my_a, my_c
self.assertEqual(foo(3, 3.5, [6]), ((3,), 3, [6]))
def test_namedtuple_lower(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(a : int):
tup = MyCoolNamedTuple(a, 3.14, [9])
return tup
FileCheck().check('TupleConstruct').run(foo.graph)
torch._C._jit_pass_lower_all_tuples(foo.graph)
FileCheck().check_not('TupleConstruct').run(foo.graph)
def test_namedtuple_type_annotation(self):
global MyCoolNamedTuple # see [local resolution in python]
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(x : MyCoolNamedTuple) -> MyCoolNamedTuple:
return x
mnt = MyCoolNamedTuple(42, 420.0, [666])
self.assertEqual(foo(mnt), mnt)
def test_namedtuple_wrong_types(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
with self.assertRaisesRegex(RuntimeError, "Expected a value of type 'int' for argument 'a'"
" but instead found type 'str'"):
@torch.jit.script
def foo():
tup = MyCoolNamedTuple('foo', 'bar', 'baz')
return tup
def test_namedtuple_kwarg_construct(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo():
tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9)
return tup
tup = foo()
self.assertEqual(tup.a, 9)
self.assertEqual(tup.b, 3.5)
self.assertEqual(tup.c, [1, 2, 3])
@unittest.skipIf(True, "broken while these tests were not in CI")
def test_namedtuple_serialization(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self):
return MyCoolNamedTuple(3, 3.5, [3, 4, 5])
mm = MyMod()
mm.save('foo.zip')
torch.testing._internal.jit_utils.clear_class_registry()
loaded = torch.jit.load('foo.zip')
out = mm()
out_loaded = loaded()
for name in ['a', 'b', 'c']:
self.assertEqual(getattr(out_loaded, name), getattr(out, name))
class TestScriptDict(JitTestCase):
"""
This class contains a suite of tests for torch.jit.script, a
function that returns a dictionary-like object that has reference
semantics across the Python/TorchScript boundary. That is,
it can be passed to a TorchScript function that mutates it
and those modifications are visible in the scope of the Python
caller of said TorchScript function.
The vast majority of tests are for making sure that objects returned
by torch.jit.script behave like dictionaries do so that they are fungible
in almost all circumstances with regular dictionaries.
"""
def _script_dict_add(self, d: torch._C.ScriptDict, k: int, v: int):
"""
This is a helper function that inserts the pair (k, v) into the
dictionary d in TorchScript. It is used for testing reference
semantics.
"""
@torch.jit.script
def dict_add(d: Dict[int, int], k: int, v: int):
d[k] = v
dict_add(d, k, v)
def _compare_eager_and_script(self, fn, input_dict, script_input_dict=None):
"""
This is a helper function that facilitates comparing behaviour between
Python dictionaries and "scripted" dictionaries.
Args:
fn: The function to test and compare the behaviour of.
input_dict: The input dictionary to use for the test (passed to fn).
script_input_dict: The scripted input dictionary to use for the tests.
If None, input_dict is scripted with torch.jit.script
and used instead.
"""
# Create ScriptDict version of input_dict if needed.
script_input_dict = script_input_dict or torch.jit.script(input_dict)
# Run fn with both input_dict and script_input_dict.
eager_raised, script_raised = False, False
try:
eager_out = fn(input_dict)
except Exception as e:
eager_exception = e
eager_raised = True
try:
script_out = fn(script_input_dict)
except Exception as e:
script_exception = e
script_raised = True
# Check that both calls raised or none of them raised.
self.assertEqual(eager_raised, script_raised)
if eager_raised:
# If fn raised an exception, it should be the same between
# regular and scripted dictionaries.
self.assertEqual(type(eager_exception), type(script_exception))
else:
# Otherwise, make sure the outputs match and the dictionaries
# match (the latter may not be the same as the output).
self.assertEqual(eager_out, script_out)
self.assertEqual(input_dict, script_input_dict)
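# Usage sketch for the helper above (hypothetical call): the same callable is
# applied to a plain dict and to its scripted counterpart, and the outputs,
# raised exception types, and post-call contents must all agree, e.g.:
#
#     self._compare_eager_and_script(lambda d: d[1], {1: 2, 3: 4})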
def test_repr(self):
"""
Test the __repr__ method.
"""
self._compare_eager_and_script(lambda d: repr(d), {1: 2})
def test_bool(self):
"""
Test the __bool__ method. This should return True
if the dictionary is non-empty and False otherwise.
"""
self._compare_eager_and_script(lambda d: bool(d), {1: 2})
self._compare_eager_and_script(lambda d: bool(d), {})
def test_iter(self):
"""
Test iteration over a dictionary's keys.
"""
def sum_keys(input_dict):
s = 0
for k in input_dict:
s += k
return s
self._compare_eager_and_script(sum_keys, {1: 2, 3: 4})
def test_items(self):
"""
Test .items().
"""
def sum_pair_product(input_dict):
s = 0
for k, v in input_dict.items():
s += k * v
return s
self._compare_eager_and_script(sum_pair_product, {1: 2, 3: 4})
def test_getitem(self):
"""
Test accessing dictionary values using the [] operator.
"""
data = {1: 2, 3: 4}
self._compare_eager_and_script(lambda d: d[1], data)
self._compare_eager_and_script(lambda d: d[4], data)
self._compare_eager_and_script(lambda d: d[2], data)
self._compare_eager_and_script(lambda d: d["key"], data)
def test_setitem(self):
"""
Test setting dictionary values using the [] operator.
"""
data = {1: 2, 3: 4}
def fn(input_dict):
input_dict[1] = 10
input_dict[3] = 11
self._compare_eager_and_script(fn, data)
# Check that using improperly typed keys and values
# throws TypeError.
# _compare_eager_and_script cannot be used here since
# the following uses of __setitem__ are valid in
# Python.
script_data = torch.jit.script(data)
with self.assertRaises(TypeError):
script_data["str"] = 3
with self.assertRaises(TypeError):
script_data[3] = "str"
def test_contains(self):
"""
Test membership checks (x in y, x not in y).
"""
data = {1: 2, 3: 4}
def fn(input_dict):
return 1 in input_dict, 2 not in input_dict, 3 in input_dict, 4 not in input_dict
self._compare_eager_and_script(fn, data)
# Check that using an improperly typed key
# throws KeyError.
script_data = torch.jit.script(data)
with self.assertRaises(KeyError):
a = "str" in script_data
def test_delitem(self):
"""
Test deletion.
"""
data = {1: 2, 3: 4}
def del_fn(input_dict):
del input_dict[1]
def del_fn_raises(input_dict):
del input_dict[10]
self._compare_eager_and_script(del_fn, data)
self._compare_eager_and_script(del_fn_raises, data)
# Check that using an improperly typed key
# throws TypeError.
script_data = torch.jit.script(data)
with self.assertRaises(TypeError):
del script_data["str"]
def test_len(self):
"""
Test len() builtin function.
"""
self._compare_eager_and_script(lambda d: len(d), {1: 2})
self._compare_eager_and_script(lambda d: len(d), {})
@unittest.skip("Cannot pass until all dicts returned from TorchScript are ScriptDicts")
def test_nested(self):
"""
Test that reference semantics are honoured when the ScriptDict that is
mutated using TorchScript is inside another.
"""
nested = torch.jit.script({1: {1: 2}, 2: {3: 4}}, type_hint=Dict[int, Dict[int, int]])
one = nested[1]
two = nested[2]
self._script_dict_add(one, 9, 10)
self._script_dict_add(two, 11, 12)
# The mutation should be visible in the original dictionary, nested.
self.assertEqual(len(one), 2)
self.assertEqual(len(two), 2)
self.assertEqual(len(nested[1]), 2)
self.assertEqual(len(nested[2]), 2)
def test_reference_semantics(self):
"""
Test that reference semantics are honoured; that modifications made
to a ScriptDict in TorchScript are visible in Python.
"""
data = torch.jit.script({1: 2})
self._script_dict_add(data, 3, 4)
# The mutation should be visible in the original dictionary.
self.assertEqual(len(data), 2)
self.assertTrue(3 in data)
self.assertEqual(data[3], 4)
class TestScriptList(JitTestCase):
"""
This class contains a suite of tests for torch._C.ScriptList, a
list-like class whose instances have reference
semantics across the Python/TorchScript boundary. That is,
it can be passed to a TorchScript function that mutates it
and those modifications are visible in the scope of the Python
caller of said TorchScript function.
The vast majority of tests are for making sure that instances of
torch._C.ScriptList behave like lists do so that they are fungible
in almost all circumstances with regular lists.
"""
def _script_list_add(self, l: torch._C.ScriptList, e: int):
"""
This is a helper function that inserts the element e into the
list l in TorchScript. It is used for testing reference
semantics.
"""
@torch.jit.script
def list_add(l: List[int], e: int):
l.append(e)
list_add(l, e)
def _compare_eager_and_script(self, fn, input_list, script_input_list=None):
"""
This is a helper function that facilitates comparing behaviour between
Python lists and "scripted" lists.
Args:
fn: The function to test and compare the behaviour of.
input_list: The input list to use for the test (passed to fn).
script_input_list: The scripted input list to use for the tests.
If None, input_list is scripted with torch.jit.script
and used instead.
"""
# Create ScriptList version of input_list if needed.
script_input_list = script_input_list or torch.jit.script(input_list)
# Run fn with both input_list and script_input_list.
eager_raised, script_raised = False, False
try:
eager_out = fn(input_list)
except Exception as e:
eager_exception = e
eager_raised = True
try:
script_out = fn(script_input_list)
except Exception as e:
script_exception = e
script_raised = True
# Check that both calls raised or none of them raised.
self.assertEqual(eager_raised, script_raised)
if eager_raised:
# If fn raised an exception, it should be the same between
# regular and scripted lists.
self.assertEqual(type(eager_exception), type(script_exception))
else:
# Otherwise, make sure the outputs match and the lists
# match (the latter may not be the same as the output).
self.assertEqual(eager_out, script_out)
self.assertEqual(input_list, script_input_list)
def test_repr(self):
"""
Test the __repr__ method.
"""
self._compare_eager_and_script(lambda l: repr(l), [1])
def test_bool(self):
"""
Test the __bool__ method. This should return True
if the list is non-empty and False otherwise.
"""
self._compare_eager_and_script(lambda l: bool(l), [1])
self._compare_eager_and_script(lambda l: bool(l), [])
def test_iter(self):
"""
Test iteration over a list's elements.
"""
def sum_elements(input_list):
s = 0
for k in input_list:
s += k
return s
self._compare_eager_and_script(sum_elements, [1, 2, 3, 4])
def test_getitem(self):
"""
Test accessing list elements using the [] operator.
"""
data = [1, 2, 3, 4]
# Test regular indexing.
self._compare_eager_and_script(lambda l: l[1], data)
self._compare_eager_and_script(lambda l: l[3], data)
self._compare_eager_and_script(lambda l: l[-1], data)
# Test slicing.
self._compare_eager_and_script(lambda l: l[1:3], data)
self._compare_eager_and_script(lambda l: l[:], data)
self._compare_eager_and_script(lambda l: l[1:], data)
self._compare_eager_and_script(lambda l: l[:2], data)
self._compare_eager_and_script(lambda l: l[-1], data)
self._compare_eager_and_script(lambda l: l[-1::-1], data)
# Test errors.
self._compare_eager_and_script(lambda l: l[5], data)
self._compare_eager_and_script(lambda l: l[-7], data)
self._compare_eager_and_script(lambda l: l["key"], data)
def test_setitem(self):
"""
Test setting list elements using the [] operator.
"""
data = [1, 2, 3, 4]
# Test regular assignment.
def setitem(input_list):
input_list[1] = 10
input_list[3] = 11
input_list[-1] = 12
self._compare_eager_and_script(setitem, data.copy())
# Test slice assignment.
# TODO: Something like input_list[:1] = [1, 2, 3, 4, 5]
# is allowed in Python, but pybind11/stl_bind.h does not
# allow it. Should we?
def setitem_slice(input_list):
input_list[:4:2] = [10, 11]
input_list[-2:] = [15, 16]
self._compare_eager_and_script(setitem_slice, data)
# Test errors.
def out_of_range(input_list):
input_list[11] = 3
def out_of_range_negative(input_list):
input_list[-11] = 3
def wrong_index_type(input_list):
input_list["str"] = 3
self._compare_eager_and_script(out_of_range, data)
self._compare_eager_and_script(out_of_range_negative, data)
self._compare_eager_and_script(wrong_index_type, data)
# Check that using value of an incorrect type throws TypeError.
# _compare_eager_and_script cannot be used here since
# the following use of __setitem__ is valid in
# Python.
script_data = torch.jit.script(data)
with self.assertRaises(TypeError):
script_data[0] = "str"
def test_contains(self):
"""
Test membership checks (x in y, x not in y).
"""
data = [1, 2, 3, 4]
def fn(input_list):
return 1 in input_list, 2 not in input_list, 3 in input_list, 4 not in input_list
self._compare_eager_and_script(fn, data)
# Check that using a value of an incorrect type throws a TypeError.
script_data = torch.jit.script(data)
with self.assertRaises(TypeError):
a = "str" in script_data
def test_delitem(self):
"""
Test deletion.
"""
data = [1, 2, 3, 4]
def del_fn(input_list):
del input_list[1]
def del_fn_out_of_range(input_list):
del input_list[10]
def del_fn_wrong_type(input_list):
del input_list["str"]
self._compare_eager_and_script(del_fn, data.copy())
self._compare_eager_and_script(del_fn_out_of_range, data)
self._compare_eager_and_script(del_fn_wrong_type, data)
def test_len(self):
"""
Test len() builtin function.
"""
self._compare_eager_and_script(lambda l: len(l), [1, 2, 3, 4])
self._compare_eager_and_script(lambda l: len(l), [])
def test_count(self):
"""
Test count method.
"""
self._compare_eager_and_script(lambda l: l.count(3), [1, 2, 3, 3])
# Check that using a value of an incorrect type throws TypeError.
script_data = torch.jit.script([1])
with self.assertRaises(TypeError):
script_data.count("str")
def test_remove(self):
"""
Test remove method.
"""
self._compare_eager_and_script(lambda l: l.remove(1), [1, 2, 3])
self._compare_eager_and_script(lambda l: l.remove(10), [1, 2, 3])
# Check that using a value of an incorrect type throws TypeError.
script_data = torch.jit.script([1])
with self.assertRaises(TypeError):
script_data.remove("str")
def test_append(self):
"""
Test append method.
"""
self._compare_eager_and_script(lambda l: l.append(1), [4, 3, 2])
# Check that using a value of an incorrect type throws TypeError.
script_data = torch.jit.script([1])
with self.assertRaises(TypeError):
script_data.append("str")
def test_clear(self):
"""
Test clear.
"""
self._compare_eager_and_script(lambda l: l.clear(), [4, 3, 2])
def test_extend(self):
"""
Test extend.
"""
class Iterable(object):
def __init__(self, limit: int):
self.limit = limit
self.value = 0
def __iter__(self):
return self
def __next__(self):
if self.value == self.limit:
raise StopIteration()
ret = self.value
self.value += 1
return ret
data = [1, 2, 3]
def extend_list(input_list):
input_list.extend([4, 5, 6])
def extend_dict(input_list):
input_list.extend({4: 10, 5: 11, 6: 12})
def extend_iterable(input_list):
input_list.extend(Iterable(3))
self._compare_eager_and_script(extend_list, data.copy())
self._compare_eager_and_script(extend_dict, data.copy())
self._compare_eager_and_script(extend_iterable, data)
# Check that using a value of an incorrect type throws TypeError.
script_data = torch.jit.script([1])
with self.assertRaises(TypeError):
script_data.extend(["a"])
with self.assertRaises(TypeError):
script_data.extend({"a": 1})
def test_insert(self):
"""
Test insert.
"""
data = [1, 2, 4]
self._compare_eager_and_script(lambda l: l.insert(3, 3), data.copy())
self._compare_eager_and_script(lambda l: l.insert(0, 3), data.copy())
self._compare_eager_and_script(lambda l: l.insert(-2, 3), data)
# Check that using a value of an incorrect type throws TypeError.
script_data = torch.jit.script([1])
with self.assertRaises(TypeError):
script_data.insert(0, "str")
def test_pop(self):
"""
Test pop.
"""
data = [1, 2, 3, 4, 5]
# Test normal cases.
self._compare_eager_and_script(lambda l: l.pop(), data.copy())
self._compare_eager_and_script(lambda l: l.pop(2), data.copy())
self._compare_eager_and_script(lambda l: l.pop(-3), data.copy())
# Test error cases.
self._compare_eager_and_script(lambda l: l.pop(10), data)
@unittest.skip("Cannot pass until all list returned from TorchScript are ScriptLists")
def test_nested(self):
"""
Test that reference semantics are honoured when the ScriptList that is
mutated using TorchScript is inside another.
"""
nested = torch.jit.script([[1], [2]], List[List[int]])
one = nested[0]
two = nested[1]
self._script_list_add(one, 3)
self._script_list_add(two, 4)
# The mutation should be visible in the original list, nested.
self.assertEqual(len(one), 2)
self.assertEqual(len(two), 2)
self.assertEqual(one[len(one) - 1], 3)
self.assertEqual(two[len(one) - 1], 4)
self.assertEqual(len(nested[0]), 2)
self.assertEqual(len(nested[1]), 2)
def test_reference_semantics(self):
"""
Test that reference semantics are honoured; that modifications made
to a ScriptList in TorchScript are visible in Python.
"""
l = torch.jit.script([1, 2])
self._script_list_add(l, 3)
self.assertEqual(len(l), 3)
self.assertTrue(3 in l)
self.assertEqual(l[2], 3)
|
pytorch-master
|
test/jit/test_list_dict.py
|
# Owner(s): ["oncall: jit"]
import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from torch.testing._internal.common_utils import IS_MACOS
from typing import List, Dict
from itertools import product
from textwrap import dedent
import cmath
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
class TestComplex(JitTestCase):
def test_script(self):
def fn(a: complex):
return a
self.checkScript(fn, (3 + 5j,))
def test_complexlist(self):
def fn(a: List[complex], idx: int):
return a[idx]
input = [1j, 2, 3 + 4j, -5, -7j]
self.checkScript(fn, (input, 2))
def test_complexdict(self):
def fn(a: Dict[complex, complex], key: complex) -> complex:
return a[key]
input = {2 + 3j : 2 - 3j, -4.3 - 2j: 3j}
self.checkScript(fn, (input, -4.3 - 2j))
def test_pickle(self):
class ComplexModule(torch.jit.ScriptModule):
def __init__(self):
super().__init__()
self.a = 3 + 5j
self.b = [2 + 3j, 3 + 4j, 0 - 3j, -4 + 0j]
self.c = {2 + 3j : 2 - 3j, -4.3 - 2j: 3j}
@torch.jit.script_method
def forward(self, b: int):
return b + 2j
loaded = self.getExportImportCopy(ComplexModule())
self.assertEqual(loaded.a, 3 + 5j)
self.assertEqual(loaded.b, [2 + 3j, 3 + 4j, -3j, -4])
self.assertEqual(loaded.c, {2 + 3j : 2 - 3j, -4.3 - 2j: 3j})
self.assertEqual(loaded(2), 2 + 2j)
def test_complex_parse(self):
def fn(a: int, b: torch.Tensor, dim: int):
# verifies `emitValueToTensor()` 's behavior
b[dim] = 2.4 + 0.5j
return (3 * 2j) + a + 5j - 7.4j - 4
t1 = torch.tensor(1)
t2 = torch.tensor([0.4, 1.4j, 2.35])
self.checkScript(fn, (t1, t2, 2))
def test_complex_constants_and_ops(self):
vals = ([0.0, 1.0, 2.2, -1.0, -0.0, -2.2, 1, 0, 2]
+ [10.0 ** i for i in range(2)] + [-(10.0 ** i) for i in range(2)])
complex_vals = tuple(complex(x, y) for x, y in product(vals, vals))
funcs_template = dedent('''
def func(a: complex):
return cmath.{func_or_const}(a)
''')
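# For example, funcs_template.format(func_or_const='sqrt') produces the source
# below, which is both exec'd in Python and compiled by torch.jit.CompilationUnit:
#
#     def func(a: complex):
#         return cmath.sqrt(a)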
def checkCmath(func_name, funcs_template=funcs_template):
funcs_str = funcs_template.format(func_or_const=func_name)
scope = {}
execWrapper(funcs_str, globals(), scope)
cu = torch.jit.CompilationUnit(funcs_str)
f_script = cu.func
f = scope['func']
if func_name in ['isinf', 'isnan', 'isfinite']:
new_vals = vals + ([float('inf'), float('nan'), -1 * float('inf')])
final_vals = tuple(complex(x, y) for x, y in product(new_vals, new_vals))
else:
final_vals = complex_vals
for a in final_vals:
res_python = None
res_script = None
try:
res_python = f(a)
except Exception as e:
res_python = e
try:
res_script = f_script(a)
except Exception as e:
res_script = e
if res_python != res_script:
if isinstance(res_python, Exception):
continue
msg = f"Failed on {func_name} with input {a}. Python: {res_python}, Script: {res_script}"
self.assertEqual(res_python, res_script, msg=msg)
unary_ops = ['log', 'log10', 'sqrt', 'exp', 'sin', 'cos', 'asin', 'acos', 'atan', 'sinh', 'cosh',
'tanh', 'asinh', 'acosh', 'atanh', 'phase', 'isinf', 'isnan', 'isfinite']
# --- Unary ops ---
for op in unary_ops:
checkCmath(op)
def fn(x: complex):
return abs(x)
for val in complex_vals:
self.checkScript(fn, (val, ))
def pow_complex_float(x: complex, y: float):
return pow(x, y)
def pow_float_complex(x: float, y: complex):
return pow(x, y)
self.checkScript(pow_float_complex, (2, 3j))
self.checkScript(pow_complex_float, (3j, 2))
def pow_complex_complex(x: complex, y: complex):
return pow(x, y)
for x, y in zip(complex_vals, complex_vals):
# Reference: https://github.com/pytorch/pytorch/issues/54622
if (x == 0):
continue
self.checkScript(pow_complex_complex, (x, y))
if not IS_MACOS:
# --- Binary op ---
def rect_fn(x: float, y: float):
return cmath.rect(x, y)
for x, y in product(vals, vals):
self.checkScript(rect_fn, (x, y, ))
func_constants_template = dedent('''
def func():
return cmath.{func_or_const}
''')
float_consts = ['pi', 'e', 'tau', 'inf', 'nan']
complex_consts = ['infj', 'nanj']
for x in (float_consts + complex_consts):
checkCmath(x, funcs_template=func_constants_template)
def test_infj_nanj_pickle(self):
class ComplexModule(torch.jit.ScriptModule):
def __init__(self):
super().__init__()
self.a = 3 + 5j
@torch.jit.script_method
def forward(self, infj: int, nanj: int):
if infj == 2:
return infj + cmath.infj
else:
return nanj + cmath.nanj
loaded = self.getExportImportCopy(ComplexModule())
self.assertEqual(loaded(2, 3), 2 + cmath.infj)
self.assertEqual(loaded(3, 4), 4 + cmath.nanj)
def test_complex_constructor(self):
# Test all scalar types
def fn_int(real: int, img: int):
return complex(real, img)
self.checkScript(fn_int, (0, 0, ))
self.checkScript(fn_int, (-1234, 0, ))
self.checkScript(fn_int, (0, -1256, ))
self.checkScript(fn_int, (-167, -1256, ))
def fn_float(real: float, img: float):
return complex(real, img)
self.checkScript(fn_float, (0.0, 0.0, ))
self.checkScript(fn_float, (-1234.78, 0, ))
self.checkScript(fn_float, (0, 56.18, ))
self.checkScript(fn_float, (-1.9, -19.8, ))
def fn_bool(real: bool, img: bool):
return complex(real, img)
self.checkScript(fn_bool, (True, True, ))
self.checkScript(fn_bool, (False, False, ))
self.checkScript(fn_bool, (False, True, ))
self.checkScript(fn_bool, (True, False, ))
def fn_bool_int(real: bool, img: int):
return complex(real, img)
self.checkScript(fn_bool_int, (True, 0, ))
self.checkScript(fn_bool_int, (False, 0, ))
self.checkScript(fn_bool_int, (False, -1, ))
self.checkScript(fn_bool_int, (True, 3, ))
def fn_int_bool(real: int, img: bool):
return complex(real, img)
self.checkScript(fn_int_bool, (0, True, ))
self.checkScript(fn_int_bool, (0, False, ))
self.checkScript(fn_int_bool, (-3, True, ))
self.checkScript(fn_int_bool, (6, False, ))
def fn_bool_float(real: bool, img: float):
return complex(real, img)
self.checkScript(fn_bool_float, (True, 0.0, ))
self.checkScript(fn_bool_float, (False, 0.0, ))
self.checkScript(fn_bool_float, (False, -1.0, ))
self.checkScript(fn_bool_float, (True, 3.0, ))
def fn_float_bool(real: float, img: bool):
return complex(real, img)
self.checkScript(fn_float_bool, (0.0, True, ))
self.checkScript(fn_float_bool, (0.0, False, ))
self.checkScript(fn_float_bool, (-3.0, True, ))
self.checkScript(fn_float_bool, (6.0, False, ))
def fn_float_int(real: float, img: int):
return complex(real, img)
self.checkScript(fn_float_int, (0.0, 1, ))
self.checkScript(fn_float_int, (0.0, -1, ))
self.checkScript(fn_float_int, (1.8, -3, ))
self.checkScript(fn_float_int, (2.7, 8, ))
def fn_int_float(real: int, img: float):
return complex(real, img)
self.checkScript(fn_int_float, (1, 0.0, ))
self.checkScript(fn_int_float, (-1, 1.7, ))
self.checkScript(fn_int_float, (-3, 0.0, ))
self.checkScript(fn_int_float, (2, -8.9, ))
def test_torch_complex_constructor_with_tensor(self):
tensors = ([torch.rand(1), torch.randint(-5, 5, (1, )), torch.tensor([False])])
def fn_tensor_float(real, img: float):
return complex(real, img)
def fn_tensor_int(real, img: int):
return complex(real, img)
def fn_tensor_bool(real, img: bool):
return complex(real, img)
def fn_float_tensor(real: float, img):
return complex(real, img)
def fn_int_tensor(real: int, img):
return complex(real, img)
def fn_bool_tensor(real: bool, img):
return complex(real, img)
for tensor in tensors:
self.checkScript(fn_tensor_float, (tensor, 1.2))
self.checkScript(fn_tensor_int, (tensor, 3))
self.checkScript(fn_tensor_bool, (tensor, True))
self.checkScript(fn_float_tensor, (1.2, tensor))
self.checkScript(fn_int_tensor, (3, tensor))
self.checkScript(fn_bool_tensor, (True, tensor))
def fn_tensor_tensor(real, img):
return complex(real, img) + complex(2)
for x, y in product(tensors, tensors):
self.checkScript(fn_tensor_tensor, (x, y, ))
def test_comparison_ops(self):
def fn1(a: complex, b: complex):
return a == b
def fn2(a: complex, b: complex):
return a != b
def fn3(a: complex, b: float):
return a == b
def fn4(a: complex, b: float):
return a != b
x, y = 2 - 3j, 4j
self.checkScript(fn1, (x, x))
self.checkScript(fn1, (x, y))
self.checkScript(fn2, (x, x))
self.checkScript(fn2, (x, y))
x1, y1 = 1 + 0j, 1.0
self.checkScript(fn3, (x1, y1))
self.checkScript(fn4, (x1, y1))
def test_div(self):
def fn1(a: complex, b: complex):
return a / b
x, y = 2 - 3j, 4j
self.checkScript(fn1, (x, y))
def test_complex_list_sum(self):
def fn(x: List[complex]):
return sum(x)
self.checkScript(fn, (torch.randn(4, dtype=torch.cdouble).tolist(), ))
def test_tensor_attributes(self):
def tensor_real(x):
return x.real
def tensor_imag(x):
return x.imag
t = torch.randn(2, 3, dtype=torch.cdouble)
self.checkScript(tensor_real, (t, ))
self.checkScript(tensor_imag, (t, ))
def test_binary_op_complex_tensor(self):
def mul(x: complex, y: torch.Tensor):
return x * y
def add(x: complex, y: torch.Tensor):
return x + y
def eq(x: complex, y: torch.Tensor):
return x == y
def ne(x: complex, y: torch.Tensor):
return x != y
def sub(x: complex, y: torch.Tensor):
return x - y
def div(x: complex, y: torch.Tensor):
return x / y
ops = [mul, add, eq, ne, sub, div]
for shape in [(1, ), (2, 2)]:
x = 0.71 + 0.71j
y = torch.randn(shape, dtype=torch.cfloat)
for op in ops:
eager_result = op(x, y)
scripted = torch.jit.script(op)
jit_result = scripted(x, y)
self.assertEqual(eager_result, jit_result)
|
pytorch-master
|
test/jit/test_complex.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase, make_global
class TestDCE(JitTestCase):
def test_setattr_no_aliasdb(self):
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.empty([2, 2])
def forward(self):
x = torch.rand([3, 3])
self.x = x
net = torch.jit.script(Net())
FileCheck().check("prim::SetAttr").run(net.graph)
def test_setattr_removed(self):
@torch.jit.script
class Thing1(object):
def __init__(self):
self.x = torch.zeros([2, 2])
make_global(Thing1)
class Thing2(torch.nn.Module):
def forward(self):
x = torch.rand([2, 2])
y = torch.rand([2, 2])
t1 = Thing1()
t1.x = x
return y
unscripted = Thing2()
t2 = torch.jit.script(unscripted)
t2.eval()
# freezing inlines t1.__init__(), after which DCE can occur.
t2 = torch.jit.freeze(t2)
FileCheck().check_not("prim::SetAttr").run(t2.graph)
|
pytorch-master
|
test/jit/test_dce.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestPythonIr(JitTestCase):
def test_param_strides(self):
def trace_me(arg):
return arg
t = torch.zeros(1, 3, 16, 16)
traced = torch.jit.trace(trace_me, t)
value = list(traced.graph.param_node().outputs())[0]
real_strides = list(t.stride())
type_strides = value.type().strides()
self.assertEqual(real_strides, type_strides)
|
pytorch-master
|
test/jit/test_python_ir.py
|
# Owner(s): ["oncall: jit"]
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
import torch
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestGetDefaultAttr(JitTestCase):
def test_getattr_with_default(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.init_attr_val = 1.0
def forward(self, x):
y = getattr(self, "init_attr_val") # noqa: B009
w : list[float] = [1.0]
z = getattr(self, "missing", w) # noqa: B009
z.append(y)
return z
result = A().forward(0.0)
self.assertEqual(2, len(result))
graph = torch.jit.script(A()).graph
# The "init_attr_val" attribute exists
FileCheck().check("prim::GetAttr[name=\"init_attr_val\"]").run(graph)
# The "missing" attribute does not exist, so there should be no corresponding GetAttr in AST
FileCheck().check_not("missing").run(graph)
# instead the getattr call will emit the default value, which is a list with one float element
FileCheck().check("float[] = prim::ListConstruct").run(graph)
|
pytorch-master
|
test/jit/test_attr.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestFunctionalBlocks(JitTestCase):
def test_subgraph_creation(self):
def fn(x, y, z):
x = x + 1
y = y + 1
z = z + 1
z.add_(2)
z = z * z
y = y * z
if y < 2:
y = y + 5
return x + y + z
graph = torch.jit.script(fn).graph
self.run_pass('create_functional_graphs', graph)
# all uses of x and y should be sunk
FileCheck().check(r"%x").check_not(r"%x").check("FunctionalGraph").check(r"%x").run(graph)
FileCheck().check(r"%y").check_not(r"%y").check("FunctionalGraph").check(r"%y").run(graph)
# Don't allow any outputs which escape scope, so there is one final addition in the graph
FileCheck().check("Tensor = prim::Functional").check_next("aten::add").run(graph)
# z + 1, z.add_(2) considered non functional, z = z * z should be considered functional
FileCheck().check("add").check("add_").check_not("mul").check("FunctionalGraph").run(graph)
|
pytorch-master
|
test/jit/test_functional_blocks.py
|
# Owner(s): ["oncall: jit"]
import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestModules(JitTestCase):
def test_script_module_with_constants_list(self):
"""
Test that a module that has __constants__ set to something
that is not a set can be scripted.
"""
# torch.nn.Linear has a __constants__ attribute defined
# and initialized to a list.
class Net(torch.nn.Linear):
x: torch.jit.Final[int]
def __init__(self):
super().__init__(5, 10)
self.x = 0
self.checkModule(Net(), (torch.randn(5),))
|
pytorch-master
|
test/jit/test_modules.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
from itertools import product
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
import unittest
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
activations = [
F.celu,
F.elu,
F.hardsigmoid,
F.hardswish,
F.hardtanh,
F.leaky_relu,
F.relu,
F.relu6,
F.rrelu,
F.selu,
F.silu,
]
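# A rough sketch of what the functional_to_inplace_activation pass does to a
# scripted function (the rewrite happens on the graph as aten::relu ->
# aten::relu_; this is only the Python-level intuition, not the actual pass):
#
#     def before(x):
#         y = x + 1
#         return F.relu(y)                  # out-of-place; y is otherwise dead
#
#     def after(x):
#         y = x + 1
#         return F.relu(y, inplace=True)    # rewritten to the in-place variant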
class TestFunctionalToInplaceActivation(JitTestCase):
def test_check_no_type_promotion(self):
dtypes = [
torch.bool,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float32,
torch.float64,
]
# restore_mutation.h contains a mapping from activation operators
# to whether they allow type conversion. Use this check to
# guard the mapping; if a later change breaks the assumption,
# the mapping needs to be updated accordingly.
for activation, dtype in product(activations, dtypes):
inp = torch.normal(0, 5, size=(4, 4)).to(dtype)
try:
out = activation(inp)
self.assertEqual(dtype, out.dtype)
except RuntimeError:
# Skip the not implemented error
pass
def test_functional_to_inplace_activation(self):
for activation in activations:
def test_basic(x):
y = x + 1
z = activation(y)
return z
fn = torch.jit.script(test_basic)
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
FileCheck().check(f"aten::{activation.__name__}(").run(fn.graph)
self.run_pass('functional_to_inplace_activation', fn.graph)
FileCheck().check_not(f"aten::{activation.__name__}(").run(fn.graph)
FileCheck().check(f"aten::{activation.__name__}_").run(fn.graph)
inp = torch.rand([2, 2])
self.assertEqual(fn(inp), test_basic(inp))
def test_no_functional_to_inplace(self):
# inplace conversion should not happen because sigmoid may
# perform type conversion
def test1():
y = torch.ones([2, 2])
z = torch.sigmoid(y)
return z
fn = torch.jit.script(test1)
self.run_pass('functional_to_inplace_activation', fn.graph)
FileCheck().check_not("aten::sigmoid_").run(fn.graph)
# inplace conversion should not happen because y aliases
# the input x
def test2(x):
y = x[0]
z = torch.relu(y)
return z
fn = torch.jit.script(test2)
self.run_pass('functional_to_inplace_activation', fn.graph)
FileCheck().check_not("aten::relu_").run(fn.graph)
# inplace conversion should not happen because self.x is
# at the global scope
class Test3(nn.Module):
def __init__(self, x):
super(Test3, self).__init__()
self.x = x
def forward(self):
y = torch.relu(self.x)
return y
fn = torch.jit.script(Test3(torch.rand([2, 2])).eval())
self.run_pass('functional_to_inplace_activation', fn.graph)
FileCheck().check_not("aten::relu_").run(fn.graph)
@skipIfNoTorchVision
def test_resnet18_correctness(self):
model = torchvision.models.resnet18()
frozen_model = torch.jit.freeze(torch.jit.script(model.eval()))
N, C, H, W, = 10, 3, 224, 224
inp = torch.randn(N, C, H, W)
self.run_pass('functional_to_inplace_activation', frozen_model.graph)
self.assertEqual(model(inp), frozen_model(inp))
class TestInplaceToFunctionalActivation(JitTestCase):
def test_inplace_to_functional_activation(self):
for activation in activations:
def test_basic(x):
y = x + 1
activation(y, inplace=True)
return y
fn = torch.jit.script(test_basic)
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
FileCheck().check(f"aten::{activation.__name__}_").run(fn.graph)
self.run_pass('inplace_to_functional_activation', fn.graph)
FileCheck().check_not(f"aten::{activation.__name__}_").run(fn.graph)
FileCheck().check(f"aten::{activation.__name__}(").run(fn.graph)
for activation in [
torch.relu_,
torch.sigmoid_,
torch.tanh_,
]:
def test_basic(x):
y = x + 1
activation(y)
return y
fn = torch.jit.script(test_basic)
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
FileCheck().check(f"aten::{activation.__name__}").run(fn.graph)
self.run_pass('inplace_to_functional_activation', fn.graph)
FileCheck().check_not(f"aten::{activation.__name__}").run(fn.graph)
FileCheck().check(f"aten::{activation.__name__[:-1]}(").run(fn.graph)
inp = torch.rand([2, 2])
self.assertEqual(fn(inp), test_basic(inp))
@skipIfNoTorchVision
def test_resnet18_correctness(self):
model = torchvision.models.resnet18()
frozen_model = torch.jit.freeze(torch.jit.script(model.eval()))
N, C, H, W, = 10, 3, 224, 224
inp = torch.randn(N, C, H, W)
self.run_pass('inplace_to_functional_activation', frozen_model.graph)
self.assertEqual(model(inp), frozen_model(inp))
|
pytorch-master
|
test/jit/test_convert_activation.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
import warnings
from typing import List, Any, Dict, Tuple, Optional
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
# Tests for torch.jit.isinstance
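# A minimal refinement sketch (illustrative names): torch.jit.isinstance checks
# container element types at runtime and, inside a conditional, refines the
# static type of an Any value for the code that follows:
#
#     def first_or_zero(x: Any) -> int:
#         if torch.jit.isinstance(x, List[int]):
#             return x[0]       # x is treated as List[int] here
#         return 0
#
#     torch.jit.script(first_or_zero)([3, 4])  # -> 3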
class TestIsinstance(JitTestCase):
def test_int(self):
def int_test(x: Any):
assert torch.jit.isinstance(x, int)
assert not torch.jit.isinstance(x, float)
x = 1
self.checkScript(int_test, (x,))
def test_float(self):
def float_test(x: Any):
assert torch.jit.isinstance(x, float)
assert not torch.jit.isinstance(x, int)
x = 1.0
self.checkScript(float_test, (x,))
def test_bool(self):
def bool_test(x: Any):
assert torch.jit.isinstance(x, bool)
assert not torch.jit.isinstance(x, float)
x = False
self.checkScript(bool_test, (x,))
def test_list(self):
def list_str_test(x: Any):
assert torch.jit.isinstance(x, List[str])
assert not torch.jit.isinstance(x, List[int])
assert not torch.jit.isinstance(x, Tuple[int])
x = ["1", "2", "3"]
self.checkScript(list_str_test, (x,))
def test_list_tensor(self):
def list_tensor_test(x: Any):
assert torch.jit.isinstance(x, List[torch.Tensor])
assert not torch.jit.isinstance(x, Tuple[int])
x = [torch.tensor([1]), torch.tensor([2]), torch.tensor([3])]
self.checkScript(list_tensor_test, (x,))
def test_dict(self):
def dict_str_int_test(x: Any):
assert torch.jit.isinstance(x, Dict[str, int])
assert not torch.jit.isinstance(x, Dict[int, str])
assert not torch.jit.isinstance(x, Dict[str, str])
x = {"a": 1, "b": 2}
self.checkScript(dict_str_int_test, (x,))
def test_dict_tensor(self):
def dict_int_tensor_test(x: Any):
assert torch.jit.isinstance(x, Dict[int, torch.Tensor])
x = {2: torch.tensor([2])}
self.checkScript(dict_int_tensor_test, (x,))
def test_tuple(self):
def tuple_test(x: Any):
assert torch.jit.isinstance(x, Tuple[str, int, str])
assert not torch.jit.isinstance(x, Tuple[int, str, str])
assert not torch.jit.isinstance(x, Tuple[str])
x = ("a", 1, "b")
self.checkScript(tuple_test, (x,))
def test_tuple_tensor(self):
def tuple_tensor_test(x: Any):
assert torch.jit.isinstance(x, Tuple[torch.Tensor, torch.Tensor])
x = (torch.tensor([1]), torch.tensor([[2], [3]]))
self.checkScript(tuple_tensor_test, (x,))
def test_optional(self):
def optional_test(x: Any):
assert torch.jit.isinstance(x, Optional[torch.Tensor])
assert not torch.jit.isinstance(x, Optional[str])
x = torch.ones(3, 3)
self.checkScript(optional_test, (x,))
def test_optional_none(self):
def optional_test_none(x: Any):
assert torch.jit.isinstance(x, Optional[torch.Tensor])
# assert torch.jit.isinstance(x, Optional[str])
# TODO: the above line evaluates to True in eager mode but to
# False in the TS interpreter because the first
# torch.jit.isinstance call refines the 'None' type
x = None
self.checkScript(optional_test_none, (x,))
def test_list_nested(self):
def list_nested(x: Any):
assert torch.jit.isinstance(x, List[Dict[str, int]])
assert not torch.jit.isinstance(x, List[List[str]])
x = [{"a": 1, "b": 2}, {"aa": 11, "bb": 22}]
self.checkScript(list_nested, (x,))
def test_dict_nested(self):
def dict_nested(x: Any):
assert torch.jit.isinstance(x, Dict[str, Tuple[str, str, str]])
assert not torch.jit.isinstance(x, Dict[str, Tuple[int, int, int]])
x = {"a": ("aa", "aa", "aa"), "b": ("bb", "bb", "bb")}
self.checkScript(dict_nested, (x,))
def test_tuple_nested(self):
def tuple_nested(x: Any):
assert torch.jit.isinstance(
x, Tuple[Dict[str, Tuple[str, str, str]], List[bool], Optional[str]]
)
assert not torch.jit.isinstance(x, Dict[str, Tuple[int, int, int]])
assert not torch.jit.isinstance(x, Tuple[str])
assert not torch.jit.isinstance(x, Tuple[List[bool], List[str], List[int]])
x = (
{"a": ("aa", "aa", "aa"), "b": ("bb", "bb", "bb")},
[True, False, True],
None,
)
self.checkScript(tuple_nested, (x,))
def test_optional_nested(self):
def optional_nested(x: Any):
assert torch.jit.isinstance(x, Optional[List[str]])
x = ["a", "b", "c"]
self.checkScript(optional_nested, (x,))
def test_list_tensor_type_true(self):
def list_tensor_type_true(x: Any):
assert torch.jit.isinstance(x, List[torch.Tensor])
x = [torch.rand(3, 3), torch.rand(4, 3)]
self.checkScript(list_tensor_type_true, (x,))
def test_tensor_type_false(self):
def list_tensor_type_false(x: Any):
assert not torch.jit.isinstance(x, List[torch.Tensor])
x = [1, 2, 3]
self.checkScript(list_tensor_type_false, (x,))
def test_in_if(self):
def list_in_if(x: Any):
if torch.jit.isinstance(x, List[int]):
assert True
if torch.jit.isinstance(x, List[str]):
assert not True
x = [1, 2, 3]
self.checkScript(list_in_if, (x,))
def test_if_else(self):
def list_in_if_else(x: Any):
if torch.jit.isinstance(x, Tuple[str, str, str]):
assert True
else:
assert not True
x = ("a", "b", "c")
self.checkScript(list_in_if_else, (x,))
def test_in_while_loop(self):
def list_in_while_loop(x: Any):
count = 0
while torch.jit.isinstance(x, List[Dict[str, int]]) and count <= 0:
count = count + 1
assert count == 1
x = [{"a": 1, "b": 2}, {"aa": 11, "bb": 22}]
self.checkScript(list_in_while_loop, (x,))
def test_type_refinement(self):
def type_refinement(obj: Any):
hit = False
if torch.jit.isinstance(obj, List[torch.Tensor]):
hit = not hit
for el in obj:
# perform some tensor operation
y = el.clamp(0, 0.5)
if torch.jit.isinstance(obj, Dict[str, str]):
hit = not hit
str_cat = ""
for val in obj.values():
str_cat = str_cat + val
assert "111222" == str_cat
assert hit
x = [torch.rand(3, 3), torch.rand(4, 3)]
self.checkScript(type_refinement, (x,))
x = {"1": "111", "2": "222"}
self.checkScript(type_refinement, (x,))
def test_list_no_contained_type(self):
def list_no_contained_type(x: Any):
assert torch.jit.isinstance(x, List)
x = ["1", "2", "3"]
err_msg = "Attempted to use List without a contained type. " \
r"Please add a contained type, e.g. List\[int\]"
with self.assertRaisesRegex(RuntimeError, err_msg,):
torch.jit.script(list_no_contained_type)
with self.assertRaisesRegex(RuntimeError, err_msg,):
list_no_contained_type(x)
def test_tuple_no_contained_type(self):
def tuple_no_contained_type(x: Any):
assert torch.jit.isinstance(x, Tuple)
x = ("1", "2", "3")
err_msg = "Attempted to use Tuple without a contained type. " \
r"Please add a contained type, e.g. Tuple\[int\]"
with self.assertRaisesRegex(RuntimeError, err_msg,):
torch.jit.script(tuple_no_contained_type)
with self.assertRaisesRegex(RuntimeError, err_msg,):
tuple_no_contained_type(x)
def test_optional_no_contained_type(self):
def optional_no_contained_type(x: Any):
assert torch.jit.isinstance(x, Optional)
x = ("1", "2", "3")
err_msg = "Attempted to use Optional without a contained type. " \
r"Please add a contained type, e.g. Optional\[int\]"
with self.assertRaisesRegex(RuntimeError, err_msg,):
torch.jit.script(optional_no_contained_type)
with self.assertRaisesRegex(RuntimeError, err_msg,):
optional_no_contained_type(x)
def test_dict_no_contained_type(self):
def dict_no_contained_type(x: Any):
assert torch.jit.isinstance(x, Dict)
x = {"a": "aa"}
err_msg = "Attempted to use Dict without contained types. " \
r"Please add contained type, e.g. Dict\[int, int\]"
with self.assertRaisesRegex(RuntimeError, err_msg,):
torch.jit.script(dict_no_contained_type)
with self.assertRaisesRegex(RuntimeError, err_msg,):
dict_no_contained_type(x)
def test_tuple_rhs(self):
def fn(x: Any):
assert torch.jit.isinstance(x, (int, List[str]))
assert not torch.jit.isinstance(x, (List[float], Tuple[int, str]))
assert not torch.jit.isinstance(x, (List[float], str))
self.checkScript(fn, (2,))
self.checkScript(fn, (["foo", "bar", "baz"],))
def test_nontuple_container_rhs_throws_in_eager(self):
def fn1(x: Any):
assert torch.jit.isinstance(x, [int, List[str]])
def fn2(x: Any):
assert not torch.jit.isinstance(x, {List[str], Tuple[int, str]})
err_highlight = "must be a type or a tuple of types"
with self.assertRaisesRegex(RuntimeError, err_highlight):
fn1(2)
with self.assertRaisesRegex(RuntimeError, err_highlight):
fn2(2)
def test_empty_container_throws_warning_in_eager(self):
def fn(x: Any):
torch.jit.isinstance(x, List[int])
with warnings.catch_warnings(record=True) as w:
x: List[int] = []
fn(x)
self.assertEqual(len(w), 1)
with warnings.catch_warnings(record=True) as w:
x: int = 2
fn(x)
self.assertEqual(len(w), 0)
def test_empty_container_special_cases(self):
# Should not throw "Boolean value of Tensor with no values is
# ambiguous" error
torch._jit_internal.check_empty_containers(torch.Tensor([]))
# Should not throw "Boolean value of Tensor with more than
# one value is ambiguous" error
torch._jit_internal.check_empty_containers(torch.rand(2, 3))
|
pytorch-master
|
test/jit/test_isinstance.py
|
# Owner(s): ["oncall: jit"]
import io
import os
import sys
import unittest
import torch
import torch.nn as nn
from torch.testing import FileCheck
from typing import Any
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
import torch.testing._internal.jit_utils
from torch.testing._internal.common_utils import IS_SANDCASTLE, skipIfTorchDynamo
from typing import List, Tuple, Iterable, Optional, Dict
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestClassType(JitTestCase):
def test_reference_semantics(self):
"""
Test that modifications made to a class instance in TorchScript
are visible in eager.
"""
class Foo(object):
def __init__(self, a: int):
self.a = a
def set_a(self, value: int):
self.a = value
def get_a(self) -> int:
return self.a
@property
def attr(self):
return self.a
make_global(Foo) # see [local resolution in python]
def test_fn(obj: Foo):
obj.set_a(2)
scripted_fn = torch.jit.script(test_fn)
obj = torch.jit.script(Foo(1))
self.assertEqual(obj.get_a(), 1)
self.assertEqual(obj.attr, 1)
scripted_fn(obj)
self.assertEqual(obj.get_a(), 2)
self.assertEqual(obj.attr, 2)
def test_get_with_method(self):
class FooTest(object):
def __init__(self, x):
self.foo = x
def getFooTest(self):
return self.foo
def fn(x):
foo = FooTest(x)
return foo.getFooTest()
input = torch.ones(2, 3)
self.assertEqual(fn(input), input)
def test_get_attr(self):
class FooTest(object): # noqa: B903
def __init__(self, x):
self.foo = x
@torch.jit.script
def fn(x):
foo = FooTest(x)
return foo.foo
input = torch.ones(2, 3)
self.assertEqual(fn(input), input)
def test_in(self):
class FooTest(object): # noqa: B903
def __init__(self):
pass
def __contains__(self, key: str) -> bool:
return key == 'hi'
@torch.jit.script
def fn():
foo = FooTest()
return 'hi' in foo, 'no' in foo
self.assertEqual(fn(), (True, False))
def test_set_attr_in_method(self):
class FooTest(object):
def __init__(self, x: int) -> None:
self.foo = x
def incFooTest(self, y: int) -> None:
self.foo = self.foo + y
@torch.jit.script
def fn(x: int) -> int:
foo = FooTest(x)
foo.incFooTest(2)
return foo.foo
self.assertEqual(fn(1), 3)
def test_set_attr_type_mismatch(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, "Wrong type for attribute assignment", "self.foo = 10"):
@torch.jit.script
class FooTest(object):
def __init__(self, x):
self.foo = x
self.foo = 10 # should error since int != Tensor
def test_get_attr_not_initialized(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", "self.asdf"):
@torch.jit.script
class FooTest(object):
def __init__(self, x):
self.foo = x
def get_non_initialized(self):
return self.asdf # asdf isn't an attr
def test_set_attr_non_initialized(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, "Tried to set nonexistent attribute", "self.bar = y"):
@torch.jit.script
class FooTest(object):
def __init__(self, x):
self.foo = x
def set_non_initialized(self, y):
self.bar = y # can't assign to non-initialized attr
def test_schema_human_readable(self):
"""
Make sure that the schema is human readable, ie the mode parameter should read "nearest" instead of being displayed in octal
aten::__interpolate(Tensor input, int? size=None, float[]? scale_factor=None,
str mode='\156\145\141\162\145\163\164', bool? align_corners=None) -> (Tensor):
Expected a value of type 'Optional[int]' for argument 'size' but instead found type 'Tensor'.
"""
with self.assertRaisesRegexWithHighlight(RuntimeError, "nearest", ""):
@torch.jit.script
def FooTest(x):
return torch.nn.functional.interpolate(x, 'bad')
def test_type_annotations(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, "Expected a value of type \'bool", ""):
@torch.jit.script # noqa: B903
class FooTest(object): # noqa: B903
def __init__(self, x: bool) -> None:
self.foo = x
@torch.jit.script
def fn(x):
FooTest(x)
fn(2)
def test_conditional_set_attr(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, "assignment cannot be in a control-flow block", ""):
@torch.jit.script
class FooTest(object):
def __init__(self, x):
if 1 == 1:
self.attr = x
def test_class_type_as_param(self):
class FooTest(object): # noqa: B903
def __init__(self, x):
self.attr = x
make_global(FooTest) # see [local resolution in python]
@torch.jit.script
def fn(foo: FooTest) -> torch.Tensor:
return foo.attr
@torch.jit.script
def fn2(x):
foo = FooTest(x)
return fn(foo)
input = torch.ones(1)
self.assertEqual(fn2(input), input)
def test_out_of_order_methods(self):
class FooTest(object):
def __init__(self, x):
self.x = x
self.x = self.get_stuff(x)
def get_stuff(self, y):
return self.x + y
@torch.jit.script
def fn(x):
f = FooTest(x)
return f.x
input = torch.ones(1)
self.assertEqual(fn(input), input + input)
def test_save_load_with_classes(self):
class FooTest(object):
def __init__(self, x):
self.x = x
def get_x(self):
return self.x
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
foo = FooTest(a)
return foo.get_x()
m = MyMod()
buffer = io.BytesIO()
torch.jit.save(m, buffer)
        # classes are globally registered for now, so we need to clear the JIT
        # registry to simulate loading a new model
        torch.testing._internal.jit_utils.clear_class_registry()
        buffer.seek(0)
m_loaded = torch.jit.load(buffer)
input = torch.rand(2, 3)
output = m_loaded(input)
self.assertEqual(input, output)
def test_save_load_with_classes_returned(self):
class FooTest(object):
def __init__(self, x):
self.x = x
def clone(self):
clone = FooTest(self.x)
return clone
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
foo = FooTest(a)
foo_clone = foo.clone()
return foo_clone.x
m = MyMod()
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# classes are globally registered for now, so we need to clear the JIT
# registry to simulate loading a new model
torch.testing._internal.jit_utils.clear_class_registry()
buffer.seek(0)
m_loaded = torch.jit.load(buffer)
input = torch.rand(2, 3)
output = m_loaded(input)
self.assertEqual(input, output)
def test_save_load_with_classes_nested(self):
class FooNestedTest(object): # noqa: B903
def __init__(self, y):
self.y = y
class FooNestedTest2(object):
def __init__(self, y):
self.y = y
self.nested = FooNestedTest(y)
class FooTest(object):
def __init__(self, x):
self.class_attr = FooNestedTest(x)
self.class_attr2 = FooNestedTest2(x)
self.x = self.class_attr.y + self.class_attr2.y
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
foo = FooTest(a)
return foo.x
m = MyMod()
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# classes are globally registered for now, so we need to clear the JIT
# registry to simulate loading a new model
torch.testing._internal.jit_utils.clear_class_registry()
buffer.seek(0)
m_loaded = torch.jit.load(buffer)
input = torch.rand(2, 3)
output = m_loaded(input)
self.assertEqual(2 * input, output)
def test_python_interop(self):
class Foo(object): # noqa: B903
def __init__(self, x, y):
self.x = x
self.y = y
make_global(Foo) # see [local resolution in python]
@torch.jit.script
def use_foo(foo: Foo) -> Foo:
return foo
# create from python
x = torch.ones(2, 3)
y = torch.zeros(2, 3)
f = Foo(x, y)
self.assertEqual(x, f.x)
self.assertEqual(y, f.y)
# pass in and out of script
f2 = use_foo(f)
self.assertEqual(x, f2.x)
self.assertEqual(y, f2.y)
def test_class_specialization(self):
class Foo(object): # noqa: B903
def __init__(self, x, y):
self.x = x
self.y = y
make_global(Foo) # see [local resolution in python]
def use_foo(foo: Foo, foo2: Foo, tup: Tuple[Foo, Foo]) -> torch.Tensor:
a, b = tup
return foo.x + foo2.y + a.x + b.y
# create from python
x = torch.ones(2, 3)
y = torch.zeros(2, 3)
f = Foo(x, y)
f2 = Foo(x * 2, y * 3)
f3 = Foo(x * 4, y * 4)
input = (f, f2, (f, f3))
sfoo = self.checkScript(use_foo, input)
graphstr = str(sfoo.graph_for(*input))
FileCheck().check_count("prim::GetAttr", 4).run(graphstr)
def test_class_sorting(self):
class Foo(object): # noqa: B903
def __init__(self, x: int) -> None:
self.x = x
def __lt__(self, other) -> bool:
# type: (Foo) -> bool
return self.x < other.x
def getVal(self):
return self.x
make_global(Foo) # see [local resolution in python]
def test(li: List[Foo], reverse: bool = False) -> Tuple[List[int], List[int]]:
li_sorted = sorted(li)
ret_sorted = torch.jit.annotate(List[int], [])
for foo in li_sorted:
ret_sorted.append(foo.getVal())
li.sort(reverse=reverse)
ret_sort = torch.jit.annotate(List[int], [])
for foo in li:
ret_sort.append(foo.getVal())
return ret_sorted, ret_sort
self.checkScript(test, ([Foo(2), Foo(1), Foo(3)],))
self.checkScript(test, ([Foo(2), Foo(1), Foo(3)], True))
self.checkScript(test, ([Foo(2)],))
self.checkScript(test, ([],))
@torch.jit.script
def test_list_no_reverse():
li = [Foo(3), Foo(1)]
li.sort()
return li[0].getVal()
self.assertEqual(test_list_no_reverse(), 1)
@torch.jit.script
def test_sorted_copies():
li = [Foo(3), Foo(1)]
li_sorted = sorted(li)
return li[0].getVal(), li_sorted[0].getVal()
self.assertEqual(test_sorted_copies(), (3, 1))
@torch.jit.script
def test_nested_inside_tuple():
li = [(1, Foo(12)), (1, Foo(11))]
li.sort()
return [(li[0][0], li[0][1].getVal()), (li[1][0], li[1][1].getVal())]
self.assertEqual(test_nested_inside_tuple(), [(1, 11), (1, 12)])
with self.assertRaisesRegexWithHighlight(RuntimeError, "bool\' for argument \'reverse", ""):
@torch.jit.script
def test():
li = [Foo(1)]
li.sort(li)
return li
test()
with self.assertRaisesRegexWithHighlight(RuntimeError, "must define a __lt__", ""):
@torch.jit.script
class NoMethod(object):
def __init__(self):
pass
@torch.jit.script
def test():
li = [NoMethod(), NoMethod()]
li.sort()
return li
test()
@torch.jit.script
class WrongLt(object):
def __init__(self):
pass
# lt method defined with the wrong signature
def __lt__(self, other):
pass
with self.assertRaisesRegexWithHighlight(RuntimeError, "must define a __lt__", ""):
@torch.jit.script
def test():
li = [WrongLt(), WrongLt()]
li.sort()
return li
test()
def test_class_inheritance(self):
@torch.jit.script
class Base(object):
def __init__(self):
self.b = 2
def two(self, x):
return x + self.b
with self.assertRaisesRegexWithHighlight(RuntimeError, "does not support inheritance", ""):
@torch.jit.script
class Derived(Base):
def two(self, x):
return x + self.b + 2
def test_class_inheritance_implicit(self):
"""
Test that inheritance is detected in
implicit scripting codepaths (e.g. try_ann_to_type).
"""
class A:
def __init__(self, t):
self.t = t
@staticmethod
def f(a: torch.Tensor):
return A(a + 1)
class B(A):
def __init__(self, t):
self.t = t + 10
@staticmethod
def f(a: torch.Tensor):
return A(a + 1)
x = A(torch.tensor([3]))
def fun(x: Any):
if isinstance(x, A):
return A.f(x.t)
else:
return B.f(x.t)
with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", ""):
sc = torch.jit.script(fun)
@skipIfTorchDynamo("Test does not work with TorchDynamo")
@unittest.skipIf(IS_SANDCASTLE, "Importing like this doesn't work in fbcode")
def test_imported_classes(self):
import jit._imported_class_test.foo
import jit._imported_class_test.bar
import jit._imported_class_test.very.very.nested
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
foo = jit._imported_class_test.foo.FooSameName(a)
bar = jit._imported_class_test.bar.FooSameName(a)
three = jit._imported_class_test.very.very.nested.FooUniqueName(a)
return foo.x + bar.y + three.y
m = MyMod()
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# classes are globally registered for now, so we need to clear the JIT
# registry to simulate loading a new model
torch.testing._internal.jit_utils.clear_class_registry()
buffer.seek(0)
m_loaded = torch.jit.load(buffer)
input = torch.rand(2, 3)
output = m_loaded(input)
self.assertEqual(3 * input, output)
def test_interface(self):
@torch.jit.script
class Foo(object):
def __init__(self):
pass
def one(self, x, y):
return x + y
def two(self, x):
return 2 * x
@torch.jit.script
class Bar(object):
def __init__(self):
pass
def one(self, x, y):
return x * y
def two(self, x):
return 2 / x
@torch.jit.interface
class OneTwo(object):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
@torch.jit.interface
class OneTwoThree(object):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def three(self, x: torch.Tensor) -> torch.Tensor:
pass
@torch.jit.interface
class OneTwoWrong(object):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: int) -> int:
pass
@torch.jit.script
class NotMember(object):
def __init__(self):
pass
def one(self, x, y):
return x + y
# missing two
@torch.jit.script
class NotMember2(object):
def __init__(self):
pass
def one(self, x, y):
return x + y
def two(self, x: int) -> int:
return 3
make_global(Foo, Bar, OneTwo, OneTwoThree, OneTwoWrong, NotMember, NotMember2)
def use_them(x):
a = Foo()
b = Bar()
c = torch.jit.annotate(List[OneTwo], [a, b])
for i in range(len(c)):
x = c[i].one(x, x)
x = c[i].two(x)
return x
self.checkScript(use_them, (torch.rand(3, 4),))
@torch.jit.script
def as_interface(x: OneTwo) -> OneTwo:
return x
@torch.jit.script
def inherit(x: OneTwoThree) -> OneTwo:
return as_interface(x)
with self.assertRaisesRegexWithHighlight(RuntimeError, "does not have method", ""):
@torch.jit.script
def wrong1():
return as_interface(NotMember())
with self.assertRaisesRegexWithHighlight(RuntimeError, "is not compatible with interface", ""):
@torch.jit.script
def wrong2():
return as_interface(NotMember2())
with self.assertRaisesRegexWithHighlight(RuntimeError, "does not have method", ""):
@torch.jit.script
def wrong3():
return inherit(as_interface(Foo()))
with self.assertRaisesRegexWithHighlight(RuntimeError, "is not compatible with interface", ""):
@torch.jit.script
def wrong4(x: OneTwoWrong) -> int:
return as_interface(x)
# Test interface/class python assignment
class TestPyAssign(nn.Module):
def __init__(self):
super(TestPyAssign, self).__init__()
self.proxy_mod = Foo()
def forward(self, x):
return self.proxy_mod.two(x)
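        # Annotating proxy_mod with the OneTwo interface type makes scripting treat the
        # assigned Foo instance as an interface-typed attribute.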
TestPyAssign.__annotations__ = {'proxy_mod': OneTwo}
input = torch.rand(3, 4)
scripted_pyassign_mod = torch.jit.script(TestPyAssign())
imported_mod = self.getExportImportCopy(scripted_pyassign_mod)
self.assertEqual(scripted_pyassign_mod(input), imported_mod(input))
class TestPyAssignError(nn.Module):
def __init__(self, obj):
super(TestPyAssignError, self).__init__()
self.proxy_mod = obj
def forward(self, x):
return self.proxy_mod.two(x)
TestPyAssignError.__annotations__ = {'proxy_mod': OneTwoThree}
with self.assertRaisesRegexWithHighlight(RuntimeError,
"is not compatible with interface __torch__", ""):
torch.jit.script(TestPyAssignError(Foo()))
# test pure python object assignment to interface fails
class PyClass(object):
def __init__(self):
pass
with self.assertRaisesRegexWithHighlight(RuntimeError,
"the value is not a TorchScript compatible type", ""):
torch.jit.script(TestPyAssignError(PyClass()))
# TODO test: interface-interface class-interface inheritance errors,
# NamedTuple inheritance errors
def test_overloaded_fn(self):
@torch.jit.script
class Foo(object):
def __init__(self, x):
self.x = x
def __len__(self) -> int:
return len(self.x)
def __neg__(self):
self.x = -self.x
return self
def __mul__(self, other: torch.Tensor) -> torch.Tensor:
return self.x * other
def test_overload():
a = Foo(torch.ones([3, 3]))
return len(a), -a * torch.zeros([3, 3])
make_global(Foo) # see [local resolution in python]
self.checkScript(test_overload, ())
# unary ops tested above
# TODO - support compiling classes from strings in jit.CompilationUnit
@torch.jit.script
class MyClass(object):
def __init__(self, x: int) -> None:
self.x = x
def __add__(self, other: int) -> int:
return self.x + other
def __sub__(self, other: int) -> int:
return self.x - other
def __mul__(self, other: int) -> int:
return self.x * other
def __pow__(self, other: int) -> int:
return int(self.x ** other)
def __truediv__(self, other: int) -> float:
return self.x / other
def __mod__(self, other: int) -> int:
return self.x % other
def __ne__(self, other: int) -> bool:
return self.x != other
def __eq__(self, other: int) -> bool:
return self.x == other
def __lt__(self, other: int) -> bool:
return self.x < other
def __gt__(self, other: int) -> bool:
return self.x > other
def __le__(self, other: int) -> bool:
return self.x <= other
def __ge__(self, other: int) -> bool:
return self.x >= other
def __and__(self, other: int) -> int:
return self.x & other
def __or__(self, other: int) -> int:
return self.x | other
def __xor__(self, other: int) -> int:
return self.x ^ other
def __getitem__(self, other: int) -> int:
return other + 1
def __setitem__(self, idx: int, val: int) -> None:
self.x = val * idx
def __call__(self, val: int) -> int:
return self.x * val * 3
        make_global(MyClass)  # see [local resolution in python]
def add():
return MyClass(4) + 3
def sub(): # noqa: E306
return MyClass(4) - 3
def mul(): # noqa: E306
return MyClass(4) * 3
def pow(): # noqa: E306
return MyClass(4) ** 3
def truediv(): # noqa: E306
return MyClass(4) / 3
def ne(): # noqa: E306
return MyClass(4) != 3
def eq(): # noqa: E306
return MyClass(4) == 3
def lt(): # noqa: E306
return MyClass(4) < 3
def gt(): # noqa: E306
return MyClass(4) > 3
def le(): # noqa: E306
return MyClass(4) <= 3
def ge(): # noqa: E306
return MyClass(4) >= 3
def _and(): # noqa: E306
return MyClass(4) & 3
def _or(): # noqa: E306
return MyClass(4) | 3
def _xor(): # noqa: E306
return MyClass(4) ^ 3
def getitem(): # noqa: E306
return MyClass(4)[1]
def setitem(): # noqa: E306
a = MyClass(4)
a[1] = 5
return a.x
def call(): # noqa: E306
a = MyClass(5)
return a(2)
ops = [add, sub, mul, pow, ne, eq, lt, gt, le, ge, _and, _or, _xor, getitem, setitem, call]
ops.append(truediv)
for func in ops:
self.checkScript(func, ())
with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", ""):
@torch.jit.script
def test():
return Foo(torch.tensor(1)) + Foo(torch.tensor(1))
def test_cast_overloads(self):
@torch.jit.script
class Foo(object):
def __init__(self, val: float) -> None:
self.val = val
def __int__(self):
return int(self.val)
def __float__(self):
return self.val
def __bool__(self):
return bool(self.val)
def __str__(self):
return str(self.val)
make_global(Foo) # see [local resolution in python]
def test(foo: Foo) -> Tuple[int, float, bool]:
if foo:
pass
return int(foo), float(foo), bool(foo)
fn = torch.jit.script(test)
self.assertEqual(fn(Foo(0.5)), test(0.5))
self.assertEqual(fn(Foo(0.)), test(0.0))
# str has slightly different formatting
self.assertTrue("0.5" in (str(Foo(0.5))))
self.assertTrue("0." in (str(Foo(0.0))))
@torch.jit.script
class BadBool(object):
def __init__(self):
pass
def __bool__(self):
return (1, 2)
with self.assertRaisesRegexWithHighlight(RuntimeError, "expected a bool expression for condition", ""):
@torch.jit.script
def test():
if BadBool():
print(1)
pass
def test_init_compiled_first(self):
@torch.jit.script # noqa: B903
class Foo(object): # noqa: B903
def __before_init__(self):
# accessing this field should not throw, since __init__ should be compiled
return self.x
def __init__(self, x, y):
self.x = x
self.y = y
def test_class_constructs_itself(self):
@torch.jit.script # noqa: B903
class LSTMStateStack(object): # noqa: B903
def __init__(self, num_layers: int, hidden_size: int) -> None:
self.num_layers = num_layers
self.hidden_size = hidden_size
self.last_state = (
torch.zeros(num_layers, 1, hidden_size),
torch.zeros(num_layers, 1, hidden_size),
)
self.stack = [(self.last_state[0][-1], self.last_state[0][-1])]
def copy(self):
# should be able to construct a class inside its own methods
other = LSTMStateStack(self.num_layers, self.hidden_size)
other.stack = list(self.stack)
return other
def test_optional_type_promotion(self):
@torch.jit.script
class Leaf(object):
def __init__(self):
self.x = 1
# should not throw
@torch.jit.script # noqa: B903
class Tree(object): # noqa: B903
def __init__(self):
self.child = torch.jit.annotate(Optional[Leaf], None)
def add_child(self, child: Leaf) -> None:
self.child = child
def test_recursive_class(self):
"""
Recursive class types not yet supported. We should give a good error message.
"""
with self.assertRaises(RuntimeError):
@torch.jit.script # noqa: B903
class Tree(object): # noqa: B903
def __init__(self):
self.parent = torch.jit.annotate(Optional[Tree], None)
def test_class_constant(self):
class M(torch.nn.Module):
__constants__ = ["w"]
def __init__(self, w):
super(M, self).__init__()
self.w = w
def forward(self, x):
# Make sure class constant is accessible in method
y = self.w
return x, y
# Test serialization/deserialization of class constant
for c in (2, 1.0, None, True, 'str', (2, 3), [5.9, 7.3]):
m = torch.jit.script(M(c))
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
m_loaded = torch.jit.load(buffer)
input = torch.rand(2, 3)
self.assertEqual(m(input), m_loaded(input))
# Make sure class constant is accessible from module
self.assertEqual(m.w, m_loaded.w)
def test_py_class_to_ivalue_missing_attribute(self):
class Foo(object):
i : int
f : float
def __init__(self, i : int, f : float):
self.i = i
self.f = f
make_global(Foo) # see [local resolution in python]
@torch.jit.script
def test_fn(x : Foo) -> float:
return x.i + x.f
test_fn(Foo(3, 4.0))
with self.assertRaisesRegexWithHighlight(RuntimeError, 'missing attribute i', ""):
test_fn(torch.rand(3, 4))
def test_unused_method(self):
"""
Test unused methods on scripted classes.
"""
@torch.jit.script
class Unused(object):
def __init__(self):
self.count: int = 0
self.items: List[int] = []
def used(self):
self.count += 1
return self.count
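            # This method is not scriptable (next() on a List, **kwargs), so it is marked
            # @torch.jit.unused; calling it from scripted code raises torch.jit.Error.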
@torch.jit.unused
def unused(self, x: int, y: Iterable[int], **kwargs) -> int:
a = next(self.items)
return a
def uses_unused(self) -> int:
return self.unused(y="hi", x=3)
class ModuleWithUnused(nn.Module):
def __init__(self):
super().__init__()
self.obj = Unused()
def forward(self):
return self.obj.used()
@torch.jit.export
def calls_unused(self):
return self.obj.unused(3, "hi")
@torch.jit.export
def calls_unused_indirectly(self):
return self.obj.uses_unused()
python_module = ModuleWithUnused()
script_module = torch.jit.script(ModuleWithUnused())
        # Forward should work because it does not use any methods marked unused.
self.assertEqual(python_module.forward(), script_module.forward())
# Calling a method marked unused should throw.
with self.assertRaises(torch.jit.Error):
script_module.calls_unused()
with self.assertRaises(torch.jit.Error):
script_module.calls_unused_indirectly()
def test_self_referential_method(self):
"""
Test that a scripted class can have a method that refers to the class itself
in its type annotations.
"""
@torch.jit.script
class Meta(object):
def __init__(self, a: int):
self.a = a
def method(self, other: List['Meta']) -> 'Meta':
return Meta(len(other))
class ModuleWithMeta(torch.nn.Module):
def __init__(self, a: int):
super().__init__()
self.meta = Meta(a)
def forward(self):
new_obj = self.meta.method([self.meta])
return new_obj.a
self.checkModule(ModuleWithMeta(5), ())
def test_type_annotation(self):
"""
Test that annotating container attributes with types works correctly
"""
@torch.jit.script
class CompetitiveLinkingTokenReplacementUtils:
def __init__(self):
self.my_list : List[Tuple[float, int, int]] = []
self.my_dict : Dict[int, int] = {}
@torch.jit.script
def foo():
y = CompetitiveLinkingTokenReplacementUtils()
new_dict : Dict[int, int] = {1: 1, 2: 2}
y.my_dict = new_dict
new_list : List[Tuple[float, int, int]] = [(1.0, 1, 1)]
y.my_list = new_list
return y
def test_default_args(self):
"""
Test that methods on class types can have default arguments.
"""
@torch.jit.script
class ClassWithDefaultArgs:
def __init__(
self,
a: int = 1,
b: Optional[List[int]] = None,
c: Tuple[int, int, int] = (1, 2, 3),
d: Optional[Dict[int, int]] = None,
e: Optional[str] = None,
):
self.int = a
self.tup = c
self.str = e
self.list = [1, 2, 3]
if b is not None:
self.list = b
self.dict = {1: 2, 3: 4}
if d is not None:
self.dict = d
def add(self, b: int, scale: float = 1.0) -> float:
return self.int * scale + b
def all_defaults() -> int:
obj: ClassWithDefaultArgs = ClassWithDefaultArgs()
return obj.int + obj.list[2] + obj.tup[1]
def some_defaults() -> int:
obj: ClassWithDefaultArgs = ClassWithDefaultArgs(b=[5, 6, 7])
return obj.int + obj.list[2] + obj.dict[1]
def override_defaults() -> int:
obj: ClassWithDefaultArgs = ClassWithDefaultArgs(3, [9, 10, 11], (12, 13, 14), {3: 4}, "str")
s: int = obj.int
for x in obj.list:
s += x
for y in obj.tup:
s += y
s += obj.dict[3]
st = obj.str
if st is not None:
s += len(st)
return s
def method_defaults() -> float:
obj: ClassWithDefaultArgs = ClassWithDefaultArgs()
return obj.add(3) + obj.add(3, 0.25)
self.checkScript(all_defaults, ())
self.checkScript(some_defaults, ())
self.checkScript(override_defaults, ())
self.checkScript(method_defaults, ())
# The constructor of this class below has some arguments without default values.
class ClassWithSomeDefaultArgs: # noqa: B903
def __init__(
self,
a: int,
b: int = 1,
):
self.a = a
self.b = b
def default_b() -> int:
obj: ClassWithSomeDefaultArgs = ClassWithSomeDefaultArgs(1)
return obj.a + obj.b
def set_b() -> int:
obj: ClassWithSomeDefaultArgs = ClassWithSomeDefaultArgs(1, 4)
return obj.a + obj.b
self.checkScript(default_b, ())
self.checkScript(set_b, ())
# The constructor of this class below has mutable arguments. This should throw
# an error.
class ClassWithMutableArgs: # noqa: B903
def __init__(
self,
a: List[int] = [1, 2, 3], # noqa: B006
):
self.a = a
def should_fail():
obj: ClassWithMutableArgs = ClassWithMutableArgs()
with self.assertRaisesRegexWithHighlight(RuntimeError, "Mutable default parameters are not supported", ""):
torch.jit.script(should_fail)
def test_staticmethod(self):
"""
Test static methods on class types.
"""
@torch.jit.script
class ClassWithStaticMethod:
def __init__(self, a: int, b: int):
self.a: int = a
self.b: int = b
def get_a(self):
return self.a
def get_b(self):
return self.b
def __eq__(self, other: 'ClassWithStaticMethod'):
return self.a == other.a and self.b == other.b
# staticmethod that calls constructor.
@staticmethod
def create(args: List['ClassWithStaticMethod']) -> 'ClassWithStaticMethod':
return ClassWithStaticMethod(args[0].a, args[0].b)
# staticmethod that calls another staticmethod.
@staticmethod
def create_from(a: int, b: int) -> 'ClassWithStaticMethod':
a = ClassWithStaticMethod(a, b)
return ClassWithStaticMethod.create([a])
# Script function that calls staticmethod.
def test_function(a: int, b: int) -> 'ClassWithStaticMethod':
return ClassWithStaticMethod.create_from(a, b)
make_global(ClassWithStaticMethod)
self.checkScript(test_function, (1, 2))
def test_classmethod(self):
"""
Test classmethods on class types.
"""
@torch.jit.script
class ClassWithClassMethod:
def __init__(self, a: int):
self.a: int = a
def __eq__(self, other: 'ClassWithClassMethod'):
return self.a == other.a
@classmethod
def create(cls, a: int) -> 'ClassWithClassMethod':
return cls(a)
make_global(ClassWithClassMethod)
def test_function(a: int) -> 'ClassWithClassMethod':
x = ClassWithClassMethod(a)
# Support calling classmethod with an instance
# Calling with the class is not supported.
return x.create(a)
self.checkScript(test_function, (1,))
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_properties(self):
"""
Test that a scripted class can make use of the @property decorator.
"""
def free_function(x: int) -> int:
return x + 1
@torch.jit.script
class Properties(object):
__jit_unused_properties__ = ["unsupported"]
def __init__(self, a: int):
self.a = a
@property
def attr(self) -> int:
return self.a - 1
@property
def unsupported(self) -> int:
return sum([self.a])
@torch.jit.unused
@property
def unsupported_2(self) -> int:
return sum([self.a])
@unsupported_2.setter
def unsupported_2(self, value):
self.a = sum([self.a])
@attr.setter
def attr(self, value: int):
self.a = value + 3
@torch.jit.script
class NoSetter(object):
def __init__(self, a: int):
self.a = a
@property
def attr(self) -> int:
return free_function(self.a)
@torch.jit.script
class MethodThatUsesProperty(object):
def __init__(self, a: int):
self.a = a
@property
def attr(self) -> int:
return self.a - 2
@attr.setter
def attr(self, value: int):
self.a = value + 4
def forward(self):
return self.attr
class ModuleWithProperties(torch.nn.Module):
def __init__(self, a: int):
super().__init__()
self.props = Properties(a)
def forward(self, a: int, b: int, c: int, d: int):
self.props.attr = a
props = Properties(b)
no_setter = NoSetter(c)
method_uses_property = MethodThatUsesProperty(a + b)
props.attr = c
method_uses_property.attr = d
return self.props.attr + no_setter.attr + method_uses_property.forward()
self.checkModule(ModuleWithProperties(5), (5, 6, 7, 8,))
def test_custom_delete(self):
"""
Test that del can be called on an instance of a class that
overrides __delitem__.
"""
class Example(object):
def __init__(self):
self._data: Dict[str, torch.Tensor] = {"1": torch.tensor(1.0)}
def check(self, key: str) -> bool:
return key in self._data
def __delitem__(self, key: str):
del self._data[key]
def fn() -> bool:
example = Example()
del example["1"]
return example.check("1")
self.checkScript(fn, ())
# Test the case in which the class does not have __delitem__ defined.
class NoDelItem(object):
def __init__(self):
self._data: Dict[str, torch.Tensor] = {"1": torch.tensor(1.0)}
def check(self, key: str) -> bool:
return key in self._data
def fn() -> bool:
example = NoDelItem()
key = "1"
del example[key]
return example.check(key)
with self.assertRaisesRegexWithHighlight(RuntimeError, r"Class does not define __delitem__", "example[key]"):
self.checkScript(fn, ())
def test_recursive_script_builtin_type_resolution(self):
"""
        Test resolution of built-in torch types (e.g. torch.Tensor, torch.device) when a class is recursively compiled.
"""
# A will be implicitly compiled because it is not annotated with @torch.jit.script
# but is used in g() below.
tensor_t = torch.Tensor
device_t = torch.device
device_ty = torch.device
class A(object):
def __init__(self):
pass
def f(self, x: tensor_t, y: torch.device) -> tensor_t:
return x.to(device=y)
def g(self, x: device_t) -> device_ty:
return x
def h(self, a: 'A') -> 'A':
return A()
def i(self, a: List[int]) -> int:
return a[0]
def j(self, l: List[device_t]) -> device_ty:
return l[0]
def call_f():
a = A()
return a.f(torch.tensor([1]), torch.device("cpu"))
def call_g():
a = A()
return a.g(torch.device("cpu"))
def call_i():
a = A()
return a.i([3])
def call_j():
a = A()
return a.j([torch.device("cpu"), torch.device("cpu")])
for fn in [call_f, call_g, call_i, call_j]:
self.checkScript(fn, ())
s = self.getExportImportCopy(torch.jit.script(fn))
self.assertEqual(s(), fn())
def test_recursive_script_module_builtin_type_resolution(self):
"""
        Test resolution of built-in torch types (e.g. torch.Tensor, torch.device) when a class is recursively compiled
        while compiling a module.
"""
class Wrapper():
def __init__(self, t):
self.t = t
def to(self, l: List[torch.device], device: Optional[torch.device] = None):
return self.t.to(device=device)
class A(nn.Module):
def forward(self):
return Wrapper(torch.rand(4, 4))
scripted = torch.jit.script(A())
self.getExportImportCopy(scripted)
def test_class_attribute_wrong_type(self):
"""
        Test the error message that is displayed when converting a class type
        to an IValue that has an attribute of the wrong type.
"""
@torch.jit.script # noqa: B903
class ValHolder(object): # noqa: B903
def __init__(self, val):
self.val = val
class Mod(nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.mod1 = ValHolder("1")
self.mod2 = ValHolder("2")
def forward(self, cond: bool):
if cond:
mod = self.mod1
else:
mod = self.mod2
return mod.val
with self.assertRaisesRegexWithHighlight(RuntimeError, "Could not cast attribute 'val' to type Tensor", ""):
torch.jit.script(Mod())
def test_recursive_scripting(self):
"""
        Test that class types are recursively scripted when a Python instance of one
is encountered as a module attribute.
"""
class Class(object):
def __init__(self, a: int):
self.a = a
def get_a(self) -> int:
return self.a
class M(torch.nn.Module):
def __init__(self, obj):
super().__init__()
self.obj = obj
def forward(self) -> int:
return self.obj.get_a()
self.checkModule(M(Class(4)), ())
def test_recursive_scripting_failed(self):
"""
        Test that class-typed module attributes that fail to script
are added as failed attributes and do not cause compilation itself
to fail unless they are used in scripted code.
"""
class UnscriptableClass(object):
def __init__(self, a: int):
self.a = a
def get_a(self) -> bool:
return issubclass(self.a, int)
# This Module has an attribute of type UnscriptableClass
# and tries to use it in scripted code. This should fail.
class ShouldNotCompile(torch.nn.Module):
def __init__(self, obj):
super().__init__()
self.obj = obj
def forward(self) -> bool:
return self.obj.get_a()
with self.assertRaisesRegexWithHighlight(RuntimeError, "failed to convert Python type", ""):
torch.jit.script(ShouldNotCompile(UnscriptableClass(4)))
# This Module has an attribute of type UnscriptableClass
# and does not try to use it in scripted code. This should not fail.
class ShouldCompile(torch.nn.Module):
def __init__(self, obj):
super().__init__()
self.obj = obj
@torch.jit.ignore
def ignored_method(self) -> bool:
return self.obj.get_a()
def forward(self, x: int) -> int:
return x + x
self.checkModule(ShouldCompile(UnscriptableClass(4)), (4,))
def test_unresolved_class_attributes(self):
class UnresolvedAttrClass(object):
def __init__(self):
pass
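            # The class attributes declared below (via tuple/list unpacking and via an
            # annotation) cannot be resolved on instances in TorchScript; accessing them
            # should raise the error checked at the end of this test.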
(attr_a, attr_b), [attr_c, attr_d] = ("", ""), ["", ""]
attr_e: int = 0
def fn_a():
u = UnresolvedAttrClass()
return u.attr_a
def fn_b():
u = UnresolvedAttrClass()
return u.attr_b
def fn_c():
u = UnresolvedAttrClass()
return u.attr_c
def fn_d():
u = UnresolvedAttrClass()
return u.attr_d
def fn_e():
u = UnresolvedAttrClass()
return u.attr_e
error_message_regex = "object has no attribute or method.*is defined as a class attribute"
for fn in (fn_a, fn_b, fn_c, fn_d, fn_e):
with self.assertRaisesRegex(RuntimeError, error_message_regex):
torch.jit.script(fn)
|
pytorch-master
|
test/jit/test_class_type.py
|
# Owner(s): ["oncall: jit"]
from torch.testing._internal.jit_utils import JitTestCase
import torch
import torch._C
from torch.testing import FileCheck
class TestGraphRewritePasses(JitTestCase):
def test_fuse_linear(self):
class FunctionalLinear(torch.nn.Module):
def __init__(self, weight, bias):
super(FunctionalLinear, self).__init__()
self.weight = weight
self.bias = bias
def forward(self, x):
res = torch.matmul(x, self.weight.t())
if self.bias is not None:
res.add_(self.bias)
return res
x1 = torch.rand(3)
w1 = torch.rand(5, 3)
b1 = torch.rand(5)
for has_bias in [True, False]:
bias = b1 if has_bias else None
model = torch.jit.trace(FunctionalLinear(w1, bias), [x1])
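            # Record the source range of the original aten::matmul so we can verify that
            # the fused aten::linear node preserves it after the pass runs.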
for node in model.graph.nodes():
if node.kind() == "aten::matmul":
source_range_1 = node.sourceRange()
torch._C._jit_pass_fuse_linear(model.graph)
for node in model.graph.nodes():
if node.kind() == "aten::linear":
source_range_2 = node.sourceRange()
FileCheck().check("aten::linear").run(model.graph)
check_not = ["aten::matmul", "aten::addmm", "aten::add_", "aten::t("]
for cn in check_not:
FileCheck().check_not(cn).run(model.graph)
self.assertTrue(source_range_1 == source_range_2)
# make sure it runs
model(x1)
# check matmuls are not fused
class Matmul(torch.nn.Module):
def __init__(self, weight):
super(Matmul, self).__init__()
self.weight = weight
def forward(self, x):
return torch.matmul(x, self.weight)
x = torch.rand(5, 6, 5)
w = torch.rand(5, 5, 100)
model = torch.jit.trace(Matmul(w), [x])
torch._C._jit_pass_fuse_linear(model.graph)
# check 3d matmul is not fused
FileCheck().check("aten::matmul").run(model.graph)
FileCheck().check_not("aten::linear").run(model.graph)
# make sure it runs
model(x)
|
pytorch-master
|
test/jit/test_graph_rewrite_passes.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TestPythonBindings\n\n"
"instead."
)
class TestPythonBindings(JitTestCase):
def test_cu_get_functions(self):
@torch.jit.script
def test_get_python_cu_fn(x: torch.Tensor):
return 2 * x
cu = torch.jit._state._python_cu
self.assertTrue(
"test_get_python_cu_fn" in (str(fn.name) for fn in cu.get_functions())
)
def test_cu_create_function(self):
@torch.jit.script
def fn(x: torch.Tensor):
return 2 * x
cu = torch._C.CompilationUnit()
cu.create_function("test_fn", fn.graph)
inp = torch.randn(5)
self.assertEqual(inp * 2, cu.find_function("test_fn")(inp))
self.assertEqual(cu.find_function("doesnt_exist"), None)
self.assertEqual(inp * 2, cu.test_fn(inp))
with self.assertRaises(AttributeError):
cu.doesnt_exist(inp)
def test_invalidation(self):
@torch.jit.script
def test_invalidation_fn(x: torch.Tensor):
return 2 * x
gr = test_invalidation_fn.graph.copy()
n = gr.insertNode(gr.create("prim::profile"))
v = n.output()
# check that they work
str((n, v))
torch._C._jit_pass_dce(gr)
with self.assertRaisesRegex(RuntimeError, "invalidated"):
str(n)
with self.assertRaisesRegex(RuntimeError, "invalidated"):
str(v)
def test_graph_iterator_keepalive(self):
@torch.jit.script
def test_iterator_keepalive_fn(x: torch.Tensor):
return 2 * x
        # constructing these lists used to segfault because inlined_graph is a
        # temporary and had already been deleted (see issue #50454)
n = test_iterator_keepalive_fn.inlined_graph.nodes()
list(n)
i = test_iterator_keepalive_fn.inlined_graph.inputs()
list(i)
o = test_iterator_keepalive_fn.inlined_graph.outputs()
list(o)
def test_aliasdb(self):
@torch.jit.script
def test_aliasdb_fn(x: torch.Tensor):
return 2 * x
gr = test_aliasdb_fn.graph.copy()
alias_db = gr.alias_db()
self.assertTrue("WILDCARD" in str(alias_db))
self.assertTrue("digraph alias_db" in alias_db.to_graphviz_str())
def test_graph_create(self):
gr = torch._C.Graph()
with self.assertRaises(ValueError):
gr.create("prim::Constant", [None])
def test_canonicalize(self):
ir = """
graph(%p207 : Tensor,
%1 : Tensor,
%p407 : int):
%11 : Tensor = aten::view_expand_placeholder(%1)
%12 : Tensor = aten::pointwise_placeholder(%11, %p207, %p407)
%13 : Tensor = aten::view_expand_placeholder(%12)
%14 : Tensor = aten::pointwise_placeholder(%13)
return (%14)
"""
graph1 = torch._C.parse_ir(ir)
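        # With the second argument True (the default), canonicalization preserves the
        # original value names (e.g. %p207); passing False renames them, as checked below.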
graph1 = torch._C._jit_pass_canonicalize(graph1, True)
graph2 = torch._C.parse_ir(ir)
graph2 = torch._C._jit_pass_canonicalize(graph2)
self.assertEqual(str(graph1), str(graph2))
FileCheck().check("%p207").check_not("%14").run(graph1)
graph3 = torch._C.parse_ir(ir)
graph3 = torch._C._jit_pass_canonicalize(graph3, False)
FileCheck().check_not("%p207").run(graph3)
|
pytorch-master
|
test/jit/test_python_bindings.py
|
# Owner(s): ["oncall: jit"]
from collections import namedtuple
from typing import Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
from textwrap import dedent
from jit.test_module_interface import TestModuleInterface # noqa: F401
import inspect
import os
import sys
import torch
import torch.testing._internal.jit_utils
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestTypesAndAnnotation(JitTestCase):
def test_pep585_type(self):
# TODO add test to use PEP585 type annotation for return type after py3.9
# see: https://www.python.org/dev/peps/pep-0585/#id5
def fn(x: torch.Tensor) -> Tuple[Tuple[torch.Tensor], Dict[str, int]]:
xl: list[tuple[torch.Tensor]] = []
xd: dict[str, int] = {}
xl.append((x,))
xd['foo'] = 1
return xl.pop(), xd
self.checkScript(fn, [torch.randn(2, 2)])
x = torch.randn(2, 2)
expected = fn(x)
scripted = torch.jit.script(fn)(x)
self.assertEqual(expected, scripted)
def test_types_as_values(self):
def fn(m: torch.Tensor) -> torch.device:
return m.device
self.checkScript(fn, [torch.randn(2, 2)])
GG = namedtuple('GG', ['f', 'g'])
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.ignore
def foo(self, x: torch.Tensor, z: torch.Tensor) -> Tuple[GG, GG]:
return GG(x, z), GG(x, z)
def forward(self, x, z):
return self.foo(x, z)
foo = torch.jit.script(Foo())
y = foo(torch.randn(2, 2), torch.randn(2, 2))
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.ignore
def foo(self, x, z) -> Tuple[GG, GG]:
return GG(x, z)
def forward(self, x, z):
return self.foo(x, z)
foo = torch.jit.script(Foo())
y = foo(torch.randn(2, 2), torch.randn(2, 2))
def test_ignore_with_types(self):
@torch.jit.ignore
def fn(x: Dict[str, Optional[torch.Tensor]]):
return x + 10
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> torch.Tensor:
self.dropout_modality(in_batch)
fn(in_batch)
return torch.tensor(1)
@torch.jit.ignore
def dropout_modality(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> Dict[str, Optional[torch.Tensor]]:
return in_batch
sm = torch.jit.script(M())
FileCheck().check("dropout_modality").check("in_batch").run(str(sm.graph))
def test_python_callable(self):
class MyPythonClass(object):
@torch.jit.ignore
def __call__(self, *args) -> str:
return str(type(args[0]))
the_class = MyPythonClass()
@torch.jit.script
def fn(x):
return the_class(x)
# This doesn't involve the string frontend, so don't use checkScript
x = torch.ones(2)
self.assertEqual(fn(x), the_class(x))
def test_bad_types(self):
@torch.jit.ignore
def fn(my_arg):
return my_arg + 10
with self.assertRaisesRegex(RuntimeError, "argument 'my_arg'"):
@torch.jit.script
def other_fn(x):
return fn('2')
def test_type_annotate_py3(self):
def fn():
a : List[int] = []
b : torch.Tensor = torch.ones(2, 2)
c : Optional[torch.Tensor] = None
d : Optional[torch.Tensor] = torch.ones(3, 4)
for _ in range(10):
a.append(4)
c = torch.ones(2, 2)
d = None
return a, b, c, d
self.checkScript(fn, ())
def wrong_type():
wrong : List[int] = [0.5]
return wrong
with self.assertRaisesRegex(RuntimeError, "List type annotation"
r" `List\[int\]` did not match the "
"types of the given list elements"):
torch.jit.script(wrong_type)
def test_optional_no_element_type_annotation(self):
"""
Test that using an optional with no contained types produces an error.
"""
def fn_with_comment(x: torch.Tensor) -> Optional:
return (x, x)
def annotated_fn(x: torch.Tensor) -> Optional:
return (x, x)
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
torch.jit.script(fn_with_comment)
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
torch.jit.script(annotated_fn)
def test_tuple_no_element_type_annotation(self):
"""
Test that using a tuple with no contained types produces an error.
"""
def fn_with_comment(x: torch.Tensor) -> Tuple:
return (x, x)
def annotated_fn(x: torch.Tensor) -> Tuple:
return (x, x)
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
torch.jit.script(fn_with_comment)
with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
torch.jit.script(annotated_fn)
def test_ignoring_module_attributes(self):
"""
Test that module attributes can be ignored.
"""
class Sub(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a: int) -> int:
return sum([a])
class ModuleWithIgnoredAttr(torch.nn.Module):
__jit_ignored_attributes__ = ["a", "sub"]
def __init__(self, a: int, b: int):
super().__init__()
self.a = a
self.b = b
self.sub = Sub()
def forward(self) -> int:
return self.b
@torch.jit.ignore
def ignored_fn(self) -> int:
return self.sub.forward(self.a)
mod = ModuleWithIgnoredAttr(1, 4)
scripted_mod = torch.jit.script(mod)
self.assertEqual(scripted_mod(), 4)
self.assertEqual(scripted_mod.ignored_fn(), 1)
# Test the error message for ignored attributes.
class ModuleUsesIgnoredAttr(torch.nn.Module):
__jit_ignored_attributes__ = ["a", "sub"]
def __init__(self, a: int):
super().__init__()
self.a = a
self.sub = Sub()
def forward(self) -> int:
return self.sub(self.b)
mod = ModuleUsesIgnoredAttr(1)
with self.assertRaisesRegexWithHighlight(RuntimeError, r"attribute was ignored during compilation", "self.sub"):
scripted_mod = torch.jit.script(mod)
def test_unimported_type_resolution(self):
# verify fallback from the python resolver to the c++ resolver
@ torch.jit.script
def fn(x):
# type: (number) -> number
return x + 1
FileCheck().check('Scalar').run(fn.graph)
def test_parser_bug(self):
def parser_bug(o: Optional[torch.Tensor]):
pass
def test_mismatched_annotation(self):
with self.assertRaisesRegex(RuntimeError, 'annotated with type'):
@torch.jit.script
def foo():
x : str = 4
return x
def test_reannotate(self):
with self.assertRaisesRegex(RuntimeError, 'declare and annotate'):
@torch.jit.script
def foo():
x = 5
if 1 == 1:
x : Optional[int] = 7
def test_annotate_outside_init(self):
msg = "annotations on instance attributes must be declared in __init__"
highlight = "self.x: int"
# Simple case
with self.assertRaisesRegexWithHighlight(ValueError, msg, highlight):
@torch.jit.script
class BadModule(object):
def __init__(self, x: int):
self.x = x
def set(self, val: int):
self.x: int = val
# Type annotation in a loop
with self.assertRaisesRegexWithHighlight(ValueError, msg, highlight):
@torch.jit.script
class BadModuleLoop(object):
def __init__(self, x: int):
self.x = x
def set(self, val: int):
for i in range(3):
self.x: int = val
# Type annotation in __init__, should not fail
@torch.jit.script
class GoodModule(object):
def __init__(self, x: int):
self.x: int = x
def set(self, val: int):
self.x = val
def test_inferred_type_error_message(self):
inferred_type = torch._C.InferredType("ErrorReason")
with self.assertRaisesRegex(RuntimeError,
"Tried to get the type from an InferredType but the type is null."):
t = inferred_type.type()
with self.assertRaisesRegex(RuntimeError, "ErrorReason"):
t = inferred_type.type()
|
pytorch-master
|
test/jit/test_types.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing import FileCheck
from enum import Enum
from typing import Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestEnum(JitTestCase):
def test_enum_value_types(self):
class IntEnum(Enum):
FOO = 1
BAR = 2
class FloatEnum(Enum):
FOO = 1.2
BAR = 2.3
class StringEnum(Enum):
FOO = "foo as in foo bar"
BAR = "bar as in foo bar"
make_global(IntEnum, FloatEnum, StringEnum)
@torch.jit.script
def supported_enum_types(a: IntEnum, b: FloatEnum, c: StringEnum):
return (a.name, b.name, c.name)
FileCheck() \
.check("IntEnum") \
.check("FloatEnum") \
.check("StringEnum") \
.run(str(supported_enum_types.graph))
class TensorEnum(Enum):
FOO = torch.tensor(0)
BAR = torch.tensor(1)
make_global(TensorEnum)
def unsupported_enum_types(a: TensorEnum):
return a.name
# TODO: rewrite code so that the highlight is not empty.
with self.assertRaisesRegexWithHighlight(RuntimeError, "Cannot create Enum with value type 'Tensor'", ""):
torch.jit.script(unsupported_enum_types)
def test_enum_comp(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
@torch.jit.script
def enum_comp(x: Color, y: Color) -> bool:
return x == y
FileCheck().check("aten::eq").run(str(enum_comp.graph))
self.assertEqual(enum_comp(Color.RED, Color.RED), True)
self.assertEqual(enum_comp(Color.RED, Color.GREEN), False)
def test_enum_comp_diff_classes(self):
class Foo(Enum):
ITEM1 = 1
ITEM2 = 2
class Bar(Enum):
ITEM1 = 1
ITEM2 = 2
make_global(Foo, Bar)
@torch.jit.script
def enum_comp(x: Foo) -> bool:
return x == Bar.ITEM1
FileCheck() \
.check("prim::Constant") \
.check_same("Bar.ITEM1") \
.check("aten::eq") \
.run(str(enum_comp.graph))
self.assertEqual(enum_comp(Foo.ITEM1), False)
def test_heterogenous_value_type_enum_error(self):
class Color(Enum):
RED = 1
GREEN = "green"
make_global(Color)
def enum_comp(x: Color, y: Color) -> bool:
return x == y
# TODO: rewrite code so that the highlight is not empty.
with self.assertRaisesRegexWithHighlight(RuntimeError, "Could not unify type list", ""):
torch.jit.script(enum_comp)
def test_enum_name(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
@torch.jit.script
def enum_name(x: Color) -> str:
return x.name
FileCheck() \
.check("Color") \
.check_next("prim::EnumName") \
.check_next("return") \
.run(str(enum_name.graph))
self.assertEqual(enum_name(Color.RED), Color.RED.name)
self.assertEqual(enum_name(Color.GREEN), Color.GREEN.name)
def test_enum_value(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
@torch.jit.script
def enum_value(x: Color) -> int:
return x.value
FileCheck() \
.check("Color") \
.check_next("prim::EnumValue") \
.check_next("return") \
.run(str(enum_value.graph))
self.assertEqual(enum_value(Color.RED), Color.RED.value)
self.assertEqual(enum_value(Color.GREEN), Color.GREEN.value)
def test_enum_as_const(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
@torch.jit.script
def enum_const(x: Color) -> bool:
return x == Color.RED
FileCheck() \
.check("prim::Constant[value=__torch__.jit.test_enum.Color.RED]") \
.check_next("aten::eq") \
.check_next("return") \
.run(str(enum_const.graph))
self.assertEqual(enum_const(Color.RED), True)
self.assertEqual(enum_const(Color.GREEN), False)
def test_non_existent_enum_value(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
def enum_const(x: Color) -> bool:
if x == Color.PURPLE:
return True
else:
return False
with self.assertRaisesRegexWithHighlight(RuntimeError, "has no attribute 'PURPLE'", "Color.PURPLE"):
torch.jit.script(enum_const)
def test_enum_ivalue_type(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
@torch.jit.script
def is_color_enum(x: Any):
return isinstance(x, Color)
FileCheck() \
.check("prim::isinstance[types=[Enum<__torch__.jit.test_enum.Color>]]") \
.check_next("return") \
.run(str(is_color_enum.graph))
self.assertEqual(is_color_enum(Color.RED), True)
self.assertEqual(is_color_enum(Color.GREEN), True)
self.assertEqual(is_color_enum(1), False)
def test_closed_over_enum_constant(self):
class Color(Enum):
RED = 1
GREEN = 2
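        # Alias the enum class (and, below, one of its members) in the enclosing scope;
        # scripting should fold the closed-over value into a prim::Constant.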
a = Color
@torch.jit.script
def closed_over_aliased_type():
return a.RED.value
FileCheck() \
.check("prim::Constant[value={}]".format(a.RED.value)) \
.check_next("return") \
.run(str(closed_over_aliased_type.graph))
self.assertEqual(closed_over_aliased_type(), Color.RED.value)
b = Color.RED
@torch.jit.script
def closed_over_aliased_value():
return b.value
FileCheck() \
.check("prim::Constant[value={}]".format(b.value)) \
.check_next("return") \
.run(str(closed_over_aliased_value.graph))
self.assertEqual(closed_over_aliased_value(), Color.RED.value)
def test_enum_as_module_attribute(self):
class Color(Enum):
RED = 1
GREEN = 2
class TestModule(torch.nn.Module):
def __init__(self, e: Color):
super(TestModule, self).__init__()
self.e = e
def forward(self):
return self.e.value
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
FileCheck() \
.check("TestModule") \
.check_next("Color") \
.check_same("prim::GetAttr[name=\"e\"]") \
.check_next("prim::EnumValue") \
.check_next("return") \
.run(str(scripted.graph))
self.assertEqual(scripted(), Color.RED.value)
def test_string_enum_as_module_attribute(self):
class Color(Enum):
RED = "red"
GREEN = "green"
class TestModule(torch.nn.Module):
def __init__(self, e: Color):
super(TestModule, self).__init__()
self.e = e
def forward(self):
return (self.e.name, self.e.value)
make_global(Color)
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
self.assertEqual(scripted(), (Color.RED.name, Color.RED.value))
def test_enum_return(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
@torch.jit.script
def return_enum(cond: bool):
if cond:
return Color.RED
else:
return Color.GREEN
self.assertEqual(return_enum(True), Color.RED)
self.assertEqual(return_enum(False), Color.GREEN)
def test_enum_module_return(self):
class Color(Enum):
RED = 1
GREEN = 2
class TestModule(torch.nn.Module):
def __init__(self, e: Color):
super(TestModule, self).__init__()
self.e = e
def forward(self):
return self.e
make_global(Color)
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
FileCheck() \
.check("TestModule") \
.check_next("Color") \
.check_same("prim::GetAttr[name=\"e\"]") \
.check_next("return") \
.run(str(scripted.graph))
self.assertEqual(scripted(), Color.RED)
def test_enum_iterate(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
def iterate_enum(x: Color):
res: List[int] = []
for e in Color:
if e != x:
res.append(e.value)
return res
make_global(Color)
scripted = torch.jit.script(iterate_enum)
FileCheck() \
.check("Enum<__torch__.jit.test_enum.Color>[]") \
.check_same("Color.RED") \
.check_same("Color.GREEN") \
.check_same("Color.BLUE") \
.run(str(scripted.graph))
        # BLUE always appears last because we follow Python's Enum definition order.
self.assertEqual(scripted(Color.RED), [Color.GREEN.value, Color.BLUE.value])
self.assertEqual(scripted(Color.GREEN), [Color.RED.value, Color.BLUE.value])
# Tests that explicitly and/or repeatedly scripting an Enum class is permitted.
def test_enum_explicit_script(self):
@torch.jit.script
class Color(Enum):
RED = 1
GREEN = 2
torch.jit.script(Color)
|
pytorch-master
|
test/jit/test_enum.py
|
pytorch-master
|
test/jit/_imported_class_test/__init__.py
|
|
import torch
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
@torch.jit.script # noqa: B903
class FooSameName(object): # noqa: B903
def __init__(self, y):
self.y = y
|
pytorch-master
|
test/jit/_imported_class_test/bar.py
|
import torch
from . import bar
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
self.x = x
self.nested = bar.FooSameName(x)
|
pytorch-master
|
test/jit/_imported_class_test/foo.py
|
pytorch-master
|
test/jit/_imported_class_test/very/__init__.py
|
|
pytorch-master
|
test/jit/_imported_class_test/very/very/__init__.py
|
|
import torch
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
@torch.jit.script # noqa: B903
class FooUniqueName(object): # noqa: B903
def __init__(self, y):
self.y = y
|
pytorch-master
|
test/jit/_imported_class_test/very/very/nested.py
|
import torch
from typing import Union
class TestVersionedDivTensorExampleV7(torch.nn.Module):
def __init__(self):
super(TestVersionedDivTensorExampleV7, self).__init__()
def forward(self, a, b):
result_0 = a / b
result_1 = torch.div(a, b)
result_2 = a.div(b)
return result_0, result_1, result_2
class TestVersionedLinspaceV7(torch.nn.Module):
def __init__(self):
super(TestVersionedLinspaceV7, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
c = torch.linspace(a, b, steps=5)
d = torch.linspace(a, b)
return c, d
class TestVersionedLinspaceOutV7(torch.nn.Module):
def __init__(self):
super(TestVersionedLinspaceOutV7, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
return torch.linspace(a, b, out=out)
class TestVersionedLogspaceV8(torch.nn.Module):
def __init__(self):
super(TestVersionedLogspaceV8, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
c = torch.logspace(a, b, steps=5)
d = torch.logspace(a, b)
return c, d
class TestVersionedLogspaceOutV8(torch.nn.Module):
def __init__(self):
super(TestVersionedLogspaceOutV8, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
return torch.logspace(a, b, out=out)
class TestVersionedGeluV9(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch._C._nn.gelu(x)
class TestVersionedGeluOutV9(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
out = torch.zeros_like(x)
return torch._C._nn.gelu(x, out=out)
|
pytorch-master
|
test/jit/fixtures_srcs/fixtures_src.py
|
import io
import logging
import sys
import zipfile
from pathlib import Path
from typing import Set
import torch
# Use a star import so developers don't need to add an import here when they add tests for upgraders.
from test.jit.fixtures_srcs.fixtures_src import * # noqa: F403
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
"""
This file is used to generate models for testing operator changes. Please refer to
https://github.com/pytorch/rfcs/blob/master/RFC-0017-PyTorch-Operator-Versioning.md for more details.
A systematic workflow for changing operators is needed to ensure
Backwards Compatibility (BC) / Forwards Compatibility (FC). For a BC-breaking operator change,
an upgrader is needed. Here is the flow to properly land such a change.
1. Write an upgrader in the caffe2/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp file. The softly enforced
naming format is <operator_name>_<operator_overload>_<start>_<end>. For example, the entry below means that
div.Tensor at versions 0 through 3 needs to be replaced by this upgrader.
```
/*
div_Tensor_0_3 is added for a change of operator div in pr xxxxxxx.
Create date: 12/02/2021
Expire date: 06/02/2022
*/
{"div_Tensor_0_3", R"SCRIPT(
def div_Tensor_0_3(self: Tensor, other: Tensor) -> Tensor:
if (self.is_floating_point() or other.is_floating_point()):
return self.true_divide(other)
return self.divide(other, rounding_mode='trunc')
)SCRIPT"},
```
2. In caffe2/torch/csrc/jit/operator_upgraders/version_map.h, add changes like below.
You will need to make sure that the entry is SORTED according to the version bump number.
```
{"div.Tensor",
{{4,
"div_Tensor_0_3",
"aten::div.Tensor(Tensor self, Tensor other) -> Tensor"}}},
```
3. After rebuilding PyTorch, run the following command; it will auto-generate a change to
fbcode/caffe2/torch/csrc/jit/mobile/upgrader_mobile.cpp
```
python pytorch/torchgen/operator_versions/gen_mobile_upgraders.py
```
4. Generate the test to cover upgrader.
4.1 Switch to the commit before the operator change, and add a module in
`test/jit/fixtures_srcs/fixtures_src.py`. The reason for switching commits is that
an old model containing the operator before the change is needed to ensure the upgrader
works as expected. In `test/jit/fixtures_srcs/generate_models.py`, add the module and
its corresponding changed operator like the following
```
ALL_MODULES = {
TestVersionedDivTensorExampleV7(): "aten::div.Tensor",
}
```
This module should include the changed operator. If the operator isn't covered in the model,
the model export process in step 4.2 will fail.
4.2 Export the model to `test/jit/fixtures` by running
```
python /Users/chenlai/pytorch/test/jit/fixtures_srcs/generate_models.py
```
4.3 In `test/jit/test_save_load_for_op_version.py`, add a test to cover the old models and
ensure the result from the old module plus upgrader is equivalent to the current module.
4.4 Save all changes from 4.1, 4.2 and 4.3, as well as the changes made in steps 1, 2 and 3,
then submit a PR.
"""
"""
A map of test modules to their corresponding changed operators
key: test module
value: changed operator
"""
ALL_MODULES = {
TestVersionedDivTensorExampleV7(): "aten::div.Tensor",
TestVersionedLinspaceV7(): "aten::linspace",
TestVersionedLinspaceOutV7(): "aten::linspace.out",
TestVersionedLogspaceV8(): "aten::logspace",
TestVersionedLogspaceOutV8(): "aten::logspace.out",
TestVersionedGeluV9(): "aten::gelu",
TestVersionedGeluOutV9(): "aten::gelu.out",
}
"""
Get the path to `test/jit/fixtures`, where all test models for operator changes
(upgrader/downgrader) are stored
"""
def get_fixtures_path() -> Path:
pytorch_dir = Path(__file__).resolve().parents[3]
fixtures_path = pytorch_dir / "test" / "jit" / "fixtures"
return fixtures_path
"""
Get all model names in `test/jit/fixtures`
"""
def get_all_models(model_directory_path: Path) -> Set[str]:
files_in_fixtures = model_directory_path.glob('**/*')
all_models_from_fixtures = [fixture.stem for fixture in files_in_fixtures if fixture.is_file()]
return set(all_models_from_fixtures)
"""
Check if a given model already exists in `test/jit/fixtures`
"""
def model_exist(model_file_name: str, all_models: Set[str]) -> bool:
return model_file_name in all_models
"""
Get the operator list given a module
"""
def get_operator_list(script_module: torch.jit.ScriptModule) -> Set[str]:
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
operator_list = _export_operator_list(mobile_module)
return operator_list
"""
Get the output model operator version, given a module
"""
def get_output_model_version(script_module: torch.nn.Module) -> int:
buffer = io.BytesIO()
torch.jit.save(script_module, buffer)
buffer.seek(0)
zipped_model = zipfile.ZipFile(buffer)
try:
version = int(zipped_model.read('archive/version').decode("utf-8"))
return version
except KeyError:
version = int(zipped_model.read('archive/.data/version').decode("utf-8"))
return version
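# Illustrative usage sketch of the two helpers above (not called by this script):
# script one of the fixture modules, then inspect its operator list and the
# serialized model version. Any entry from ALL_MODULES above would work the same way.
def _describe_module(a_module: torch.nn.Module) -> None:
    scripted = torch.jit.script(a_module)
    logger.info(f"operators: {get_operator_list(scripted)}")
    logger.info(f"serialized model version: {get_output_model_version(scripted)}")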
"""
Loop through all test modules. If the corresponding model doesn't exist in
`test/jit/fixtures`, generate one. A model won't be exported for the following reasons:
1. The test module doesn't cover the changed operator. For example, test_versioned_div_tensor_example_v4
is supposed to test the operator aten::div.Tensor. If the model doesn't include this operator, export will fail.
The error message includes the actual operator list from the model.
2. The output model version is not the same as the expected version. For example, test_versioned_div_tensor_example_v4
is used to test an operator change to aten::div.Tensor, and the operator version will be bumped to v5. This script is
supposed to run before the operator change (before the commit that makes the change). If the actual model version is v5,
this script is likely running on the commit that makes the change.
3. The model already exists in `test/jit/fixtures`.
"""
def generate_models(model_directory_path: Path):
all_models = get_all_models(model_directory_path)
for a_module, expect_operator in ALL_MODULES.items():
# For example: TestVersionedDivTensorExampleV7
torch_module_name = type(a_module).__name__
if not isinstance(a_module, torch.nn.Module):
logger.error(
f"The module {torch_module_name} "
f"is not a torch.nn.module instance. "
f"Please ensure it's a subclass of torch.nn.module in fixtures_src.py"
f"and it's registered as an instance in ALL_MODULES in generated_models.py")
# The corresponding model name is: test_versioned_div_tensor_example_v7
model_name = ''.join([
'_' + char.lower() if char.isupper() else char for char in torch_module_name
]).lstrip('_')
# Some models may not compile anymore, so skip the ones
# that already have a .ptl file exported for them.
logger.info(f"Processing {torch_module_name}")
if model_exist(model_name, all_models):
logger.info(f"Model {model_name} already exists, skipping")
continue
script_module = torch.jit.script(a_module)
actual_model_version = get_output_model_version(script_module)
current_operator_version = torch._C._get_max_operator_version()
if actual_model_version >= current_operator_version + 1:
logger.error(
f"Actual model version {actual_model_version} "
f"is equal or larger than {current_operator_version} + 1. "
f"Please run the script before the commit to change operator.")
continue
actual_operator_list = get_operator_list(script_module)
if expect_operator not in actual_operator_list:
logger.error(
f"The model includes operator: {actual_operator_list}, "
f"however it doesn't cover the operator {expect_operator}."
f"Please ensure the output model includes the tested operator.")
continue
export_model_path = str(model_directory_path / (str(model_name) + ".ptl"))
script_module._save_for_lite_interpreter(export_model_path)
logger.info(f"Generating model {model_name} and it's save to {export_model_path}")
def main() -> None:
model_directory_path = get_fixtures_path()
generate_models(model_directory_path)
if __name__ == '__main__':
main()
|
pytorch-master
|
test/jit/fixtures_srcs/generate_models.py
|
pytorch-master
|
test/jit/fixtures_srcs/__init__.py
|
|
# Owner(s): ["oncall: mobile"]
import torch
from test.jit.fixtures_srcs.generate_models import ALL_MODULES
from torch.testing._internal.common_utils import TestCase, run_tests
class TestUpgraderModelGeneration(TestCase):
def test_all_modules(self):
for a_module, expect_operator in ALL_MODULES.items():
module_name = type(a_module).__name__
self.assertTrue(
isinstance(a_module, torch.nn.Module),
f"The module {module_name} "
f"is not a torch.nn.module instance. "
f"Please ensure it's a subclass of torch.nn.module in fixtures_src.py"
f"and it's registered as an instance in ALL_MODULES in generated_models.py")
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/jit/fixtures_srcs/test_upgrader_models_generation.py
|
import argparse
import os
import sys
import torch
# grab modules from test_jit_hooks.cpp
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from jit.test_hooks_modules import (
create_forward_tuple_input, create_module_forward_multiple_inputs,
create_module_forward_single_input, create_module_hook_return_nothing,
create_module_multiple_hooks_multiple_inputs,
create_module_multiple_hooks_single_input, create_module_no_forward_input,
create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
create_submodule_forward_single_input,
create_submodule_hook_return_nothing,
create_submodule_multiple_hooks_multiple_inputs,
create_submodule_multiple_hooks_single_input,
create_submodule_same_hook_repeated,
create_submodule_to_call_directly_with_hooks)
# Create saved modules for JIT forward hooks and pre-hooks
def main():
parser = argparse.ArgumentParser(
description="Serialize a script modules with hooks attached"
)
parser.add_argument("--export-script-module-to", required=True)
options = parser.parse_args()
global save_name
save_name = options.export_script_module_to + "_"
tests = [
("test_submodule_forward_single_input", create_submodule_forward_single_input()),
("test_submodule_forward_multiple_inputs", create_submodule_forward_multiple_inputs()),
("test_submodule_multiple_hooks_single_input", create_submodule_multiple_hooks_single_input()),
("test_submodule_multiple_hooks_multiple_inputs", create_submodule_multiple_hooks_multiple_inputs()),
("test_submodule_hook_return_nothing", create_submodule_hook_return_nothing()),
("test_submodule_same_hook_repeated", create_submodule_same_hook_repeated()),
("test_module_forward_single_input", create_module_forward_single_input()),
("test_module_forward_multiple_inputs", create_module_forward_multiple_inputs()),
("test_module_multiple_hooks_single_input", create_module_multiple_hooks_single_input()),
("test_module_multiple_hooks_multiple_inputs", create_module_multiple_hooks_multiple_inputs()),
("test_module_hook_return_nothing", create_module_hook_return_nothing()),
("test_module_same_hook_repeated", create_module_same_hook_repeated()),
("test_module_no_forward_input", create_module_no_forward_input()),
("test_forward_tuple_input", create_forward_tuple_input()),
("test_submodule_to_call_directly_with_hooks", create_submodule_to_call_directly_with_hooks())
]
for name, model in tests:
m_scripted = torch.jit.script(model)
filename = save_name + name + ".pt"
torch.jit.save(m_scripted, filename)
print("OK: completed saving modules with hooks!")
if __name__ == "__main__":
main()
|
pytorch-master
|
test/jit_hooks/model.py
|
# This file contains a simple parser that parses the report
# produced by cuda-memcheck.
class ParseError(Exception):
"""Whenever the simple parser is unable to parse the report, this exception will be raised"""
pass
class Report:
"""A report is a container of errors, and a summary on how many errors are found"""
def __init__(self, text, errors):
# text is something like
# ERROR SUMMARY: 1 error
# or
# ERROR SUMMARY: 2 errors
self.text = text
self.num_errors = int(text.strip().split()[2])
self.errors = errors
if len(errors) != self.num_errors:
if len(errors) == 10000 and self.num_errors > 10000:
# When there are more than 10k errors, cuda-memcheck only displays the first 10k
self.num_errors = 10000
else:
raise ParseError("Number of errors does not match")
class Error:
"""Each error is a section in the output of cuda-memcheck.
Each error in the report has an error message and a backtrace. It looks like:
========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError.
========= Saved host backtrace up to driver entry point at error
========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
========= Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaGetLastError + 0x163) [0x4c493]
========= Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b77a05]
========= Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x39d6d1d]
========= .....
"""
def __init__(self, lines):
self.message = lines[0]
lines = lines[2:]
self.stack = [l.strip() for l in lines]
def parse(message):
"""A simple parser that parses the report of cuda-memcheck. This parser is meant to be simple
and it only split the report into separate errors and a summary. Where each error is further
splitted into error message and backtrace. No further details are parsed.
A report contains multiple errors and a summary on how many errors are detected. It looks like:
========= CUDA-MEMCHECK
========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaPointerGetAttributes.
========= Saved host backtrace up to driver entry point at error
========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
========= Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaPointerGetAttributes + 0x1a9) [0x428b9]
========= Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b778a9]
========= .....
=========
========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError.
========= Saved host backtrace up to driver entry point at error
========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
========= Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaGetLastError + 0x163) [0x4c493]
========= .....
=========
========= .....
=========
========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError.
========= Saved host backtrace up to driver entry point at error
========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
========= .....
========= Host Frame:python (_PyEval_EvalFrameDefault + 0x6a0) [0x1d0ad0]
========= Host Frame:python (_PyEval_EvalCodeWithName + 0xbb9) [0x116db9]
=========
========= ERROR SUMMARY: 4 errors
"""
errors = []
HEAD = '========='
headlen = len(HEAD)
started = False
in_message = False
message_lines = []
lines = message.splitlines()
for l in lines:
if l == HEAD + ' CUDA-MEMCHECK':
started = True
continue
if not started or not l.startswith(HEAD):
continue
l = l[headlen + 1:]
if l.startswith('ERROR SUMMARY:'):
return Report(l, errors)
if not in_message:
in_message = True
message_lines = [l]
elif l == '':
errors.append(Error(message_lines))
in_message = False
else:
message_lines.append(l)
raise ParseError("No error summary found")
|
pytorch-master
|
test/scripts/cuda_memcheck_common.py
|
#!/usr/bin/env python3
"""This script runs cuda-memcheck on the specified unit test. Each test case
is run in its isolated process with a timeout so that:
1) different test cases won't influence each other, and
2) in case of hang, the script would still finish in a finite amount of time.
The output will be written to a log file result.log
Example usage:
python run_cuda_memcheck.py ../test_torch.py 600
Note that running cuda-memcheck could be very slow.
"""
import asyncio
import torch
import multiprocessing
import argparse
import subprocess
import tqdm
import os
import sys
import cuda_memcheck_common as cmc
ALL_TESTS = []
GPUS = torch.cuda.device_count()
# parse arguments
parser = argparse.ArgumentParser(description="Run isolated cuda-memcheck on unit tests")
parser.add_argument('filename', help="the python file for a test, such as test_torch.py")
parser.add_argument('timeout', type=int, help='kill the test if it does not terminate in a certain number of seconds')
parser.add_argument('--strict', action='store_true',
help='Whether to show cublas/cudnn errors. These errors are ignored by default because '
'cublas/cudnn does not run error-free under cuda-memcheck, and ignoring these errors reduces noise')
parser.add_argument('--nproc', type=int, default=multiprocessing.cpu_count(),
help='Number of processes running tests, default to number of cores in the system')
parser.add_argument('--gpus', default='all',
help='GPU assignments for each process; it can be "all", or a colon-separated list like "1,2:3,4:5,6"')
parser.add_argument('--ci', action='store_true',
help='Whether this script is executed in CI. When executed inside a CI, this script fails when '
'an error is detected. Also, it will not show a tqdm progress bar, but will directly print the error '
'to stdout instead.')
parser.add_argument('--nohang', action='store_true', help='Treat timeout as success')
parser.add_argument('--split', type=int, default=1, help='Split the job into pieces')
parser.add_argument('--rank', type=int, default=0, help='Which piece this process should pick')
args = parser.parse_args()
# Filter that ignores cublas/cudnn errors
# TODO (@zasdfgbnm): When can we remove this? Will cublas/cudnn run error-free under cuda-memcheck?
def is_ignored_only(output):
try:
report = cmc.parse(output)
except cmc.ParseError:
# If the simple parser fails to parse the output of cuda-memcheck,
# then this error is never ignored.
return False
count_ignored_errors = 0
for e in report.errors:
if 'libcublas' in ''.join(e.stack) or 'libcudnn' in ''.join(e.stack) or 'libcufft' in ''.join(e.stack):
count_ignored_errors += 1
return count_ignored_errors == report.num_errors
# Set environment PYTORCH_CUDA_MEMCHECK=1 to allow skipping some tests
os.environ['PYTORCH_CUDA_MEMCHECK'] = '1'
# Discover tests:
# To get a list of tests, run:
# pytest --setup-only test/test_torch.py
# and then parse the output
proc = subprocess.Popen(['pytest', '--setup-only', args.filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
lines = stdout.decode().strip().splitlines()
for line in lines:
if '(fixtures used:' in line:
line = line.strip().split()[0]
line = line[line.find('::') + 2:]
line = line.replace('::', '.')
ALL_TESTS.append(line)
# Do a simple filtering:
# if 'cpu' or 'CPU' is in the name and 'cuda' or 'CUDA' is not in the name, then skip it
def is_cpu_only(name):
name = name.lower()
return ('cpu' in name) and not ('cuda' in name)
ALL_TESTS = [x for x in ALL_TESTS if not is_cpu_only(x)]
# Split all tests into chunks, and only run the selected chunk
ALL_TESTS.sort()
chunk_size = (len(ALL_TESTS) + args.split - 1) // args.split
start = chunk_size * args.rank
end = chunk_size * (args.rank + 1)
ALL_TESTS = ALL_TESTS[start:end]
# Run tests:
# Since running cuda-memcheck on PyTorch unit tests is very slow, these tests must be run in parallel.
# This is done by using the coroutine feature in new Python versions. A number of coroutines are created;
# they create subprocesses and await them to finish. The number of running subprocesses can be
# specified by the user and by default is the same as the number of CPUs in the machine.
# These subprocesses are balanced across different GPUs on the system by assigning one device per process,
# or as specified by the user.
progress = 0
if not args.ci:
logfile = open('result.log', 'w')
progressbar = tqdm.tqdm(total=len(ALL_TESTS))
else:
logfile = sys.stdout
# create a fake progress bar that does not display anything
class ProgressbarStub:
def update(self, *args):
return
progressbar = ProgressbarStub()
async def run1(coroutine_id):
global progress
if args.gpus == 'all':
gpuid = coroutine_id % GPUS
else:
gpu_assignments = args.gpus.split(':')
assert args.nproc == len(gpu_assignments), 'Please specify GPU assignment for each process, separated by :'
gpuid = gpu_assignments[coroutine_id]
while progress < len(ALL_TESTS):
test = ALL_TESTS[progress]
progress += 1
cmd = f'CUDA_VISIBLE_DEVICES={gpuid} cuda-memcheck --error-exitcode 1 python {args.filename} {test}'
proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
try:
stdout, stderr = await asyncio.wait_for(proc.communicate(), args.timeout)
except asyncio.TimeoutError:
print('Timeout:', test, file=logfile)
proc.kill()
if args.ci and not args.nohang:
sys.exit("Hang detected on cuda-memcheck")
else:
if proc.returncode == 0:
print('Success:', test, file=logfile)
else:
stdout = stdout.decode()
stderr = stderr.decode()
should_display = args.strict or not is_ignored_only(stdout)
if should_display:
print('Fail:', test, file=logfile)
print(stdout, file=logfile)
print(stderr, file=logfile)
if args.ci:
sys.exit("Failure detected on cuda-memcheck")
else:
print('Ignored:', test, file=logfile)
del proc
progressbar.update(1)
async def main():
tasks = [asyncio.ensure_future(run1(i)) for i in range(args.nproc)]
for t in tasks:
await t
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
pytorch-master
|
test/scripts/run_cuda_memcheck.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import logging
import torch
import torch.ao.quantization as tq
from torch import nn
from torch.ao import sparsity
from torch.testing._internal.common_utils import TestCase
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx, convert_to_reference_fx, prepare_qat_fx
from torch.ao.sparsity import fqn_to_module
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
sparse_defaults = {
"sparsity_level": 0.8,
"sparse_block_shape": (1, 4),
"zeros_per_block": 4,
}
def _get_model_and_sparsifier_and_sparse_config(qconfig=None):
model = nn.Sequential(
nn.Linear(4, 4), # 0
nn.ReLU(),
nn.Linear(4, 4), # 2
nn.ReLU(),
tq.QuantStub(),
nn.Linear(4, 4), # 5
nn.ReLU(),
tq.DeQuantStub(),
)
if qconfig:
model[4].qconfig = qconfig
model[5].qconfig = qconfig
sparsifier = sparsity.WeightNormSparsifier(**sparse_defaults)
sparse_config = [
{
"tensor_fqn": '5.weight',
"sparsity_level": 0.7,
"sparse_block_shape": (1, 4),
"zeros_per_block": 4,
},
{"tensor_fqn": "0.weight"},
]
return model, sparsifier, sparse_config
def _squash_mask_calibrate_and_convert(model, sparsifier, input):
sparsifier.step()
sparsifier.squash_mask()
model(input)
tq.convert(model, inplace=True)
def _calculate_sparsity(tensor):
return ((tensor == 0).sum() / tensor.numel()).item()
# This series of tests checks the composability goals for sparsity and quantization, namely
# that performing quantization and sparsity model manipulations in various orderings
# does not cause problems.
class TestComposability(TestCase):
# This test checks whether performing quantization prepare before sparse prepare
# causes any issues and verifies that the correct observers are inserted and that
# the quantized model works as expected
def test_q_prep_before_s_prep(self):
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config(tq.get_default_qconfig("fbgemm"))
tq.prepare(mod, inplace=True)
sparsifier.prepare(mod, config=sparse_config)
# check that correct modules had parametrizations added
self.assertTrue(hasattr(mod[0], "parametrizations"))
self.assertTrue(hasattr(mod[5], "parametrizations"))
# check that correct observers were inserted
self.assertTrue(hasattr(mod[5], "activation_post_process"))
_squash_mask_calibrate_and_convert(
mod, sparsifier, torch.randn(1, 4, 4, 4)
)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(mod[5], torch.nn.quantized.Linear))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))
# This test checks whether performing sparsity prepare before quantization prepare
# causes any issues. In particular, the previous quantization flow was unable to match
# the post sparse prepare module names (adding parametrizations changes the module class names)
# which would result in those parametrized modules not being quantized. This test verifies that
# the fix for this was successful.
def test_s_prep_before_q_prep(self):
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config(tq.get_default_qconfig("fbgemm"))
sparsifier.prepare(mod, config=sparse_config)
tq.prepare(mod, inplace=True)
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(mod[0], "parametrizations"))
self.assertTrue(hasattr(mod[5], "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(hasattr(mod[5], "activation_post_process"))
_squash_mask_calibrate_and_convert(
mod, sparsifier, torch.randn(1, 4, 4, 4)
)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(mod[5], torch.nn.quantized.Linear))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))
# If the sparsified modules have not undergone the final squash mask operation, it's possible
# that the problem outlined in test_s_prep_before_q_prep would occur. This test verifies
# both that the fix to the convert flow avoids this issue and that the resulting quantized
# module uses the sparse version of the weight value.
def test_convert_without_squash_mask(self):
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config(tq.get_default_qconfig("fbgemm"))
sparsifier.prepare(mod, config=sparse_config)
tq.prepare(mod, inplace=True)
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(mod[0], "parametrizations"))
self.assertTrue(hasattr(mod[5], "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(hasattr(mod[5], "activation_post_process"))
sparsifier.step()
sparsity_level = _calculate_sparsity(mod[5].weight)
mod(torch.randn(1, 4, 4, 4))
tq.convert(mod, inplace=True)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(mod[5], torch.nn.quantized.Linear))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(mod[5]._weight_bias()[0])
self.assertGreaterAlmostEqual(cur_sparsity, sparsity_level)
self.assertGreaterAlmostEqual(
sparsity_level, sparse_config[0]["sparsity_level"]
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
# This tests whether performing sparse prepare before fusion causes any issues. The
# worry was that the link created between the sparsifier and the modules that need to
# be sparsified would be broken.
def test_s_prep_before_fusion(self):
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config(tq.get_default_qconfig("fbgemm"))
sparsifier.prepare(mod, config=sparse_config)
tq.fuse_modules(mod, [["5", "6"]], inplace=True)
mod[5].qconfig = tq.get_default_qconfig("fbgemm")
tq.prepare(mod, inplace=True)
# check that correct modules had parametrizations added and
# that none were lost during prepare or fusion
self.assertTrue(hasattr(mod[0], "parametrizations"))
self.assertTrue(hasattr(mod[5][0], "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(hasattr(mod[5], "activation_post_process"))
_squash_mask_calibrate_and_convert(
mod, sparsifier, torch.randn(1, 4, 4, 4)
)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(mod[5], torch.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))
# This tests whether performing fusion before sparse prepare causes any issues. The
# main worry was that the links to the modules in the sparse config would be broken by fusion.
def test_fusion_before_s_prep(self):
(
mod,
sparsifier,
_,
) = _get_model_and_sparsifier_and_sparse_config(tq.get_default_qconfig("fbgemm"))
tq.fuse_modules(mod, [["5", "6"]], inplace=True)
# the original sparse config is broken by fusion, but it still works if you put the correct fqn in
sparse_config = [
{
"tensor_fqn": "5.0.weight",
"sparsity_level": 0.7,
"sparse_block_shape": (1, 4),
"zeros_per_block": 4,
},
{"tensor_fqn": "0.weight"},
]
sparsifier.prepare(mod, config=sparse_config)
mod[5].qconfig = tq.get_default_qconfig("fbgemm")
tq.prepare(mod, inplace=True)
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(mod[0], "parametrizations"))
self.assertTrue(hasattr(mod[5][0], "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(hasattr(mod[5], "activation_post_process"))
sparsifier.step()
sparsity_level = _calculate_sparsity(mod[5][0].weight)
mod(torch.randn(1, 4, 4, 4))
tq.convert(mod, inplace=True)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(mod[5], torch.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(mod[5]._weight_bias()[0])
self.assertGreaterAlmostEqual(cur_sparsity, sparsity_level)
self.assertGreaterAlmostEqual(
sparsity_level, sparse_config[0]["sparsity_level"]
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
# This tests whether performing sparse prepare before qat prepare causes issues.
# The primary worries were that qat_prep wouldn't recognize the parametrized
# modules and that the convert step for qat would remove the parametrizations
# from the modules.
def test_s_prep_before_qat_prep(self):
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config(
tq.get_default_qat_qconfig("fbgemm")
)
sparsifier.prepare(mod, config=sparse_config)
tq.prepare_qat(mod, inplace=True)
self.assertTrue(hasattr(mod[0], "parametrizations"))
self.assertTrue(hasattr(mod[5], "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(hasattr(mod[5], "activation_post_process"))
self.assertTrue(isinstance(mod[5], torch.nn.qat.Linear))
_squash_mask_calibrate_and_convert(
mod, sparsifier, torch.randn(1, 4, 4, 4)
)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(mod[5], torch.nn.quantized.Linear))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(mod[5]._weight_bias()[0])
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
# This tests whether performing qat prepare before sparse prepare causes issues.
def test_qat_prep_before_s_prep(self):
mod, sparsifier, _ = _get_model_and_sparsifier_and_sparse_config(
tq.get_default_qat_qconfig("fbgemm")
)
tq.prepare_qat(mod, inplace=True)
# need to set up sparse_config on the new modules
sparse_config = [
{
"tensor_fqn": "5.weight",
"sparsity_level": 0.7,
"sparse_block_shape": (1, 4),
"zeros_per_block": 4,
},
{"tensor_fqn": "0.weight"},
]
sparsifier.prepare(mod, config=sparse_config)
# check that correct modules had parametrizations added and
# that none were lost during qat prepare
self.assertTrue(hasattr(mod[0], "parametrizations"))
self.assertTrue(hasattr(mod[5], "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(hasattr(mod[5], "activation_post_process"))
self.assertTrue(isinstance(mod[5], torch.nn.qat.Linear))
_squash_mask_calibrate_and_convert(
mod, sparsifier, torch.randn(1, 4, 4, 4)
)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(mod[5], torch.nn.quantized.Linear))
self.assertEqual(mod(torch.randn(1, 4, 4, 4)).shape, torch.Size([1, 4, 4, 4]))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(mod[5]._weight_bias()[0])
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
def _module_has_activation_post_process(model, fqn_of_module):
for node in model.graph.nodes:
# look for an observer whose arg is the target module
if "activation_post_process" in node.name:
if node.args[0].target == fqn_of_module:
return True
return False
class TestFxComposability(TestCase):
r"""This series of tests checks that various steps of the quantization and sparsity flow
compose cleanly despite variation in sequencing.
"""
def test_q_prep_fx_before_s_prep(self):
r"""
This test checks that the ordering of prepare_fx -> sparse prepare -> convert_fx
compose cleanly without issue and that the final result is sparsified without
having to call squash mask between sparse prepare and convert_fx. This also tests the
automatic fusion that occurs during prepare_fx.
"""
(
mod,
sparsifier,
_,
) = _get_model_and_sparsifier_and_sparse_config()
example = torch.randn(1, 4, 4, 4)
qconfig = tq.get_default_qconfig("fbgemm")
qconfig_mapping = tq.QConfigMapping() \
.set_module_name("4", qconfig) \
.set_module_name("5", qconfig)
mod = prepare_fx(mod, qconfig_mapping, (example,))
# the original sparse config is broken by the auto fusion in fx,
# but it still works if you put the correct fqn in
sparse_config = [
{
"tensor_fqn": "5.0.weight",
"sparsity_level": 0.7,
"sparse_block_shape": (1, 4),
"zeros_per_block": 4,
},
{"tensor_fqn": "0.0.weight"},
]
sparsifier.prepare(mod, config=sparse_config)
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(fqn_to_module(mod, "0.0"), "parametrizations"))
self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(_module_has_activation_post_process(mod, "5"))
sparsifier.step()
sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
mod(example)
mod = convert_fx(mod)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(fqn_to_module(mod, "5")._weight_bias()[0])
self.assertGreaterAlmostEqual(cur_sparsity, sparsity_level)
self.assertGreaterAlmostEqual(
sparsity_level, sparse_config[0]["sparsity_level"]
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
def test_q_prep_fx_s_prep_ref_conv(self):
r"""
This checks that the ordering: prepare_fx -> sparse prepare -> convert_to_reference_fx
compose cleanly without issue and that the final result is sparsified without
having to call squash mask before convert_to_reference_fx.
"""
(
mod,
sparsifier,
_,
) = _get_model_and_sparsifier_and_sparse_config()
example = torch.randn(1, 4, 4, 4)
qconfig = tq.get_default_qconfig("fbgemm")
qconfig_mapping = tq.QConfigMapping() \
.set_module_name("4", qconfig) \
.set_module_name("5", qconfig)
mod = prepare_fx(mod, qconfig_mapping, (example,))
# the original sparse config is broken by the auto fusion in fx,
# but it still works if you put the correct fqn in
sparse_config = [
{
"tensor_fqn": "5.0.weight",
"sparsity_level": 0.7,
"sparse_block_shape": (1, 4),
"zeros_per_block": 4,
},
{"tensor_fqn": "0.0.weight"},
]
sparsifier.prepare(mod, config=sparse_config)
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(fqn_to_module(mod, "0.0"), "parametrizations"))
self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(_module_has_activation_post_process(mod, "5"))
sparsifier.step()
sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
mod(example)
mod = convert_to_reference_fx(mod)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))
self.assertTrue(isinstance(fqn_to_module(mod, "5.0"), torch.nn.quantized._reference.Linear))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
self.assertGreaterAlmostEqual(cur_sparsity, sparsity_level)
self.assertGreaterAlmostEqual(
sparsity_level, sparse_config[0]["sparsity_level"]
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
def test_s_prep_before_q_prep_fx(self):
r"""
This test checks that the ordering of sparse prepare -> prepare_fx -> convert_fx
compose cleanly without issue and that the final result is sparsified without
having to call squash mask before convert_fx.
"""
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config()
sparsifier.prepare(mod, config=sparse_config)
example = torch.randn(1, 4, 4, 4)
qconfig = tq.get_default_qconfig("fbgemm")
qconfig_mapping = tq.QConfigMapping() \
.set_module_name("4", qconfig) \
.set_module_name("5", qconfig)
mod = prepare_fx(mod, qconfig_mapping, (example,))
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(fqn_to_module(mod, "0.0"), "parametrizations"))
self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(_module_has_activation_post_process(mod, "5"))
sparsifier.step()
sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
mod(example)
mod = convert_fx(mod)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(fqn_to_module(mod, "5")._weight_bias()[0])
self.assertGreaterAlmostEqual(cur_sparsity, sparsity_level)
self.assertGreaterAlmostEqual(
sparsity_level, sparse_config[0]["sparsity_level"]
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
def test_s_prep_before_qat_prep_fx(self):
r"""
This test checks that the ordering of sparse prepare -> prepare_qat_fx -> convert_fx
compose cleanly without issue and that the final result is sparsified without
having to call squash mask before convert_fx.
"""
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config()
sparsifier.prepare(mod, config=sparse_config)
example = torch.randn(1, 4, 4, 4)
qconfig = tq.get_default_qat_qconfig("fbgemm")
qconfig_mapping = tq.QConfigMapping() \
.set_module_name("4", qconfig) \
.set_module_name("5", qconfig)
mod = prepare_qat_fx(mod, qconfig_mapping, (example,))
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(fqn_to_module(mod, "0.0"), "parametrizations"))
self.assertTrue(hasattr(fqn_to_module(mod, "5"), "parametrizations"))
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.qat.LinearReLU))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(_module_has_activation_post_process(mod, "5"))
sparsifier.step()
sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.weight"))
mod(example)
mod = convert_fx(mod)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.quantized.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(fqn_to_module(mod, "5")._weight_bias()[0])
self.assertGreaterAlmostEqual(cur_sparsity, sparsity_level)
self.assertGreaterAlmostEqual(
sparsity_level, sparse_config[0]["sparsity_level"]
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
def test_s_prep_q_prep_fx_ref(self):
r"""
This checks that the ordering: sparse prepare -> prepare_fx -> convert_to_reference_fx
compose cleanly without issue and that the final result is sparsified without
having to call squash mask before convert_to_reference_fx.
"""
(
mod,
sparsifier,
sparse_config,
) = _get_model_and_sparsifier_and_sparse_config()
sparsifier.prepare(mod, config=sparse_config)
example = torch.randn(1, 4, 4, 4)
qconfig = tq.get_default_qconfig("fbgemm")
qconfig_mapping = tq.QConfigMapping() \
.set_module_name("4", qconfig) \
.set_module_name("5", qconfig)
mod = prepare_fx(mod, qconfig_mapping, (example,))
# check that correct modules had parametrizations added and
# that none were lost during prepare
self.assertTrue(hasattr(fqn_to_module(mod, "0.0"), "parametrizations"))
self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))
# check that correct observers were inserted and that matching
# occurred successfully
self.assertTrue(_module_has_activation_post_process(mod, "5"))
sparsifier.step()
sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
mod(example)
mod = convert_to_reference_fx(mod)
# check that final module is the expected quantized module and that the model runs
self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.nn.intrinsic.LinearReLU))
self.assertEqual(mod(example).shape, torch.Size([1, 4, 4, 4]))
self.assertTrue(isinstance(fqn_to_module(mod, "5.0"), torch.nn.quantized._reference.Linear))
# check that module was actually sparsified
cur_sparsity = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
self.assertGreaterAlmostEqual(cur_sparsity, sparsity_level)
self.assertGreaterAlmostEqual(
sparsity_level, sparse_config[0]["sparsity_level"]
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])
|
pytorch-master
|
test/ao/sparsity/test_composability.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import copy
import logging
import torch
from torch import nn
from torch.ao.sparsity._experimental.pruner import BasePruner, PruningParametrization, ZeroesParametrization
from torch.nn.utils import parametrize
from torch.testing._internal.common_utils import TestCase, skipIfTorchDynamo
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
DEVICES = {
torch.device("cpu"),
torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
}
NEEDS_ZEROS = { # these layers should have pruned indices zeroed out, not removed
nn.BatchNorm2d
}
class Linear(nn.Module):
r"""Model with Linear layers, in Sequential and outside, without biases"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Linear(16, 16, bias=False)
)
self.linear = nn.Linear(16, 16, bias=False)
def forward(self, x):
x = self.seq(x)
x = self.linear(x)
return x
class LinearB(nn.Module):
r"""Model with Linear layers, in Sequential and outside, with biases"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Linear(16, 16, bias=True)
)
self.linear = nn.Linear(16, 16, bias=True)
def forward(self, x):
x = self.seq(x)
x = self.linear(x)
return x
class MultipleLinear(nn.Module):
r"""Model with multiple Linear layers, in Sequential and outside, without biases
and with activation functions"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Linear(7, 5, bias=False),
nn.ReLU(),
nn.Linear(5, 8, bias=False),
nn.ReLU(),
nn.Linear(8, 6, bias=False)
)
self.linear = nn.Linear(6, 4, bias=False)
def forward(self, x):
x = self.seq(x)
x = self.linear(x)
return x
class MultipleLinearB(nn.Module):
r"""Model with multiple Linear layers, in Sequential and outside, with biases
and with activation functions"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Linear(7, 5, bias=True),
nn.ReLU(),
nn.Linear(5, 8, bias=True),
nn.ReLU(),
nn.Linear(8, 6, bias=True)
)
self.linear = nn.Linear(6, 4, bias=True)
def forward(self, x):
x = self.seq(x)
x = self.linear(x)
return x
class MultipleLinearMixed(nn.Module):
r"""Model with multiple Linear layers, in Sequential and outside, some with biases
and with activation functions"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Linear(7, 5, bias=True),
nn.ReLU(),
nn.Linear(5, 8, bias=False),
nn.ReLU(),
nn.Linear(8, 6, bias=True)
)
self.linear = nn.Linear(6, 4, bias=False)
def forward(self, x):
x = self.seq(x)
x = self.linear(x)
return x
class Conv2dA(nn.Module):
r"""Model with Conv2d layers, in Sequential and outside, without biases"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, bias=False),
)
self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=False)
def forward(self, x):
x = self.seq(x)
x = self.conv2d(x)
return x
class Conv2dB(nn.Module):
r"""Model with Conv2d layers, in Sequential and outside, with biases"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, bias=True),
)
self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=True)
def forward(self, x):
x = self.seq(x)
x = self.conv2d(x)
return x
class Conv2dC(nn.Module):
r"""Model with Conv2d layers, in Sequential and outside, with and without biases"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, bias=True),
)
self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=False)
def forward(self, x):
x = self.seq(x)
x = self.conv2d(x)
return x
class Conv2dBN(nn.Module):
r"""Model with Conv2d layers and BatchNorms"""
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, bias=True),
nn.BatchNorm2d(32)
)
self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=True)
self.bn = nn.BatchNorm2d(64)
def forward(self, x):
x = self.seq(x)
x = self.conv2d(x)
x = self.bn(x)
return x
class SimplePruner(BasePruner):
def update_mask(self, module, tensor_name, **kwargs):
getattr(module.parametrizations, tensor_name)[0].pruned_outputs.add(1)
class MultiplePruner(BasePruner):
def update_mask(self, module, tensor_name, **kwargs):
getattr(module.parametrizations, tensor_name)[0].pruned_outputs.update([1, 2])
class TestBasePruner(TestCase):
def _check_pruner_prepared(self, model, pruner, device):
for config in pruner.groups:
modules = []
if type(config['module']) is tuple:
for module in config['module']:
modules.append(module)
else:
module = config['module']
modules.append(module)
for module in modules:
assert module.weight.device.type == device.type
# Check mask exists
assert hasattr(module, 'mask')
# Check parametrization exists and is correct
assert parametrize.is_parametrized(module)
assert hasattr(module, "parametrizations")
# Assume that this is the 1st/only parametrization
if isinstance(module, tuple(NEEDS_ZEROS)):
assert type(module.parametrizations.weight[0]) == ZeroesParametrization
else:
assert type(module.parametrizations.weight[0]) == PruningParametrization
def _check_pruner_mask_squashed(self, model, pruner, device):
for config in pruner.groups:
modules = []
if type(config['module']) is tuple:
for module in config['module']:
modules.append(module)
else:
module = config['module']
modules.append(module)
for module in modules:
assert module.weight.device.type == device.type
assert not hasattr(module, "parametrizations")
assert not hasattr(module, 'mask')
def _check_pruner_valid_before_step(self, model, pruner, device):
for config in pruner.groups:
modules = []
if type(config['module']) is tuple:
for module in config['module']:
modules.append(module)
else:
module = config['module']
modules.append(module)
for module in modules:
assert module.weight.device.type == device.type
assert module.parametrizations.weight[0].pruned_outputs == set()
def _check_pruner_valid_after_step(self, model, pruner, pruned_set, device):
for config in pruner.groups:
modules = []
if type(config['module']) is tuple:
for module in config['module']:
modules.append(module)
else:
module = config['module']
modules.append(module)
for module in modules:
assert module.weight.device.type == device.type
assert module.parametrizations.weight[0].pruned_outputs == pruned_set
def _test_constructor_on_device(self, model, device):
self.assertRaisesRegex(TypeError, 'BasePruner .* update_mask',
BasePruner)
model1 = copy.deepcopy(model).to(device)
pruner = SimplePruner(None)
pruner.prepare(model1, None)
for g in pruner.groups:
module = g['module']
assert module.weight.device.type == device.type
assert len(pruner.groups) == 2
pruner.step()
# Can instantiate the model with configs
model2 = copy.deepcopy(model).to(device)
pruner = SimplePruner({'test': 3})
pruner.prepare(model2, [model2.linear])
assert len(pruner.groups) == 1
assert pruner.groups[0]['module_fqn'] == 'linear'
assert 'test' in pruner.groups[0]
assert pruner.groups[0]['test'] == 3
def test_constructor(self):
model = Linear()
for device in DEVICES:
self._test_constructor_on_device(model, torch.device(device))
def _test_prepare_linear_on_device(self, model, device):
model = copy.deepcopy(model).to(device)
x = torch.ones(128, 16, device=device)
pruner = SimplePruner(None)
pruner.prepare(model, None)
self._check_pruner_prepared(model, pruner, device)
assert model(x).shape == (128, 16)
def test_prepare_linear(self):
models = [Linear(), LinearB()] # without and with bias
for device in DEVICES:
for model in models:
self._test_prepare_linear_on_device(model, torch.device(device))
def _test_prepare_conv2d_on_device(self, model, config, device):
x = torch.ones((1, 1, 28, 28), device=device)
pruner = SimplePruner(None)
pruner.prepare(model, config)
self._check_pruner_prepared(model, pruner, device)
assert model(x).shape == (1, 64, 24, 24)
def test_prepare_conv2d(self):
bn_model = Conv2dBN()
bn_config = [(bn_model.seq[0], bn_model.seq[1]), (bn_model.conv2d, bn_model.bn)]
models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]
configs = [None, None, None, bn_config]
for device in DEVICES:
for model, config in zip(models, configs):
model = model.to(device)
self._test_prepare_conv2d_on_device(model, config, torch.device(device))
def _test_squash_mask_linear_on_device(self, model, device):
model = copy.deepcopy(model).to(device)
x = torch.ones(128, 16, device=device)
pruner = SimplePruner(None)
pruner.prepare(model, None)
pruner.squash_mask()
self._check_pruner_mask_squashed(model, pruner, device)
assert model(x).shape == (128, 16)
def test_squash_mask_linear(self):
models = [Linear(), LinearB()] # without and with bias
for device in DEVICES:
for model in models:
self._test_squash_mask_linear_on_device(model, torch.device(device))
def _test_squash_mask_conv2d_on_device(self, model, config, device):
model = copy.deepcopy(model).to(device)
x = torch.ones((1, 1, 28, 28), device=device)
pruner = SimplePruner(None)
pruner.prepare(model, config)
pruner.squash_mask()
self._check_pruner_mask_squashed(model, pruner, device)
assert model(x).shape == (1, 64, 24, 24)
def test_squash_mask_conv2d(self):
bn_model = Conv2dBN()
bn_config = [(bn_model.seq[0], bn_model.seq[1]), (bn_model.conv2d, bn_model.bn)]
models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]
configs = [None, None, None, bn_config]
for device in DEVICES:
for model, config in zip(models, configs):
model = model.to(device)
self._test_squash_mask_conv2d_on_device(model, config, torch.device(device))
def _test_step_linear_on_device(self, model, is_basic, device):
model = model.to(device)
if is_basic:
x = torch.ones(16, 16)
pruner = SimplePruner(None)
pruner.prepare(model, None)
self._check_pruner_valid_before_step(model, pruner, device)
pruner.step()
self._check_pruner_valid_after_step(model, pruner, {1}, device)
else:
x = torch.ones(7, 7)
pruner = MultiplePruner(None)
pruner.prepare(model, None)
self._check_pruner_valid_before_step(model, pruner, device)
pruner.step()
self._check_pruner_valid_after_step(model, pruner, {1, 2}, device)
def test_step_linear(self):
basic_models = [Linear(), LinearB()]
complex_models = [MultipleLinear(), MultipleLinearB(), MultipleLinearMixed()]
for device in DEVICES:
for model in basic_models:
self._test_step_linear_on_device(model, True, torch.device(device))
for model in complex_models:
self._test_step_linear_on_device(model, False, torch.device(device))
def _test_step_conv2d_on_device(self, model, config, device):
model = model.to(device)
x = torch.ones((1, 1, 28, 28)).to(device)
pruner = SimplePruner(None)
pruner.prepare(model, config)
self._check_pruner_valid_before_step(model, pruner, device)
pruner.step()
if type(model) is Conv2dBN:
assert pruner.get_module_pruned_outputs(model.seq[1]) == pruner.get_module_pruned_outputs(model.seq[0])
assert pruner.get_module_pruned_outputs(model.bn) == pruner.get_module_pruned_outputs(model.conv2d)
self._check_pruner_valid_after_step(model, pruner, {1}, device)
assert model(x).shape == (1, 64, 24, 24)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_step_conv2d(self):
bn_model = Conv2dBN()
bn_config = [(bn_model.seq[0], bn_model.seq[1]),
(bn_model.conv2d, bn_model.bn)]
models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]
configs = [None, None, None, bn_config]
for device in DEVICES:
for model, config in zip(models, configs):
self._test_step_conv2d_on_device(model, config, torch.device(device))
|
pytorch-master
|
test/ao/sparsity/test_pruner.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import logging
import warnings
from torch.testing._internal.common_utils import TestCase
from torch import nn
import torch
from typing import Tuple
import copy
from torch.ao.sparsity._experimental.data_sparsifier import DataNormSparsifier
from torch.ao.sparsity._experimental.data_scheduler import BaseDataScheduler
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ImplementedDataScheduler(BaseDataScheduler):
def __init__(self, sparsifier, sparsifier_hyperparam, last_epoch=-1, verbose=False):
super().__init__(sparsifier, sparsifier_hyperparam, last_epoch, verbose)
def get_schedule_param(self):
if self.last_epoch > 0:
return {name: config['sparsity_level'] * 0.5
for name, config in self.data_sparsifier.data_groups.items()}
else:
return self.base_param
class TestBaseDataScheduler(TestCase):
def _get_data(self):
tensor1, param1, emb1 = torch.randn(5, 5), nn.Parameter(torch.randn(10, 10)), nn.Embedding(50, 5)
data_list = [
('tensor1', tensor1), ('param1', param1), ('emb1', emb1)
]
defaults = {
'sparsity_level': 0.7,
'sparse_block_shape': (1, 4),
'zeros_per_block': 2
}
data_with_config = [
{
'name': 'tensor2', 'data': torch.randn(4, 4),
'config': {'sparsity_level': 0.3}
}
]
return data_list, data_with_config, defaults
def _get_sparsifier(self, data_list, data_with_config, defaults):
sparsifier = DataNormSparsifier(data_list, **defaults)
for data_config_dict in data_with_config:
name, data, config = data_config_dict['name'], data_config_dict['data'], data_config_dict['config']
sparsifier.add_data(name=name, data=data, **config)
return sparsifier
def _get_scheduler(self, sparsifier, schedule_param):
scheduler = ImplementedDataScheduler(sparsifier, schedule_param)
return scheduler
def _get_schedule_param(self):
return 'sparsity_level'
def _get_name_data_config(self, some_data, defaults):
config = copy.deepcopy(defaults)
if isinstance(some_data, Tuple):
# dealing with data_list
name, data = some_data
else:
# dealing with data_with_config
name, data, new_config = some_data['name'], some_data['data'], some_data['config']
config.update(new_config)
return name, data, config
def test_constructor(self):
"""Checks if the warning is thrown if the scheduler step is called
before the sparsifier step"""
data_list, data_with_config, defaults = self._get_data()
sparsifier = self._get_sparsifier(data_list, data_with_config, defaults)
schedule_param = self._get_schedule_param()
scheduler = self._get_scheduler(sparsifier, schedule_param)
assert scheduler.data_sparsifier == sparsifier
assert scheduler._step_count == 1
for name, config in sparsifier.data_groups.items():
assert scheduler.base_param[name] == config.get(schedule_param, None)
def test_order_of_steps(self):
data_list, data_with_config, defaults = self._get_data()
sparsifier = self._get_sparsifier(data_list, data_with_config, defaults)
schedule_param = self._get_schedule_param()
scheduler = self._get_scheduler(sparsifier, schedule_param)
# Sparsifier step is not called
with self.assertWarns(UserWarning):
scheduler.step()
# Correct order has no warnings
# Note: This will trigger if other warnings are present.
with warnings.catch_warnings(record=True) as w:
sparsifier.step()
scheduler.step()
# Make sure there is no warning related to the base_data_scheduler
for warning in w:
fname = warning.filename
fname = '/'.join(fname.split('/')[-5:])
assert fname != 'torch/ao/sparsity/experimental/scheduler/data_scheduler/base_data_scheduler.py'
def test_step(self):
data_list, data_with_config, defaults = self._get_data()
sparsifier = self._get_sparsifier(data_list, data_with_config, defaults)
schedule_param = self._get_schedule_param()
scheduler = self._get_scheduler(sparsifier, schedule_param)
all_data = data_list + data_with_config
for some_data in all_data:
name, _, config = self._get_name_data_config(some_data, defaults)
assert sparsifier.data_groups[name][schedule_param] == config[schedule_param]
sparsifier.step()
scheduler.step()
for some_data in all_data:
name, _, config = self._get_name_data_config(some_data, defaults)
assert sparsifier.data_groups[name][schedule_param] == config[schedule_param] * 0.5
# checking step count
step_cnt = 5
for _ in range(0, step_cnt):
sparsifier.step()
scheduler.step()
assert scheduler._step_count == step_cnt + 2 # step_cnt + step above + 1 step in constructor
def test_state_dict(self):
data_list, data_with_config, defaults = self._get_data()
sparsifier = self._get_sparsifier(data_list, data_with_config, defaults)
schedule_param = self._get_schedule_param()
scheduler1 = self._get_scheduler(sparsifier, schedule_param)
sparsifier.step()
scheduler1.step()
scheduler2 = self._get_scheduler(sparsifier, schedule_param)
all_data = data_list + data_with_config
for some_data in all_data:
name, _, _ = self._get_name_data_config(some_data, defaults)
assert scheduler1.base_param[name] != scheduler2.base_param[name]
assert scheduler1._last_param[name] == scheduler2.base_param[name]
scheduler1_state = scheduler1.state_dict()
scheduler2.load_state_dict(scheduler1_state)
for some_data in all_data:
name, _, _ = self._get_name_data_config(some_data, defaults)
assert scheduler1.base_param[name] == scheduler2.base_param[name]
assert scheduler1._last_param[name] == scheduler2._last_param[name]
|
pytorch-master
|
test/ao/sparsity/test_data_scheduler.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import logging
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.linear = nn.Linear(16, 16, bias=bias)
self.seq = nn.Sequential(
nn.Linear(16, 16, bias=bias),
nn.Linear(16, 16, bias=bias)
)
# Make sure the weights are not random
self.linear.weight = nn.Parameter(torch.zeros_like(self.linear.weight) + 1.0)
self.seq[0].weight = nn.Parameter(torch.zeros_like(self.seq[0].weight) + 2.0)
self.seq[1].weight = nn.Parameter(torch.zeros_like(self.seq[1].weight) + 3.0)
        if bias:
            self.linear.bias = nn.Parameter(torch.zeros_like(self.linear.bias) + 10.0)
            self.seq[0].bias = nn.Parameter(torch.zeros_like(self.seq[0].bias) + 20.0)
            self.seq[1].bias = nn.Parameter(torch.zeros_like(self.seq[1].bias) + 30.0)
def forward(self, x):
x = self.linear(x)
x = self.seq(x)
return x
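# Illustrative sketch (not part of the original test suite): FakeSparsity is a
# parametrization that multiplies the original tensor by a fixed mask, so once
# registered the module's effective weight is `mask * weight`. The tests below
# exercise exactly this behaviour.
def _fake_sparsity_example():
    layer = nn.Linear(4, 4, bias=False)
    mask = torch.zeros(4, 4)
    mask[0, :] = 1.0  # keep only the first output row
    parametrize.register_parametrization(layer, 'weight', utils.FakeSparsity(mask))
    # The masked weight is computed on the fly; the unmasked tensor is kept
    # under layer.parametrizations['weight'].original.
    assert torch.all(layer.weight[1:] == 0)
    return layer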
class TestFakeSparsity(TestCase):
def test_masking_logic(self):
model = nn.Linear(16, 16, bias=False)
model.weight = nn.Parameter(torch.eye(16))
x = torch.randn(3, 16)
self.assertEqual(torch.mm(x, torch.eye(16)), model(x))
mask = torch.zeros(16, 16)
sparsity = utils.FakeSparsity(mask)
parametrize.register_parametrization(model, 'weight', sparsity)
x = torch.randn(3, 16)
self.assertEqual(torch.zeros(3, 16), model(x))
def test_weights_parametrized(self):
model = ModelUnderTest(bias=False)
assert not hasattr(model.linear, 'parametrizations')
assert not hasattr(model.seq[0], 'parametrizations')
assert not hasattr(model.seq[1], 'parametrizations')
mask = torch.eye(16)
parametrize.register_parametrization(model.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[1], 'weight',
utils.FakeSparsity(mask))
assert hasattr(model.linear, 'parametrizations')
assert parametrize.is_parametrized(model.linear, 'weight')
assert hasattr(model.seq[0], 'parametrizations')
        assert parametrize.is_parametrized(model.seq[0], 'weight')
        assert hasattr(model.seq[1], 'parametrizations')
        assert parametrize.is_parametrized(model.seq[1], 'weight')
def test_state_dict_preserved(self):
model_save = ModelUnderTest(bias=False)
mask = torch.eye(16)
parametrize.register_parametrization(model_save.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model_save.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model_save.seq[1], 'weight',
utils.FakeSparsity(mask))
state_dict = model_save.state_dict()
model_load = ModelUnderTest(bias=False)
mask = torch.zeros(model_load.linear.weight.shape)
parametrize.register_parametrization(model_load.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.zeros(model_load.seq[0].weight.shape)
parametrize.register_parametrization(model_load.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.zeros(model_load.seq[1].weight.shape)
parametrize.register_parametrization(model_load.seq[1], 'weight',
utils.FakeSparsity(mask))
        # Use strict=False, as the 'mask' buffers are intentionally absent from the state_dict
model_load.load_state_dict(state_dict, strict=False)
# Check the parametrizations are preserved
assert hasattr(model_load.linear, 'parametrizations')
assert parametrize.is_parametrized(model_load.linear, 'weight')
assert hasattr(model_load.seq[0], 'parametrizations')
        assert parametrize.is_parametrized(model_load.seq[0], 'weight')
        assert hasattr(model_load.seq[1], 'parametrizations')
        assert parametrize.is_parametrized(model_load.seq[1], 'weight')
        # Check the weights are preserved
self.assertEqual(model_save.linear.parametrizations['weight'].original,
model_load.linear.parametrizations['weight'].original)
self.assertEqual(model_save.seq[0].parametrizations['weight'].original,
model_load.seq[0].parametrizations['weight'].original)
self.assertEqual(model_save.seq[1].parametrizations['weight'].original,
model_load.seq[1].parametrizations['weight'].original)
# Check the masks are not preserved in the state_dict
# We store the state_dicts in the sparsifier, not in the model itself.
# TODO: Need to find a clean way of exporting the parametrized model
self.assertNotEqual(model_save.linear.parametrizations['weight'][0].mask,
model_load.linear.parametrizations['weight'][0].mask)
self.assertNotEqual(model_save.seq[0].parametrizations['weight'][0].mask,
model_load.seq[0].parametrizations['weight'][0].mask)
self.assertNotEqual(model_save.seq[1].parametrizations['weight'][0].mask,
model_load.seq[1].parametrizations['weight'][0].mask)
def test_jit_trace(self):
model = ModelUnderTest(bias=False)
mask = torch.eye(16)
parametrize.register_parametrization(model.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[1], 'weight',
utils.FakeSparsity(mask))
# Tracing
example_x = torch.ones(3, 16)
model_trace = torch.jit.trace_module(model, {'forward': example_x})
x = torch.randn(3, 16)
y = model(x)
y_hat = model_trace(x)
self.assertEqual(y_hat, y)
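# Illustrative note (not part of the original test suite): once a module is
# parametrized, its state_dict stores the unmasked tensor under
# 'parametrizations.weight.original' instead of 'weight', and FakeSparsity
# deliberately keeps its mask out of the state_dict. That is why
# test_state_dict_preserved above compares the 'original' tensors and expects
# the masks not to survive a save/load round trip.
def _parametrized_state_dict_keys():
    layer = nn.Linear(4, 4, bias=False)
    parametrize.register_parametrization(layer, 'weight', utils.FakeSparsity(torch.ones(4, 4)))
    return list(layer.state_dict().keys())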
|
pytorch-master
|
test/ao/sparsity/test_parametrization.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
from torch import nn
from torch.ao.sparsity import WeightNormSparsifier
from torch.ao.sparsity import BaseScheduler, LambdaSL
from torch.testing._internal.common_utils import TestCase
import warnings
class ImplementedScheduler(BaseScheduler):
def get_sl(self):
if self.last_epoch > 0:
return [group['sparsity_level'] * 0.5
for group in self.sparsifier.groups]
else:
return list(self.base_sl)
class TestScheduler(TestCase):
def test_constructor(self):
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
scheduler = ImplementedScheduler(sparsifier)
assert scheduler.sparsifier is sparsifier
assert scheduler._step_count == 1
assert scheduler.base_sl == [sparsifier.groups[0]['sparsity_level']]
def test_order_of_steps(self):
"""Checks if the warning is thrown if the scheduler step is called
before the sparsifier step"""
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
scheduler = ImplementedScheduler(sparsifier)
# Sparsifier step is not called
with self.assertWarns(UserWarning):
scheduler.step()
# Correct order has no warnings
# Note: This will trigger if other warnings are present.
with warnings.catch_warnings(record=True) as w:
sparsifier.step()
scheduler.step()
# Make sure there is no warning related to the base_scheduler
for warning in w:
fname = warning.filename
fname = '/'.join(fname.split('/')[-5:])
assert fname != 'torch/ao/sparsity/scheduler/base_scheduler.py'
def test_step(self):
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
assert sparsifier.groups[0]['sparsity_level'] == 0.5
scheduler = ImplementedScheduler(sparsifier)
assert sparsifier.groups[0]['sparsity_level'] == 0.5
sparsifier.step()
scheduler.step()
assert sparsifier.groups[0]['sparsity_level'] == 0.25
def test_lambda_scheduler(self):
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
assert sparsifier.groups[0]['sparsity_level'] == 0.5
scheduler = LambdaSL(sparsifier, lambda epoch: epoch * 10)
assert sparsifier.groups[0]['sparsity_level'] == 0.0 # Epoch 0
scheduler.step()
assert sparsifier.groups[0]['sparsity_level'] == 5.0 # Epoch 1
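# Worked example (illustrative, not part of the original test suite) of the
# LambdaSL arithmetic asserted above: the scheduler scales each group's base
# sparsity level by sl_lambda(epoch), so with base_sl = 0.5 and
# sl_lambda = lambda epoch: epoch * 10 we get 0.5 * 0 * 10 = 0.0 at epoch 0
# and 0.5 * 1 * 10 = 5.0 at epoch 1.
def _lambda_sl_expected(base_sl, epoch):
    return base_sl * (epoch * 10)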
|
pytorch-master
|
test/ao/sparsity/test_scheduler.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import logging
import torch
from torch.ao.sparsity.sparsifier.utils import (
fqn_to_module,
get_arg_info_from_tensor_fqn,
module_to_fqn,
)
from torch.testing._internal.common_quantization import (
ConvBnReLUModel,
ConvModel,
FunctionalLinear,
LinearAddModel,
ManualEmbeddingBagLinear,
SingleLayerLinearModel,
TwoLayerLinearModel,
)
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
model_list = [
ConvModel,
SingleLayerLinearModel,
TwoLayerLinearModel,
LinearAddModel,
ConvBnReLUModel,
ManualEmbeddingBagLinear,
FunctionalLinear,
]
class TestSparsityUtilFunctions(TestCase):
def test_module_to_fqn(self):
"""
        Tests that module_to_fqn works as expected when compared to the known-good
        module.get_submodule(fqn) function
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
fqn = module_to_fqn(model, module)
check_module = model.get_submodule(fqn)
self.assertEqual(module, check_module)
def test_module_to_fqn_fail(self):
"""
        Tests that module_to_fqn returns None when given a module that is not
        part of the model, i.e. one with no corresponding path/fqn
"""
for model_class in model_list:
model = model_class()
fqn = module_to_fqn(model, torch.nn.Linear(3, 3))
self.assertEqual(fqn, None)
def test_module_to_fqn_root(self):
"""
Tests that module_to_fqn returns '' when model and target module are the same
"""
for model_class in model_list:
model = model_class()
fqn = module_to_fqn(model, model)
self.assertEqual(fqn, "")
def test_fqn_to_module(self):
"""
Tests that fqn_to_module operates as inverse
of module_to_fqn
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
fqn = module_to_fqn(model, module)
check_module = fqn_to_module(model, fqn)
self.assertEqual(module, check_module)
def test_fqn_to_module_fail(self):
"""
Tests that fqn_to_module returns None when it tries to
find an fqn of a module outside the model
"""
for model_class in model_list:
model = model_class()
fqn = "foo.bar.baz"
check_module = fqn_to_module(model, fqn)
self.assertEqual(check_module, None)
def test_fqn_to_module_for_tensors(self):
"""
        Tests that fqn_to_module works for tensors, i.e. for all parameters
        of the model. This is tested by identifying a module that owns a tensor,
and generating the tensor_fqn using module_to_fqn on the module +
the name of the tensor.
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
module_fqn = module_to_fqn(model, module)
for tensor_name, tensor in module.named_parameters(recurse=False):
tensor_fqn = ( # string manip to handle tensors on root
module_fqn + ("." if module_fqn != "" else "") + tensor_name
)
check_tensor = fqn_to_module(model, tensor_fqn)
self.assertEqual(tensor, check_tensor)
def test_get_arg_info_from_tensor_fqn(self):
"""
Tests that get_arg_info_from_tensor_fqn works for all parameters of the model.
Generates a tensor_fqn in the same way as test_fqn_to_module_for_tensors and
then compares with known (parent) module and tensor_name as well as module_fqn
from module_to_fqn.
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
module_fqn = module_to_fqn(model, module)
for tensor_name, tensor in module.named_parameters(recurse=False):
tensor_fqn = (
module_fqn + ("." if module_fqn != "" else "") + tensor_name
)
arg_info = get_arg_info_from_tensor_fqn(model, tensor_fqn)
self.assertEqual(arg_info["module"], module)
self.assertEqual(arg_info["module_fqn"], module_fqn)
self.assertEqual(arg_info["tensor_name"], tensor_name)
self.assertEqual(arg_info["tensor_fqn"], tensor_fqn)
def test_get_arg_info_from_tensor_fqn_fail(self):
"""
Tests that get_arg_info_from_tensor_fqn works as expected for invalid tensor_fqn
inputs. The string outputs still work but the output module is expected to be None.
"""
for model_class in model_list:
model = model_class()
tensor_fqn = "foo.bar.baz"
arg_info = get_arg_info_from_tensor_fqn(model, tensor_fqn)
self.assertEqual(arg_info["module"], None)
self.assertEqual(arg_info["module_fqn"], "foo.bar")
self.assertEqual(arg_info["tensor_name"], "baz")
self.assertEqual(arg_info["tensor_fqn"], "foo.bar.baz")
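# Illustrative sketch (not part of the original test suite) of the three
# utilities exercised above, on a small throwaway model:
def _fqn_utils_example():
    model = torch.nn.Sequential(torch.nn.Linear(3, 3))
    # module_to_fqn and fqn_to_module are inverses of each other
    fqn = module_to_fqn(model, model[0])
    assert fqn_to_module(model, fqn) is model[0]
    # a tensor fqn is the owning module's fqn plus the tensor name
    arg_info = get_arg_info_from_tensor_fqn(model, fqn + ".weight")
    assert arg_info["module"] is model[0]
    assert arg_info["tensor_name"] == "weight"
    return arg_info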
|
pytorch-master
|
test/ao/sparsity/test_sparsity_utils.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
from torch.testing._internal.common_utils import run_tests
import copy
import numpy as np
import io
import logging
from itertools import product
import torch
import torch.ao.quantization as tq
from torch import nn
from torch.ao.sparsity.sparsifier.utils import fqn_to_module
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantized import (
override_cpu_allocator_for_qnnpack,
override_qengines,
qengine_is_qnnpack,
qengine_is_fbgemm,
qengine_is_onednn,
)
# TODO: Once more test files are created, move the contents to an ao folder.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class TestQuantizedSparseKernels(TestCase):
@override_qengines
def test_sparse_qlinear(self):
batch_size = 12
input_channels = 16
output_channels = 4
decimal_val = 4
row_block_size = 1
col_block_size = 4
        # The x86 implementation of sparse ops in qnnpack only supports
        # the 1x4 block pattern.
        # ARM kernels support both 1x4 and 8x1.
        # This distinction exists only because the x86 implementations are
        # there just to enable testing of the integration path.
        # We do plan to add 8x1 as well so that testing does not have to
        # special-case like this. At the moment it is deprioritized due
        # to other higher-priority work.
if qengine_is_qnnpack() and not (row_block_size == 1 and col_block_size == 4):
return
# ONEDNN does not support this yet
if qengine_is_onednn():
return
dense_prepack = torch.ops.quantized.linear_prepack
dense_qlinear = torch.ops.quantized.linear
dense_qlinear_dynamic = torch.ops.quantized.linear_dynamic
sparse_prepack = torch.ops.sparse.qlinear_prepack
sparse_qlinear = torch.ops.sparse.qlinear
sparse_qlinear_dynamic = torch.ops.sparse.qlinear_dynamic
X_scale = 0.2
X_zp = 2
X_fp32 = torch.randn(batch_size, input_channels, dtype=torch.float32)
float_bias = torch.randn(output_channels, dtype=torch.float32)
W_scales = torch.rand(output_channels, dtype=torch.float32)
W_zps = torch.zeros(output_channels, dtype=torch.int32)
W_fp32 = torch.randn(output_channels, input_channels, dtype=torch.float32)
with override_cpu_allocator_for_qnnpack(qengine_is_qnnpack()):
X_q = torch.quantize_per_tensor(
X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8
)
for use_channelwise, dynamic_mode in product([True, False], [True, False]):
if qengine_is_fbgemm() and dynamic_mode:
logging.info("dynamic sparse qlinear is only available in qnnpack")
continue
if qengine_is_qnnpack() and not dynamic_mode:
logging.info("static sparse qlinear is only available in fbgemm")
continue
if use_channelwise:
W_q = torch.quantize_per_channel(
W_fp32, scales=W_scales, zero_points=W_zps, axis=0, dtype=torch.qint8
)
else:
W_q = torch.quantize_per_tensor(
W_fp32, scale=W_scales[0], zero_point=W_zps[0], dtype=torch.qint8
)
Y_scale = 1.1234
Y_zp = 5
W_prepack_dense = dense_prepack(W_q, float_bias)
W_prepack_sparse = sparse_prepack(W_q, float_bias, row_block_size, col_block_size)
if dynamic_mode:
Y = sparse_qlinear_dynamic(X_fp32, W_prepack_sparse)
Y_ref = dense_qlinear_dynamic(X_fp32, W_prepack_dense)
np.testing.assert_array_almost_equal(Y_ref.numpy(), Y.numpy(), decimal=decimal_val)
else:
Y_q = sparse_qlinear(X_q, W_prepack_sparse, Y_scale, Y_zp)
Y_q_ref = dense_qlinear(X_q, W_prepack_dense, Y_scale, Y_zp)
np.testing.assert_array_almost_equal(
Y_q_ref.int_repr().numpy(), Y_q.int_repr().numpy(), decimal=decimal_val
)
def _sparse_layer_test_helper(
model_class,
sparse_mapping,
ref_mapping,
qconfig_dict,
fqn_to_check,
test_class,
test_scripting,
):
# SET UP TEST PARAMETERS, INPUTS AND WEIGHTS
# ------------------------------------------
batch_size = 12
input_channels = 4
output_channels = 7
model = model_class(input_channels, output_channels)
# For sparse kernels both the activation and weight ZP = 0
X_scale = 0.2
X_zp = 2
W_scale = 1e-2
W_zp = 0
X_fp32 = torch.randn(batch_size, input_channels, dtype=torch.float32)
float_bias = torch.randn(output_channels, dtype=torch.float32)
# generate a weight which we'll insert into the model
W_fp32 = torch.randn(output_channels, input_channels, dtype=torch.float32)
mask = torch.randint(0, 2, W_fp32.shape)
W_fp32 *= mask
with override_cpu_allocator_for_qnnpack(qengine_is_qnnpack()):
X_q = torch.quantize_per_tensor(
X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8
)
X_fp32 = X_q.dequantize()
W_q = torch.quantize_per_tensor(W_fp32, W_scale, W_zp, torch.qint8)
# PREPARE MODELS FOR QUANTIZATION
# -------------------------------
model.linear.weight = nn.Parameter(W_q.dequantize())
model.eval()
# Add `sparse_params` to the model. The test for correct
# sparse_param addition is in the sparsifier tests
model.linear.sparse_params = {"sparse_block_shape": (1, 4)}
# generate model versions
qmodel = copy.deepcopy(model)
sqmodel = copy.deepcopy(model)
# generate model versions and apply qconfigs
tq.propagate_qconfig_(qmodel, qconfig_dict)
tq.propagate_qconfig_(sqmodel, qconfig_dict)
tq.prepare(qmodel, inplace=True)
tq.prepare(sqmodel, inplace=True)
# calibrate
with torch.no_grad():
qmodel(X_fp32)
sqmodel(X_fp32)
# ACTUAL TESTING BEGINS HERE
# --------------------------
# Make sure the quantization parameters are computed the same way
qparams = qmodel.linear.qconfig.weight().calculate_qparams()
sqparams = sqmodel.linear.qconfig.weight().calculate_qparams()
test_class.assertEqual(qparams, sqparams)
sqmodule_to_check = fqn_to_module(sqmodel, fqn_to_check)
sqmodule_start_class = sqmodule_to_check.__class__
sqmodule_expected_converted_class = sparse_mapping[sqmodule_start_class]
qmodule_to_check = fqn_to_module(qmodel, fqn_to_check)
qmodule_start_class = qmodule_to_check.__class__
qmodule_expected_converted_class = ref_mapping[qmodule_start_class]
# need to determine whether dynamic quantization is being performed since
# input dtype will be different at the end
is_dynamic = isinstance(
qmodule_to_check.activation_post_process, tq.PlaceholderObserver
)
tq.convert(sqmodel, inplace=True, mapping=sparse_mapping)
tq.convert(qmodel, inplace=True, mapping=ref_mapping)
# this code is a duplicate of above since the references do not
# update to the post-convert modules
sqmodule_to_check = fqn_to_module(sqmodel, fqn_to_check)
qmodule_to_check = fqn_to_module(qmodel, fqn_to_check)
# check that the modules were converted as expected
assert isinstance(
sqmodule_to_check, sqmodule_expected_converted_class
), "Convert failed"
assert isinstance(
qmodule_to_check, qmodule_expected_converted_class
), "Mapping failed"
row_block_size, col_block_size = sqmodel.linear._packed_params._weight_bias()[
2:
]
assert row_block_size == 1 and col_block_size == 4
# only run during serialization/deserialization tests
# makes sure script/save/load doesn't malform the sqmodel
if test_scripting:
scripted_sqmodel = torch.jit.script(sqmodel)
scripted_sqmodel.eval()
buffer = io.BytesIO()
torch.jit.save(scripted_sqmodel, buffer)
buffer.seek(0)
sqmodel = torch.jit.load(buffer)
# use correct input dtype
if is_dynamic:
Y_ref = qmodel(X_fp32)
Y_hat = sqmodel(X_fp32)
test_class.assertEqual(Y_ref, Y_hat)
else:
Y_ref = qmodel(X_q)
Y_hat = sqmodel(X_q)
test_class.assertEqual(Y_ref.dequantize(), Y_hat.dequantize())
class SparseQuantizedModel(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.linear = nn.Linear(in_channels, out_channels)
def forward(self, x):
return self.linear(x)
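# The two tests below share the engine-dependent setup described in their
# comments: fbgemm currently supports only statically quantized sparse linear,
# while qnnpack supports only dynamically quantized sparse linear. A hedged
# sketch of that dispatch (illustrative only; the tests inline the same logic):
def _engine_mappings_sketch():
    if qengine_is_fbgemm():
        return (tq.get_default_static_sparse_quant_module_mappings(),
                tq.get_default_static_quant_module_mappings(),
                {nn.Linear: tq.get_default_qconfig("fbgemm")})
    if qengine_is_qnnpack():
        return (tq.get_default_dynamic_sparse_quant_module_mappings(),
                tq.get_default_dynamic_quant_module_mappings(),
                {nn.Linear: tq.qconfig.default_dynamic_qconfig})
    return None  # other engines (e.g. onednn) are not covered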
class TestQuantizedSparseLayers(TestCase):
@override_qengines
def test_sparse_qlinear(self):
# Note: At the moment, for sparse kernels
# fbgemm supports only static quantized sparse linear
# qnnpack supports only dynamically quantized sparse linear
# Hence we have two different tests.
# fbgemm tests static flow, qnnpack tests dynamic.
# Should be unified later on and tests should be fixed
# appropriately.
model_class = SparseQuantizedModel
fqn_to_check = "linear"
if qengine_is_fbgemm():
sparse_mapping = tq.get_default_static_sparse_quant_module_mappings()
ref_mapping = tq.get_default_static_quant_module_mappings()
qconfig_dict = {nn.Linear: tq.get_default_qconfig("fbgemm")}
elif qengine_is_qnnpack():
sparse_mapping = tq.get_default_dynamic_sparse_quant_module_mappings()
ref_mapping = tq.get_default_dynamic_quant_module_mappings()
qconfig_dict = {nn.Linear: tq.qconfig.default_dynamic_qconfig}
else:
return
_sparse_layer_test_helper(
model_class=model_class,
sparse_mapping=sparse_mapping,
ref_mapping=ref_mapping,
qconfig_dict=qconfig_dict,
fqn_to_check=fqn_to_check,
test_class=self,
test_scripting=False,
)
@override_qengines
def test_sparse_qlinear_serdes(self):
# Note: At the moment, for sparse kernels
# fbgemm supports only static quantized sparse linear
# qnnpack supports only dynamically quantized sparse linear
# Hence we have two different tests.
# fbgemm tests static flow, qnnpack tests dynamic.
# Should be unified later on and tests should be fixed
# appropriately.
model_class = SparseQuantizedModel
fqn_to_check = "linear"
if qengine_is_fbgemm():
sparse_mapping = tq.get_default_static_sparse_quant_module_mappings()
ref_mapping = tq.get_default_static_quant_module_mappings()
qconfig_dict = {nn.Linear: tq.get_default_qconfig("fbgemm")}
elif qengine_is_qnnpack():
sparse_mapping = tq.get_default_dynamic_sparse_quant_module_mappings()
ref_mapping = tq.get_default_dynamic_quant_module_mappings()
qconfig_dict = {nn.Linear: tq.qconfig.default_dynamic_qconfig}
else:
return
_sparse_layer_test_helper(
model_class=model_class,
sparse_mapping=sparse_mapping,
ref_mapping=ref_mapping,
qconfig_dict=qconfig_dict,
fqn_to_check=fqn_to_check,
test_class=self,
test_scripting=True,
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/ao/sparsity/test_kernels.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import copy
from torch.testing._internal.common_utils import TestCase, skipIfTorchDynamo
import logging
import torch
from torch.ao.sparsity._experimental.activation_sparsifier.activation_sparsifier import ActivationSparsifier
import torch.nn as nn
import torch.nn.functional as F
from torch.ao.sparsity.sparsifier.utils import module_to_fqn
from typing import List
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3)
self.identity1 = nn.Identity()
self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.linear1 = nn.Linear(4608, 128)
self.identity2 = nn.Identity()
self.linear2 = nn.Linear(128, 10)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.identity1(out)
out = self.max_pool1(out)
batch_size = x.shape[0]
out = out.reshape(batch_size, -1)
out = F.relu(self.identity2(self.linear1(out)))
out = self.linear2(out)
return out
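# Illustrative sketch (not part of the original test suite) of the data flow
# the activation sparsifier tests below exercise: activations seen by a
# registered layer are combined with aggregate_fn in a pre-forward hook,
# collapsed with reduce_fn when step() is called, and turned into a binary
# mask with mask_fn.
def _activation_sparsity_pipeline_example():
    batches = [torch.randn(4, 8) for _ in range(3)]
    aggregated = batches[0]
    for batch in batches[1:]:
        aggregated = aggregated + batch          # aggregate_fn: x + y
    reduced = torch.mean(aggregated, dim=0)      # reduce_fn: mean over the batch dim
    # mask_fn: zero out the smallest half of the entries by magnitude (sparsity_level=0.5)
    flat = torch.abs(reduced).flatten()
    _, sorted_idx = torch.sort(flat)
    mask = torch.ones_like(flat)
    mask[sorted_idx[:round(0.5 * len(sorted_idx))]] = 0
    return mask.reshape(reduced.shape)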
class TestActivationSparsifier(TestCase):
def _check_constructor(self, activation_sparsifier, model, defaults, sparse_config):
"""Helper function to check if the model, defaults and sparse_config are loaded correctly
in the activation sparsifier
"""
sparsifier_defaults = activation_sparsifier.defaults
combined_defaults = {**defaults, 'sparse_config': sparse_config}
        # more keys are populated in the activation sparsifier (even though they may be None)
assert len(combined_defaults) <= len(activation_sparsifier.defaults)
for key, config in sparsifier_defaults.items():
# all the keys in combined_defaults should be present in sparsifier defaults
assert config == combined_defaults.get(key, None)
def _check_register_layer(self, activation_sparsifier, defaults, sparse_config, layer_args_list):
"""Checks if layers in the model are correctly mapped to it's arguments.
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
defaults (Dict)
all default config (except sparse_config)
sparse_config (Dict)
default sparse config passed to the sparsifier
layer_args_list (list of tuples)
Each entry in the list corresponds to the layer arguments.
First entry in the tuple corresponds to all the arguments other than sparse_config
Second entry in the tuple corresponds to sparse_config
"""
# check args
data_groups = activation_sparsifier.data_groups
assert len(data_groups) == len(layer_args_list)
for layer_args in layer_args_list:
layer_arg, sparse_config_layer = layer_args
# check sparse config
sparse_config_actual = copy.deepcopy(sparse_config)
sparse_config_actual.update(sparse_config_layer)
name = module_to_fqn(activation_sparsifier.model, layer_arg['layer'])
assert data_groups[name]['sparse_config'] == sparse_config_actual
# assert the rest
other_config_actual = copy.deepcopy(defaults)
other_config_actual.update(layer_arg)
other_config_actual.pop('layer')
for key, value in other_config_actual.items():
assert key in data_groups[name]
assert value == data_groups[name][key]
# get_mask should raise error
with self.assertRaises(ValueError):
activation_sparsifier.get_mask(name=name)
def _check_pre_forward_hook(self, activation_sparsifier, data_list):
"""Registering a layer attaches a pre-forward hook to that layer. This function
checks if the pre-forward hook works as expected. Specifically, checks if the
input is aggregated correctly.
Basically, asserts that the aggregate of input activations is the same as what was
computed in the sparsifier.
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
data_list (list of torch tensors)
data input to the model attached to the sparsifier
"""
# can only check for the first layer
data_agg_actual = data_list[0]
model = activation_sparsifier.model
layer_name = module_to_fqn(model, model.conv1)
agg_fn = activation_sparsifier.data_groups[layer_name]['aggregate_fn']
for i in range(1, len(data_list)):
data_agg_actual = agg_fn(data_agg_actual, data_list[i])
assert 'data' in activation_sparsifier.data_groups[layer_name]
assert torch.all(activation_sparsifier.data_groups[layer_name]['data'] == data_agg_actual)
return data_agg_actual
def _check_step(self, activation_sparsifier, data_agg_actual):
"""Checks if .step() works as expected. Specifically, checks if the mask is computed correctly.
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
data_agg_actual (torch tensor)
aggregated torch tensor
"""
model = activation_sparsifier.model
layer_name = module_to_fqn(model, model.conv1)
assert layer_name is not None
reduce_fn = activation_sparsifier.data_groups[layer_name]['reduce_fn']
data_reduce_actual = reduce_fn(data_agg_actual)
mask_fn = activation_sparsifier.data_groups[layer_name]['mask_fn']
sparse_config = activation_sparsifier.data_groups[layer_name]['sparse_config']
mask_actual = mask_fn(data_reduce_actual, **sparse_config)
mask_model = activation_sparsifier.get_mask(layer_name)
assert torch.all(mask_model == mask_actual)
for _, config in activation_sparsifier.data_groups.items():
assert 'data' not in config
def _check_squash_mask(self, activation_sparsifier, data):
"""Makes sure that squash_mask() works as usual. Specifically, checks
if the sparsifier hook is attached correctly.
This is achieved by only looking at the identity layers and making sure that
the output == layer(input * mask).
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
data (torch tensor)
dummy batched data
"""
        # create a forward hook for checking output == layer(input * mask)
def check_output(name):
mask = activation_sparsifier.get_mask(name)
features = activation_sparsifier.data_groups[name].get('features')
feature_dim = activation_sparsifier.data_groups[name].get('feature_dim')
def hook(module, input, output):
input_data = input[0]
if features is None:
assert torch.all(mask * input_data == output)
else:
for feature_idx in range(0, len(features)):
feature = torch.Tensor([features[feature_idx]], device=input_data.device).long()
inp_data_feature = torch.index_select(input_data, feature_dim, feature)
out_data_feature = torch.index_select(output, feature_dim, feature)
assert torch.all(mask[feature_idx] * inp_data_feature == out_data_feature)
return hook
for name, config in activation_sparsifier.data_groups.items():
if 'identity' in name:
config['layer'].register_forward_hook(check_output(name))
activation_sparsifier.model(data)
def _check_state_dict(self, sparsifier1):
"""Checks if loading and restoring of state_dict() works as expected.
Basically, dumps the state of the sparsifier and loads it in the other sparsifier
and checks if all the configuration are in line.
        This function is called at various times in the workflow to make sure that the sparsifier
can be dumped and restored at any point in time.
"""
state_dict = sparsifier1.state_dict()
new_model = Model()
# create an empty new sparsifier
sparsifier2 = ActivationSparsifier(new_model)
assert sparsifier2.defaults != sparsifier1.defaults
assert len(sparsifier2.data_groups) != len(sparsifier1.data_groups)
sparsifier2.load_state_dict(state_dict)
assert sparsifier2.defaults == sparsifier1.defaults
for name, state in sparsifier2.state.items():
assert name in sparsifier1.state
mask1 = sparsifier1.state[name]['mask']
mask2 = state['mask']
if mask1 is None:
assert mask2 is None
else:
assert type(mask1) == type(mask2)
if isinstance(mask1, List):
assert len(mask1) == len(mask2)
for idx in range(len(mask1)):
assert torch.all(mask1[idx] == mask2[idx])
else:
assert torch.all(mask1 == mask2)
# make sure that the state dict is stored as torch sparse
for _, state in state_dict['state'].items():
mask = state['mask']
if mask is not None:
if isinstance(mask, List):
for idx in range(len(mask)):
assert mask[idx].is_sparse
else:
assert mask.is_sparse
dg1, dg2 = sparsifier1.data_groups, sparsifier2.data_groups
for layer_name, config in dg1.items():
assert layer_name in dg2
# exclude hook and layer
config1 = {key: value for key, value in config.items() if key not in ['hook', 'layer']}
config2 = {key: value for key, value in dg2[layer_name].items() if key not in ['hook', 'layer']}
assert config1 == config2
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_activation_sparsifier(self):
"""Simulates the workflow of the activation sparsifier, starting from object creation
till squash_mask().
The idea is to check that everything works as expected while in the workflow.
"""
# defining aggregate, reduce and mask functions
def agg_fn(x, y):
return x + y
def reduce_fn(x):
return torch.mean(x, dim=0)
def _vanilla_norm_sparsifier(data, sparsity_level):
r"""Similar to data norm spasifier but block_shape = (1,1).
Simply, flatten the data, sort it and mask out the values less than threshold
"""
data_norm = torch.abs(data).flatten()
_, sorted_idx = torch.sort(data_norm)
threshold_idx = round(sparsity_level * len(sorted_idx))
sorted_idx = sorted_idx[:threshold_idx]
mask = torch.ones_like(data_norm)
mask.scatter_(dim=0, index=sorted_idx, value=0)
mask = mask.reshape(data.shape)
return mask
# Creating default function and sparse configs
# default sparse_config
sparse_config = {
'sparsity_level': 0.5
}
defaults = {
'aggregate_fn': agg_fn,
'reduce_fn': reduce_fn
}
# simulate the workflow
# STEP 1: make data and activation sparsifier object
model = Model() # create model
activation_sparsifier = ActivationSparsifier(model, **defaults, **sparse_config)
# Test Constructor
self._check_constructor(activation_sparsifier, model, defaults, sparse_config)
# STEP 2: Register some layers
register_layer1_args = {
'layer': model.conv1,
'mask_fn': _vanilla_norm_sparsifier
}
sparse_config_layer1 = {'sparsity_level': 0.3}
register_layer2_args = {
'layer': model.linear1,
'features': [0, 10, 234],
'feature_dim': 1,
'mask_fn': _vanilla_norm_sparsifier
}
sparse_config_layer2 = {'sparsity_level': 0.1}
register_layer3_args = {
'layer': model.identity1,
'mask_fn': _vanilla_norm_sparsifier
}
sparse_config_layer3 = {'sparsity_level': 0.3}
register_layer4_args = {
'layer': model.identity2,
'features': [0, 10, 20],
'feature_dim': 1,
'mask_fn': _vanilla_norm_sparsifier
}
sparse_config_layer4 = {'sparsity_level': 0.1}
layer_args_list = [(register_layer1_args, sparse_config_layer1), (register_layer2_args, sparse_config_layer2)]
layer_args_list += [(register_layer3_args, sparse_config_layer3), (register_layer4_args, sparse_config_layer4)]
# Registering..
for layer_args in layer_args_list:
layer_arg, sparse_config_layer = layer_args
activation_sparsifier.register_layer(**layer_arg, **sparse_config_layer)
# check if things are registered correctly
self._check_register_layer(activation_sparsifier, defaults, sparse_config, layer_args_list)
# check state_dict after registering and before model forward
self._check_state_dict(activation_sparsifier)
# check if forward pre hooks actually work
# some dummy data
data_list = []
num_data_points = 5
for _ in range(0, num_data_points):
rand_data = torch.randn(16, 1, 28, 28)
activation_sparsifier.model(rand_data)
data_list.append(rand_data)
data_agg_actual = self._check_pre_forward_hook(activation_sparsifier, data_list)
# check state_dict() before step()
self._check_state_dict(activation_sparsifier)
# STEP 3: sparsifier step
activation_sparsifier.step()
# check state_dict() after step() and before squash_mask()
self._check_state_dict(activation_sparsifier)
self._check_step(activation_sparsifier, data_agg_actual)
# STEP 4: squash mask
activation_sparsifier.squash_mask()
self._check_squash_mask(activation_sparsifier, data_list[0])
# check state_dict() after squash_mask()
self._check_state_dict(activation_sparsifier)
|
pytorch-master
|
test/ao/sparsity/test_activation_sparsifier.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: mobile"]
import tempfile
import torch
from torch.ao.nn.sparse.quantized.dynamic.linear import Linear
from torch.testing._internal.common_quantized import (
qengine_is_qnnpack,
override_quantized_engine,
override_cpu_allocator_for_qnnpack
)
from torch.testing._internal.common_utils import TestCase
class TestQlinearPackedParams(TestCase):
def test_qlinear_packed_params(self, allow_non_zero_zero_points=False):
# copied from https://pytorch.org/docs/stable/sparse.html#csr-tensor-operations,
# so row/col block indices match that example, but with blocks and
# scaled rows
weight_fp32 = torch.Tensor([
[0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0],
[6, 6, 6, 6, 12, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
row_block_size = 1
col_block_size = 4
out_features = weight_fp32.shape[0]
in_features = weight_fp32.shape[1]
scales = [2.0, 6.0, 12.0]
zero_points = [
((i + 1) if allow_non_zero_zero_points else 0) for i in range(out_features)
]
dtype = torch.qint8
wide_weight_fp32 = torch.zeros((3, 4008)) # 4000 is tile width for Fbgemm
wide_weight_fp32[0][0] = 4
wide_weight_fp32[0][4004] = 6
wide_weight_fp32[1][0] = 8
per_tensor_small = (
torch.quantize_per_tensor(
weight_fp32,
scales[0],
zero_points[0],
dtype
),
True,
[0, 1, 3, 3],
[2, 0, 1],
[x + (1 if allow_non_zero_zero_points else 0) for x in [
1, 1, 1, 1, 3, 3, 3, 3, 6, 6, 6, 6
]],
)
per_channel_small = (
torch.quantize_per_channel(
weight_fp32,
torch.Tensor(scales),
torch.Tensor(zero_points).to(torch.int),
0, # axis = 0
dtype,
),
False,
[0, 1, 3, 3],
[2, 0, 1],
[x + ([1, 2, 2][i // 4] if allow_non_zero_zero_points else 0) for (i, x) in enumerate([
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2
])],
)
per_tensor_large = (
torch.quantize_per_tensor(
wide_weight_fp32,
scales[0],
zero_points[0],
dtype,
),
True,
[0, 2, 3, 3],
[0, 1001, 0],
[x + (1 if allow_non_zero_zero_points else 0) for x in [
2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0
]],
)
for (weight, is_per_tensor_quantized, expected_row_block_indices, expected_col_block_indices, expected_weights) in [
per_tensor_small, per_channel_small, per_tensor_large
]:
lin = Linear(
out_features=weight.shape[0],
in_features=weight.shape[1],
row_block_size=row_block_size,
col_block_size=col_block_size,
bias=True,
dtype=dtype,
)
bias = torch.ones(size=(weight.shape[0],))
lin.set_weight_bias(weight, bias, row_block_size, col_block_size)
serialized = lin._packed_params._packed_params.__getstate__()
(
_, # version
bias_,
out_features_block_size_,
in_features_block_size_,
weight_scales_,
weight_zero_points_,
quantization_scheme_,
row_block_indices_,
col_block_indices_,
weights_,
output_channels_,
input_channels_
) = serialized[0]
# Test Serialization
self.assertEqual(bias_, bias)
self.assertEqual(out_features_block_size_, row_block_size)
self.assertEqual(in_features_block_size_, col_block_size)
self.assertEqual(weight_scales_, [scales[0]] if is_per_tensor_quantized else scales)
self.assertEqual(weight_zero_points_, [zero_points[0]] if is_per_tensor_quantized else zero_points)
self.assertEqual(quantization_scheme_, is_per_tensor_quantized)
self.assertEqual(row_block_indices_, expected_row_block_indices)
self.assertEqual(col_block_indices_, expected_col_block_indices)
self.assertEqual(weights_.tolist(), [v + 128 for v in expected_weights]) # weights are serialized as +128
self.assertEqual(output_channels_, weight.shape[0])
self.assertEqual(input_channels_, weight.shape[1])
# Test Unpacking
(weights_, bias_, out_features_block_size_, in_features_block_size_) = lin._weight_bias()
self.assertEqual(torch.dequantize(weights_), torch.dequantize(weight))
self.assertEqual(bias_, bias)
self.assertEqual(out_features_block_size_, row_block_size)
self.assertEqual(in_features_block_size_, col_block_size)
# Test Deserialization
with tempfile.TemporaryFile() as file_buff:
torch.save(lin, file_buff)
file_buff.seek(0)
lin2 = torch.load(file_buff)
self.assertEqual(lin._weight_bias(), lin2._weight_bias())
# Serialize -> Deserialize -> Serialize should match Serialize
self.assertEqual(serialized, lin2._packed_params._packed_params.__getstate__())
# Test that op output is preserved by serialize -> deserialize
if qengine_is_qnnpack():
x = torch.rand(size=(1, weight.shape[1]))
y1 = lin(x)
y2 = lin2(x)
self.assertEqual(y1, y2)
def test_qlinear_packed_params_qnnpack(self):
torch.manual_seed(0)
with override_quantized_engine('qnnpack'):
with override_cpu_allocator_for_qnnpack(qengine_is_qnnpack()):
self.test_qlinear_packed_params(allow_non_zero_zero_points=True)
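# Worked example (illustrative, not part of the original test suite) of how the
# expected_row_block_indices / expected_col_block_indices used above follow
# from a 1x4 block-sparse (BSR-style) view of weight_fp32: row 0 has one
# non-zero block at column-block 2, row 1 has blocks at column-blocks 0 and 1,
# and row 2 has none, giving row offsets [0, 1, 3, 3] and column indices
# [2, 0, 1].
def _bsr_indices_example(weight, col_block_size=4):
    crow, col = [0], []
    for row in weight:
        blocks = row.reshape(-1, col_block_size)
        nonzero_blocks = (blocks != 0).any(dim=1).nonzero().flatten().tolist()
        col.extend(nonzero_blocks)
        crow.append(len(col))
    return crow, col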
|
pytorch-master
|
test/ao/sparsity/test_qlinear_packed_params.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import itertools
import logging
import re
import torch
from torch import nn
from torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity, NearlyDiagonalSparsifier
from torch.nn.utils.parametrize import is_parametrized
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
nn.Linear(16, 16)
)
self.linear = nn.Linear(16, 16)
self.head = nn.Linear(16, 4)
def forward(self, x):
x = self.seq(x)
x = self.linear(x)
x = self.head(x)
return x
class ImplementedSparsifier(BaseSparsifier):
def __init__(self, **kwargs):
super().__init__(defaults=kwargs)
def update_mask(self, module, **kwargs):
module.parametrizations.weight[0].mask[0] = 0
linear_state = self.state['linear.weight']
linear_state['step_count'] = linear_state.get('step_count', 0) + 1
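# Illustrative sketch (not part of the original test suite) of the minimal
# sparsifier workflow the tests below exercise: prepare() attaches a
# FakeSparsity parametrization to every configured tensor, step() calls
# update_mask() on each group, and squash_mask() folds the mask into the
# weight and removes the parametrization.
def _sparsifier_workflow_example():
    model = Model()
    sparsifier = ImplementedSparsifier(test=3)
    sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])
    sparsifier.enable_mask_update = True
    sparsifier.step()         # zeroes the first row of the mask (see update_mask above)
    sparsifier.squash_mask()  # model.linear.weight is now permanently masked
    return model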
class TestBaseSparsifier(TestCase):
def test_constructor(self):
# Cannot instantiate the abstract base
self.assertRaises(TypeError, BaseSparsifier)
# Can instantiate the model with no configs
model = Model()
sparsifier = ImplementedSparsifier(test=3)
sparsifier.prepare(model, config=None)
assert len(sparsifier.groups) == 3
sparsifier.step()
# Can instantiate the model with configs
sparsifier = ImplementedSparsifier(test=3)
sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])
assert len(sparsifier.groups) == 1
assert sparsifier.groups[0]['tensor_fqn'] == 'linear.weight'
assert 'test' in sparsifier.groups[0]
assert sparsifier.groups[0]['test'] == 3
def test_prepare_config(self):
model = Model()
sparsifier = ImplementedSparsifier(test=3)
# Make sure there are no parametrizations before `prepare`
assert not hasattr(model.seq[0], 'parametrizations')
assert not hasattr(model.linear, 'parametrizations')
assert not hasattr(model.head, 'parametrizations')
sparsifier.prepare(model, config=[
{'tensor_fqn': 'seq.0.weight', 'test': 42},
# No 'linear' to make sure it will be skipped in the sparsification
{'tensor_fqn': 'head.weight'}
])
assert len(sparsifier.groups) == 2
        # Check that the default argument is not used when an explicit value is given
assert sparsifier.groups[0]['tensor_fqn'] == 'seq.0.weight'
assert sparsifier.groups[0]['test'] == 42
# Check if FQN and module are pointing to the same location
assert sparsifier.groups[1]['tensor_fqn'] == 'head.weight'
assert sparsifier.groups[1]['module'] == model.head
        # Check if the parametrizations are attached
assert hasattr(model.seq[0], 'parametrizations')
assert not hasattr(model.linear, 'parametrizations')
assert hasattr(model.head, 'parametrizations')
def test_step(self):
model = Model()
sparsifier = ImplementedSparsifier(test=3)
sparsifier.enable_mask_update = True
sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])
sparsifier.step()
assert torch.all(model.linear.parametrizations.weight[0].mask[0] == 0)
def test_state_dict(self):
step_count = 3
model0 = Model()
sparsifier0 = ImplementedSparsifier(test=3)
sparsifier0.prepare(model0, [{'tensor_fqn': 'linear.weight'}])
mask = model0.linear.parametrizations['weight'][0].mask
mask.data = torch.arange(mask.shape[0] * mask.shape[1]).reshape(mask.shape)
        for _ in range(step_count):
sparsifier0.step()
state_dict = sparsifier0.state_dict()
# Check the expected keys in the state_dict
assert 'state' in state_dict
assert 'step_count' in state_dict['state']['linear.weight']
assert state_dict['state']['linear.weight']['step_count'] == 3
assert 'groups' in state_dict
assert 'test' in state_dict['groups'][0]
assert 'tensor_fqn' in state_dict['groups'][0]
assert state_dict['groups'][0]['tensor_fqn'] == 'linear.weight'
# Check loading static_dict creates an equivalent model
model1 = Model()
sparsifier1 = ImplementedSparsifier()
sparsifier1.prepare(model1, None)
assert sparsifier0.state != sparsifier1.state
# Make sure the masks are different in the beginning
for mg in sparsifier0.groups:
if mg['tensor_fqn'] == 'linear.weight':
mask0 = mg['module'].parametrizations.weight[0].mask
for mg in sparsifier1.groups:
if mg['tensor_fqn'] == 'linear.weight':
mask1 = mg['module'].parametrizations.weight[0].mask
self.assertNotEqual(mask0, mask1)
sparsifier1.load_state_dict(state_dict)
# Make sure the states are loaded, and are correct
assert sparsifier0.state == sparsifier1.state
# Make sure the masks (and all dicts) are the same after loading
assert len(sparsifier0.groups) == len(sparsifier1.groups)
for idx in range(len(sparsifier0.groups)):
mg0 = sparsifier0.groups[idx]
mg1 = sparsifier1.groups[idx]
for key in mg0.keys():
assert key in mg1
if key == 'module':
# We cannot compare modules as they are different
param0 = mg0[key].parametrizations.weight[0]
param1 = mg1[key].parametrizations.weight[0]
assert hasattr(param0, 'mask')
assert hasattr(param1, 'mask')
self.assertEqual(param0.__dict__, param1.__dict__)
else:
assert mg0[key] == mg1[key]
def test_mask_squash(self):
model = Model()
sparsifier = ImplementedSparsifier(test=3)
sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])
assert hasattr(model.linear.parametrizations.weight[0], 'mask')
assert is_parametrized(model.linear, 'weight')
assert not is_parametrized(model.seq[0], 'weight')
sparsifier.squash_mask()
assert not is_parametrized(model.seq[0], 'weight')
assert not is_parametrized(model.linear, 'weight')
def test_mask_squash_with_params1(self):
model = Model()
sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)
sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])
sparsifier.squash_mask(
params_to_keep_per_layer={
'linear': ('foo', 'bar'),
'seq.0': ('baz',)
})
assert not is_parametrized(model.seq[0], 'weight')
assert not is_parametrized(model.linear, 'weight')
assert hasattr(model.seq[0], 'sparse_params')
assert hasattr(model.linear, 'sparse_params')
assert model.seq[0].sparse_params.get('foo', None) is None
assert model.seq[0].sparse_params.get('bar', None) is None
assert model.seq[0].sparse_params.get('baz', None) == 1
assert model.linear.sparse_params.get('foo', None) == 3
assert model.linear.sparse_params.get('bar', None) == 2
assert model.linear.sparse_params.get('baz', None) is None
def test_mask_squash_with_params2(self):
model = Model()
sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)
sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])
sparsifier.squash_mask(params_to_keep=('foo', 'bar'))
assert not is_parametrized(model.seq[0], 'weight')
assert not is_parametrized(model.linear, 'weight')
assert hasattr(model.seq[0], 'sparse_params')
assert hasattr(model.linear, 'sparse_params')
assert model.seq[0].sparse_params.get('foo', None) == 3
assert model.seq[0].sparse_params.get('bar', None) == 2
assert model.seq[0].sparse_params.get('baz', None) is None
assert model.linear.sparse_params.get('foo', None) == 3
assert model.linear.sparse_params.get('bar', None) == 2
assert model.linear.sparse_params.get('baz', None) is None
def test_mask_squash_with_params3(self):
model = Model()
sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)
sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])
sparsifier.squash_mask(
params_to_keep=('foo', 'bar'),
params_to_keep_per_layer={'seq.0': ('baz',)})
assert not is_parametrized(model.seq[0], 'weight')
assert not is_parametrized(model.linear, 'weight')
assert hasattr(model.seq[0], 'sparse_params')
assert hasattr(model.linear, 'sparse_params')
assert model.seq[0].sparse_params.get('foo', None) == 3
assert model.seq[0].sparse_params.get('bar', None) == 2
assert model.seq[0].sparse_params.get('baz', None) == 1
assert model.linear.sparse_params.get('foo', None) == 3
assert model.linear.sparse_params.get('bar', None) == 2
assert model.linear.sparse_params.get('baz', None) is None
class TestWeightNormSparsifier(TestCase):
def test_constructor(self):
model = Model()
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
for g in sparsifier.groups:
assert isinstance(g['module'], nn.Linear)
# The groups are unordered
assert g['module_fqn'] in ('seq.0', 'linear', 'head')
def test_step(self):
model = Model()
sparsifier = WeightNormSparsifier(sparsity_level=0.5)
sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])
for g in sparsifier.groups:
# Before step
module = g['module']
assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0 # checking sparsity level is 0
sparsifier.enable_mask_update = True
sparsifier.step()
self.assertAlmostEqual(model.linear.parametrizations['weight'][0].mask.mean().item(), 0.5, places=2)
for g in sparsifier.groups:
# After step
module = g['module']
assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level has increased
# Test if the mask collapses to all zeros if the weights are randomized
iters_before_collapse = 1000
for _ in range(iters_before_collapse):
model.linear.weight.data = torch.randn(model.linear.weight.shape)
sparsifier.step()
for g in sparsifier.groups:
# After step
module = g['module']
assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level did not collapse
def test_step_2_of_4(self):
model = Model()
sparsifier = WeightNormSparsifier(sparsity_level=1.0,
sparse_block_shape=(1, 4),
zeros_per_block=2)
sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])
sparsifier.step()
# make sure the sparsity level is approximately 50%
mask = model.linear.parametrizations['weight'][0].mask.to(torch.float) # mean works on float only
self.assertAlmostEqual(mask.mean().item(), 0.5, places=2)
# Make sure each block has exactly 50% zeros
module = sparsifier.groups[0]['module']
mask = module.parametrizations['weight'][0].mask
for row in mask:
for idx in range(0, len(row), 4):
block = row[idx:idx + 4]
block, _ = block.sort()
assert (block[:2] == 0).all()
assert (block[2:] != 0).all()
def test_prepare(self):
model = Model()
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
for g in sparsifier.groups:
module = g['module']
# Check mask exists
assert hasattr(module.parametrizations['weight'][0], 'mask')
# Check parametrization exists and is correct
assert is_parametrized(module, 'weight')
assert type(module.parametrizations.weight[0]) == FakeSparsity
def test_mask_squash(self):
model = Model()
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
sparsifier.squash_mask()
for g in sparsifier.groups:
module = g['module']
assert not is_parametrized(module, 'weight')
assert not hasattr(module, 'mask')
def test_sparsity_levels(self):
sparsity_levels = [-1.0, 0.0, 0.5, 1.0, 2.0]
sparse_block_shapes = [(1, 1), (1, 4), (2, 2), (4, 1)]
zeros_per_blocks = [0, 1, 2, 3, 4]
testcases = itertools.tee(itertools.product(sparsity_levels,
sparse_block_shapes,
zeros_per_blocks))
# Create a config and model with all the testcases
model = nn.Sequential()
sparsifier = WeightNormSparsifier()
sparsity_per_layer_config = []
p = re.compile(r'[-\.\s]')
for sl, sbs, zpb in testcases[0]:
# Make sure the number of zeros is not > values in a block
if zpb > sbs[0] * sbs[1]:
continue
layer_name = f'{sl}_{sbs}_{zpb}'
layer_name = p.sub('_', layer_name)
layer = nn.Linear(12, 12, bias=False)
layer.weight = nn.Parameter(torch.ones(12, 12))
model.add_module(layer_name, layer)
config = {
'tensor_fqn': layer_name + ".weight",
'sparsity_level': sl,
'sparse_block_shape': sbs,
'zeros_per_block': zpb
}
sparsity_per_layer_config.append(config)
sparsifier.prepare(model, sparsity_per_layer_config)
sparsifier.step()
sparsifier.squash_mask()
model.eval()
for sl, sbs, zpb in testcases[1]:
if zpb > sbs[0] * sbs[1]:
continue
layer_name = f'{sl}_{sbs}_{zpb}'
layer_name = p.sub('_', layer_name)
layer = getattr(model, layer_name)
# Level of sparsity is achieved
sparse_mask = (layer.weight == 0).float()
if zpb == 0:
assert sparse_mask.mean() == 0
else:
# Ratio of individual zeros in the tensor
true_sl = min(max(sl, 0.0), 1.0)
true_sl = true_sl * zpb / sbs[0] / sbs[1]
assert sparse_mask.mean() == true_sl
class TestNearlyDiagonalSparsifier(TestCase):
def test_constructor(self):
model = Model()
sparsifier = NearlyDiagonalSparsifier(nearliness=1)
sparsifier.prepare(model, config=None)
for g in sparsifier.groups:
assert isinstance(g['module'], nn.Linear)
# The groups are unordered
assert g['module_fqn'] in ('seq.0', 'linear', 'head')
def test_step(self):
model = Model()
sparsifier = NearlyDiagonalSparsifier(nearliness=1)
sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])
for g in sparsifier.groups:
# Before step
module = g['module']
assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0 # checking sparsity level is 0
sparsifier.enable_mask_update = True
sparsifier.step()
mask = module.parametrizations['weight'][0].mask
height, width = mask.shape
assert torch.all(mask == torch.eye(height, width))
for g in sparsifier.groups:
# After step
module = g['module']
assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level has increased
# Test if the mask collapses to all zeros if the weights are randomized
iters_before_collapse = 1000
for _ in range(iters_before_collapse):
model.linear.weight.data = torch.randn(model.linear.weight.shape)
sparsifier.step()
for g in sparsifier.groups:
# After step
module = g['module']
assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level did not collapse
def test_prepare(self):
model = Model()
sparsifier = NearlyDiagonalSparsifier(nearliness=1)
sparsifier.prepare(model, config=None)
for g in sparsifier.groups:
module = g['module']
# Check mask exists
assert hasattr(module.parametrizations['weight'][0], 'mask')
# Check parametrization exists and is correct
assert is_parametrized(module, 'weight')
assert type(module.parametrizations.weight[0]) == FakeSparsity
def test_mask_squash(self):
model = Model()
sparsifier = NearlyDiagonalSparsifier(nearliness=1)
sparsifier.prepare(model, config=None)
sparsifier.step()
sparsifier.squash_mask()
for g in sparsifier.groups:
module = g['module']
assert not is_parametrized(module, 'weight')
assert not hasattr(module, 'mask')
weights = module.weight
height, width = weights.shape
assert torch.all(weights == torch.eye(height, width) * weights) # only diagonal to be present
def test_sparsity_levels(self):
        nearliness_levels = list(range(-1, 100))
model = nn.Sequential()
p = re.compile(r'[-\.\s]')
for nearliness in nearliness_levels:
sparsifier = NearlyDiagonalSparsifier(nearliness=1)
layer_name = f'{nearliness}'
layer_name = p.sub('_', layer_name)
layer = nn.Linear(32, 32, bias=False)
layer.weight = nn.Parameter(torch.ones(32, 32))
width, height = layer.weight.shape
model.add_module(layer_name, layer)
config = {
'tensor_fqn': layer_name + ".weight",
'nearliness': nearliness
}
sparsifier.prepare(model, [config])
# should raise a ValueError when nearliness arg is illegal
if (nearliness > 0 and nearliness % 2 == 0) or (nearliness // 2 >= min(width, height)):
with self.assertRaises(ValueError):
sparsifier.step()
else:
sparsifier.step()
sparsifier.squash_mask()
model.eval()
layer = getattr(model, layer_name)
# verify that mask created corresponds to the nearliness
self._verify_nearliness(layer.weight, nearliness)
# helper function to verify nearliness of a mask
def _verify_nearliness(self, mask: torch.Tensor, nearliness: int):
if nearliness <= 0:
assert torch.all(mask == torch.zeros(mask.shape[0], mask.shape[1]))
else:
height, width = mask.shape
dist_to_diagonal = nearliness // 2
for row in range(0, height):
for col in range(0, width):
if abs(row - col) <= dist_to_diagonal:
assert mask[row, col] == 1
else:
assert mask[row, col] == 0
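# Worked example (illustrative, not part of the original test suite) of the
# "nearliness" notion verified by _verify_nearliness above: entries within
# nearliness // 2 of the main diagonal are kept, so nearliness = 3 yields a
# tridiagonal mask.
def _nearliness_mask_example(size=4, nearliness=3):
    dist_to_diagonal = nearliness // 2
    rows = torch.arange(size).unsqueeze(1)
    cols = torch.arange(size).unsqueeze(0)
    return (torch.abs(rows - cols) <= dist_to_diagonal).to(torch.float)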
|
pytorch-master
|
test/ao/sparsity/test_sparsifier.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
import logging
import torch
from torch.nn.utils.parametrize import is_parametrized
import unittest
from torch.testing._internal.common_utils import TestCase, TEST_WITH_ASAN
from typing import Tuple
from torch import nn
import itertools
import math
import copy
from torch.ao.sparsity._experimental.data_sparsifier import BaseDataSparsifier, DataNormSparsifier
from torch.ao.sparsity._experimental.data_sparsifier.quantization_utils import post_training_sparse_quantize
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ImplementedSparsifier(BaseDataSparsifier):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def update_mask(self, name, data, **kwargs):
mask = self.get_mask(name)
mask[0] = 0
linear_state = self.state[name]
linear_state['step_count'] = linear_state.get('step_count', 0) + 1
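# Illustrative sketch (not part of the original test suite) of the two input
# formats the helper test cases below work with: `data_list` holds
# (name, tensor) tuples that rely on the sparsifier defaults, while
# `data_with_config` entries carry a per-tensor config dict.
def _example_inputs():
    data_list = [('tensor_a', torch.randn(4, 4))]
    data_with_config = [{
        'name': 'tensor_b',
        'data': torch.randn(4, 4),
        'config': {'test': 3},
    }]
    defaults = {'test': 3}
    return data_list, data_with_config, defaults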
class _BaseDataSparsiferTestCase(TestCase):
r"""This helper test class takes in any supported type of and runs some tests.
The user is required to pass in the data that needs to sparsified and the
runner will run some tests that needs to be passed in order for the data
type to be supported.
TODO: Change the structure by creating a separate test case class for each
member function
"""
def run_all_checks(self, data_list, data_with_config, defaults):
self.check_constructor(data_list, data_with_config, defaults)
self.check_squash_mask(data_list, data_with_config, defaults)
self.check_add_data(data_list, data_with_config, defaults)
self.check_step(data_list, data_with_config, defaults)
self.check_state_dict(data_list, data_with_config, defaults)
self.check_memory_reference(data_list, data_with_config, defaults)
@staticmethod
def _get_name_data_config(some_data, defaults=None):
if isinstance(some_data, Tuple):
# dealing with data_list
name, data = some_data
config = defaults
else:
# dealing with data_with_config
name, data, config = some_data['name'], some_data['data'], some_data['config']
return name, data, config
@staticmethod
def _make_sparsifier(data_list, data_with_config, defaults,
sparsifier_type=None, sparsifier_kwargs=None):
if sparsifier_type is None:
sparsifier = ImplementedSparsifier(data_list=data_list, **defaults)
else:
kwargs = copy.deepcopy(defaults)
kwargs.update(sparsifier_kwargs)
kwargs['data_list'] = data_list
sparsifier = sparsifier_type(**kwargs)
assert len(sparsifier.data_groups) == len(data_list)
for data_config_dict in data_with_config:
name, data, config = data_config_dict['name'], data_config_dict['data'], data_config_dict['config']
sparsifier.add_data(name=name, data=data, **config)
return sparsifier
def check_constructor(self, data_list, data_with_config, defaults, **kwargs):
sparsifier = self._make_sparsifier(data_list, data_with_config, defaults=defaults, **kwargs)
self.assertEqual(len(sparsifier.data_groups),
len(data_list) + len(data_with_config),
msg="Sparsifier data groups don't match the input "
f"({len(sparsifier.data_groups)} vs. "
f"{len(data_list) + len(data_with_config)}).")
all_data = data_list + data_with_config
for some_data in all_data:
name, _, config = self._get_name_data_config(some_data, defaults=defaults)
self.assertIn(name, sparsifier.data_groups)
self.assertEqual(sparsifier.data_groups[name], config)
def check_step(self, data_list, data_with_config, defaults, **kwargs):
sparsifier = self._make_sparsifier(data_list, data_with_config, defaults=defaults, **kwargs)
all_data = data_list + data_with_config
# Check data and mask before doing the step
for some_data in all_data:
name, data, _ = self._get_name_data_config(some_data)
data = sparsifier._extract_weight(data)
sparsified_data = sparsifier.get_data(name=name, return_original=False)
original_data = sparsifier.get_data(name=name, return_original=True)
mask = sparsifier.get_mask(name=name)
self.assertEqual(sparsified_data, data)
self.assertEqual(original_data, data)
self.assertEqualBroadcasting(mask[0], 1)
step_count = 3
for _ in range(0, step_count):
sparsifier.step()
for some_data in all_data:
name, data, _ = self._get_name_data_config(some_data)
data = sparsifier._extract_weight(data)
sparsified_data = sparsifier.get_data(name=name, return_original=False)
original_data = sparsifier.get_data(name=name, return_original=True)
mask = sparsifier.get_mask(name=name)
self.assertEqualBroadcasting(sparsified_data[0], 0)
self.assertEqual(original_data, data)
self.assertEqualBroadcasting(mask[0], 0)
assert 'step_count' in sparsifier.state[name]
assert sparsifier.state[name]['step_count'] == 3
def check_squash_mask(self, data_list, data_with_config, defaults, **kwargs):
sparsifier = self._make_sparsifier(data_list, data_with_config, defaults=defaults, **kwargs)
all_data = data_list + data_with_config
for some_data in all_data:
name, _, _ = self._get_name_data_config(some_data)
assert hasattr(sparsifier._container, name)
assert is_parametrized(sparsifier._container, name)
sparsifier.step()
sparsifier.squash_mask()
for some_data in all_data:
name, _, _ = self._get_name_data_config(some_data)
assert not is_parametrized(sparsifier._container, name) # not parametrized anymore
with self.assertRaises(ValueError):
sparsifier.get_data(name, return_original=True)
def check_add_data(self, data_list, data_with_config, defaults, **kwargs):
sparsifier = self._make_sparsifier(data_list, data_with_config, defaults=defaults, **kwargs)
all_data = data_list + data_with_config
for some_data in all_data:
name1, data1, config = self._get_name_data_config(some_data, defaults=defaults)
data1 = sparsifier._extract_weight(data1)
data1_old = copy.deepcopy(data1)
assert torch.all(data1 == sparsifier.get_data(name=name1))
sparsifier.step()
mask = sparsifier.get_mask(name1)
data2 = torch.randn(data1.shape) # add another data with the same shape as original data
sparsifier.add_data(name=name1, data=data2)
assert torch.all(data2 == sparsifier.get_data(name=name1))
assert torch.all(sparsifier.get_mask(name1) == mask) # mask should not change
assert torch.all(data1_old == data1)
assert sparsifier.data_groups[name1] == config # if replaced old_config should match new config
def check_state_dict(self, data_list, data_with_config, defaults, **kwargs):
sparsifier1 = self._make_sparsifier(data_list, data_with_config, defaults=defaults, **kwargs)
sparsifier2 = self._make_sparsifier(data_list=[data_list[0]], data_with_config=[], defaults=defaults, **kwargs)
sparsifier1.step()
state_dict1 = sparsifier1.state_dict()
assert sparsifier1.state != sparsifier2.state
name, _, _ = self._get_name_data_config(data_list[0])
self.assertNotEqual(sparsifier1.get_mask(name), sparsifier2.get_mask(name))
sparsifier2.load_state_dict(state_dict1)
assert len(sparsifier1.state) == len(sparsifier2.state)
assert len(sparsifier1.data_groups) == len(sparsifier2.data_groups)
state1 = state_dict1['state']
for name in state1.keys():
# compare mask
assert name in sparsifier2.state
assert 'mask' in sparsifier2.state[name]
assert 'mask' in sparsifier1.state[name]
mask1, mask2 = state1[name]['mask'], sparsifier2.state[name]['mask']
assert mask1.is_sparse and not mask2.is_sparse
assert torch.all(mask1.to_dense() == mask2) # mask1 is stored as sparse coo now
# compare data_groups
dg1, dg2 = sparsifier1.data_groups, sparsifier2.data_groups
assert name in dg1 and name in dg2
assert dg1[name] == dg2[name]
# compare container
container1, container2 = sparsifier1._container, sparsifier2._container
assert torch.all(getattr(container1, name) == getattr(container2, name))
assert is_parametrized(container1, name) == is_parametrized(container2, name)
if is_parametrized(container1, name):
param1 = getattr(container1.parametrizations, name)[0]
param2 = getattr(container2.parametrizations, name)[0]
assert hasattr(param1, 'mask')
assert hasattr(param2, 'mask')
self.assertEqual(param1.__dict__, param2.__dict__)
def check_memory_reference(self, data_list, data_with_config, defaults, **kwargs):
"""Checks if the data is truly "attached" to the sparsifier. Meaning, when the
data is changed outside of the sparsifier, the changes must be reflected on the data
inside the data sparsifier as well.
This makes sure that the sparsifier is holding the memory reference of the data and
not copies.
This test modifies the data and asserts that data in the sparsifier is changed as well
"""
sparsifier = self._make_sparsifier(data_list, data_with_config, defaults=defaults, **kwargs)
all_data = data_list + data_with_config
for some_data in all_data:
name, data, _ = self._get_name_data_config(some_data)
weight = sparsifier._extract_weight(data)
weight.data = weight + torch.randn(*weight.shape)
contained_data = sparsifier.get_data(name=name)
assert id(weight.data) == id(contained_data.data)
assert torch.all(contained_data == weight)
class _NormDataSparsifierTestCase(_BaseDataSparsiferTestCase):
r"""This helper test class takes in any supported type of and runs some tests.
This inherits the TestBaseDataSparsifierRuner wherein some functions are
over-ridden to take accomodate the specific sparsifier.
TODO: Change the structure by creating a separate test case class for each
member function
"""
def run_all_checks(self, data_list, defaults, data_with_config, norm_type='L1'):
assert norm_type in ['L1', 'L2']
kwargs = {
'sparsifier_type': DataNormSparsifier,
'sparsifier_kwargs': {'norm': norm_type}
}
self.check_constructor(data_list, data_with_config, defaults, **kwargs)
self.check_squash_mask(data_list, data_with_config, defaults, **kwargs)
self.check_add_data(data_list, data_with_config, defaults, **kwargs)
self.check_state_dict(data_list, data_with_config, defaults, **kwargs)
self.check_step(data_list, data_with_config, defaults, norm_type=norm_type)
self.check_step_2_of_4(norm_type=norm_type)
self.check_sparsity_level(data_list, data_with_config, defaults, norm_type=norm_type)
self.check_memory_reference(data_list, data_with_config, defaults, **kwargs)
@staticmethod
def _get_bounds_on_actual_sparsity(config, tensor_shape):
r"""This function gets the bounds on actual sparsity.
Note::
Although we specify the sparsity_level parameter, this does not mean that
the actual sparsity obtained after sparsification is the same as sparsity_level.
The actual sparsity depends largely on the shape and the data itself.
"""
sparsity_level = config['sparsity_level']
zeros_per_block = config['zeros_per_block']
sparse_block_shape = config['sparse_block_shape']
height, width = tensor_shape[-2], tensor_shape[-1]
block_height, block_width = sparse_block_shape
number_blocks = math.ceil(height / block_height) * math.ceil(width / block_width)
values_per_block = block_height * block_width
if zeros_per_block == 0:
return (1.0, 1.0)
else:
# min value assumes zeros_per_block is 1
min_values_sparsified = round(number_blocks * sparsity_level)
# max value assumes actual zeros_per_block
max_values_sparsified = min_values_sparsified * min(values_per_block, zeros_per_block)
lower_bound = min_values_sparsified / (height * width)
upper_bound = min(1.0, max_values_sparsified / (height * width))
lower_bound, upper_bound = round(lower_bound, 3), round(upper_bound, 3)
return lower_bound, upper_bound
def check_step(self, data_list, data_with_config, defaults, norm_type='L1'):
sparsifier = self._make_sparsifier(data_list, data_with_config, defaults,
sparsifier_type=DataNormSparsifier,
sparsifier_kwargs={'norm': norm_type})
all_data = data_list + data_with_config
# mask before step() should not be sparsified
for some_data in all_data:
name, _, _ = self._get_name_data_config(some_data)
mask = sparsifier.get_mask(name=name)
assert (1.0 - mask.mean()) == 0 # checking sparsity level is 0
sparsifier.step()
for some_data in all_data:
name, _, _ = self._get_name_data_config(some_data)
mask = sparsifier.get_mask(name=name)
config = sparsifier.data_groups[name]
lb, ub = self._get_bounds_on_actual_sparsity(config, mask.shape)
mask = mask.to(torch.float)
actual_sparsity = round(1 - mask.mean().item(), 3)
assert actual_sparsity >= lb and actual_sparsity <= ub
assert actual_sparsity > 0.0 # exact sparsity level cannot be achieved due to size of tensor
iters_before_collapse = 100
test_sparsifier = DataNormSparsifier(sparsity_level=0.5,
sparse_block_shape=(1, 4),
zeros_per_block=4,
norm=norm_type)
for _ in range(iters_before_collapse):
new_data = torch.randn(20, 20)
test_sparsifier.add_data(name='test_data', data=new_data)
test_sparsifier.step()
mask = test_sparsifier.get_mask(name='test_data')
mask = mask.to(torch.float)
assert (1.0 - mask.mean().item()) > 0 # some sparsity achieved
def check_step_2_of_4(self, norm_type):
# overriding default config for test purposes
default_config = {'sparsity_level': 1.0, 'zeros_per_block': 2, 'sparse_block_shape': (1, 4)}
data_list = [('test_data', torch.randn(4, 4))]
sparsifier = DataNormSparsifier(data_list=data_list, norm=norm_type, **default_config)
sparsifier.step()
for some_data in data_list:
name, _ = some_data
mask = sparsifier.get_mask(name=name)
mask = mask.to(torch.float)
self.assertAlmostEqual(1.0 - mask.mean().item(), 0.5, places=2)
for row in mask:
for idx in range(0, len(row), 4):
block = row[idx:idx + 4]
block, _ = block.sort()
assert (block[:2] == 0).all()
assert (block[2:] != 0).all()
def check_sparsity_level(self, data_list, data_with_config, defaults, norm_type='L1'):
sparsity_levels = [-1.0, 0.0, 0.5, 1.0, 2.0]
sparse_block_shapes = [(1, 1), (1, 4), (2, 2), (4, 1)]
zeros_per_blocks = [0, 1, 2, 3, 4]
sparsifier = DataNormSparsifier(data_list=data_list, norm=norm_type)
testcases = itertools.tee(itertools.product(sparsity_levels,
sparse_block_shapes,
zeros_per_blocks))
assert len(data_with_config) > 0 and 'name' in data_with_config[0] and 'data' in data_with_config[0]
# get some data
name, data = data_with_config[0]['name'], data_with_config[0]['data']
for idx, (sl, sbs, zpb) in enumerate(testcases[0]):
new_name = f'{name}_{idx}'
if zpb > sbs[0] * sbs[1]:
continue
current_config = {'sparsity_level': sl, 'sparse_block_shape': sbs, 'zeros_per_block': zpb}
sparsifier.add_data(name=new_name, data=data, **current_config)
if zpb > sbs[0] * sbs[1]:
continue
sparsifier.step()
sparsifier.squash_mask()
        for idx, (sl, sbs, zpb) in enumerate(testcases[1]):  # second tee branch; the first one was consumed above
new_name = f'{name}_{idx}'
sparsified_data = sparsifier.get_data(name=new_name, original=False)
# sparse mask
sparse_mask = (sparsified_data == 0).float()
if zpb == 0:
assert sparse_mask.mean() == 0
else:
# Ratio of individual zeros in the tensor
true_sl = min(max(sl, 0.0), 1.0)
true_sl = true_sl * zpb / sbs[0] / sbs[1]
assert sparse_mask.mean() == true_sl
class TestBaseDataSparsifier(_BaseDataSparsiferTestCase):
"""To add unit tests to support new data types for the BaseDataSparsifier, create the following
data_list: List of tuples of name, data to be added to the constructor
defaults: default config for the above data in data_list
data_with_config: list of dictionaries defining name, data and config (look test_tensors())
Once the above is done, create an instance of TestBaseDataSparsifierType and call all the run_tests()
"""
def test_tensors(self):
tensor1, tensor2, tensor3 = torch.randn(3, 3), torch.randn(4, 4), torch.randn(5, 5)
tensor4, tensor5 = torch.randn(1, 1), torch.randn(4, 4)
data_list = [('tensor1', tensor1), ('tensor2', tensor2), ('tensor3', tensor3)]
defaults = {'test': 3}
data_with_config = [
{
'name': 'tensor4', 'data': tensor4, 'config': {'test': 7}
},
{
'name': 'tensor5', 'data': tensor5, 'config': {'test': 8}
},
]
self.run_all_checks(data_list=data_list, defaults=defaults, data_with_config=data_with_config)
def test_nn_parameters(self):
param1, param2, param3 = nn.Parameter(torch.randn(3, 3)), nn.Parameter(torch.randn(4, 4)), nn.Parameter(torch.randn(5, 5))
param4, param5 = nn.Parameter(torch.randn(1, 1)), nn.Parameter(torch.randn(4, 4))
data_list = [('param1', param1), ('param2', param2), ('param3', param3)]
defaults = {'test': 3}
data_with_config = [
{
'name': 'param4', 'data': param4, 'config': {'test': 7}
},
{
'name': 'param5', 'data': param5, 'config': {'test': 8}
},
]
self.run_all_checks(data_list=data_list, defaults=defaults, data_with_config=data_with_config)
def test_nn_embeddings(self):
emb1, emb2, = nn.Embedding(10, 3), nn.Embedding(20, 3)
emb1_bag, emb2_bag = nn.EmbeddingBag(10, 3), nn.EmbeddingBag(20, 3)
emb3, emb3_bag = nn.Embedding(15, 3), nn.EmbeddingBag(20, 3)
data_list = [('emb1', emb1), ('emb1_bag', emb1_bag), ('emb2', emb2), ('emb2_bag', emb2_bag)]
defaults = {'test': 3}
data_with_config = [
{
'name': 'emb3', 'data': emb3, 'config': {'test': 7}
},
{
'name': 'emb3_bag', 'data': emb3_bag, 'config': {'test': 8}
},
]
self.run_all_checks(data_list=data_list, defaults=defaults, data_with_config=data_with_config)
class TestNormDataSparsifiers(_NormDataSparsifierTestCase):
"""To add unit tests to support new data types for the NormDataSparsifier, create the following
data_list: List of tuples of name, data to be added to the constructor
defaults: default config for the above data in data_list
data_with_config: list of dictionaries defining name, data and config (look test_tensors())
Once the above is done, create an instance of _NormDataSparsifierTestRunner and call run_tests()
"""
def test_tensors(self):
tensor1, tensor2, tensor3 = torch.randn(1, 10), torch.randn(4, 4), torch.randn(1, 5)
tensor4, tensor5 = torch.randn(1, 2), torch.randn(4, 4)
data_list = [('tensor1', tensor1), ('tensor2', tensor2), ('tensor3', tensor3)]
defaults = {'sparsity_level': 0.5, 'sparse_block_shape': (1, 4), 'zeros_per_block': 4}
data_with_config = [
{
'name': 'tensor4', 'data': tensor4,
'config': {'sparsity_level': 0.7, 'sparse_block_shape': (2, 3), 'zeros_per_block': 6}
},
{
'name': 'tensor5', 'data': tensor5,
'config': {'sparsity_level': 0.3, 'sparse_block_shape': (2, 3), 'zeros_per_block': 6}
},
]
self.run_all_checks(data_list=data_list, defaults=defaults,
data_with_config=data_with_config, norm_type='L1')
self.run_all_checks(data_list=data_list, defaults=defaults,
data_with_config=data_with_config, norm_type='L2')
def test_nn_parameters(self):
param1, param2, param3 = nn.Parameter(torch.randn(1, 8)), nn.Parameter(torch.randn(4, 4)), nn.Parameter(torch.randn(5, 5))
param4, param5 = nn.Parameter(torch.randn(10, 10)), nn.Parameter(torch.randn(4, 4))
data_list = [('param1', param1), ('param2', param2), ('param3', param3)]
defaults = {'sparsity_level': 0.5, 'sparse_block_shape': (1, 4), 'zeros_per_block': 4}
data_with_config = [
{
'name': 'param4', 'data': param4,
'config': {'sparsity_level': 0.7, 'sparse_block_shape': (2, 3), 'zeros_per_block': 6}
},
{
'name': 'param5', 'data': param5,
'config': {'sparsity_level': 0.3, 'sparse_block_shape': (2, 3), 'zeros_per_block': 6}
},
]
self.run_all_checks(data_list=data_list, defaults=defaults,
data_with_config=data_with_config, norm_type='L1')
self.run_all_checks(data_list=data_list, defaults=defaults,
data_with_config=data_with_config, norm_type='L2')
def test_nn_embeddings(self):
emb1, emb2, = nn.Embedding(10, 3), nn.Embedding(20, 3)
emb1_bag, emb2_bag = nn.EmbeddingBag(10, 3), nn.EmbeddingBag(20, 3)
emb3, emb3_bag = nn.Embedding(15, 3), nn.EmbeddingBag(20, 3)
data_list = [('emb1', emb1), ('emb1_bag', emb1_bag), ('emb2', emb2), ('emb2_bag', emb2_bag)]
defaults = {'sparsity_level': 0.5, 'sparse_block_shape': (1, 4), 'zeros_per_block': 4}
data_with_config = [
{
'name': 'emb3', 'data': emb3,
'config': {'sparsity_level': 0.7, 'sparse_block_shape': (2, 3), 'zeros_per_block': 6}
},
{
'name': 'emb3_bag', 'data': emb3_bag,
'config': {'sparsity_level': 0.3, 'sparse_block_shape': (2, 3), 'zeros_per_block': 6}
},
]
self.run_all_checks(data_list=data_list, defaults=defaults,
data_with_config=data_with_config, norm_type='L1')
self.run_all_checks(data_list=data_list, defaults=defaults,
data_with_config=data_with_config, norm_type='L2')
class Model(nn.Module):
def __init__(self):
super().__init__()
self.emb1 = nn.Embedding(100, 3)
self.embbag1 = nn.EmbeddingBag(200, 32)
self.emb_seq = nn.Sequential(nn.Embedding(150, 3), nn.EmbeddingBag(100, 3))
self.linear1 = nn.Linear(32, 32)
self.linear2 = nn.Linear(16, 16)
class TestQuantizationUtils(TestCase):
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN due to address sanitization")
def test_ptq_sparsify_first(self):
"""The expectation is post_training_sparse_quantize function
1. Takes in a model
2. Sparsifies the embeddings
3. Quantize the embeddings
This unit test checks that
1. Embeddings and EmbeddingBags are sparsified to the right sparsity levels
2. Embeddings and EmbeddingBags are quantized
3. Linear modules are not quanitzed
"""
model = Model()
sparse_config = {'sparsity_level': 0.80, 'sparse_block_shape': (1, 1)}
select_embeddings = [model.embbag1, model.emb1]
post_training_sparse_quantize(model,
data_sparsifier_class=DataNormSparsifier,
sparsify_first=True,
select_embeddings=select_embeddings,
**sparse_config)
assert type(model.emb1) == torch.nn.quantized.modules.embedding_ops.Embedding
assert type(model.embbag1) == torch.nn.quantized.modules.embedding_ops.EmbeddingBag
        assert type(model.emb_seq[0]) == nn.Embedding
        assert type(model.emb_seq[1]) == nn.EmbeddingBag
assert type(model.linear1) == nn.Linear
assert type(model.linear2) == nn.Linear
dequant_emb1 = torch.dequantize(model.emb1.weight())
dequant_embbag1 = torch.dequantize(model.embbag1.weight())
threshold = 1e-2
sl_emb1 = (torch.abs(dequant_emb1) < threshold).float().mean()
sl_embbag1 = (torch.abs(dequant_embbag1) < threshold).float().mean()
assert abs(sl_emb1 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_embbag1 - 0.80) <= 0.05 # +- 5% leeway
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN due to address sanitization")
def test_ptq_quantize_first(self):
"""The expectation is post_training_sparse_quantize function
1. Takes in a model
2. Quantize the embeddings
3. Sparsifies the embeddings
This unit test checks that
1. Embeddings and EmbeddingBags are sparsified to the right sparsity levels
2. Embeddings and EmbeddingBags are quantized
3. Linear modules are not quanitzed
"""
model = Model()
sparse_config = {'sparsity_level': 0.8, 'sparse_block_shape': (1, 1)}
post_training_sparse_quantize(model, DataNormSparsifier, sparsify_first=False, **sparse_config)
assert type(model.emb1) == torch.nn.quantized.modules.embedding_ops.Embedding
assert type(model.embbag1) == torch.nn.quantized.modules.embedding_ops.EmbeddingBag
        assert type(model.emb_seq[0]) == torch.nn.quantized.modules.embedding_ops.Embedding
        assert type(model.emb_seq[1]) == torch.nn.quantized.modules.embedding_ops.EmbeddingBag
assert type(model.linear1) == nn.Linear # not quantized
assert type(model.linear2) == nn.Linear # not quantized
dequant_emb1 = torch.dequantize(model.emb1.weight())
dequant_embbag1 = torch.dequantize(model.embbag1.weight())
dequant_emb_seq_0 = torch.dequantize(model.emb_seq[0].weight())
dequant_emb_seq_1 = torch.dequantize(model.emb_seq[1].weight())
        # higher threshold as quantization occurs before sparsification
        threshold = 1  # zero points seem to have higher magnitude when sparsification occurs after quantization
sl_emb1 = (torch.abs(dequant_emb1) < threshold).float().mean()
sl_embbag1 = (torch.abs(dequant_embbag1) < threshold).float().mean()
sl_emb_seq_0 = (torch.abs(dequant_emb_seq_0) < threshold).float().mean()
sl_emb_seq_1 = (torch.abs(dequant_emb_seq_1) < threshold).float().mean()
assert abs(sl_emb1 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_embbag1 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_emb_seq_0 - 0.80) <= 0.05 # +- 5% leeway
assert abs(sl_emb_seq_1 - 0.80) <= 0.05 # +- 5% leeway
|
pytorch-master
|
test/ao/sparsity/test_data_sparsifier.py
|
# Owner(s): ["oncall: jit"]
import unittest
from torch._lazy.ts_backend import init as init_ts_backend
init_ts_backend()
from torch._lazy import config
from torch._lazy.extract_compiled_graph import extract_compiled_graph
import torch
from torch import nn
import dis
import inspect
from torch import fx
import re
from contextlib import contextmanager
import copy
class ModuleConstScale(nn.Module):
def __init__(self):
super(ModuleConstScale, self).__init__()
def forward(self, a):
return a * 2
class ModuleSub(nn.Module):
def __init__(self):
super(ModuleSub, self).__init__()
def forward(self, a, b):
return a - b
class ModuleAddcmul(nn.Module):
"""
    addcmul takes an at::Scalar argument, which results in a special TSData containing a Scalar rather than a Tensor.
"""
def __init__(self):
super(ModuleAddcmul, self).__init__()
def forward(self, a, b, c):
return torch.addcmul(a, b, c, value=5)
class ModuleReturnMulti(nn.Module):
def __init__(self):
super(ModuleReturnMulti, self).__init__()
def forward(self, a, b):
return (b + 1, a - 1)
# The default fx tracer will convert torch.randn to a constant. We may need
# a custom tracer.
# class ModuleEagerTensor(nn.Module):
# def __init__(self):
# super(ModuleEagerTensor, self).__init__()
#
# def forward(self, a):
# b = torch.randn(2, 3, device="cpu") # eager device
# return a + b
# The module was planned to cover the case where an fx graph returns an eager
# tensor on the default device. It's harder than ModuleEagerTensor because
# we cannot simply override the device argument to lazy since there is no
# explicit device argument.
#
# Unfortunately, the default fx tracer converts the return value of the forward
# method to a constant. Commented out for now.
# class ModuleReturnEagerTensorOnDefaultDevice(nn.Module):
# def __init__(self):
# super(ModuleReturnEagerTensorOnDefaultDevice, self).__init__()
#
# def forward(self):
# return torch.tensor((2, 3), dtype=torch.float32)
class ModuleReturnDupTensor(nn.Module):
"""
Handle the corner case that the same tensor appears multiple times in the
returned tuple. torchbench like drq will hit this corner case when running
thru torchdynamo..
"""
def __init__(self):
super(ModuleReturnDupTensor, self).__init__()
def forward(self, a, b):
c = a + b
return a - b, c, a + 1, c
class ModuleInplaceUpdate(nn.Module):
def __init__(self):
super(ModuleInplaceUpdate, self).__init__()
def forward(self, a, b):
a.sub_(b)
return b - 1, b + 1
@contextmanager
def force_fallback_ctx_mgr(fallback_op):
oldconfig = config.get_force_fallback()
config.set_force_fallback(fallback_op)
try:
yield None
finally:
config.set_force_fallback(oldconfig)
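# Example usage (the same pattern maketest() wires up for test_ltc_fallback below):
#   with force_fallback_ctx_mgr("aten::sub"):
#       ...  # any aten::sub encountered inside this block falls back instead of being lowered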
@contextmanager
def nop_ctx_mgr():
try:
yield None
finally:
pass
def gen_rand_args(mod):
args = []
for _ in range(len(inspect.signature(mod.forward).parameters)):
args.append(torch.randn(2, 3))
return args
def allclose(expected, actual):
def unwrap(cont):
if isinstance(cont, (list, tuple)) and len(cont) == 1:
return cont[0]
return cont
expected = unwrap(expected)
actual = unwrap(actual)
if isinstance(expected, torch.Tensor) and isinstance(actual, torch.Tensor):
return torch.allclose(expected, actual)
elif isinstance(expected, (tuple, list)) and isinstance(actual, (tuple, list)):
return len(expected) == len(actual) and all(torch.allclose(a, b) for a, b in zip(expected, actual))
else:
raise RuntimeError("Unexpected types")
def verify_reusing_compiled_graph(mod, exception_msg_pattern, ncase=10):
args = gen_rand_args(mod)
out = mod(*args)
dis.dis(mod.forward)
try:
optimized_mod = extract_compiled_graph(fx.symbolic_trace(mod), args)
except RuntimeError as e:
if exception_msg_pattern is None:
raise e # reraise the exception
exception_message = str(e)
if not re.search(exception_msg_pattern, exception_message):
raise RuntimeError(f"Expection message does not match the required pattern: {exception_message}")
else:
# We are done for the test case that expects an exception
return
if exception_msg_pattern is not None:
raise RuntimeError(f"Expect an exception matching pattern {exception_msg_pattern}")
print("return value of optimized_mod", optimized_mod(*args))
# check correctness
failed_index = []
for i in range(ncase):
rand_args = gen_rand_args(mod)
rand_args_copy = copy.deepcopy(rand_args)
expected = mod(*rand_args)
actual = optimized_mod(*rand_args_copy)
if not allclose(expected, actual):
print(f"Incorrect results. expected {expected}, actual {actual}")
failed_index.append(i)
continue
# make sure arguments match after calling the model forward method to handle inplace
# updates.
if not allclose(rand_args, rand_args_copy):
print(f"Incorrect updated arguments. expected {rand_args}, actual {rand_args_copy}")
failed_index.append(i)
continue
if len(failed_index) > 0:
raise RuntimeError(f"Failed {len(failed_index)}/{ncase} cases")
def maketest(module_cls, exception_msg_pattern=None, ctxmgr=None):
def wrapper(self):
nonlocal ctxmgr
if not ctxmgr:
ctxmgr = nop_ctx_mgr()
with ctxmgr:
verify_reusing_compiled_graph(module_cls(), exception_msg_pattern)
return wrapper
class OptimizeTest(unittest.TestCase):
test_sub = maketest(ModuleSub)
    # Same as test_sub but forces aten::sub to fall back.
    # We expect an exception to be raised because of the LTC fallback.
test_ltc_fallback = maketest(ModuleSub, exception_msg_pattern="fallback.*aten::sub", ctxmgr=force_fallback_ctx_mgr("aten::sub"))
test_const_scale = maketest(ModuleConstScale)
test_addcmul = maketest(ModuleAddcmul)
test_return_multi = maketest(ModuleReturnMulti)
test_return_dup_tensor = maketest(ModuleReturnDupTensor)
test_inplace_update = maketest(ModuleInplaceUpdate)
|
pytorch-master
|
test/lazy/test_extract_compiled_graph.py
|
# Owner(s): ["oncall: jit"]
import torch
import torch._lazy
import torch._lazy.config
import torch._lazy.ir_cache
import torch._lazy.ts_backend
import torch._lazy.metrics as metrics
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase
import os
import unittest
torch._lazy.ts_backend.init()
torch._lazy.config.set_reuse_ir(True)
def get_test_device():
return 'cuda' if 'LTC_TS_CUDA' in os.environ else 'cpu'
@unittest.skipIf(IS_WINDOWS, "To be fixed")
class TestLazyReuseIr(TestCase):
def testAdd(self):
device = get_test_device()
x = torch.randn(2, 3, 4, device=device)
y = torch.randn(2, 3, 4, device=device)
z = torch.zeros(2, 3, 4, device=device)
device = 'lazy'
x_lazy = x.detach().clone().to(device=device)
y_lazy = y.detach().clone().to(device=device)
z_lazy = z.detach().clone().to(device=device)
for i in range(10):
z += (x + y)
for i in range(10):
z_lazy += (x_lazy + y_lazy)
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
assert metrics.counter_value("IrNodeReused_torch::lazy::AddTensor") >= 14
metrics.reset()
torch._lazy.ir_cache.reset()
def testAddSub(self):
device = get_test_device()
x = torch.randn(2, 3, 4, device=device)
y = torch.randn(2, 3, 4, device=device)
z = torch.zeros(2, 3, 4, device=device)
device = 'lazy'
x_lazy = x.detach().clone().to(device=device)
y_lazy = y.detach().clone().to(device=device)
z_lazy = z.detach().clone().to(device=device)
for i in range(10):
if i < 5:
z += (x + y)
else:
z += (x - y)
for i in range(10):
if i < 5:
z_lazy += (x_lazy + y_lazy)
else:
z_lazy += (x_lazy - y_lazy)
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
assert metrics.counter_value("IrNodeReused_torch::lazy::AddTensor") >= 8
metrics.reset()
torch._lazy.ir_cache.reset()
def testAddSubFallback(self):
torch._lazy.config.set_force_fallback("aten::sub")
device = get_test_device()
x = torch.randn(2, 3, 4, device=device)
y = torch.randn(2, 3, 4, device=device)
z = torch.zeros(2, 3, 4, device=device)
device = 'lazy'
x_lazy = x.detach().clone().to(device=device)
y_lazy = y.detach().clone().to(device=device)
z_lazy = z.detach().clone().to(device=device)
for i in range(10):
if i < 5:
z += (x + y)
else:
z += (x - y)
for i in range(10):
if i < 5:
z_lazy += (x_lazy + y_lazy)
else:
z_lazy += (x_lazy - y_lazy)
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
assert metrics.counter_value("IrNodeReused_torch::lazy::AddTensor") >= 8
metrics.reset()
torch._lazy.ir_cache.reset()
torch._lazy.config.set_force_fallback("")
def testBatchNorm(self):
device = get_test_device()
x = torch.randn(16, 3, 224, 224, device=device)
weight = torch.randn(3, device=device)
bias = torch.randn(3, device=device)
for i in range(10):
# BatchNorm2d does extra checks on dimensions which SymInts don't support yet
# so we call `torch.ops.aten.native_batch_norm` to bypass the checks.
z, _, _ = torch.ops.aten.native_batch_norm(x, weight, bias, None, None, True, 0.1, 1e-5)
device = "lazy"
x_lazy = x.detach().clone().to(device=device)
weight_lazy = weight.detach().clone().to(device=device)
bias_lazy = bias.detach().clone().to(device=device)
for i in range(10):
z_lazy, _, _ = torch.ops.aten.native_batch_norm(x_lazy, weight_lazy, bias_lazy, None, None, True, 0.1, 1e-5)
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
assert metrics.counter_value("IrNodeReused_torch::lazy::NativeBatchNorm") >= 7
metrics.reset()
torch._lazy.ir_cache.reset()
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/lazy/test_reuse_ir.py
|
pytorch-master
|
test/lazy/__init__.py
|
|
# Owner(s): ["oncall: jit"]
from typing import Sequence
import torch
import functools
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests
import torch._lazy
import torch._lazy.config
import torch._lazy.metrics
import torch._lazy.ir_cache
import torch._lazy.ts_backend
import itertools
import yaml
import os
import pathlib
from unittest import skip
torch._lazy.ts_backend.init()
def get_test_device():
return 'cuda' if 'LTC_TS_CUDA' in os.environ else 'cpu'
def remove_suffixes(l):
return [x.split(".")[0] for x in l]
def init_lists():
path_to_script = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
TS_NATIVE_FUNCTIONS_PATH = path_to_script.parent.parent / "aten/src/ATen/native/ts_native_functions.yaml"
with open(TS_NATIVE_FUNCTIONS_PATH) as f:
yaml_ts = yaml.load(f, yaml.Loader)
LAZY_OPS_LIST = set(remove_suffixes(itertools.chain(yaml_ts["full_codegen"], yaml_ts["supported"], yaml_ts["autograd"])))
FALLBACK_LIST = set(["clamp"])
SKIP_RUNTIME_ERROR_LIST = set([
'index_select', # Empty output_sizes is not supported
'clone', # is clone decomposed?
        # General ASAN failure related to generating bool values.
# https://github.com/pytorch/pytorch/issues/74519
# https://github.com/pytorch/pytorch/issues/63034
'nonzero', # ASAN failure (paste: P501906539)
'all', # ASAN failure
'any', # ASAN failure
'logdet', # ASAN failure
])
SKIP_INCORRECT_RESULTS_LIST = set([
'squeeze', # Value out of range
't', # Value out of range
'transpose', # Value out of range
'bernoulli', # incorrect results
'pow', # incorrect results
'addcdiv', # incorrect results (on CI not locally?)
])
# The following ops all show up directly in ts_native_functions.yaml,
# but run functionalized versions of the composite kernels in core.
    # This means that we don't expect the ops to show up directly in the LTC metrics.
FUNCTIONAL_DECOMPOSE_LIST = set([
'block_diag',
'new_empty_strided',
'narrow_copy',
'pixel_shuffle',
'pixel_unshuffle',
'select_backward',
'_trilinear',
'linalg_inv_ex',
'linalg_pinv.atol_rtol_tensor',
'logsumexp',
])
return (LAZY_OPS_LIST, FALLBACK_LIST, SKIP_RUNTIME_ERROR_LIST, SKIP_INCORRECT_RESULTS_LIST, FUNCTIONAL_DECOMPOSE_LIST)
(LAZY_OPS_LIST, FALLBACK_LIST, SKIP_RUNTIME_ERROR_LIST, SKIP_INCORRECT_RESULTS_LIST, FUNCTIONAL_DECOMPOSE_LIST) = init_lists()
torch.manual_seed(42)
def clone_move(t):
dev = 'lazy'
copy_t = t.detach().clone().requires_grad_(True).to(device=dev)
return copy_t
class TestLazyTensor(JitTestCase):
@skip("Disable until autograd supports symints")
def testConvolutionBackward(self):
test_device = get_test_device()
inp = torch.rand(1, 3, 128, 128, device=test_device, requires_grad=True)
inp_copy = clone_move(inp)
grad = torch.rand(1, 32, 121, 121, device=test_device) # no requires_grad
grad_copy = clone_move(grad)
weight = torch.rand(32, 3, 8, 8, device=test_device, requires_grad=True)
weight_copy = clone_move(weight)
bias = torch.rand(32, device=test_device, requires_grad=True)
bias_copy = clone_move(bias)
# run eager
conv_out = torch.nn.functional.conv2d(inp, weight, bias)
(inp_grad, weight_grad, bias_grad) = torch.autograd.grad([conv_out], [inp, weight, bias], [grad])
# run lazy
conv_copy_out = torch.nn.functional.conv2d(inp_copy, weight_copy, bias_copy)
(inp_copy_grad, weight_copy_grad, bias_copy_grad) = torch.autograd.grad(
[conv_copy_out], [inp_copy, weight_copy, bias_copy], [grad_copy])
# check numerics
torch.testing.assert_close(bias_copy_grad.cpu(), bias_grad.cpu())
torch.testing.assert_close(weight_copy_grad.cpu(), weight_grad.cpu())
torch.testing.assert_close(inp_copy_grad.cpu(), inp_grad.cpu())
def test_view_mark_step_preserved(self):
test_device = get_test_device()
inp = torch.rand(4, device=test_device)
inp_lazy = clone_move(inp)
def foo(x, *, mark_step):
y = x.view(2, 2)
y.add_(1)
z = x + x
if mark_step:
torch._lazy.mark_step()
            # y and x should continue to be aliased after the mark_step call.
y.add_(1)
return x
out_ref = foo(inp, mark_step=False)
out = foo(inp_lazy, mark_step=True)
# out will have some pending mutations, which will be synced by the .cpu() call.
torch.testing.assert_close(out_ref.cpu(), out.cpu())
def test_tensor_ctr(self):
test_device = get_test_device()
inp = torch.tensor([[1, 2, 3, 4, 5]], device=test_device)
inp_lazy = torch.tensor([[1, 2, 3, 4, 5]], device='lazy')
def foo(x):
# Calling a view op to ensure that functionalization wrapping occurs.
return x.view(-1)
out_ref = foo(inp)
out = foo(inp_lazy)
torch.testing.assert_close(out_ref.cpu(), out.cpu())
class TestLazyOpInfo(TestCase):
@ops([op for op in op_db
if op.name in LAZY_OPS_LIST
and op.name not in SKIP_RUNTIME_ERROR_LIST
and op.name not in FUNCTIONAL_DECOMPOSE_LIST
], allowed_dtypes=(torch.float,))
def test_dispatched_to_lazy(self, device, dtype, op):
def get_name(op):
l = [op.name]
if op.variant_test_name != '':
l.append(op.variant_test_name)
return '.'.join(l)
global FALLBACK_LIST
samples = op.sample_inputs("lazy", dtype, requires_grad=False)
sample = list(samples)[0]
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
torch._lazy.mark_step()
torch._lazy.wait_device_ops()
torch._lazy.metrics.reset()
r = op(*args, **kwargs)
torch._lazy.mark_step()
torch._lazy.wait_device_ops()
prefix = "aten" if op.name in FALLBACK_LIST else "lazy"
found = f"{prefix}::{op.name}" in remove_suffixes(torch._lazy.metrics.counter_names())
# check aliases
if not found:
for alias in op.aliases:
alias_found = f"{prefix}::{alias.name}" in remove_suffixes(torch._lazy.metrics.counter_names())
found = found or alias_found
if found:
break
self.assertTrue(found)
@ops([op for op in op_db if op.name in LAZY_OPS_LIST and op.name not in SKIP_RUNTIME_ERROR_LIST | SKIP_INCORRECT_RESULTS_LIST], allowed_dtypes=(torch.float,)) # noqa: B950
def test_correctness(self, device, dtype, op):
test_device = get_test_device()
def clone_to_device(input, dev):
if isinstance(input, torch.Tensor):
return input.detach().clone().to(device=dev)
if isinstance(input, Sequence) and not isinstance(input, str):
return tuple(map(functools.partial(clone_to_device, dev=dev), input))
return input
def assert_allclose_rec(t):
a, b = t
self.assertEqual(type(a), type(b))
if isinstance(a, torch.Tensor):
self.assertTrue(torch.allclose(clone_to_device(a, test_device), b, atol=1e-4))
if isinstance(a, Sequence):
                list(map(assert_allclose_rec, zip(a, b)))  # materialize the map so the recursive checks actually run
samples = op.sample_inputs("lazy", dtype, requires_grad=False)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
copy_args = clone_to_device(args, test_device)
r_exp = op(*copy_args, **kwargs)
r_actual = op(*args, **kwargs)
assert_allclose_rec((r_actual, r_exp))
@ops([op for op in op_db if op.name in LAZY_OPS_LIST and op.name not in SKIP_RUNTIME_ERROR_LIST | SKIP_INCORRECT_RESULTS_LIST], allowed_dtypes=(torch.float,)) # noqa: B950
def test_correctness_with_reusing_ir(self, device, dtype, op):
torch._lazy.config.set_reuse_ir(True)
test_device = get_test_device()
def clone_to_device(input, dev):
if isinstance(input, torch.Tensor):
return input.detach().clone().to(device=dev)
if isinstance(input, Sequence) and not isinstance(input, str):
return tuple(map(functools.partial(clone_to_device, dev=dev), input))
return input
def assert_allclose_rec(t):
a, b = t
self.assertEqual(type(a), type(b))
if isinstance(a, torch.Tensor):
self.assertTrue(torch.allclose(clone_to_device(a, test_device), b, atol=1e-4))
if isinstance(a, Sequence):
                list(map(assert_allclose_rec, zip(a, b)))  # materialize the map so the recursive checks actually run
samples = op.sample_inputs("lazy", dtype, requires_grad=False)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
copy_args = clone_to_device(args, test_device)
r_exp = op(*copy_args, **kwargs)
r_actual = op(*args, **kwargs)
torch._lazy.mark_step()
assert_allclose_rec((r_actual, r_exp))
torch._lazy.ir_cache.reset()
torch._lazy.config.set_reuse_ir(False)
# TODO: after we move to master, add Lazy as a new Device here:
# https://github.com/pytorch/pytorch/blob/master/torch/testing/_internal/common_device_type.py#L532
instantiate_device_type_tests(TestLazyOpInfo, globals(), only_for="cpu")
class TestLazyDynamicOps(TestCase):
@classmethod
def setUpClass(cls) -> None:
# Setup the dynamic shape mode
cls.old_ssa_mode = torch._C._lazy._get_symbolic_shape_mode()
torch._C._lazy._set_symbolic_shape_mode(True)
return super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
torch._C._lazy._set_symbolic_shape_mode(cls.old_ssa_mode)
return super().tearDownClass()
def test_nonzero_dynamic(self):
# Test that nonzero gives upper bounds sizes when symbolic shape mode is enabled
test_device = get_test_device()
x1 = torch.tensor([[0, 1.0, 2.0], [3.0, 0, 0]], device=test_device, requires_grad=True)
x1_lazy = clone_move(x1)
x2_lazy = torch.nonzero(x1_lazy)
# FIXME: Add bindings to get upper bounds
# self.assertEqual(tuple(x2_lazy.size()), (6, 2))
# We should still be able to instantiate it and get the actual result
x2_eager = x2_lazy.cpu()
self.assertEqual(tuple(x2_eager.size()), (3, 2))
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/lazy/test_ts_opinfo.py
|
# Owner(s): ["oncall: jit"]
import torch._lazy.metrics
def test_metrics():
names = torch._lazy.metrics.counter_names()
assert len(names) == 0, f"Expected no counter names, but got {names}"
|
pytorch-master
|
test/lazy/test_bindings.py
|
# Owner(s): ["module: unknown"]
import torch
x = torch.ones((3, 3), requires_grad=True)
(3 * x).sum().backward()
|
pytorch-master
|
test/bottleneck_test/test.py
|
# Owner(s): ["module: unknown"]
import argparse
import torch
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Required args. Raises error if they aren't passed.
parser.add_argument('--foo', help='foo', required=True)
parser.add_argument('--bar', help='bar', required=True)
_ = parser.parse_args()
x = torch.ones((3, 3), requires_grad=True)
(3 * x).sum().backward()
|
pytorch-master
|
test/bottleneck_test/test_args.py
|
# Owner(s): ["module: unknown"]
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(20, 20)
def forward(self, input):
out = self.linear(input[:, 10:30])
return out.sum()
def main():
data = torch.randn(10, 50).cuda()
model = Model().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
for i in range(10):
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
if __name__ == '__main__':
main()
|
pytorch-master
|
test/bottleneck_test/test_cuda.py
|
# Owner(s): ["module: distributions"]
import pytest
import torch
from torch.distributions.utils import tril_matrix_to_vec, vec_to_tril_matrix
@pytest.mark.parametrize('shape', [
(2, 2),
(3, 3),
(2, 4, 4),
(2, 2, 4, 4),
])
def test_tril_matrix_to_vec(shape):
mat = torch.randn(shape)
n = mat.shape[-1]
for diag in range(-n, n):
actual = mat.tril(diag)
vec = tril_matrix_to_vec(actual, diag)
tril_mat = vec_to_tril_matrix(vec, diag)
assert torch.allclose(tril_mat, actual)
if __name__ == '__main__':
pytest.main([__file__])
|
pytorch-master
|
test/distributions/test_utils.py
|
# Owner(s): ["module: distributions"]
import pytest
import torch
from torch.distributions import biject_to, constraints, transform_to
from torch.testing._internal.common_cuda import TEST_CUDA
EXAMPLES = [
(constraints.symmetric, False, [[2., 0], [2., 2]]),
(constraints.positive_semidefinite, False, [[2., 0], [2., 2]]),
(constraints.positive_definite, False, [[2., 0], [2., 2]]),
(constraints.symmetric, True, [[3., -5], [-5., 3]]),
(constraints.positive_semidefinite, False, [[3., -5], [-5., 3]]),
(constraints.positive_definite, False, [[3., -5], [-5., 3]]),
(constraints.symmetric, True, [[1., 2], [2., 4]]),
(constraints.positive_semidefinite, True, [[1., 2], [2., 4]]),
(constraints.positive_definite, False, [[1., 2], [2., 4]]),
(constraints.symmetric, True, [[[1., -2], [-2., 1]], [[2., 3], [3., 2]]]),
(constraints.positive_semidefinite, False, [[[1., -2], [-2., 1]], [[2., 3], [3., 2]]]),
(constraints.positive_definite, False, [[[1., -2], [-2., 1]], [[2., 3], [3., 2]]]),
(constraints.symmetric, True, [[[1., -2], [-2., 4]], [[1., -1], [-1., 1]]]),
(constraints.positive_semidefinite, True, [[[1., -2], [-2., 4]], [[1., -1], [-1., 1]]]),
(constraints.positive_definite, False, [[[1., -2], [-2., 4]], [[1., -1], [-1., 1]]]),
(constraints.symmetric, True, [[[4., 2], [2., 4]], [[3., -1], [-1., 3]]]),
(constraints.positive_semidefinite, True, [[[4., 2], [2., 4]], [[3., -1], [-1., 3]]]),
(constraints.positive_definite, True, [[[4., 2], [2., 4]], [[3., -1], [-1., 3]]]),
]
CONSTRAINTS = [
(constraints.real,),
(constraints.real_vector,),
(constraints.positive,),
(constraints.greater_than, [-10., -2, 0, 2, 10]),
(constraints.greater_than, 0),
(constraints.greater_than, 2),
(constraints.greater_than, -2),
(constraints.greater_than_eq, 0),
(constraints.greater_than_eq, 2),
(constraints.greater_than_eq, -2),
(constraints.less_than, [-10., -2, 0, 2, 10]),
(constraints.less_than, 0),
(constraints.less_than, 2),
(constraints.less_than, -2),
(constraints.unit_interval,),
(constraints.interval, [-4., -2, 0, 2, 4], [-3., 3, 1, 5, 5]),
(constraints.interval, -2, -1),
(constraints.interval, 1, 2),
(constraints.half_open_interval, [-4., -2, 0, 2, 4], [-3., 3, 1, 5, 5]),
(constraints.half_open_interval, -2, -1),
(constraints.half_open_interval, 1, 2),
(constraints.simplex,),
(constraints.corr_cholesky,),
(constraints.lower_cholesky,),
]
def build_constraint(constraint_fn, args, is_cuda=False):
if not args:
return constraint_fn
t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
return constraint_fn(*(t(x) if isinstance(x, list) else x for x in args))
@pytest.mark.parametrize('constraint_fn, result, value', EXAMPLES)
@pytest.mark.parametrize('is_cuda', [False,
pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
reason='CUDA not found.'))])
def test_constraint(constraint_fn, result, value, is_cuda):
t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
assert constraint_fn.check(t(value)).all() == result
@pytest.mark.parametrize('constraint_fn, args', [(c[0], c[1:]) for c in CONSTRAINTS])
@pytest.mark.parametrize('is_cuda', [False,
pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
reason='CUDA not found.'))])
def test_biject_to(constraint_fn, args, is_cuda):
constraint = build_constraint(constraint_fn, args, is_cuda=is_cuda)
try:
t = biject_to(constraint)
except NotImplementedError:
pytest.skip('`biject_to` not implemented.')
assert t.bijective, "biject_to({}) is not bijective".format(constraint)
if constraint_fn is constraints.corr_cholesky:
# (D * (D-1)) / 2 (where D = 4) = 6 (size of last dim)
x = torch.randn(6, 6, dtype=torch.double)
else:
x = torch.randn(5, 5, dtype=torch.double)
if is_cuda:
x = x.cuda()
y = t(x)
assert constraint.check(y).all(), '\n'.join([
"Failed to biject_to({})".format(constraint),
"x = {}".format(x),
"biject_to(...)(x) = {}".format(y),
])
x2 = t.inv(y)
assert torch.allclose(x, x2), "Error in biject_to({}) inverse".format(constraint)
j = t.log_abs_det_jacobian(x, y)
assert j.shape == x.shape[:x.dim() - t.domain.event_dim]
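# Note: biject_to(constraint) must be a true bijection (the x -> t(x) -> t.inv(t(x))
# round trip is checked above), while transform_to(constraint) below only needs to be
# a right inverse, i.e. t(t.inv(y)) == y.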
@pytest.mark.parametrize('constraint_fn, args', [(c[0], c[1:]) for c in CONSTRAINTS])
@pytest.mark.parametrize('is_cuda', [False,
pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
reason='CUDA not found.'))])
def test_transform_to(constraint_fn, args, is_cuda):
constraint = build_constraint(constraint_fn, args, is_cuda=is_cuda)
t = transform_to(constraint)
if constraint_fn is constraints.corr_cholesky:
# (D * (D-1)) / 2 (where D = 4) = 6 (size of last dim)
x = torch.randn(6, 6, dtype=torch.double)
else:
x = torch.randn(5, 5, dtype=torch.double)
if is_cuda:
x = x.cuda()
y = t(x)
assert constraint.check(y).all(), "Failed to transform_to({})".format(constraint)
x2 = t.inv(y)
y2 = t(x2)
assert torch.allclose(y, y2), "Error in transform_to({}) pseudoinverse".format(constraint)
if __name__ == '__main__':
pytest.main([__file__])
|
pytorch-master
|
test/distributions/test_constraints.py
|
# Owner(s): ["module: distributions"]
"""
Note [Randomized statistical tests]
-----------------------------------
This note describes how to maintain tests in this file as random sources
change. This file contains two types of randomized tests:
1. The easier type of randomized test is one that should always pass but is
   initialized with random data. If these fail, something is wrong, but it's
   fine to use a fixed seed by inheriting from common.TestCase.
2. The trickier tests are statistical tests. These tests explicitly call
set_rng_seed(n) and are marked "see Note [Randomized statistical tests]".
These statistical tests have a known positive failure rate
(we set failure_rate=1e-3 by default). We need to balance strength of these
tests with annoyance of false alarms. One way that works is to specifically
set seeds in each of the randomized tests. When a random generator
occasionally changes (as in #4312 vectorizing the Box-Muller sampler), some
of these statistical tests may (rarely) fail. If one fails in this case,
it's fine to increment the seed of the failing test (but you shouldn't need
to increment it more than once; otherwise something is probably actually
wrong).
3. `test_geometric_sample`, `test_binomial_sample` and `test_poisson_sample`
   are validated against `scipy.stats`, which is not guaranteed to be identical
   across different versions of scipy (namely, they yield invalid results in 1.7+).
"""
import math
import numbers
import unittest
from collections import namedtuple
from itertools import product
from random import shuffle
from packaging import version
import torch
# TODO: remove this global setting
# Distributions tests use double as the default dtype
torch.set_default_dtype(torch.double)
from torch._six import inf, nan
from torch.testing._internal.common_utils import \
(TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests,
gradcheck)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd import grad
import torch.autograd.forward_ad as fwAD
from torch.autograd.functional import jacobian
from torch.distributions import (Bernoulli, Beta, Binomial, Categorical,
Cauchy, Chi2, ContinuousBernoulli, Dirichlet,
Distribution, Exponential, ExponentialFamily,
FisherSnedecor, Gamma, Geometric, Gumbel,
HalfCauchy, HalfNormal, Independent, Kumaraswamy,
LKJCholesky, Laplace, LogisticNormal,
LogNormal, LowRankMultivariateNormal,
MixtureSameFamily, Multinomial, MultivariateNormal,
NegativeBinomial, Normal,
OneHotCategorical, OneHotCategoricalStraightThrough,
Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical,
StudentT, TransformedDistribution, Uniform,
VonMises, Weibull, Wishart, constraints, kl_divergence)
from torch.distributions.constraint_registry import transform_to
from torch.distributions.constraints import Constraint, is_dependent
from torch.distributions.dirichlet import _Dirichlet_backward
from torch.distributions.kl import _kl_expfamily_expfamily
from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform,
StackTransform, identity_transform)
from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec,
vec_to_tril_matrix)
from torch.nn.functional import softmax
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
TEST_NUMPY = True
try:
import numpy as np
import scipy.stats
import scipy.special
except ImportError:
TEST_NUMPY = False
def pairwise(Dist, *params):
"""
    Creates a pair of distributions `Dist` initialized to test each element of
    params against every other element.
"""
params1 = [torch.tensor([p] * len(p)) for p in params]
params2 = [p.transpose(0, 1) for p in params1]
return Dist(*params1), Dist(*params2)
def is_all_nan(tensor):
"""
    Checks if all entries of a tensor are NaN.
"""
return (tensor != tensor).all()
# Register all distributions for generic tests.
Example = namedtuple('Example', ['Dist', 'params'])
EXAMPLES = [
Example(Bernoulli, [
{'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)},
{'probs': torch.tensor([0.3], requires_grad=True)},
{'probs': 0.3},
{'logits': torch.tensor([0.], requires_grad=True)},
]),
Example(Geometric, [
{'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)},
{'probs': torch.tensor([0.3], requires_grad=True)},
{'probs': 0.3},
]),
Example(Beta, [
{
'concentration1': torch.randn(2, 3).exp().requires_grad_(),
'concentration0': torch.randn(2, 3).exp().requires_grad_(),
},
{
'concentration1': torch.randn(4).exp().requires_grad_(),
'concentration0': torch.randn(4).exp().requires_grad_(),
},
]),
Example(Categorical, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)},
{'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},
]),
Example(Binomial, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True),
'total_count': torch.tensor([[10., 8.], [5., 3.]])},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True),
'total_count': torch.tensor(0.)},
]),
Example(NegativeBinomial, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10},
{'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': 10},
{'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])},
{'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])},
{'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True),
'total_count': torch.tensor([[10., 8.], [5., 3.]])},
{'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True),
'total_count': torch.tensor(0.)},
]),
Example(Multinomial, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10},
]),
Example(Cauchy, [
{'loc': 0.0, 'scale': 1.0},
{'loc': torch.tensor([0.0]), 'scale': 1.0},
{'loc': torch.tensor([[0.0], [0.0]]),
'scale': torch.tensor([[1.0], [1.0]])}
]),
Example(Chi2, [
{'df': torch.randn(2, 3).exp().requires_grad_()},
{'df': torch.randn(1).exp().requires_grad_()},
]),
Example(StudentT, [
{'df': torch.randn(2, 3).exp().requires_grad_()},
{'df': torch.randn(1).exp().requires_grad_()},
]),
Example(Dirichlet, [
{'concentration': torch.randn(2, 3).exp().requires_grad_()},
{'concentration': torch.randn(4).exp().requires_grad_()},
]),
Example(Exponential, [
{'rate': torch.randn(5, 5).abs().requires_grad_()},
{'rate': torch.randn(1).abs().requires_grad_()},
]),
Example(FisherSnedecor, [
{
'df1': torch.randn(5, 5).abs().requires_grad_(),
'df2': torch.randn(5, 5).abs().requires_grad_(),
},
{
'df1': torch.randn(1).abs().requires_grad_(),
'df2': torch.randn(1).abs().requires_grad_(),
},
{
'df1': torch.tensor([1.0]),
'df2': 1.0,
}
]),
Example(Gamma, [
{
'concentration': torch.randn(2, 3).exp().requires_grad_(),
'rate': torch.randn(2, 3).exp().requires_grad_(),
},
{
'concentration': torch.randn(1).exp().requires_grad_(),
'rate': torch.randn(1).exp().requires_grad_(),
},
]),
Example(Gumbel, [
{
'loc': torch.randn(5, 5, requires_grad=True),
'scale': torch.randn(5, 5).abs().requires_grad_(),
},
{
'loc': torch.randn(1, requires_grad=True),
'scale': torch.randn(1).abs().requires_grad_(),
},
]),
Example(HalfCauchy, [
{'scale': 1.0},
{'scale': torch.tensor([[1.0], [1.0]])}
]),
Example(HalfNormal, [
{'scale': torch.randn(5, 5).abs().requires_grad_()},
{'scale': torch.randn(1).abs().requires_grad_()},
{'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)}
]),
Example(Independent, [
{
'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),
torch.randn(2, 3).abs().requires_grad_()),
'reinterpreted_batch_ndims': 0,
},
{
'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),
torch.randn(2, 3).abs().requires_grad_()),
'reinterpreted_batch_ndims': 1,
},
{
'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),
torch.randn(2, 3).abs().requires_grad_()),
'reinterpreted_batch_ndims': 2,
},
{
'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),
torch.randn(2, 3, 5).abs().requires_grad_()),
'reinterpreted_batch_ndims': 2,
},
{
'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),
torch.randn(2, 3, 5).abs().requires_grad_()),
'reinterpreted_batch_ndims': 3,
},
]),
Example(Kumaraswamy, [
{
'concentration1': torch.empty(2, 3).uniform_(1, 2).requires_grad_(),
'concentration0': torch.empty(2, 3).uniform_(1, 2).requires_grad_(),
},
{
'concentration1': torch.rand(4).uniform_(1, 2).requires_grad_(),
'concentration0': torch.rand(4).uniform_(1, 2).requires_grad_(),
},
]),
Example(LKJCholesky, [
{
'dim': 2,
'concentration': 0.5
},
{
'dim': 3,
'concentration': torch.tensor([0.5, 1., 2.]),
},
{
'dim': 100,
'concentration': 4.
},
]),
Example(Laplace, [
{
'loc': torch.randn(5, 5, requires_grad=True),
'scale': torch.randn(5, 5).abs().requires_grad_(),
},
{
'loc': torch.randn(1, requires_grad=True),
'scale': torch.randn(1).abs().requires_grad_(),
},
{
'loc': torch.tensor([1.0, 0.0], requires_grad=True),
'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),
},
]),
Example(LogNormal, [
{
'loc': torch.randn(5, 5, requires_grad=True),
'scale': torch.randn(5, 5).abs().requires_grad_(),
},
{
'loc': torch.randn(1, requires_grad=True),
'scale': torch.randn(1).abs().requires_grad_(),
},
{
'loc': torch.tensor([1.0, 0.0], requires_grad=True),
'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),
},
]),
Example(LogisticNormal, [
{
'loc': torch.randn(5, 5).requires_grad_(),
'scale': torch.randn(5, 5).abs().requires_grad_(),
},
{
'loc': torch.randn(1).requires_grad_(),
'scale': torch.randn(1).abs().requires_grad_(),
},
{
'loc': torch.tensor([1.0, 0.0], requires_grad=True),
'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),
},
]),
Example(LowRankMultivariateNormal, [
{
'loc': torch.randn(5, 2, requires_grad=True),
'cov_factor': torch.randn(5, 2, 1, requires_grad=True),
'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True),
},
{
'loc': torch.randn(4, 3, requires_grad=True),
'cov_factor': torch.randn(3, 2, requires_grad=True),
'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True),
}
]),
Example(MultivariateNormal, [
{
'loc': torch.randn(5, 2, requires_grad=True),
'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True),
},
{
'loc': torch.randn(2, 3, requires_grad=True),
'precision_matrix': torch.tensor([[2.0, 0.1, 0.0],
[0.1, 0.25, 0.0],
[0.0, 0.0, 0.3]], requires_grad=True),
},
{
'loc': torch.randn(5, 3, 2, requires_grad=True),
'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]],
[[2.0, 0.0], [0.3, 0.25]],
[[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True),
},
{
'loc': torch.tensor([1.0, -1.0]),
'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]),
},
]),
Example(Normal, [
{
'loc': torch.randn(5, 5, requires_grad=True),
'scale': torch.randn(5, 5).abs().requires_grad_(),
},
{
'loc': torch.randn(1, requires_grad=True),
'scale': torch.randn(1).abs().requires_grad_(),
},
{
'loc': torch.tensor([1.0, 0.0], requires_grad=True),
'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),
},
]),
Example(OneHotCategorical, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)},
{'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},
]),
Example(OneHotCategoricalStraightThrough, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)},
{'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},
]),
Example(Pareto, [
{
'scale': 1.0,
'alpha': 1.0
},
{
'scale': torch.randn(5, 5).abs().requires_grad_(),
'alpha': torch.randn(5, 5).abs().requires_grad_()
},
{
'scale': torch.tensor([1.0]),
'alpha': 1.0
}
]),
Example(Poisson, [
{
'rate': torch.randn(5, 5).abs().requires_grad_(),
},
{
'rate': torch.randn(3).abs().requires_grad_(),
},
{
'rate': 0.2,
},
{
'rate': torch.tensor([0.0], requires_grad=True),
},
{
'rate': 0.0,
}
]),
Example(RelaxedBernoulli, [
{
'temperature': torch.tensor([0.5], requires_grad=True),
'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True),
},
{
'temperature': torch.tensor([2.0]),
'probs': torch.tensor([0.3]),
},
{
'temperature': torch.tensor([7.2]),
'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0])
}
]),
Example(RelaxedOneHotCategorical, [
{
'temperature': torch.tensor([0.5], requires_grad=True),
'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True)
},
{
'temperature': torch.tensor([2.0]),
'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]])
},
{
'temperature': torch.tensor([7.2]),
'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]])
}
]),
Example(TransformedDistribution, [
{
'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),
torch.randn(2, 3).abs().requires_grad_()),
'transforms': [],
},
{
'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),
torch.randn(2, 3).abs().requires_grad_()),
'transforms': ExpTransform(),
},
{
'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),
torch.randn(2, 3, 5).abs().requires_grad_()),
'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)),
ExpTransform()],
},
{
'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),
torch.randn(2, 3, 5).abs().requires_grad_()),
'transforms': AffineTransform(1, 2),
},
{
'base_distribution': Uniform(torch.tensor(1e8).log(), torch.tensor(1e10).log()),
'transforms': ExpTransform(),
},
]),
Example(Uniform, [
{
'low': torch.zeros(5, 5, requires_grad=True),
'high': torch.ones(5, 5, requires_grad=True),
},
{
'low': torch.zeros(1, requires_grad=True),
'high': torch.ones(1, requires_grad=True),
},
{
'low': torch.tensor([1.0, 1.0], requires_grad=True),
'high': torch.tensor([2.0, 3.0], requires_grad=True),
},
]),
Example(Weibull, [
{
'scale': torch.randn(5, 5).abs().requires_grad_(),
'concentration': torch.randn(1).abs().requires_grad_()
}
]),
Example(Wishart, [
{
'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True),
'df': torch.tensor([3.], requires_grad=True),
},
{
'precision_matrix': torch.tensor([[2.0, 0.1, 0.0],
[0.1, 0.25, 0.0],
[0.0, 0.0, 0.3]], requires_grad=True),
'df': torch.tensor([5., 4], requires_grad=True),
},
{
'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]],
[[2.0, 0.0], [0.3, 0.25]],
[[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True),
'df': torch.tensor([5., 3.5, 3], requires_grad=True),
},
{
'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]),
'df': torch.tensor([3.0]),
},
{
'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]),
'df': 3.0,
},
]),
Example(MixtureSameFamily, [
{
'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)),
'component_distribution': Normal(torch.randn(5, requires_grad=True),
torch.rand(5, requires_grad=True)),
},
{
'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)),
'component_distribution': MultivariateNormal(
loc=torch.randn(5, 2, requires_grad=True),
covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)),
},
]),
Example(VonMises, [
{
'loc': torch.tensor(1.0, requires_grad=True),
'concentration': torch.tensor(10.0, requires_grad=True)
},
{
'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True),
'concentration': torch.tensor([1.0, 10.0], requires_grad=True)
},
]),
Example(ContinuousBernoulli, [
{'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)},
{'probs': torch.tensor([0.3], requires_grad=True)},
{'probs': 0.3},
{'logits': torch.tensor([0.], requires_grad=True)},
])
]
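# Invalid parameterizations for each distribution below (e.g. probabilities above 1 or
# non-positive scales); these are expected to be rejected when argument validation is enabled.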
BAD_EXAMPLES = [
Example(Bernoulli, [
{'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)},
{'probs': torch.tensor([-0.5], requires_grad=True)},
{'probs': 1.00001},
]),
Example(Beta, [
{
'concentration1': torch.tensor([0.0], requires_grad=True),
'concentration0': torch.tensor([0.0], requires_grad=True),
},
{
'concentration1': torch.tensor([-1.0], requires_grad=True),
'concentration0': torch.tensor([-2.0], requires_grad=True),
},
]),
Example(Geometric, [
{'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)},
{'probs': torch.tensor([-0.3], requires_grad=True)},
{'probs': 1.00000001},
]),
Example(Categorical, [
{'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)},
{'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)},
]),
Example(Binomial, [
{'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True),
'total_count': 10},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True),
'total_count': 10},
]),
Example(NegativeBinomial, [
{'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True),
'total_count': 10},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True),
'total_count': 10},
]),
Example(Cauchy, [
{'loc': 0.0, 'scale': -1.0},
{'loc': torch.tensor([0.0]), 'scale': 0.0},
{'loc': torch.tensor([[0.0], [-2.0]]),
'scale': torch.tensor([[-0.000001], [1.0]])}
]),
Example(Chi2, [
{'df': torch.tensor([0.], requires_grad=True)},
{'df': torch.tensor([-2.], requires_grad=True)},
]),
Example(StudentT, [
{'df': torch.tensor([0.], requires_grad=True)},
{'df': torch.tensor([-2.], requires_grad=True)},
]),
Example(Dirichlet, [
{'concentration': torch.tensor([0.], requires_grad=True)},
{'concentration': torch.tensor([-2.], requires_grad=True)}
]),
Example(Exponential, [
{'rate': torch.tensor([0., 0.], requires_grad=True)},
{'rate': torch.tensor([-2.], requires_grad=True)}
]),
Example(FisherSnedecor, [
{
'df1': torch.tensor([0., 0.], requires_grad=True),
'df2': torch.tensor([-1., -100.], requires_grad=True),
},
{
'df1': torch.tensor([1., 1.], requires_grad=True),
'df2': torch.tensor([0., 0.], requires_grad=True),
}
]),
Example(Gamma, [
{
'concentration': torch.tensor([0., 0.], requires_grad=True),
'rate': torch.tensor([-1., -100.], requires_grad=True),
},
{
'concentration': torch.tensor([1., 1.], requires_grad=True),
'rate': torch.tensor([0., 0.], requires_grad=True),
}
]),
Example(Gumbel, [
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([0., 1.], requires_grad=True),
},
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([1., -1.], requires_grad=True),
},
]),
Example(HalfCauchy, [
{'scale': -1.0},
{'scale': 0.0},
{'scale': torch.tensor([[-0.000001], [1.0]])}
]),
Example(HalfNormal, [
{'scale': torch.tensor([0., 1.], requires_grad=True)},
{'scale': torch.tensor([1., -1.], requires_grad=True)},
]),
Example(LKJCholesky, [
{
'dim': -2,
'concentration': 0.1
},
{
'dim': 1,
'concentration': 2.,
},
{
'dim': 2,
'concentration': 0.,
},
]),
Example(Laplace, [
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([0., 1.], requires_grad=True),
},
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([1., -1.], requires_grad=True),
},
]),
Example(LogNormal, [
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([0., 1.], requires_grad=True),
},
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([1., -1.], requires_grad=True),
},
]),
Example(MultivariateNormal, [
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True),
},
]),
Example(Normal, [
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([0., 1.], requires_grad=True),
},
{
'loc': torch.tensor([1., 1.], requires_grad=True),
'scale': torch.tensor([1., -1.], requires_grad=True),
},
{
'loc': torch.tensor([1.0, 0.0], requires_grad=True),
'scale': torch.tensor([1e-5, -1e-5], requires_grad=True),
},
]),
Example(OneHotCategorical, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)},
{'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},
]),
Example(OneHotCategoricalStraightThrough, [
{'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)},
{'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},
]),
Example(Pareto, [
{
'scale': 0.0,
'alpha': 0.0
},
{
'scale': torch.tensor([0.0, 0.0], requires_grad=True),
'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True)
},
{
'scale': torch.tensor([1.0]),
'alpha': -1.0
}
]),
Example(Poisson, [
{
'rate': torch.tensor([-0.1], requires_grad=True),
},
{
'rate': -1.0,
}
]),
Example(RelaxedBernoulli, [
{
'temperature': torch.tensor([1.5], requires_grad=True),
'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True),
},
{
'temperature': torch.tensor([2.0]),
'probs': torch.tensor([-1.0]),
}
]),
Example(RelaxedOneHotCategorical, [
{
'temperature': torch.tensor([0.5], requires_grad=True),
'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True)
},
{
'temperature': torch.tensor([2.0]),
'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]])
}
]),
Example(TransformedDistribution, [
{
'base_distribution': Normal(0, 1),
'transforms': lambda x: x,
},
{
'base_distribution': Normal(0, 1),
'transforms': [lambda x: x],
},
]),
Example(Uniform, [
{
'low': torch.tensor([2.0], requires_grad=True),
'high': torch.tensor([2.0], requires_grad=True),
},
{
'low': torch.tensor([0.0], requires_grad=True),
'high': torch.tensor([0.0], requires_grad=True),
},
{
'low': torch.tensor([1.0], requires_grad=True),
'high': torch.tensor([0.0], requires_grad=True),
}
]),
Example(Weibull, [
{
'scale': torch.tensor([0.0], requires_grad=True),
'concentration': torch.tensor([0.0], requires_grad=True)
},
{
'scale': torch.tensor([1.0], requires_grad=True),
'concentration': torch.tensor([-1.0], requires_grad=True)
}
]),
Example(Wishart, [
{
'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True),
'df': torch.tensor([1.5], requires_grad=True),
},
{
'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True),
'df': torch.tensor([3.], requires_grad=True),
},
{
'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True),
'df': 3.,
},
]),
Example(ContinuousBernoulli, [
{'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)},
{'probs': torch.tensor([-0.5], requires_grad=True)},
{'probs': 1.00001},
])
]
class DistributionsTestCase(TestCase):
def setUp(self):
"""The tests assume that the validation flag is set."""
torch.distributions.Distribution.set_default_validate_args(True)
super(DistributionsTestCase, self).setUp()
class TestDistributions(DistributionsTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
def _gradcheck_log_prob(self, dist_ctor, ctor_params):
# performs gradient checks on log_prob
distribution = dist_ctor(*ctor_params)
s = distribution.sample()
if not distribution.support.is_discrete:
s = s.detach().requires_grad_()
expected_shape = distribution.batch_shape + distribution.event_shape
self.assertEqual(s.size(), expected_shape)
def apply_fn(s, *params):
return dist_ctor(*params).log_prob(s)
gradcheck(apply_fn, (s,) + tuple(ctor_params), raise_exception=True)
def _check_forward_ad(self, fn):
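        # Evaluate fn on a dual tensor under forward-mode AD and check that the output tangent is identically zero.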
with fwAD.dual_level():
x = torch.tensor(1.)
t = torch.tensor(1.)
dual = fwAD.make_dual(x, t)
dual_out = fn(dual)
self.assertEqual(torch.count_nonzero(fwAD.unpack_dual(dual_out).tangent).item(), 0)
    def _check_log_prob(self, dist, assert_fn):
# checks that the log_prob matches a reference function
s = dist.sample()
log_probs = dist.log_prob(s)
log_probs_data_flat = log_probs.view(-1)
s_data_flat = s.view(len(log_probs_data_flat), -1)
for i, (val, log_prob) in enumerate(zip(s_data_flat, log_probs_data_flat)):
            assert_fn(i, val.squeeze(), log_prob)
def _check_sampler_sampler(self, torch_dist, ref_dist, message, multivariate=False,
circular=False, num_samples=10000, failure_rate=1e-3):
# Checks that the .sample() method matches a reference function.
torch_samples = torch_dist.sample((num_samples,)).squeeze()
torch_samples = torch_samples.cpu().numpy()
ref_samples = ref_dist.rvs(num_samples).astype(np.float64)
if multivariate:
# Project onto a random axis.
axis = np.random.normal(size=(1,) + torch_samples.shape[1:])
axis /= np.linalg.norm(axis)
torch_samples = (axis * torch_samples).reshape(num_samples, -1).sum(-1)
ref_samples = (axis * ref_samples).reshape(num_samples, -1).sum(-1)
samples = [(x, +1) for x in torch_samples] + [(x, -1) for x in ref_samples]
if circular:
samples = [(np.cos(x), v) for (x, v) in samples]
shuffle(samples) # necessary to prevent stable sort from making uneven bins for discrete
samples.sort(key=lambda x: x[0])
samples = np.array(samples)[:, 1]
# Aggregate into bins filled with roughly zero-mean unit-variance RVs.
num_bins = 10
samples_per_bin = len(samples) // num_bins
bins = samples.reshape((num_bins, samples_per_bin)).mean(axis=1)
stddev = samples_per_bin ** -0.5
threshold = stddev * scipy.special.erfinv(1 - 2 * failure_rate / num_bins)
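        # If the two samplers agree, each bin mean should stay within a few standard errors of zero.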
message = '{}.sample() is biased:\n{}'.format(message, bins)
for bias in bins:
self.assertLess(-threshold, bias, message)
self.assertLess(bias, threshold, message)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def _check_sampler_discrete(self, torch_dist, ref_dist, message,
num_samples=10000, failure_rate=1e-3):
"""Runs a Chi2-test for the support, but ignores tail instead of combining"""
torch_samples = torch_dist.sample((num_samples,)).squeeze()
torch_samples = torch_samples.cpu().numpy()
unique, counts = np.unique(torch_samples, return_counts=True)
pmf = ref_dist.pmf(unique)
pmf = pmf / pmf.sum() # renormalize to 1.0 for chisq test
msk = (counts > 5) & ((pmf * num_samples) > 5)
self.assertGreater(pmf[msk].sum(), 0.9, "Distribution is too sparse for test; try increasing num_samples")
# Add a remainder bucket that combines counts for all values
# below threshold, if such values exist (i.e. mask has False entries).
if not msk.all():
counts = np.concatenate([counts[msk], np.sum(counts[~msk], keepdims=True)])
pmf = np.concatenate([pmf[msk], np.sum(pmf[~msk], keepdims=True)])
chisq, p = scipy.stats.chisquare(counts, pmf * num_samples)
self.assertGreater(p, failure_rate, message)
def _check_enumerate_support(self, dist, examples):
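        # Construct the distribution from each parameter set and compare enumerate_support(),
        # both unexpanded and expanded over the batch shape, against the expected values.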
for params, expected in examples:
params = {k: torch.tensor(v) for k, v in params.items()}
expected = torch.tensor(expected)
d = dist(**params)
actual = d.enumerate_support(expand=False)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(actual, expected)
actual = d.enumerate_support(expand=True)
expected_with_expand = expected.expand((-1,) + d.batch_shape + d.event_shape)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(actual, expected_with_expand)
def test_repr(self):
for Dist, params in EXAMPLES:
for param in params:
dist = Dist(**param)
self.assertTrue(repr(dist).startswith(dist.__class__.__name__))
def test_sample_detached(self):
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
variable_params = [p for p in param.values() if getattr(p, 'requires_grad', False)]
if not variable_params:
continue
dist = Dist(**param)
sample = dist.sample()
self.assertFalse(sample.requires_grad,
msg='{} example {}/{}, .sample() is not detached'.format(
Dist.__name__, i + 1, len(params)))
def test_rsample_requires_grad(self):
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
if not any(getattr(p, 'requires_grad', False) for p in param.values()):
continue
dist = Dist(**param)
if not dist.has_rsample:
continue
sample = dist.rsample()
self.assertTrue(sample.requires_grad,
msg='{} example {}/{}, .rsample() does not require grad'.format(
Dist.__name__, i + 1, len(params)))
def test_enumerate_support_type(self):
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(**param)
try:
self.assertTrue(type(dist.sample()) is type(dist.enumerate_support()),
msg=('{} example {}/{}, return type mismatch between ' +
'sample and enumerate_support.').format(Dist.__name__, i + 1, len(params)))
except NotImplementedError:
pass
def test_lazy_property_grad(self):
x = torch.randn(1, requires_grad=True)
class Dummy(object):
@lazy_property
def y(self):
return x + 1
def test():
x.grad = None
Dummy().y.backward()
self.assertEqual(x.grad, torch.ones(1))
test()
with torch.no_grad():
test()
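        # Accessing a lazy property inside no_grad must not prevent gradients from flowing on a later backward.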
mean = torch.randn(2)
cov = torch.eye(2, requires_grad=True)
distn = MultivariateNormal(mean, cov)
with torch.no_grad():
distn.scale_tril
distn.scale_tril.sum().backward()
self.assertIsNotNone(cov.grad)
def test_has_examples(self):
distributions_with_examples = {e.Dist for e in EXAMPLES}
for Dist in globals().values():
if isinstance(Dist, type) and issubclass(Dist, Distribution) \
and Dist is not Distribution and Dist is not ExponentialFamily:
self.assertIn(Dist, distributions_with_examples,
"Please add {} to the EXAMPLES list in test_distributions.py".format(Dist.__name__))
def test_support_attributes(self):
for Dist, params in EXAMPLES:
for param in params:
d = Dist(**param)
event_dim = len(d.event_shape)
self.assertEqual(d.support.event_dim, event_dim)
try:
self.assertEqual(Dist.support.event_dim, event_dim)
except NotImplementedError:
pass
is_discrete = d.support.is_discrete
try:
self.assertEqual(Dist.support.is_discrete, is_discrete)
except NotImplementedError:
pass
def test_distribution_expand(self):
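        # Expand each example distribution over extra batch dimensions and check that sample shapes,
        # log_prob, and (where implemented) mean/variance stay consistent with the base distribution.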
shapes = [torch.Size(), torch.Size((2,)), torch.Size((2, 1))]
for Dist, params in EXAMPLES:
for param in params:
for shape in shapes:
d = Dist(**param)
expanded_shape = shape + d.batch_shape
original_shape = d.batch_shape + d.event_shape
expected_shape = shape + original_shape
expanded = d.expand(batch_shape=list(expanded_shape))
sample = expanded.sample()
actual_shape = expanded.sample().shape
self.assertEqual(expanded.__class__, d.__class__)
self.assertEqual(d.sample().shape, original_shape)
self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
self.assertEqual(actual_shape, expected_shape)
self.assertEqual(expanded.batch_shape, expanded_shape)
try:
self.assertEqual(expanded.mean,
d.mean.expand(expanded_shape + d.event_shape))
self.assertEqual(expanded.variance,
d.variance.expand(expanded_shape + d.event_shape))
except NotImplementedError:
pass
def test_distribution_subclass_expand(self):
expand_by = torch.Size((2,))
for Dist, params in EXAMPLES:
class SubClass(Dist):
pass
for param in params:
d = SubClass(**param)
expanded_shape = expand_by + d.batch_shape
original_shape = d.batch_shape + d.event_shape
expected_shape = expand_by + original_shape
expanded = d.expand(batch_shape=expanded_shape)
sample = expanded.sample()
actual_shape = expanded.sample().shape
self.assertEqual(expanded.__class__, d.__class__)
self.assertEqual(d.sample().shape, original_shape)
self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
self.assertEqual(actual_shape, expected_shape)
def test_bernoulli(self):
p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
r = torch.tensor(0.3, requires_grad=True)
s = 0.3
self.assertEqual(Bernoulli(p).sample((8,)).size(), (8, 3))
self.assertFalse(Bernoulli(p).sample().requires_grad)
self.assertEqual(Bernoulli(r).sample((8,)).size(), (8,))
self.assertEqual(Bernoulli(r).sample().size(), ())
self.assertEqual(Bernoulli(r).sample((3, 2)).size(), (3, 2,))
self.assertEqual(Bernoulli(s).sample().size(), ())
self._gradcheck_log_prob(Bernoulli, (p,))
def ref_log_prob(idx, val, log_prob):
prob = p[idx]
self.assertEqual(log_prob, math.log(prob if val else 1 - prob))
self._check_log_prob(Bernoulli(p), ref_log_prob)
self._check_log_prob(Bernoulli(logits=p.log() - (-p).log1p()), ref_log_prob)
self.assertRaises(NotImplementedError, Bernoulli(r).rsample)
# check entropy computation
self.assertEqual(Bernoulli(p).entropy(), torch.tensor([0.6108, 0.5004, 0.6730]), atol=1e-4, rtol=0)
self.assertEqual(Bernoulli(torch.tensor([0.0])).entropy(), torch.tensor([0.0]))
self.assertEqual(Bernoulli(s).entropy(), torch.tensor(0.6108), atol=1e-4, rtol=0)
self._check_forward_ad(torch.bernoulli)
self._check_forward_ad(lambda x: x.bernoulli_())
self._check_forward_ad(lambda x: x.bernoulli_(x.clone().detach()))
self._check_forward_ad(lambda x: x.bernoulli_(x))
def test_bernoulli_enumerate_support(self):
examples = [
({"probs": [0.1]}, [[0], [1]]),
({"probs": [0.1, 0.9]}, [[0], [1]]),
({"probs": [[0.1, 0.2], [0.3, 0.4]]}, [[[0]], [[1]]]),
]
self._check_enumerate_support(Bernoulli, examples)
def test_bernoulli_3d(self):
p = torch.full((2, 3, 5), 0.5).requires_grad_()
self.assertEqual(Bernoulli(p).sample().size(), (2, 3, 5))
self.assertEqual(Bernoulli(p).sample(sample_shape=(2, 5)).size(),
(2, 5, 2, 3, 5))
self.assertEqual(Bernoulli(p).sample((2,)).size(), (2, 2, 3, 5))
def test_geometric(self):
p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
r = torch.tensor(0.3, requires_grad=True)
s = 0.3
self.assertEqual(Geometric(p).sample((8,)).size(), (8, 3))
self.assertEqual(Geometric(1).sample(), 0)
self.assertEqual(Geometric(1).log_prob(torch.tensor(1.)), -inf)
self.assertEqual(Geometric(1).log_prob(torch.tensor(0.)), 0)
self.assertFalse(Geometric(p).sample().requires_grad)
self.assertEqual(Geometric(r).sample((8,)).size(), (8,))
self.assertEqual(Geometric(r).sample().size(), ())
self.assertEqual(Geometric(r).sample((3, 2)).size(), (3, 2))
self.assertEqual(Geometric(s).sample().size(), ())
self._gradcheck_log_prob(Geometric, (p,))
self.assertRaises(ValueError, lambda: Geometric(0))
self.assertRaises(NotImplementedError, Geometric(r).rsample)
self._check_forward_ad(lambda x: x.geometric_(0.2))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_geometric_log_prob_and_entropy(self):
p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
s = 0.3
def ref_log_prob(idx, val, log_prob):
prob = p[idx].detach()
self.assertEqual(log_prob, scipy.stats.geom(prob, loc=-1).logpmf(val))
self._check_log_prob(Geometric(p), ref_log_prob)
self._check_log_prob(Geometric(logits=p.log() - (-p).log1p()), ref_log_prob)
# check entropy computation
self.assertEqual(Geometric(p).entropy(), scipy.stats.geom(p.detach().numpy(), loc=-1).entropy(), atol=1e-3, rtol=0)
self.assertEqual(float(Geometric(s).entropy()), scipy.stats.geom(s, loc=-1).entropy().item(), atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_geometric_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for prob in [0.01, 0.18, 0.8]:
self._check_sampler_discrete(Geometric(prob),
scipy.stats.geom(p=prob, loc=-1),
'Geometric(prob={})'.format(prob))
def test_binomial(self):
p = torch.arange(0.05, 1, 0.1).requires_grad_()
for total_count in [1, 2, 10]:
self._gradcheck_log_prob(lambda p: Binomial(total_count, p), [p])
self._gradcheck_log_prob(lambda p: Binomial(total_count, None, p.log()), [p])
self.assertRaises(NotImplementedError, Binomial(10, p).rsample)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_binomial_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for prob in [0.01, 0.1, 0.5, 0.8, 0.9]:
for count in [2, 10, 100, 500]:
self._check_sampler_discrete(Binomial(total_count=count, probs=prob),
scipy.stats.binom(count, prob),
'Binomial(total_count={}, probs={})'.format(count, prob))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_binomial_log_prob_and_entropy(self):
probs = torch.arange(0.05, 1, 0.1)
for total_count in [1, 2, 10]:
def ref_log_prob(idx, x, log_prob):
p = probs.view(-1)[idx].item()
expected = scipy.stats.binom(total_count, p).logpmf(x)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Binomial(total_count, probs), ref_log_prob)
logits = probs_to_logits(probs, is_binary=True)
self._check_log_prob(Binomial(total_count, logits=logits), ref_log_prob)
bin = Binomial(total_count, logits=logits)
self.assertEqual(
bin.entropy(),
scipy.stats.binom(total_count, bin.probs.detach().numpy(), loc=-1).entropy(),
atol=1e-3, rtol=0)
def test_binomial_stable(self):
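        # Numerical stability check: extreme logits must still yield finite log_prob values.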
logits = torch.tensor([-100., 100.], dtype=torch.float)
total_count = 1.
x = torch.tensor([0., 0.], dtype=torch.float)
log_prob = Binomial(total_count, logits=logits).log_prob(x)
self.assertTrue(torch.isfinite(log_prob).all())
# make sure that the grad at logits=0, value=0 is 0.5
x = torch.tensor(0., requires_grad=True)
y = Binomial(total_count, logits=x).log_prob(torch.tensor(0.))
self.assertEqual(grad(y, x)[0], torch.tensor(-0.5))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_binomial_log_prob_vectorized_count(self):
probs = torch.tensor([0.2, 0.7, 0.9])
for total_count, sample in [(torch.tensor([10]), torch.tensor([7., 3., 9.])),
(torch.tensor([1, 2, 10]), torch.tensor([0., 1., 9.]))]:
log_prob = Binomial(total_count, probs).log_prob(sample)
expected = scipy.stats.binom(total_count.cpu().numpy(), probs.cpu().numpy()).logpmf(sample)
self.assertEqual(log_prob, expected, atol=1e-4, rtol=0)
def test_binomial_enumerate_support(self):
examples = [
({"probs": [0.1], "total_count": 2}, [[0], [1], [2]]),
({"probs": [0.1, 0.9], "total_count": 2}, [[0], [1], [2]]),
({"probs": [[0.1, 0.2], [0.3, 0.4]], "total_count": 3}, [[[0]], [[1]], [[2]], [[3]]]),
]
self._check_enumerate_support(Binomial, examples)
def test_binomial_extreme_vals(self):
total_count = 100
bin0 = Binomial(total_count, 0)
self.assertEqual(bin0.sample(), 0)
self.assertEqual(bin0.log_prob(torch.tensor([0.]))[0], 0, atol=1e-3, rtol=0)
self.assertEqual(float(bin0.log_prob(torch.tensor([1.])).exp()), 0)
bin1 = Binomial(total_count, 1)
self.assertEqual(bin1.sample(), total_count)
self.assertEqual(bin1.log_prob(torch.tensor([float(total_count)]))[0], 0, atol=1e-3, rtol=0)
self.assertEqual(float(bin1.log_prob(torch.tensor([float(total_count - 1)])).exp()), 0)
zero_counts = torch.zeros(torch.Size((2, 2)))
bin2 = Binomial(zero_counts, 1)
self.assertEqual(bin2.sample(), zero_counts)
self.assertEqual(bin2.log_prob(zero_counts), zero_counts)
def test_binomial_vectorized_count(self):
set_rng_seed(1) # see Note [Randomized statistical tests]
total_count = torch.tensor([[4, 7], [3, 8]])
bin0 = Binomial(total_count, torch.tensor(1.))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(bin0.sample(), total_count)
bin1 = Binomial(total_count, torch.tensor(0.5))
samples = bin1.sample(torch.Size((100000,)))
self.assertTrue((samples <= total_count.type_as(samples)).all())
self.assertEqual(samples.mean(dim=0), bin1.mean, atol=0.02, rtol=0)
self.assertEqual(samples.var(dim=0), bin1.variance, atol=0.02, rtol=0)
def test_negative_binomial(self):
p = torch.arange(0.05, 1, 0.1).requires_grad_()
for total_count in [1, 2, 10]:
self._gradcheck_log_prob(lambda p: NegativeBinomial(total_count, p), [p])
self._gradcheck_log_prob(lambda p: NegativeBinomial(total_count, None, p.log()), [p])
self.assertRaises(NotImplementedError, NegativeBinomial(10, p).rsample)
self.assertRaises(NotImplementedError, NegativeBinomial(10, p).entropy)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_negative_binomial_log_prob(self):
probs = torch.arange(0.05, 1, 0.1)
for total_count in [1, 2, 10]:
def ref_log_prob(idx, x, log_prob):
p = probs.view(-1)[idx].item()
expected = scipy.stats.nbinom(total_count, 1 - p).logpmf(x)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(NegativeBinomial(total_count, probs), ref_log_prob)
logits = probs_to_logits(probs, is_binary=True)
self._check_log_prob(NegativeBinomial(total_count, logits=logits), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_negative_binomial_log_prob_vectorized_count(self):
probs = torch.tensor([0.2, 0.7, 0.9])
for total_count, sample in [(torch.tensor([10]), torch.tensor([7., 3., 9.])),
(torch.tensor([1, 2, 10]), torch.tensor([0., 1., 9.]))]:
log_prob = NegativeBinomial(total_count, probs).log_prob(sample)
expected = scipy.stats.nbinom(total_count.cpu().numpy(), 1 - probs.cpu().numpy()).logpmf(sample)
self.assertEqual(log_prob, expected, atol=1e-4, rtol=0)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
def test_zero_excluded_binomial(self):
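        # Draw very large sample sets on CUDA and check that no out-of-support values appear;
        # at probs=0.5 the zeros and ones should be roughly balanced.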
vals = Binomial(total_count=torch.tensor(1.0).cuda(),
probs=torch.tensor(0.9).cuda()
).sample(torch.Size((100000000,)))
self.assertTrue((vals >= 0).all())
vals = Binomial(total_count=torch.tensor(1.0).cuda(),
probs=torch.tensor(0.1).cuda()
).sample(torch.Size((100000000,)))
self.assertTrue((vals < 2).all())
vals = Binomial(total_count=torch.tensor(1.0).cuda(),
probs=torch.tensor(0.5).cuda()
).sample(torch.Size((10000,)))
# vals should be roughly half zeroes, half ones
        self.assertGreater((vals == 0.0).sum(), 4000)
        self.assertGreater((vals == 1.0).sum(), 4000)
def test_multinomial_1d(self):
total_count = 10
p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)
self.assertEqual(Multinomial(total_count, p).sample().size(), (3,))
self.assertEqual(Multinomial(total_count, p).sample((2, 2)).size(), (2, 2, 3))
self.assertEqual(Multinomial(total_count, p).sample((1,)).size(), (1, 3))
self._gradcheck_log_prob(lambda p: Multinomial(total_count, p), [p])
self._gradcheck_log_prob(lambda p: Multinomial(total_count, None, p.log()), [p])
self.assertRaises(NotImplementedError, Multinomial(10, p).rsample)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_multinomial_1d_log_prob_and_entropy(self):
total_count = 10
p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)
dist = Multinomial(total_count, probs=p)
x = dist.sample()
log_prob = dist.log_prob(x)
expected = torch.tensor(scipy.stats.multinomial.logpmf(x.numpy(), n=total_count, p=dist.probs.detach().numpy()))
self.assertEqual(log_prob, expected)
dist = Multinomial(total_count, logits=p.log())
x = dist.sample()
log_prob = dist.log_prob(x)
expected = torch.tensor(scipy.stats.multinomial.logpmf(x.numpy(), n=total_count, p=dist.probs.detach().numpy()))
self.assertEqual(log_prob, expected)
expected = scipy.stats.multinomial.entropy(total_count, dist.probs.detach().numpy())
self.assertEqual(dist.entropy(), expected, atol=1e-3, rtol=0)
def test_multinomial_2d(self):
total_count = 10
probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]
probabilities_1 = [[1.0, 0.0], [0.0, 1.0]]
p = torch.tensor(probabilities, requires_grad=True)
s = torch.tensor(probabilities_1, requires_grad=True)
self.assertEqual(Multinomial(total_count, p).sample().size(), (2, 3))
self.assertEqual(Multinomial(total_count, p).sample(sample_shape=(3, 4)).size(), (3, 4, 2, 3))
self.assertEqual(Multinomial(total_count, p).sample((6,)).size(), (6, 2, 3))
set_rng_seed(0)
self._gradcheck_log_prob(lambda p: Multinomial(total_count, p), [p])
self._gradcheck_log_prob(lambda p: Multinomial(total_count, None, p.log()), [p])
# sample check for extreme value of probs
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(Multinomial(total_count, s).sample(),
torch.tensor([[total_count, 0], [0, total_count]]))
def test_categorical_1d(self):
p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)
self.assertTrue(is_all_nan(Categorical(p).mean))
self.assertTrue(is_all_nan(Categorical(p).variance))
self.assertEqual(Categorical(p).sample().size(), ())
self.assertFalse(Categorical(p).sample().requires_grad)
self.assertEqual(Categorical(p).sample((2, 2)).size(), (2, 2))
self.assertEqual(Categorical(p).sample((1,)).size(), (1,))
self._gradcheck_log_prob(Categorical, (p,))
self.assertRaises(NotImplementedError, Categorical(p).rsample)
def test_categorical_2d(self):
probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]
probabilities_1 = [[1.0, 0.0], [0.0, 1.0]]
p = torch.tensor(probabilities, requires_grad=True)
s = torch.tensor(probabilities_1, requires_grad=True)
self.assertEqual(Categorical(p).mean.size(), (2,))
self.assertEqual(Categorical(p).variance.size(), (2,))
self.assertTrue(is_all_nan(Categorical(p).mean))
self.assertTrue(is_all_nan(Categorical(p).variance))
self.assertEqual(Categorical(p).sample().size(), (2,))
self.assertEqual(Categorical(p).sample(sample_shape=(3, 4)).size(), (3, 4, 2))
self.assertEqual(Categorical(p).sample((6,)).size(), (6, 2))
self._gradcheck_log_prob(Categorical, (p,))
# sample check for extreme value of probs
set_rng_seed(0)
self.assertEqual(Categorical(s).sample(sample_shape=(2,)),
torch.tensor([[0, 1], [0, 1]]))
def ref_log_prob(idx, val, log_prob):
sample_prob = p[idx][val] / p[idx].sum()
self.assertEqual(log_prob, math.log(sample_prob))
self._check_log_prob(Categorical(p), ref_log_prob)
self._check_log_prob(Categorical(logits=p.log()), ref_log_prob)
# check entropy computation
self.assertEqual(Categorical(p).entropy(), torch.tensor([1.0114, 1.0297]), atol=1e-4, rtol=0)
self.assertEqual(Categorical(s).entropy(), torch.tensor([0.0, 0.0]))
# issue gh-40553
logits = p.log()
logits[1, 1] = logits[0, 2] = float('-inf')
e = Categorical(logits=logits).entropy()
self.assertEqual(e, torch.tensor([0.6365, 0.5983]), atol=1e-4, rtol=0)
def test_categorical_enumerate_support(self):
examples = [
({"probs": [0.1, 0.2, 0.7]}, [0, 1, 2]),
({"probs": [[0.1, 0.9], [0.3, 0.7]]}, [[0], [1]]),
]
self._check_enumerate_support(Categorical, examples)
def test_one_hot_categorical_1d(self):
p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)
self.assertEqual(OneHotCategorical(p).sample().size(), (3,))
self.assertFalse(OneHotCategorical(p).sample().requires_grad)
self.assertEqual(OneHotCategorical(p).sample((2, 2)).size(), (2, 2, 3))
self.assertEqual(OneHotCategorical(p).sample((1,)).size(), (1, 3))
self._gradcheck_log_prob(OneHotCategorical, (p,))
self.assertRaises(NotImplementedError, OneHotCategorical(p).rsample)
def test_one_hot_categorical_2d(self):
probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]
probabilities_1 = [[1.0, 0.0], [0.0, 1.0]]
p = torch.tensor(probabilities, requires_grad=True)
s = torch.tensor(probabilities_1, requires_grad=True)
self.assertEqual(OneHotCategorical(p).sample().size(), (2, 3))
self.assertEqual(OneHotCategorical(p).sample(sample_shape=(3, 4)).size(), (3, 4, 2, 3))
self.assertEqual(OneHotCategorical(p).sample((6,)).size(), (6, 2, 3))
self._gradcheck_log_prob(OneHotCategorical, (p,))
dist = OneHotCategorical(p)
x = dist.sample()
self.assertEqual(dist.log_prob(x), Categorical(p).log_prob(x.max(-1)[1]))
def test_one_hot_categorical_enumerate_support(self):
examples = [
({"probs": [0.1, 0.2, 0.7]}, [[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
({"probs": [[0.1, 0.9], [0.3, 0.7]]}, [[[1, 0]], [[0, 1]]]),
]
self._check_enumerate_support(OneHotCategorical, examples)
def test_poisson_forward_ad(self):
self._check_forward_ad(torch.poisson)
def test_poisson_shape(self):
rate = torch.randn(2, 3).abs().requires_grad_()
rate_1d = torch.randn(1).abs().requires_grad_()
self.assertEqual(Poisson(rate).sample().size(), (2, 3))
self.assertEqual(Poisson(rate).sample((7,)).size(), (7, 2, 3))
self.assertEqual(Poisson(rate_1d).sample().size(), (1,))
self.assertEqual(Poisson(rate_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Poisson(2.0).sample((2,)).size(), (2,))
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_poisson_log_prob(self):
rate = torch.randn(2, 3).abs().requires_grad_()
rate_1d = torch.randn(1).abs().requires_grad_()
rate_zero = torch.zeros([], requires_grad=True)
def ref_log_prob(ref_rate, idx, x, log_prob):
l = ref_rate.view(-1)[idx].detach()
expected = scipy.stats.poisson.logpmf(x, l)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
set_rng_seed(0)
self._check_log_prob(Poisson(rate), lambda *args: ref_log_prob(rate, *args))
self._check_log_prob(Poisson(rate_zero), lambda *args: ref_log_prob(rate_zero, *args))
self._gradcheck_log_prob(Poisson, (rate,))
self._gradcheck_log_prob(Poisson, (rate_1d,))
# We cannot check gradients automatically for zero rates because the finite difference
# approximation enters the forbidden parameter space. We instead compare with the
# theoretical results.
dist = Poisson(rate_zero)
s = dist.sample()
dist.log_prob(s).backward()
torch.testing.assert_allclose(rate_zero.grad, -1.0)
dist.log_prob(torch.ones_like(rate_zero)).backward()
torch.testing.assert_allclose(rate_zero.grad, torch.inf)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_poisson_sample(self):
set_rng_seed(1) # see Note [Randomized statistical tests]
for rate in [0.1, 1.0, 5.0]:
self._check_sampler_discrete(Poisson(rate),
scipy.stats.poisson(rate),
'Poisson(lambda={})'.format(rate),
failure_rate=1e-3)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_poisson_gpu_sample(self):
set_rng_seed(1)
for rate in [0.12, 0.9, 4.0]:
self._check_sampler_discrete(Poisson(torch.tensor([rate]).cuda()),
scipy.stats.poisson(rate),
'Poisson(lambda={}, cuda)'.format(rate),
failure_rate=1e-3)
def test_relaxed_bernoulli(self):
p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
r = torch.tensor(0.3, requires_grad=True)
s = 0.3
temp = torch.tensor(0.67, requires_grad=True)
self.assertEqual(RelaxedBernoulli(temp, p).sample((8,)).size(), (8, 3))
self.assertFalse(RelaxedBernoulli(temp, p).sample().requires_grad)
self.assertEqual(RelaxedBernoulli(temp, r).sample((8,)).size(), (8,))
self.assertEqual(RelaxedBernoulli(temp, r).sample().size(), ())
self.assertEqual(RelaxedBernoulli(temp, r).sample((3, 2)).size(), (3, 2,))
self.assertEqual(RelaxedBernoulli(temp, s).sample().size(), ())
self._gradcheck_log_prob(RelaxedBernoulli, (temp, p))
self._gradcheck_log_prob(RelaxedBernoulli, (temp, r))
# test that rsample doesn't fail
s = RelaxedBernoulli(temp, p).rsample()
s.backward(torch.ones_like(s))
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_rounded_relaxed_bernoulli(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
class Rounded(object):
def __init__(self, dist):
self.dist = dist
def sample(self, *args, **kwargs):
return torch.round(self.dist.sample(*args, **kwargs))
for probs, temp in product([0.1, 0.2, 0.8], [0.1, 1.0, 10.0]):
self._check_sampler_discrete(Rounded(RelaxedBernoulli(temp, probs)),
scipy.stats.bernoulli(probs),
'Rounded(RelaxedBernoulli(temp={}, probs={}))'.format(temp, probs),
failure_rate=1e-3)
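        # At a very high temperature the relaxed sample collapses to 0.5 regardless of probs.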
for probs in [0.001, 0.2, 0.999]:
equal_probs = torch.tensor(0.5)
dist = RelaxedBernoulli(1e10, probs)
s = dist.rsample()
self.assertEqual(equal_probs, s)
def test_relaxed_one_hot_categorical_1d(self):
p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)
temp = torch.tensor(0.67, requires_grad=True)
self.assertEqual(RelaxedOneHotCategorical(probs=p, temperature=temp).sample().size(), (3,))
self.assertFalse(RelaxedOneHotCategorical(probs=p, temperature=temp).sample().requires_grad)
self.assertEqual(RelaxedOneHotCategorical(probs=p, temperature=temp).sample((2, 2)).size(), (2, 2, 3))
self.assertEqual(RelaxedOneHotCategorical(probs=p, temperature=temp).sample((1,)).size(), (1, 3))
self._gradcheck_log_prob(lambda t, p: RelaxedOneHotCategorical(t, p, validate_args=False), (temp, p))
def test_relaxed_one_hot_categorical_2d(self):
probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]
probabilities_1 = [[1.0, 0.0], [0.0, 1.0]]
temp = torch.tensor([3.0], requires_grad=True)
# The lower the temperature, the more unstable the log_prob gradcheck is
# w.r.t. the sample. Values below 0.25 empirically fail the default tol.
temp_2 = torch.tensor([0.25], requires_grad=True)
p = torch.tensor(probabilities, requires_grad=True)
s = torch.tensor(probabilities_1, requires_grad=True)
self.assertEqual(RelaxedOneHotCategorical(temp, p).sample().size(), (2, 3))
self.assertEqual(RelaxedOneHotCategorical(temp, p).sample(sample_shape=(3, 4)).size(), (3, 4, 2, 3))
self.assertEqual(RelaxedOneHotCategorical(temp, p).sample((6,)).size(), (6, 2, 3))
self._gradcheck_log_prob(lambda t, p: RelaxedOneHotCategorical(t, p, validate_args=False), (temp, p))
self._gradcheck_log_prob(lambda t, p: RelaxedOneHotCategorical(t, p, validate_args=False), (temp_2, p))
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_argmax_relaxed_categorical(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
class ArgMax(object):
def __init__(self, dist):
self.dist = dist
def sample(self, *args, **kwargs):
s = self.dist.sample(*args, **kwargs)
_, idx = torch.max(s, -1)
return idx
class ScipyCategorical(object):
def __init__(self, dist):
self.dist = dist
def pmf(self, samples):
new_samples = np.zeros(samples.shape + self.dist.p.shape)
new_samples[np.arange(samples.shape[0]), samples] = 1
return self.dist.pmf(new_samples)
for probs, temp in product([torch.tensor([0.1, 0.9]), torch.tensor([0.2, 0.2, 0.6])], [0.1, 1.0, 10.0]):
self._check_sampler_discrete(ArgMax(RelaxedOneHotCategorical(temp, probs)),
ScipyCategorical(scipy.stats.multinomial(1, probs)),
                                         'ArgMax(RelaxedOneHotCategorical(temp={}, probs={}))'.format(temp, probs),
failure_rate=1e-3)
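        # At a very high temperature the relaxed sample approaches the uniform point (1/K in every coordinate).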
for probs in [torch.tensor([0.1, 0.9]), torch.tensor([0.2, 0.2, 0.6])]:
equal_probs = torch.ones(probs.size()) / probs.size()[0]
dist = RelaxedOneHotCategorical(1e10, probs)
s = dist.rsample()
self.assertEqual(equal_probs, s)
def test_uniform(self):
low = torch.zeros(5, 5, requires_grad=True)
high = (torch.ones(5, 5) * 3).requires_grad_()
low_1d = torch.zeros(1, requires_grad=True)
high_1d = (torch.ones(1) * 3).requires_grad_()
self.assertEqual(Uniform(low, high).sample().size(), (5, 5))
self.assertEqual(Uniform(low, high).sample((7,)).size(), (7, 5, 5))
self.assertEqual(Uniform(low_1d, high_1d).sample().size(), (1,))
self.assertEqual(Uniform(low_1d, high_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Uniform(0.0, 1.0).sample((1,)).size(), (1,))
# Check log_prob computation when value outside range
uniform = Uniform(low_1d, high_1d, validate_args=False)
above_high = torch.tensor([4.0])
below_low = torch.tensor([-1.0])
self.assertEqual(uniform.log_prob(above_high).item(), -inf)
self.assertEqual(uniform.log_prob(below_low).item(), -inf)
# check cdf computation when value outside range
self.assertEqual(uniform.cdf(below_low).item(), 0)
self.assertEqual(uniform.cdf(above_high).item(), 1)
set_rng_seed(1)
self._gradcheck_log_prob(Uniform, (low, high))
self._gradcheck_log_prob(Uniform, (low, 1.0))
self._gradcheck_log_prob(Uniform, (0.0, high))
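        # Replay the RNG state so `rand` equals the draw used by rsample (= low + rand * (high - low)),
        # hence low.grad = 1 - rand and high.grad = rand.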
state = torch.get_rng_state()
rand = low.new(low.size()).uniform_()
torch.set_rng_state(state)
u = Uniform(low, high).rsample()
u.backward(torch.ones_like(u))
self.assertEqual(low.grad, 1 - rand)
self.assertEqual(high.grad, rand)
low.grad.zero_()
high.grad.zero_()
self._check_forward_ad(lambda x: x.uniform_())
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_vonmises_sample(self):
for loc in [0.0, math.pi / 2.0]:
for concentration in [0.03, 0.3, 1.0, 10.0, 100.0]:
self._check_sampler_sampler(VonMises(loc, concentration),
scipy.stats.vonmises(loc=loc, kappa=concentration),
"VonMises(loc={}, concentration={})".format(loc, concentration),
num_samples=int(1e5), circular=True)
def test_vonmises_logprob(self):
concentrations = [0.01, 0.03, 0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0]
for concentration in concentrations:
grid = torch.arange(0., 2 * math.pi, 1e-4)
prob = VonMises(0.0, concentration).log_prob(grid).exp()
norm = prob.mean().item() * 2 * math.pi
self.assertLess(abs(norm - 1), 1e-3)
def test_cauchy(self):
loc = torch.zeros(5, 5, requires_grad=True)
scale = torch.ones(5, 5, requires_grad=True)
loc_1d = torch.zeros(1, requires_grad=True)
scale_1d = torch.ones(1, requires_grad=True)
self.assertTrue(is_all_nan(Cauchy(loc_1d, scale_1d).mean))
self.assertEqual(Cauchy(loc_1d, scale_1d).variance, inf)
self.assertEqual(Cauchy(loc, scale).sample().size(), (5, 5))
self.assertEqual(Cauchy(loc, scale).sample((7,)).size(), (7, 5, 5))
self.assertEqual(Cauchy(loc_1d, scale_1d).sample().size(), (1,))
self.assertEqual(Cauchy(loc_1d, scale_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Cauchy(0.0, 1.0).sample((1,)).size(), (1,))
set_rng_seed(1)
self._gradcheck_log_prob(Cauchy, (loc, scale))
self._gradcheck_log_prob(Cauchy, (loc, 1.0))
self._gradcheck_log_prob(Cauchy, (0.0, scale))
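        # Replay the RNG state so `eps` equals the Cauchy draw used by rsample (= loc + scale * eps),
        # hence loc.grad = 1 and scale.grad = eps.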
state = torch.get_rng_state()
eps = loc.new(loc.size()).cauchy_()
torch.set_rng_state(state)
c = Cauchy(loc, scale).rsample()
c.backward(torch.ones_like(c))
self.assertEqual(loc.grad, torch.ones_like(scale))
self.assertEqual(scale.grad, eps)
loc.grad.zero_()
scale.grad.zero_()
self._check_forward_ad(lambda x: x.cauchy_())
def test_halfcauchy(self):
scale = torch.ones(5, 5, requires_grad=True)
scale_1d = torch.ones(1, requires_grad=True)
self.assertTrue(torch.isinf(HalfCauchy(scale_1d).mean).all())
self.assertEqual(HalfCauchy(scale_1d).variance, inf)
self.assertEqual(HalfCauchy(scale).sample().size(), (5, 5))
self.assertEqual(HalfCauchy(scale).sample((7,)).size(), (7, 5, 5))
self.assertEqual(HalfCauchy(scale_1d).sample().size(), (1,))
self.assertEqual(HalfCauchy(scale_1d).sample((1,)).size(), (1, 1))
self.assertEqual(HalfCauchy(1.0).sample((1,)).size(), (1,))
set_rng_seed(1)
self._gradcheck_log_prob(HalfCauchy, (scale,))
self._gradcheck_log_prob(HalfCauchy, (1.0,))
state = torch.get_rng_state()
eps = scale.new(scale.size()).cauchy_().abs_()
torch.set_rng_state(state)
c = HalfCauchy(scale).rsample()
c.backward(torch.ones_like(c))
self.assertEqual(scale.grad, eps)
scale.grad.zero_()
def test_halfnormal(self):
std = torch.randn(5, 5).abs().requires_grad_()
std_1d = torch.randn(1).abs().requires_grad_()
std_delta = torch.tensor([1e-5, 1e-5])
self.assertEqual(HalfNormal(std).sample().size(), (5, 5))
self.assertEqual(HalfNormal(std).sample((7,)).size(), (7, 5, 5))
self.assertEqual(HalfNormal(std_1d).sample((1,)).size(), (1, 1))
self.assertEqual(HalfNormal(std_1d).sample().size(), (1,))
self.assertEqual(HalfNormal(.6).sample((1,)).size(), (1,))
self.assertEqual(HalfNormal(50.0).sample((1,)).size(), (1,))
# sample check for extreme value of std
set_rng_seed(1)
self.assertEqual(HalfNormal(std_delta).sample(sample_shape=(1, 2)),
torch.tensor([[[0.0, 0.0], [0.0, 0.0]]]),
atol=1e-4, rtol=0)
self._gradcheck_log_prob(HalfNormal, (std,))
self._gradcheck_log_prob(HalfNormal, (1.0,))
# check .log_prob() can broadcast.
dist = HalfNormal(torch.ones(2, 1, 4))
log_prob = dist.log_prob(torch.ones(3, 1))
self.assertEqual(log_prob.shape, (2, 3, 4))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_halfnormal_logprob(self):
std = torch.randn(5, 1).abs().requires_grad_()
def ref_log_prob(idx, x, log_prob):
s = std.view(-1)[idx].detach()
expected = scipy.stats.halfnorm(scale=s).logpdf(x)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(HalfNormal(std), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_halfnormal_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for std in [0.1, 1.0, 10.0]:
self._check_sampler_sampler(HalfNormal(std),
scipy.stats.halfnorm(scale=std),
'HalfNormal(scale={})'.format(std))
def test_lognormal(self):
mean = torch.randn(5, 5, requires_grad=True)
std = torch.randn(5, 5).abs().requires_grad_()
mean_1d = torch.randn(1, requires_grad=True)
std_1d = torch.randn(1).abs().requires_grad_()
mean_delta = torch.tensor([1.0, 0.0])
std_delta = torch.tensor([1e-5, 1e-5])
self.assertEqual(LogNormal(mean, std).sample().size(), (5, 5))
self.assertEqual(LogNormal(mean, std).sample((7,)).size(), (7, 5, 5))
self.assertEqual(LogNormal(mean_1d, std_1d).sample((1,)).size(), (1, 1))
self.assertEqual(LogNormal(mean_1d, std_1d).sample().size(), (1,))
self.assertEqual(LogNormal(0.2, .6).sample((1,)).size(), (1,))
self.assertEqual(LogNormal(-0.7, 50.0).sample((1,)).size(), (1,))
# sample check for extreme value of mean, std
set_rng_seed(1)
self.assertEqual(LogNormal(mean_delta, std_delta).sample(sample_shape=(1, 2)),
torch.tensor([[[math.exp(1), 1.0], [math.exp(1), 1.0]]]),
atol=1e-4, rtol=0)
self._gradcheck_log_prob(LogNormal, (mean, std))
self._gradcheck_log_prob(LogNormal, (mean, 1.0))
self._gradcheck_log_prob(LogNormal, (0.0, std))
# check .log_prob() can broadcast.
dist = LogNormal(torch.zeros(4), torch.ones(2, 1, 1))
log_prob = dist.log_prob(torch.ones(3, 1))
self.assertEqual(log_prob.shape, (2, 3, 4))
self._check_forward_ad(lambda x: x.log_normal_())
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_lognormal_logprob(self):
mean = torch.randn(5, 1, requires_grad=True)
std = torch.randn(5, 1).abs().requires_grad_()
def ref_log_prob(idx, x, log_prob):
m = mean.view(-1)[idx].detach()
s = std.view(-1)[idx].detach()
expected = scipy.stats.lognorm(s=s, scale=math.exp(m)).logpdf(x)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(LogNormal(mean, std), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_lognormal_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for mean, std in product([-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(LogNormal(mean, std),
scipy.stats.lognorm(scale=math.exp(mean), s=std),
'LogNormal(loc={}, scale={})'.format(mean, std))
def test_logisticnormal(self):
set_rng_seed(1) # see Note [Randomized statistical tests]
mean = torch.randn(5, 5).requires_grad_()
std = torch.randn(5, 5).abs().requires_grad_()
mean_1d = torch.randn(1).requires_grad_()
std_1d = torch.randn(1).abs().requires_grad_()
mean_delta = torch.tensor([1.0, 0.0])
std_delta = torch.tensor([1e-5, 1e-5])
self.assertEqual(LogisticNormal(mean, std).sample().size(), (5, 6))
self.assertEqual(LogisticNormal(mean, std).sample((7,)).size(), (7, 5, 6))
self.assertEqual(LogisticNormal(mean_1d, std_1d).sample((1,)).size(), (1, 2))
self.assertEqual(LogisticNormal(mean_1d, std_1d).sample().size(), (2,))
self.assertEqual(LogisticNormal(0.2, .6).sample().size(), (2,))
self.assertEqual(LogisticNormal(-0.7, 50.0).sample().size(), (2,))
# sample check for extreme value of mean, std
set_rng_seed(1)
self.assertEqual(LogisticNormal(mean_delta, std_delta).sample(),
torch.tensor([math.exp(1) / (1. + 1. + math.exp(1)),
1. / (1. + 1. + math.exp(1)),
1. / (1. + 1. + math.exp(1))]),
atol=1e-4, rtol=0)
# TODO: gradcheck seems to mutate the sample values so that the simplex
# constraint fails by a very small margin.
self._gradcheck_log_prob(lambda m, s: LogisticNormal(m, s, validate_args=False), (mean, std))
self._gradcheck_log_prob(lambda m, s: LogisticNormal(m, s, validate_args=False), (mean, 1.0))
self._gradcheck_log_prob(lambda m, s: LogisticNormal(m, s, validate_args=False), (0.0, std))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_logisticnormal_logprob(self):
mean = torch.randn(5, 7).requires_grad_()
std = torch.randn(5, 7).abs().requires_grad_()
# Smoke test for now
# TODO: Once _check_log_prob works with multidimensional distributions,
# add proper testing of the log probabilities.
dist = LogisticNormal(mean, std)
        self.assertEqual(dist.log_prob(dist.sample()).detach().cpu().numpy().shape, (5,))
def _get_logistic_normal_ref_sampler(self, base_dist):
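        # Reference sampler: draw from the base Gaussian and apply the stick-breaking map that
        # LogisticNormal uses to transform Gaussian draws onto the probability simplex.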
def _sampler(num_samples):
x = base_dist.rvs(num_samples)
offset = np.log((x.shape[-1] + 1) - np.ones_like(x).cumsum(-1))
z = 1. / (1. + np.exp(offset - x))
z_cumprod = np.cumprod(1 - z, axis=-1)
y1 = np.pad(z, ((0, 0), (0, 1)), mode='constant', constant_values=1.)
y2 = np.pad(z_cumprod, ((0, 0), (1, 0)), mode='constant', constant_values=1.)
return y1 * y2
return _sampler
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_logisticnormal_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
means = map(np.asarray, [(-1.0, -1.0), (0.0, 0.0), (1.0, 1.0)])
covs = map(np.diag, [(0.1, 0.1), (1.0, 1.0), (10.0, 10.0)])
for mean, cov in product(means, covs):
base_dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)
ref_dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)
ref_dist.rvs = self._get_logistic_normal_ref_sampler(base_dist)
mean_th = torch.tensor(mean)
std_th = torch.tensor(np.sqrt(np.diag(cov)))
self._check_sampler_sampler(
LogisticNormal(mean_th, std_th), ref_dist,
'LogisticNormal(loc={}, scale={})'.format(mean_th, std_th),
multivariate=True)
def test_mixture_same_family_shape(self):
normal_case_1d = MixtureSameFamily(
Categorical(torch.rand(5)),
Normal(torch.randn(5), torch.rand(5)))
normal_case_1d_batch = MixtureSameFamily(
Categorical(torch.rand(3, 5)),
Normal(torch.randn(3, 5), torch.rand(3, 5)))
normal_case_1d_multi_batch = MixtureSameFamily(
Categorical(torch.rand(4, 3, 5)),
Normal(torch.randn(4, 3, 5), torch.rand(4, 3, 5)))
normal_case_2d = MixtureSameFamily(
Categorical(torch.rand(5)),
Independent(Normal(torch.randn(5, 2), torch.rand(5, 2)), 1))
normal_case_2d_batch = MixtureSameFamily(
Categorical(torch.rand(3, 5)),
Independent(Normal(torch.randn(3, 5, 2), torch.rand(3, 5, 2)), 1))
normal_case_2d_multi_batch = MixtureSameFamily(
Categorical(torch.rand(4, 3, 5)),
Independent(Normal(torch.randn(4, 3, 5, 2), torch.rand(4, 3, 5, 2)), 1))
self.assertEqual(normal_case_1d.sample().size(), ())
self.assertEqual(normal_case_1d.sample((2,)).size(), (2,))
self.assertEqual(normal_case_1d.sample((2, 7)).size(), (2, 7))
self.assertEqual(normal_case_1d_batch.sample().size(), (3,))
self.assertEqual(normal_case_1d_batch.sample((2,)).size(), (2, 3))
self.assertEqual(normal_case_1d_batch.sample((2, 7)).size(), (2, 7, 3))
self.assertEqual(normal_case_1d_multi_batch.sample().size(), (4, 3))
self.assertEqual(normal_case_1d_multi_batch.sample((2,)).size(), (2, 4, 3))
self.assertEqual(normal_case_1d_multi_batch.sample((2, 7)).size(), (2, 7, 4, 3))
self.assertEqual(normal_case_2d.sample().size(), (2,))
self.assertEqual(normal_case_2d.sample((2,)).size(), (2, 2))
self.assertEqual(normal_case_2d.sample((2, 7)).size(), (2, 7, 2))
self.assertEqual(normal_case_2d_batch.sample().size(), (3, 2))
self.assertEqual(normal_case_2d_batch.sample((2,)).size(), (2, 3, 2))
self.assertEqual(normal_case_2d_batch.sample((2, 7)).size(), (2, 7, 3, 2))
self.assertEqual(normal_case_2d_multi_batch.sample().size(), (4, 3, 2))
self.assertEqual(normal_case_2d_multi_batch.sample((2,)).size(), (2, 4, 3, 2))
self.assertEqual(normal_case_2d_multi_batch.sample((2, 7)).size(), (2, 7, 4, 3, 2))
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_mixture_same_family_log_prob(self):
probs = torch.rand(5, 5).softmax(dim=-1)
loc = torch.randn(5, 5)
scale = torch.rand(5, 5)
def ref_log_prob(idx, x, log_prob):
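            # Mixture log-density: logsumexp over components of the component log-pdf plus log mixture weight.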
p = probs[idx].numpy()
m = loc[idx].numpy()
s = scale[idx].numpy()
mix = scipy.stats.multinomial(1, p)
comp = scipy.stats.norm(m, s)
expected = scipy.special.logsumexp(comp.logpdf(x) + np.log(mix.p))
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(
MixtureSameFamily(Categorical(probs=probs),
Normal(loc, scale)), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_mixture_same_family_sample(self):
probs = torch.rand(5).softmax(dim=-1)
loc = torch.randn(5)
scale = torch.rand(5)
class ScipyMixtureNormal(object):
def __init__(self, probs, mu, std):
self.probs = probs
self.mu = mu
self.std = std
def rvs(self, n_sample):
comp_samples = [scipy.stats.norm(m, s).rvs(n_sample) for m, s
in zip(self.mu, self.std)]
mix_samples = scipy.stats.multinomial(1, self.probs).rvs(n_sample)
samples = []
for i in range(n_sample):
samples.append(comp_samples[mix_samples[i].argmax()][i])
return np.asarray(samples)
self._check_sampler_sampler(
MixtureSameFamily(Categorical(probs=probs), Normal(loc, scale)),
ScipyMixtureNormal(probs.numpy(), loc.numpy(), scale.numpy()),
'''MixtureSameFamily(Categorical(probs={}),
Normal(loc={}, scale={}))'''.format(probs, loc, scale))
def test_normal(self):
loc = torch.randn(5, 5, requires_grad=True)
scale = torch.randn(5, 5).abs().requires_grad_()
loc_1d = torch.randn(1, requires_grad=True)
scale_1d = torch.randn(1).abs().requires_grad_()
loc_delta = torch.tensor([1.0, 0.0])
scale_delta = torch.tensor([1e-5, 1e-5])
self.assertEqual(Normal(loc, scale).sample().size(), (5, 5))
self.assertEqual(Normal(loc, scale).sample((7,)).size(), (7, 5, 5))
self.assertEqual(Normal(loc_1d, scale_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Normal(loc_1d, scale_1d).sample().size(), (1,))
self.assertEqual(Normal(0.2, .6).sample((1,)).size(), (1,))
self.assertEqual(Normal(-0.7, 50.0).sample((1,)).size(), (1,))
# sample check for extreme values of mean and std
set_rng_seed(1)
self.assertEqual(Normal(loc_delta, scale_delta).sample(sample_shape=(1, 2)),
torch.tensor([[[1.0, 0.0], [1.0, 0.0]]]),
atol=1e-4, rtol=0)
self._gradcheck_log_prob(Normal, (loc, scale))
self._gradcheck_log_prob(Normal, (loc, 1.0))
self._gradcheck_log_prob(Normal, (0.0, scale))
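# Reparameterization check: capture the RNG state so that eps below reproduces the
# noise drawn inside rsample(). For z = loc + scale * eps, dz/dloc = 1 and
# dz/dscale = eps, which the gradient assertions below verify.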
state = torch.get_rng_state()
eps = torch.normal(torch.zeros_like(loc), torch.ones_like(scale))
torch.set_rng_state(state)
z = Normal(loc, scale).rsample()
z.backward(torch.ones_like(z))
self.assertEqual(loc.grad, torch.ones_like(loc))
self.assertEqual(scale.grad, eps)
loc.grad.zero_()
scale.grad.zero_()
self.assertEqual(z.size(), (5, 5))
def ref_log_prob(idx, x, log_prob):
m = loc.view(-1)[idx]
s = scale.view(-1)[idx]
expected = (math.exp(-(x - m) ** 2 / (2 * s ** 2)) /
math.sqrt(2 * math.pi * s ** 2))
self.assertEqual(log_prob, math.log(expected), atol=1e-3, rtol=0)
self._check_log_prob(Normal(loc, scale), ref_log_prob)
self._check_forward_ad(torch.normal)
self._check_forward_ad(lambda x: torch.normal(x, 0.5))
self._check_forward_ad(lambda x: torch.normal(0.2, x))
self._check_forward_ad(lambda x: torch.normal(x, x))
self._check_forward_ad(lambda x: x.normal_())
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_normal_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for loc, scale in product([-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(Normal(loc, scale),
scipy.stats.norm(loc=loc, scale=scale),
'Normal(mean={}, std={})'.format(loc, scale))
def test_lowrank_multivariate_normal_shape(self):
mean = torch.randn(5, 3, requires_grad=True)
mean_no_batch = torch.randn(3, requires_grad=True)
mean_multi_batch = torch.randn(6, 5, 3, requires_grad=True)
# construct PSD covariance
cov_factor = torch.randn(3, 1, requires_grad=True)
cov_diag = torch.randn(3).abs().requires_grad_()
# construct batch of PSD covariances
cov_factor_batched = torch.randn(6, 5, 3, 2, requires_grad=True)
cov_diag_batched = torch.randn(6, 5, 3).abs().requires_grad_()
# ensure that sample, batch, event shapes all handled correctly
self.assertEqual(LowRankMultivariateNormal(mean, cov_factor, cov_diag)
.sample().size(), (5, 3))
self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor, cov_diag)
.sample().size(), (3,))
self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor, cov_diag)
.sample().size(), (6, 5, 3))
self.assertEqual(LowRankMultivariateNormal(mean, cov_factor, cov_diag)
.sample((2,)).size(), (2, 5, 3))
self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor, cov_diag)
.sample((2,)).size(), (2, 3))
self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor, cov_diag)
.sample((2,)).size(), (2, 6, 5, 3))
self.assertEqual(LowRankMultivariateNormal(mean, cov_factor, cov_diag)
.sample((2, 7)).size(), (2, 7, 5, 3))
self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor, cov_diag)
.sample((2, 7)).size(), (2, 7, 3))
self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor, cov_diag)
.sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(LowRankMultivariateNormal(mean, cov_factor_batched, cov_diag_batched)
.sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor_batched, cov_diag_batched)
.sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor_batched, cov_diag_batched)
.sample((2, 7)).size(), (2, 7, 6, 5, 3))
# check gradients
self._gradcheck_log_prob(LowRankMultivariateNormal,
(mean, cov_factor, cov_diag))
self._gradcheck_log_prob(LowRankMultivariateNormal,
(mean_multi_batch, cov_factor, cov_diag))
self._gradcheck_log_prob(LowRankMultivariateNormal,
(mean_multi_batch, cov_factor_batched, cov_diag_batched))
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_lowrank_multivariate_normal_log_prob(self):
mean = torch.randn(3, requires_grad=True)
cov_factor = torch.randn(3, 1, requires_grad=True)
cov_diag = torch.randn(3).abs().requires_grad_()
cov = cov_factor.matmul(cov_factor.t()) + cov_diag.diag()
# check that log_prob values match scipy logpdf
dist1 = LowRankMultivariateNormal(mean, cov_factor, cov_diag)
ref_dist = scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy())
x = dist1.sample((10,))
expected = ref_dist.logpdf(x.numpy())
self.assertEqual(0.0, np.mean((dist1.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)
# Double-check that batched versions behave the same as unbatched
mean = torch.randn(5, 3, requires_grad=True)
cov_factor = torch.randn(5, 3, 2, requires_grad=True)
cov_diag = torch.randn(5, 3).abs().requires_grad_()
dist_batched = LowRankMultivariateNormal(mean, cov_factor, cov_diag)
dist_unbatched = [LowRankMultivariateNormal(mean[i], cov_factor[i], cov_diag[i])
for i in range(mean.size(0))]
x = dist_batched.sample((10,))
batched_prob = dist_batched.log_prob(x)
unbatched_prob = torch.stack([dist_unbatched[i].log_prob(x[:, i]) for i in range(5)]).t()
self.assertEqual(batched_prob.shape, unbatched_prob.shape)
self.assertEqual(0.0, (batched_prob - unbatched_prob).abs().max(), atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_lowrank_multivariate_normal_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
mean = torch.randn(5, requires_grad=True)
cov_factor = torch.randn(5, 1, requires_grad=True)
cov_diag = torch.randn(5).abs().requires_grad_()
cov = cov_factor.matmul(cov_factor.t()) + cov_diag.diag()
self._check_sampler_sampler(LowRankMultivariateNormal(mean, cov_factor, cov_diag),
scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),
'LowRankMultivariateNormal(loc={}, cov_factor={}, cov_diag={})'
.format(mean, cov_factor, cov_diag), multivariate=True)
def test_lowrank_multivariate_normal_properties(self):
loc = torch.randn(5)
cov_factor = torch.randn(5, 2)
cov_diag = torch.randn(5).abs()
cov = cov_factor.matmul(cov_factor.t()) + cov_diag.diag()
m1 = LowRankMultivariateNormal(loc, cov_factor, cov_diag)
m2 = MultivariateNormal(loc=loc, covariance_matrix=cov)
self.assertEqual(m1.mean, m2.mean)
self.assertEqual(m1.variance, m2.variance)
self.assertEqual(m1.covariance_matrix, m2.covariance_matrix)
self.assertEqual(m1.scale_tril, m2.scale_tril)
self.assertEqual(m1.precision_matrix, m2.precision_matrix)
self.assertEqual(m1.entropy(), m2.entropy())
def test_lowrank_multivariate_normal_moments(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
mean = torch.randn(5)
cov_factor = torch.randn(5, 2)
cov_diag = torch.randn(5).abs()
d = LowRankMultivariateNormal(mean, cov_factor, cov_diag)
samples = d.rsample((100000,))
empirical_mean = samples.mean(0)
self.assertEqual(d.mean, empirical_mean, atol=0.01, rtol=0)
empirical_var = samples.var(0)
self.assertEqual(d.variance, empirical_var, atol=0.02, rtol=0)
def test_multivariate_normal_shape(self):
mean = torch.randn(5, 3, requires_grad=True)
mean_no_batch = torch.randn(3, requires_grad=True)
mean_multi_batch = torch.randn(6, 5, 3, requires_grad=True)
# construct PSD covariance
tmp = torch.randn(3, 10)
cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()
prec = cov.inverse().requires_grad_()
scale_tril = torch.linalg.cholesky(cov).requires_grad_()
# construct batch of PSD covariances
tmp = torch.randn(6, 5, 3, 10)
cov_batched = (tmp.unsqueeze(-2) * tmp.unsqueeze(-3)).mean(-1).requires_grad_()
prec_batched = cov_batched.inverse()
scale_tril_batched = torch.linalg.cholesky(cov_batched)
# ensure that sample, batch, event shapes all handled correctly
self.assertEqual(MultivariateNormal(mean, cov).sample().size(), (5, 3))
self.assertEqual(MultivariateNormal(mean_no_batch, cov).sample().size(), (3,))
self.assertEqual(MultivariateNormal(mean_multi_batch, cov).sample().size(), (6, 5, 3))
self.assertEqual(MultivariateNormal(mean, cov).sample((2,)).size(), (2, 5, 3))
self.assertEqual(MultivariateNormal(mean_no_batch, cov).sample((2,)).size(), (2, 3))
self.assertEqual(MultivariateNormal(mean_multi_batch, cov).sample((2,)).size(), (2, 6, 5, 3))
self.assertEqual(MultivariateNormal(mean, cov).sample((2, 7)).size(), (2, 7, 5, 3))
self.assertEqual(MultivariateNormal(mean_no_batch, cov).sample((2, 7)).size(), (2, 7, 3))
self.assertEqual(MultivariateNormal(mean_multi_batch, cov).sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(MultivariateNormal(mean, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(MultivariateNormal(mean_no_batch, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(MultivariateNormal(mean_multi_batch, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(MultivariateNormal(mean, precision_matrix=prec).sample((2, 7)).size(), (2, 7, 5, 3))
self.assertEqual(MultivariateNormal(mean, precision_matrix=prec_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))
self.assertEqual(MultivariateNormal(mean, scale_tril=scale_tril).sample((2, 7)).size(), (2, 7, 5, 3))
self.assertEqual(MultivariateNormal(mean, scale_tril=scale_tril_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))
# check gradients
# We write a custom gradcheck function to maintain the symmetry
# of the perturbed covariances and their inverses (precision)
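# gradcheck perturbs each matrix entry independently, which would make the
# finite-difference inputs asymmetric; symmetrizing (and re-applying tril to
# scale_tril) inside the function keeps every perturbed parameter in the valid domain.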
def multivariate_normal_log_prob_gradcheck(mean, covariance=None, precision=None, scale_tril=None):
mvn_samples = MultivariateNormal(mean, covariance, precision, scale_tril).sample().requires_grad_()
def gradcheck_func(samples, mu, sigma, prec, scale_tril):
if sigma is not None:
sigma = 0.5 * (sigma + sigma.mT) # Ensure symmetry of covariance
if prec is not None:
prec = 0.5 * (prec + prec.mT) # Ensure symmetry of precision
if scale_tril is not None:
scale_tril = scale_tril.tril()
return MultivariateNormal(mu, sigma, prec, scale_tril).log_prob(samples)
gradcheck(gradcheck_func, (mvn_samples, mean, covariance, precision, scale_tril), raise_exception=True)
multivariate_normal_log_prob_gradcheck(mean, cov)
multivariate_normal_log_prob_gradcheck(mean_multi_batch, cov)
multivariate_normal_log_prob_gradcheck(mean_multi_batch, cov_batched)
multivariate_normal_log_prob_gradcheck(mean, None, prec)
multivariate_normal_log_prob_gradcheck(mean_no_batch, None, prec_batched)
multivariate_normal_log_prob_gradcheck(mean, None, None, scale_tril)
multivariate_normal_log_prob_gradcheck(mean_no_batch, None, None, scale_tril_batched)
def test_multivariate_normal_stable_with_precision_matrix(self):
x = torch.randn(10)
P = torch.exp(-(x - x.unsqueeze(-1)) ** 2) # RBF kernel
MultivariateNormal(x.new_zeros(10), precision_matrix=P)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_multivariate_normal_log_prob(self):
mean = torch.randn(3, requires_grad=True)
tmp = torch.randn(3, 10)
cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()
prec = cov.inverse().requires_grad_()
scale_tril = torch.linalg.cholesky(cov).requires_grad_()
# check that logprob values match scipy logpdf,
# and that covariance and scale_tril parameters are equivalent
dist1 = MultivariateNormal(mean, cov)
dist2 = MultivariateNormal(mean, precision_matrix=prec)
dist3 = MultivariateNormal(mean, scale_tril=scale_tril)
ref_dist = scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy())
x = dist1.sample((10,))
expected = ref_dist.logpdf(x.numpy())
self.assertEqual(0.0, np.mean((dist1.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)
self.assertEqual(0.0, np.mean((dist2.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)
self.assertEqual(0.0, np.mean((dist3.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)
# Double-check that batched versions behave the same as unbatched
mean = torch.randn(5, 3, requires_grad=True)
tmp = torch.randn(5, 3, 10)
cov = (tmp.unsqueeze(-2) * tmp.unsqueeze(-3)).mean(-1).requires_grad_()
dist_batched = MultivariateNormal(mean, cov)
dist_unbatched = [MultivariateNormal(mean[i], cov[i]) for i in range(mean.size(0))]
x = dist_batched.sample((10,))
batched_prob = dist_batched.log_prob(x)
unbatched_prob = torch.stack([dist_unbatched[i].log_prob(x[:, i]) for i in range(5)]).t()
self.assertEqual(batched_prob.shape, unbatched_prob.shape)
self.assertEqual(0.0, (batched_prob - unbatched_prob).abs().max(), atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_multivariate_normal_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
mean = torch.randn(3, requires_grad=True)
tmp = torch.randn(3, 10)
cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()
prec = cov.inverse().requires_grad_()
scale_tril = torch.linalg.cholesky(cov).requires_grad_()
self._check_sampler_sampler(MultivariateNormal(mean, cov),
scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),
'MultivariateNormal(loc={}, cov={})'.format(mean, cov),
multivariate=True)
self._check_sampler_sampler(MultivariateNormal(mean, precision_matrix=prec),
scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),
'MultivariateNormal(loc={}, precision_matrix={})'.format(mean, prec),
multivariate=True)
self._check_sampler_sampler(MultivariateNormal(mean, scale_tril=scale_tril),
scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),
'MultivariateNormal(loc={}, scale_tril={})'.format(mean, scale_tril),
multivariate=True)
def test_multivariate_normal_properties(self):
loc = torch.randn(5)
scale_tril = transform_to(constraints.lower_cholesky)(torch.randn(5, 5))
m = MultivariateNormal(loc=loc, scale_tril=scale_tril)
self.assertEqual(m.covariance_matrix, m.scale_tril.mm(m.scale_tril.t()))
self.assertEqual(m.covariance_matrix.mm(m.precision_matrix), torch.eye(m.event_shape[0]))
self.assertEqual(m.scale_tril, torch.linalg.cholesky(m.covariance_matrix))
def test_multivariate_normal_moments(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
mean = torch.randn(5)
scale_tril = transform_to(constraints.lower_cholesky)(torch.randn(5, 5))
d = MultivariateNormal(mean, scale_tril=scale_tril)
samples = d.rsample((100000,))
empirical_mean = samples.mean(0)
self.assertEqual(d.mean, empirical_mean, atol=0.01, rtol=0)
empirical_var = samples.var(0)
self.assertEqual(d.variance, empirical_var, atol=0.05, rtol=0)
# We apply the same tests used for the multivariate normal distribution to the Wishart distribution
def test_wishart_shape(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
ndim = 3
df = torch.rand(5, requires_grad=True) + ndim
df_no_batch = torch.rand([], requires_grad=True) + ndim
df_multi_batch = torch.rand(6, 5, requires_grad=True) + ndim
# construct PSD covariance
tmp = torch.randn(ndim, 10)
cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()
prec = cov.inverse().requires_grad_()
scale_tril = torch.linalg.cholesky(cov).requires_grad_()
# construct batch of PSD covariances
tmp = torch.randn(6, 5, ndim, 10)
cov_batched = (tmp.unsqueeze(-2) * tmp.unsqueeze(-3)).mean(-1).requires_grad_()
prec_batched = cov_batched.inverse()
scale_tril_batched = torch.linalg.cholesky(cov_batched)
# ensure that sample, batch, event shapes all handled correctly
self.assertEqual(Wishart(df, cov).sample().size(), (5, ndim, ndim))
self.assertEqual(Wishart(df_no_batch, cov).sample().size(), (ndim, ndim))
self.assertEqual(Wishart(df_multi_batch, cov).sample().size(), (6, 5, ndim, ndim))
self.assertEqual(Wishart(df, cov).sample((2,)).size(), (2, 5, ndim, ndim))
self.assertEqual(Wishart(df_no_batch, cov).sample((2,)).size(), (2, ndim, ndim))
self.assertEqual(Wishart(df_multi_batch, cov).sample((2,)).size(), (2, 6, 5, ndim, ndim))
self.assertEqual(Wishart(df, cov).sample((2, 7)).size(), (2, 7, 5, ndim, ndim))
self.assertEqual(Wishart(df_no_batch, cov).sample((2, 7)).size(), (2, 7, ndim, ndim))
self.assertEqual(Wishart(df_multi_batch, cov).sample((2, 7)).size(), (2, 7, 6, 5, ndim, ndim))
self.assertEqual(Wishart(df, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, ndim, ndim))
self.assertEqual(Wishart(df_no_batch, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, ndim, ndim))
self.assertEqual(Wishart(df_multi_batch, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, ndim, ndim))
self.assertEqual(Wishart(df, precision_matrix=prec).sample((2, 7)).size(), (2, 7, 5, ndim, ndim))
self.assertEqual(Wishart(df, precision_matrix=prec_batched).sample((2, 7)).size(), (2, 7, 6, 5, ndim, ndim))
self.assertEqual(Wishart(df, scale_tril=scale_tril).sample((2, 7)).size(), (2, 7, 5, ndim, ndim))
self.assertEqual(Wishart(df, scale_tril=scale_tril_batched).sample((2, 7)).size(), (2, 7, 6, 5, ndim, ndim))
# check gradients
# Modified from the multivariate normal gradcheck above and applied to Wishart
def wishart_log_prob_gradcheck(df=None, covariance=None, precision=None, scale_tril=None):
wishart_samples = Wishart(df, covariance, precision, scale_tril).sample().requires_grad_()
def gradcheck_func(samples, nu, sigma, prec, scale_tril):
if sigma is not None:
sigma = 0.5 * (sigma + sigma.mT) # Ensure symmetry of covariance
if prec is not None:
prec = 0.5 * (prec + prec.mT) # Ensure symmetry of precision
if scale_tril is not None:
scale_tril = scale_tril.tril()
return Wishart(nu, sigma, prec, scale_tril).log_prob(samples)
gradcheck(gradcheck_func, (wishart_samples, df, covariance, precision, scale_tril), raise_exception=True)
wishart_log_prob_gradcheck(df, cov)
wishart_log_prob_gradcheck(df_multi_batch, cov)
wishart_log_prob_gradcheck(df_multi_batch, cov_batched)
wishart_log_prob_gradcheck(df, None, prec)
wishart_log_prob_gradcheck(df_no_batch, None, prec_batched)
wishart_log_prob_gradcheck(df, None, None, scale_tril)
wishart_log_prob_gradcheck(df_no_batch, None, None, scale_tril_batched)
def test_wishart_stable_with_precision_matrix(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
ndim = 10
x = torch.randn(ndim)
P = torch.exp(-(x - x.unsqueeze(-1)) ** 2) # RBF kernel
Wishart(torch.tensor(ndim), precision_matrix=P)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_wishart_log_prob(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
ndim = 3
df = torch.rand([], requires_grad=True) + ndim - 1
# SciPy allows ndim - 1 < df < ndim for the Wishart distribution since version 1.7.0
if version.parse(scipy.__version__) < version.parse("1.7.0"):
df += 1.
tmp = torch.randn(ndim, 10)
cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()
prec = cov.inverse().requires_grad_()
scale_tril = torch.linalg.cholesky(cov).requires_grad_()
# check that logprob values match scipy logpdf,
# and that covariance and scale_tril parameters are equivalent
dist1 = Wishart(df, cov)
dist2 = Wishart(df, precision_matrix=prec)
dist3 = Wishart(df, scale_tril=scale_tril)
ref_dist = scipy.stats.wishart(df.item(), cov.detach().numpy())
x = dist1.sample((1000,))
expected = ref_dist.logpdf(x.transpose(0, 2).numpy())
self.assertEqual(0.0, np.mean((dist1.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)
self.assertEqual(0.0, np.mean((dist2.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)
self.assertEqual(0.0, np.mean((dist3.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)
# Double-check that batched versions behave the same as unbatched
df = torch.rand(5, requires_grad=True) + ndim - 1
# SciPy allows ndim - 1 < df < ndim for the Wishart distribution since version 1.7.0
if version.parse(scipy.__version__) < version.parse("1.7.0"):
df += 1.
tmp = torch.randn(5, ndim, 10)
cov = (tmp.unsqueeze(-2) * tmp.unsqueeze(-3)).mean(-1).requires_grad_()
dist_batched = Wishart(df, cov)
dist_unbatched = [Wishart(df[i], cov[i]) for i in range(df.size(0))]
x = dist_batched.sample((1000,))
batched_prob = dist_batched.log_prob(x)
unbatched_prob = torch.stack([dist_unbatched[i].log_prob(x[:, i]) for i in range(5)]).t()
self.assertEqual(batched_prob.shape, unbatched_prob.shape)
self.assertEqual(0.0, (batched_prob - unbatched_prob).abs().max(), atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_wishart_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
ndim = 3
df = torch.rand([], requires_grad=True) + ndim - 1
# SciPy allows ndim - 1 < df < ndim for the Wishart distribution since version 1.7.0
if version.parse(scipy.__version__) < version.parse("1.7.0"):
df += 1.
tmp = torch.randn(ndim, 10)
cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()
prec = cov.inverse().requires_grad_()
scale_tril = torch.linalg.cholesky(cov).requires_grad_()
ref_dist = scipy.stats.wishart(df.item(), cov.detach().numpy())
self._check_sampler_sampler(Wishart(df, cov),
ref_dist,
'Wishart(df={}, covariance_matrix={})'.format(df, cov),
multivariate=True)
self._check_sampler_sampler(Wishart(df, precision_matrix=prec),
ref_dist,
'Wishart(df={}, precision_matrix={})'.format(df, prec),
multivariate=True)
self._check_sampler_sampler(Wishart(df, scale_tril=scale_tril),
ref_dist,
'Wishart(df={}, scale_tril={})'.format(df, scale_tril),
multivariate=True)
def test_wishart_properties(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
ndim = 5
df = torch.rand([]) + ndim - 1
scale_tril = transform_to(constraints.lower_cholesky)(torch.randn(ndim, ndim))
m = Wishart(df=df, scale_tril=scale_tril)
self.assertEqual(m.covariance_matrix, m.scale_tril.mm(m.scale_tril.t()))
self.assertEqual(m.covariance_matrix.mm(m.precision_matrix), torch.eye(m.event_shape[0]))
self.assertEqual(m.scale_tril, torch.linalg.cholesky(m.covariance_matrix))
def test_wishart_moments(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
ndim = 3
df = torch.rand([]) + ndim - 1
scale_tril = transform_to(constraints.lower_cholesky)(torch.randn(ndim, ndim))
d = Wishart(df=df, scale_tril=scale_tril)
samples = d.rsample((ndim * ndim * 100000,))
empirical_mean = samples.mean(0)
self.assertEqual(d.mean, empirical_mean, atol=0.5, rtol=0)
empirical_var = samples.var(0)
self.assertEqual(d.variance, empirical_var, atol=0.5, rtol=0)
def test_exponential(self):
rate = torch.randn(5, 5).abs().requires_grad_()
rate_1d = torch.randn(1).abs().requires_grad_()
self.assertEqual(Exponential(rate).sample().size(), (5, 5))
self.assertEqual(Exponential(rate).sample((7,)).size(), (7, 5, 5))
self.assertEqual(Exponential(rate_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Exponential(rate_1d).sample().size(), (1,))
self.assertEqual(Exponential(0.2).sample((1,)).size(), (1,))
self.assertEqual(Exponential(50.0).sample((1,)).size(), (1,))
self._gradcheck_log_prob(Exponential, (rate,))
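# Exponential's reparameterization amounts to z = eps / rate with a unit-rate
# exponential eps drawn from the same RNG state, so dz/drate = -eps / rate**2,
# which the gradient assertion below verifies.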
state = torch.get_rng_state()
eps = rate.new(rate.size()).exponential_()
torch.set_rng_state(state)
z = Exponential(rate).rsample()
z.backward(torch.ones_like(z))
self.assertEqual(rate.grad, -eps / rate**2)
rate.grad.zero_()
self.assertEqual(z.size(), (5, 5))
def ref_log_prob(idx, x, log_prob):
m = rate.view(-1)[idx]
expected = math.log(m) - m * x
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Exponential(rate), ref_log_prob)
self._check_forward_ad(lambda x: x.exponential_())
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_exponential_sample(self):
set_rng_seed(1) # see Note [Randomized statistical tests]
for rate in [1e-5, 1.0, 10.]:
self._check_sampler_sampler(Exponential(rate),
scipy.stats.expon(scale=1. / rate),
'Exponential(rate={})'.format(rate))
def test_laplace(self):
loc = torch.randn(5, 5, requires_grad=True)
scale = torch.randn(5, 5).abs().requires_grad_()
loc_1d = torch.randn(1, requires_grad=True)
scale_1d = torch.randn(1).abs().requires_grad_()
loc_delta = torch.tensor([1.0, 0.0])
scale_delta = torch.tensor([1e-5, 1e-5])
self.assertEqual(Laplace(loc, scale).sample().size(), (5, 5))
self.assertEqual(Laplace(loc, scale).sample((7,)).size(), (7, 5, 5))
self.assertEqual(Laplace(loc_1d, scale_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Laplace(loc_1d, scale_1d).sample().size(), (1,))
self.assertEqual(Laplace(0.2, .6).sample((1,)).size(), (1,))
self.assertEqual(Laplace(-0.7, 50.0).sample((1,)).size(), (1,))
# sample check for extreme values of mean and std
set_rng_seed(0)
self.assertEqual(Laplace(loc_delta, scale_delta).sample(sample_shape=(1, 2)),
torch.tensor([[[1.0, 0.0], [1.0, 0.0]]]),
atol=1e-4, rtol=0)
self._gradcheck_log_prob(Laplace, (loc, scale))
self._gradcheck_log_prob(Laplace, (loc, 1.0))
self._gradcheck_log_prob(Laplace, (0.0, scale))
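# The test reconstructs eps ~ Uniform(-0.5, 0.5) from the same RNG state; Laplace's
# reparameterization is equivalent to z = loc - scale * eps.sign() * log1p(-2 * |eps|),
# so dz/dloc = 1 and dz/dscale = -eps.sign() * log1p(-2 * |eps|), matching the
# gradient assertions below.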
state = torch.get_rng_state()
eps = torch.ones_like(loc).uniform_(-.5, .5)
torch.set_rng_state(state)
z = Laplace(loc, scale).rsample()
z.backward(torch.ones_like(z))
self.assertEqual(loc.grad, torch.ones_like(loc))
self.assertEqual(scale.grad, -eps.sign() * torch.log1p(-2 * eps.abs()))
loc.grad.zero_()
scale.grad.zero_()
self.assertEqual(z.size(), (5, 5))
def ref_log_prob(idx, x, log_prob):
m = loc.view(-1)[idx]
s = scale.view(-1)[idx]
expected = (-math.log(2 * s) - abs(x - m) / s)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Laplace(loc, scale), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_laplace_sample(self):
set_rng_seed(1) # see Note [Randomized statistical tests]
for loc, scale in product([-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(Laplace(loc, scale),
scipy.stats.laplace(loc=loc, scale=scale),
'Laplace(loc={}, scale={})'.format(loc, scale))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gamma_shape(self):
alpha = torch.randn(2, 3).exp().requires_grad_()
beta = torch.randn(2, 3).exp().requires_grad_()
alpha_1d = torch.randn(1).exp().requires_grad_()
beta_1d = torch.randn(1).exp().requires_grad_()
self.assertEqual(Gamma(alpha, beta).sample().size(), (2, 3))
self.assertEqual(Gamma(alpha, beta).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Gamma(alpha_1d, beta_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Gamma(alpha_1d, beta_1d).sample().size(), (1,))
self.assertEqual(Gamma(0.5, 0.5).sample().size(), ())
self.assertEqual(Gamma(0.5, 0.5).sample((1,)).size(), (1,))
def ref_log_prob(idx, x, log_prob):
a = alpha.view(-1)[idx].detach()
b = beta.view(-1)[idx].detach()
expected = scipy.stats.gamma.logpdf(x, a, scale=1 / b)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Gamma(alpha, beta), ref_log_prob)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gamma_gpu_shape(self):
alpha = torch.randn(2, 3).cuda().exp().requires_grad_()
beta = torch.randn(2, 3).cuda().exp().requires_grad_()
alpha_1d = torch.randn(1).cuda().exp().requires_grad_()
beta_1d = torch.randn(1).cuda().exp().requires_grad_()
self.assertEqual(Gamma(alpha, beta).sample().size(), (2, 3))
self.assertEqual(Gamma(alpha, beta).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Gamma(alpha_1d, beta_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Gamma(alpha_1d, beta_1d).sample().size(), (1,))
self.assertEqual(Gamma(0.5, 0.5).sample().size(), ())
self.assertEqual(Gamma(0.5, 0.5).sample((1,)).size(), (1,))
def ref_log_prob(idx, x, log_prob):
a = alpha.view(-1)[idx].detach().cpu()
b = beta.view(-1)[idx].detach().cpu()
expected = scipy.stats.gamma.logpdf(x.cpu(), a, scale=1 / b)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Gamma(alpha, beta), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gamma_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for alpha, beta in product([0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(Gamma(alpha, beta),
scipy.stats.gamma(alpha, scale=1.0 / beta),
'Gamma(concentration={}, rate={})'.format(alpha, beta))
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_gamma_gpu_sample(self):
set_rng_seed(0)
for alpha, beta in product([0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):
a, b = torch.tensor([alpha]).cuda(), torch.tensor([beta]).cuda()
self._check_sampler_sampler(Gamma(a, b),
scipy.stats.gamma(alpha, scale=1.0 / beta),
'Gamma(alpha={}, beta={})'.format(alpha, beta),
failure_rate=1e-4)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_pareto(self):
scale = torch.randn(2, 3).abs().requires_grad_()
alpha = torch.randn(2, 3).abs().requires_grad_()
scale_1d = torch.randn(1).abs().requires_grad_()
alpha_1d = torch.randn(1).abs().requires_grad_()
self.assertEqual(Pareto(scale_1d, 0.5).mean, inf)
self.assertEqual(Pareto(scale_1d, 0.5).variance, inf)
self.assertEqual(Pareto(scale, alpha).sample().size(), (2, 3))
self.assertEqual(Pareto(scale, alpha).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Pareto(scale_1d, alpha_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Pareto(scale_1d, alpha_1d).sample().size(), (1,))
self.assertEqual(Pareto(1.0, 1.0).sample().size(), ())
self.assertEqual(Pareto(1.0, 1.0).sample((1,)).size(), (1,))
def ref_log_prob(idx, x, log_prob):
s = scale.view(-1)[idx].detach()
a = alpha.view(-1)[idx].detach()
expected = scipy.stats.pareto.logpdf(x, a, scale=s)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Pareto(scale, alpha), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_pareto_sample(self):
set_rng_seed(1) # see Note [Randomized statistical tests]
for scale, alpha in product([0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(Pareto(scale, alpha),
scipy.stats.pareto(alpha, scale=scale),
'Pareto(scale={}, alpha={})'.format(scale, alpha))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gumbel(self):
loc = torch.randn(2, 3, requires_grad=True)
scale = torch.randn(2, 3).abs().requires_grad_()
loc_1d = torch.randn(1, requires_grad=True)
scale_1d = torch.randn(1).abs().requires_grad_()
self.assertEqual(Gumbel(loc, scale).sample().size(), (2, 3))
self.assertEqual(Gumbel(loc, scale).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Gumbel(loc_1d, scale_1d).sample().size(), (1,))
self.assertEqual(Gumbel(loc_1d, scale_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Gumbel(1.0, 1.0).sample().size(), ())
self.assertEqual(Gumbel(1.0, 1.0).sample((1,)).size(), (1,))
def ref_log_prob(idx, x, log_prob):
l = loc.view(-1)[idx].detach()
s = scale.view(-1)[idx].detach()
expected = scipy.stats.gumbel_r.logpdf(x, loc=l, scale=s)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Gumbel(loc, scale), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gumbel_sample(self):
set_rng_seed(1) # see note [Randomized statistical tests]
for loc, scale in product([-5.0, -1.0, -0.1, 0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(Gumbel(loc, scale),
scipy.stats.gumbel_r(loc=loc, scale=scale),
'Gumbel(loc={}, scale={})'.format(loc, scale))
def test_kumaraswamy_shape(self):
concentration1 = torch.randn(2, 3).abs().requires_grad_()
concentration0 = torch.randn(2, 3).abs().requires_grad_()
concentration1_1d = torch.randn(1).abs().requires_grad_()
concentration0_1d = torch.randn(1).abs().requires_grad_()
self.assertEqual(Kumaraswamy(concentration1, concentration0).sample().size(), (2, 3))
self.assertEqual(Kumaraswamy(concentration1, concentration0).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Kumaraswamy(concentration1_1d, concentration0_1d).sample().size(), (1,))
self.assertEqual(Kumaraswamy(concentration1_1d, concentration0_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Kumaraswamy(1.0, 1.0).sample().size(), ())
self.assertEqual(Kumaraswamy(1.0, 1.0).sample((1,)).size(), (1,))
# The Kumaraswamy distribution is not implemented in SciPy,
# hence these tests are explicit.
def test_kumaraswamy_mean_variance(self):
c1_1 = torch.randn(2, 3).abs().requires_grad_()
c0_1 = torch.randn(2, 3).abs().requires_grad_()
c1_2 = torch.randn(4).abs().requires_grad_()
c0_2 = torch.randn(4).abs().requires_grad_()
cases = [(c1_1, c0_1), (c1_2, c0_2)]
for i, (a, b) in enumerate(cases):
m = Kumaraswamy(a, b)
samples = m.sample((60000, ))
expected = samples.mean(0)
actual = m.mean
error = (expected - actual).abs()
max_error = max(error[error == error])
self.assertLess(max_error, 0.01,
"Kumaraswamy example {}/{}, incorrect .mean".format(i + 1, len(cases)))
expected = samples.var(0)
actual = m.variance
error = (expected - actual).abs()
max_error = max(error[error == error])
self.assertLess(max_error, 0.01,
"Kumaraswamy example {}/{}, incorrect .variance".format(i + 1, len(cases)))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_fishersnedecor(self):
df1 = torch.randn(2, 3).abs().requires_grad_()
df2 = torch.randn(2, 3).abs().requires_grad_()
df1_1d = torch.randn(1).abs()
df2_1d = torch.randn(1).abs()
self.assertTrue(is_all_nan(FisherSnedecor(1, 2).mean))
self.assertTrue(is_all_nan(FisherSnedecor(1, 4).variance))
self.assertEqual(FisherSnedecor(df1, df2).sample().size(), (2, 3))
self.assertEqual(FisherSnedecor(df1, df2).sample((5,)).size(), (5, 2, 3))
self.assertEqual(FisherSnedecor(df1_1d, df2_1d).sample().size(), (1,))
self.assertEqual(FisherSnedecor(df1_1d, df2_1d).sample((1,)).size(), (1, 1))
self.assertEqual(FisherSnedecor(1.0, 1.0).sample().size(), ())
self.assertEqual(FisherSnedecor(1.0, 1.0).sample((1,)).size(), (1,))
def ref_log_prob(idx, x, log_prob):
f1 = df1.view(-1)[idx].detach()
f2 = df2.view(-1)[idx].detach()
expected = scipy.stats.f.logpdf(x, f1, f2)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(FisherSnedecor(df1, df2), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_fishersnedecor_sample(self):
set_rng_seed(1) # see note [Randomized statistical tests]
for df1, df2 in product([0.1, 0.5, 1.0, 5.0, 10.0], [0.1, 0.5, 1.0, 5.0, 10.0]):
self._check_sampler_sampler(FisherSnedecor(df1, df2),
scipy.stats.f(df1, df2),
'FisherSnedecor(df1={}, df2={})'.format(df1, df2))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_chi2_shape(self):
df = torch.randn(2, 3).exp().requires_grad_()
df_1d = torch.randn(1).exp().requires_grad_()
self.assertEqual(Chi2(df).sample().size(), (2, 3))
self.assertEqual(Chi2(df).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Chi2(df_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Chi2(df_1d).sample().size(), (1,))
self.assertEqual(Chi2(torch.tensor(0.5, requires_grad=True)).sample().size(), ())
self.assertEqual(Chi2(0.5).sample().size(), ())
self.assertEqual(Chi2(0.5).sample((1,)).size(), (1,))
def ref_log_prob(idx, x, log_prob):
d = df.view(-1)[idx].detach()
expected = scipy.stats.chi2.logpdf(x, d)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(Chi2(df), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_chi2_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for df in [0.1, 1.0, 5.0]:
self._check_sampler_sampler(Chi2(df),
scipy.stats.chi2(df),
'Chi2(df={})'.format(df))
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_studentT(self):
df = torch.randn(2, 3).exp().requires_grad_()
df_1d = torch.randn(1).exp().requires_grad_()
self.assertTrue(is_all_nan(StudentT(1).mean))
self.assertTrue(is_all_nan(StudentT(1).variance))
self.assertEqual(StudentT(2).variance, inf)
self.assertEqual(StudentT(df).sample().size(), (2, 3))
self.assertEqual(StudentT(df).sample((5,)).size(), (5, 2, 3))
self.assertEqual(StudentT(df_1d).sample((1,)).size(), (1, 1))
self.assertEqual(StudentT(df_1d).sample().size(), (1,))
self.assertEqual(StudentT(torch.tensor(0.5, requires_grad=True)).sample().size(), ())
self.assertEqual(StudentT(0.5).sample().size(), ())
self.assertEqual(StudentT(0.5).sample((1,)).size(), (1,))
def ref_log_prob(idx, x, log_prob):
d = df.view(-1)[idx].detach()
expected = scipy.stats.t.logpdf(x, d)
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)
self._check_log_prob(StudentT(df), ref_log_prob)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_studentT_sample(self):
set_rng_seed(11) # see Note [Randomized statistical tests]
for df, loc, scale in product([0.1, 1.0, 5.0, 10.0], [-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(StudentT(df=df, loc=loc, scale=scale),
scipy.stats.t(df=df, loc=loc, scale=scale),
'StudentT(df={}, loc={}, scale={})'.format(df, loc, scale))
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_studentT_log_prob(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
num_samples = 10
for df, loc, scale in product([0.1, 1.0, 5.0, 10.0], [-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):
dist = StudentT(df=df, loc=loc, scale=scale)
x = dist.sample((num_samples,))
actual_log_prob = dist.log_prob(x)
for i in range(num_samples):
expected_log_prob = scipy.stats.t.logpdf(x[i], df=df, loc=loc, scale=scale)
self.assertEqual(float(actual_log_prob[i]), float(expected_log_prob), atol=1e-3, rtol=0)
def test_dirichlet_shape(self):
alpha = torch.randn(2, 3).exp().requires_grad_()
alpha_1d = torch.randn(4).exp().requires_grad_()
self.assertEqual(Dirichlet(alpha).sample().size(), (2, 3))
self.assertEqual(Dirichlet(alpha).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Dirichlet(alpha_1d).sample().size(), (4,))
self.assertEqual(Dirichlet(alpha_1d).sample((1,)).size(), (1, 4))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_dirichlet_log_prob(self):
num_samples = 10
alpha = torch.exp(torch.randn(5))
dist = Dirichlet(alpha)
x = dist.sample((num_samples,))
actual_log_prob = dist.log_prob(x)
for i in range(num_samples):
expected_log_prob = scipy.stats.dirichlet.logpdf(x[i].numpy(), alpha.numpy())
self.assertEqual(actual_log_prob[i], expected_log_prob, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_dirichlet_sample(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
alpha = torch.exp(torch.randn(3))
self._check_sampler_sampler(Dirichlet(alpha),
scipy.stats.dirichlet(alpha.numpy()),
'Dirichlet(alpha={})'.format(list(alpha)),
multivariate=True)
def test_dirichlet_mode(self):
# Test a few edge cases for the Dirichlet distribution mode. This also covers beta distributions.
concentrations_and_modes = [
([2, 2, 1], [.5, .5, 0.]),
([3, 2, 1], [2 / 3, 1 / 3, 0]),
([.5, .2, .2], [1., 0., 0.]),
([1, 1, 1], [nan, nan, nan]),
]
for concentration, mode in concentrations_and_modes:
dist = Dirichlet(torch.tensor(concentration))
self.assertEqual(dist.mode, torch.tensor(mode))
def test_beta_shape(self):
con1 = torch.randn(2, 3).exp().requires_grad_()
con0 = torch.randn(2, 3).exp().requires_grad_()
con1_1d = torch.randn(4).exp().requires_grad_()
con0_1d = torch.randn(4).exp().requires_grad_()
self.assertEqual(Beta(con1, con0).sample().size(), (2, 3))
self.assertEqual(Beta(con1, con0).sample((5,)).size(), (5, 2, 3))
self.assertEqual(Beta(con1_1d, con0_1d).sample().size(), (4,))
self.assertEqual(Beta(con1_1d, con0_1d).sample((1,)).size(), (1, 4))
self.assertEqual(Beta(0.1, 0.3).sample().size(), ())
self.assertEqual(Beta(0.1, 0.3).sample((5,)).size(), (5,))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_beta_log_prob(self):
for _ in range(100):
con1 = np.exp(np.random.normal())
con0 = np.exp(np.random.normal())
dist = Beta(con1, con0)
x = dist.sample()
actual_log_prob = dist.log_prob(x).sum()
expected_log_prob = scipy.stats.beta.logpdf(x, con1, con0)
self.assertEqual(float(actual_log_prob), float(expected_log_prob), atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_beta_sample(self):
set_rng_seed(1) # see Note [Randomized statistical tests]
for con1, con0 in product([0.1, 1.0, 10.0], [0.1, 1.0, 10.0]):
self._check_sampler_sampler(Beta(con1, con0),
scipy.stats.beta(con1, con0),
'Beta(alpha={}, beta={})'.format(con1, con0))
# Check that small alphas do not cause NaNs.
for Tensor in [torch.FloatTensor, torch.DoubleTensor]:
x = Beta(Tensor([1e-6]), Tensor([1e-6])).sample()[0]
self.assertTrue(np.isfinite(x) and x > 0, 'Invalid Beta.sample(): {}'.format(x))
def test_beta_underflow(self):
# For low values of (alpha, beta), the gamma samples can underflow
# with float32 and result in a spurious mode at 0.5. To prevent this,
# torch._sample_dirichlet works with double precision for intermediate
# calculations.
set_rng_seed(1)
num_samples = 50000
for dtype in [torch.float, torch.double]:
conc = torch.tensor(1e-2, dtype=dtype)
beta_samples = Beta(conc, conc).sample([num_samples])
self.assertEqual((beta_samples == 0).sum(), 0)
self.assertEqual((beta_samples == 1).sum(), 0)
# assert support is concentrated around 0 and 1
frac_zeros = float((beta_samples < 0.1).sum()) / num_samples
frac_ones = float((beta_samples > 0.9).sum()) / num_samples
self.assertEqual(frac_zeros, 0.5, atol=0.05, rtol=0)
self.assertEqual(frac_ones, 0.5, atol=0.05, rtol=0)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
def test_beta_underflow_gpu(self):
set_rng_seed(1)
num_samples = 50000
conc = torch.tensor(1e-2, dtype=torch.float64).cuda()
beta_samples = Beta(conc, conc).sample([num_samples])
self.assertEqual((beta_samples == 0).sum(), 0)
self.assertEqual((beta_samples == 1).sum(), 0)
# assert support is concentrated around 0 and 1
frac_zeros = float((beta_samples < 0.1).sum()) / num_samples
frac_ones = float((beta_samples > 0.9).sum()) / num_samples
# TODO: increase precision once imbalance on GPU is fixed.
self.assertEqual(frac_zeros, 0.5, atol=0.12, rtol=0)
self.assertEqual(frac_ones, 0.5, atol=0.12, rtol=0)
def test_continuous_bernoulli(self):
p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
r = torch.tensor(0.3, requires_grad=True)
s = 0.3
self.assertEqual(ContinuousBernoulli(p).sample((8,)).size(), (8, 3))
self.assertFalse(ContinuousBernoulli(p).sample().requires_grad)
self.assertEqual(ContinuousBernoulli(r).sample((8,)).size(), (8,))
self.assertEqual(ContinuousBernoulli(r).sample().size(), ())
self.assertEqual(ContinuousBernoulli(r).sample((3, 2)).size(), (3, 2,))
self.assertEqual(ContinuousBernoulli(s).sample().size(), ())
self._gradcheck_log_prob(ContinuousBernoulli, (p,))
def ref_log_prob(idx, val, log_prob):
prob = p[idx]
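# Continuous Bernoulli density: p**x * (1 - p)**(1 - x) * C(p) with normalizing
# constant C(p) = 2 * atanh(1 - 2p) / (1 - 2p) (and C(0.5) = 2); near p = 0.5 a
# Taylor expansion of log C(p) is used for numerical stability.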
if prob > 0.499 and prob < 0.501: # using default value of lim here
log_norm_const = math.log(2.) + 4. / 3. * math.pow(prob - 0.5, 2) + 104. / 45. * math.pow(prob - 0.5, 4)
else:
log_norm_const = math.log(2. * math.atanh(1. - 2. * prob) / (1. - 2.0 * prob))
res = val * math.log(prob) + (1. - val) * math.log1p(-prob) + log_norm_const
self.assertEqual(log_prob, res)
self._check_log_prob(ContinuousBernoulli(p), ref_log_prob)
self._check_log_prob(ContinuousBernoulli(logits=p.log() - (-p).log1p()), ref_log_prob)
# check entropy computation
self.assertEqual(ContinuousBernoulli(p).entropy(), torch.tensor([-0.02938, -0.07641, -0.00682]), atol=1e-4, rtol=0)
# entropy below corresponds to the clamped value of prob when using float64
# the value for float32 should be -1.76898
self.assertEqual(ContinuousBernoulli(torch.tensor([0.0])).entropy(), torch.tensor([-2.58473]), atol=1e-5, rtol=0)
self.assertEqual(ContinuousBernoulli(s).entropy(), torch.tensor(-0.02938), atol=1e-4, rtol=0)
def test_continuous_bernoulli_3d(self):
p = torch.full((2, 3, 5), 0.5).requires_grad_()
self.assertEqual(ContinuousBernoulli(p).sample().size(), (2, 3, 5))
self.assertEqual(ContinuousBernoulli(p).sample(sample_shape=(2, 5)).size(),
(2, 5, 2, 3, 5))
self.assertEqual(ContinuousBernoulli(p).sample((2,)).size(), (2, 2, 3, 5))
def test_lkj_cholesky_log_prob(self):
def tril_cholesky_to_tril_corr(x):
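# Maps the strictly-lower-triangular entries of a Cholesky factor with unit-norm
# rows to the strictly-lower-triangular entries of the correlation matrix L @ L.T;
# the diagonal is reconstructed so that each row has unit norm.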
x = vec_to_tril_matrix(x, -1)
diag = (1 - (x * x).sum(-1)).sqrt().diag_embed()
x = x + diag
return tril_matrix_to_vec(x @ x.T, -1)
for dim in range(2, 5):
log_probs = []
lkj = LKJCholesky(dim, concentration=1., validate_args=True)
for i in range(2):
sample = lkj.sample()
sample_tril = tril_matrix_to_vec(sample, diag=-1)
log_prob = lkj.log_prob(sample)
log_abs_det_jacobian = torch.slogdet(jacobian(tril_cholesky_to_tril_corr, sample_tril)).logabsdet
log_probs.append(log_prob - log_abs_det_jacobian)
# for concentration=1., the density is uniform over the space of all
# correlation matrices.
if dim == 2:
# for dim=2, pdf = 0.5 (jacobian adjustment factor is 0.)
self.assertTrue(all(torch.allclose(x, torch.tensor(0.5).log(), atol=1e-10) for x in log_probs))
self.assertEqual(log_probs[0], log_probs[1])
invalid_sample = torch.cat([sample, sample.new_ones(1, dim)], dim=0)
self.assertRaises(ValueError, lambda: lkj.log_prob(invalid_sample))
def test_independent_shape(self):
for Dist, params in EXAMPLES:
for param in params:
base_dist = Dist(**param)
x = base_dist.sample()
base_log_prob_shape = base_dist.log_prob(x).shape
for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):
indep_dist = Independent(base_dist, reinterpreted_batch_ndims)
indep_log_prob_shape = base_log_prob_shape[:len(base_log_prob_shape) - reinterpreted_batch_ndims]
self.assertEqual(indep_dist.log_prob(x).shape, indep_log_prob_shape)
self.assertEqual(indep_dist.sample().shape, base_dist.sample().shape)
self.assertEqual(indep_dist.has_rsample, base_dist.has_rsample)
if indep_dist.has_rsample:
self.assertEqual(indep_dist.rsample().shape, base_dist.rsample().shape)
try:
self.assertEqual(indep_dist.enumerate_support().shape, base_dist.enumerate_support().shape)
self.assertEqual(indep_dist.mean.shape, base_dist.mean.shape)
except NotImplementedError:
pass
try:
self.assertEqual(indep_dist.variance.shape, base_dist.variance.shape)
except NotImplementedError:
pass
try:
self.assertEqual(indep_dist.entropy().shape, indep_log_prob_shape)
except NotImplementedError:
pass
def test_independent_expand(self):
for Dist, params in EXAMPLES:
for param in params:
base_dist = Dist(**param)
for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):
for s in [torch.Size(), torch.Size((2,)), torch.Size((2, 3))]:
indep_dist = Independent(base_dist, reinterpreted_batch_ndims)
expanded_shape = s + indep_dist.batch_shape
expanded = indep_dist.expand(expanded_shape)
expanded_sample = expanded.sample()
expected_shape = expanded_shape + indep_dist.event_shape
self.assertEqual(expanded_sample.shape, expected_shape)
self.assertEqual(expanded.log_prob(expanded_sample),
indep_dist.log_prob(expanded_sample))
self.assertEqual(expanded.event_shape, indep_dist.event_shape)
self.assertEqual(expanded.batch_shape, expanded_shape)
def test_cdf_icdf_inverse(self):
# Tests the invertibility property on the distributions
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(**param)
samples = dist.sample(sample_shape=(20,))
try:
cdf = dist.cdf(samples)
actual = dist.icdf(cdf)
except NotImplementedError:
continue
rel_error = torch.abs(actual - samples) / (1e-10 + torch.abs(samples))
self.assertLess(rel_error.max(), 1e-4, msg='\n'.join([
'{} example {}/{}, icdf(cdf(x)) != x'.format(Dist.__name__, i + 1, len(params)),
'x = {}'.format(samples),
'cdf(x) = {}'.format(cdf),
'icdf(cdf(x)) = {}'.format(actual),
]))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gamma_log_prob_at_boundary(self):
for concentration, log_prob in [(.5, inf), (1, 0), (2, -inf)]:
dist = Gamma(concentration, 1)
scipy_dist = scipy.stats.gamma(concentration)
self.assertAlmostEqual(dist.log_prob(0), log_prob)
self.assertAlmostEqual(dist.log_prob(0), scipy_dist.logpdf(0))
def test_cdf_log_prob(self):
# Tests if the differentiation of the CDF gives the PDF at a given value
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(**param)
samples = dist.sample()
if not dist.support.is_discrete:
samples.requires_grad_()
try:
cdfs = dist.cdf(samples)
pdfs = dist.log_prob(samples).exp()
except NotImplementedError:
continue
cdfs_derivative = grad(cdfs.sum(), [samples])[0] # this should not be wrapped in torch.abs()
self.assertEqual(cdfs_derivative, pdfs, msg='\n'.join([
'{} example {}/{}, d(cdf)/dx != pdf(x)'.format(Dist.__name__, i + 1, len(params)),
'x = {}'.format(samples),
'cdf = {}'.format(cdfs),
'pdf = {}'.format(pdfs),
'grad(cdf) = {}'.format(cdfs_derivative),
]))
def test_valid_parameter_broadcasting(self):
# Test correct broadcasting of parameter sizes for distributions that have multiple
# parameters.
# example type (distribution instance, expected sample shape)
valid_examples = [
(Normal(loc=torch.tensor([0., 0.]), scale=1),
(2,)),
(Normal(loc=0, scale=torch.tensor([1., 1.])),
(2,)),
(Normal(loc=torch.tensor([0., 0.]), scale=torch.tensor([1.])),
(2,)),
(Normal(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.], [1.]])),
(2, 2)),
(Normal(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.]])),
(1, 2)),
(Normal(loc=torch.tensor([0.]), scale=torch.tensor([[1.]])),
(1, 1)),
(FisherSnedecor(df1=torch.tensor([1., 1.]), df2=1),
(2,)),
(FisherSnedecor(df1=1, df2=torch.tensor([1., 1.])),
(2,)),
(FisherSnedecor(df1=torch.tensor([1., 1.]), df2=torch.tensor([1.])),
(2,)),
(FisherSnedecor(df1=torch.tensor([1., 1.]), df2=torch.tensor([[1.], [1.]])),
(2, 2)),
(FisherSnedecor(df1=torch.tensor([1., 1.]), df2=torch.tensor([[1.]])),
(1, 2)),
(FisherSnedecor(df1=torch.tensor([1.]), df2=torch.tensor([[1.]])),
(1, 1)),
(Gamma(concentration=torch.tensor([1., 1.]), rate=1),
(2,)),
(Gamma(concentration=1, rate=torch.tensor([1., 1.])),
(2,)),
(Gamma(concentration=torch.tensor([1., 1.]), rate=torch.tensor([[1.], [1.], [1.]])),
(3, 2)),
(Gamma(concentration=torch.tensor([1., 1.]), rate=torch.tensor([[1.], [1.]])),
(2, 2)),
(Gamma(concentration=torch.tensor([1., 1.]), rate=torch.tensor([[1.]])),
(1, 2)),
(Gamma(concentration=torch.tensor([1.]), rate=torch.tensor([[1.]])),
(1, 1)),
(Gumbel(loc=torch.tensor([0., 0.]), scale=1),
(2,)),
(Gumbel(loc=0, scale=torch.tensor([1., 1.])),
(2,)),
(Gumbel(loc=torch.tensor([0., 0.]), scale=torch.tensor([1.])),
(2,)),
(Gumbel(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.], [1.]])),
(2, 2)),
(Gumbel(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.]])),
(1, 2)),
(Gumbel(loc=torch.tensor([0.]), scale=torch.tensor([[1.]])),
(1, 1)),
(Kumaraswamy(concentration1=torch.tensor([1., 1.]), concentration0=1.),
(2,)),
(Kumaraswamy(concentration1=1, concentration0=torch.tensor([1., 1.])),
(2, )),
(Kumaraswamy(concentration1=torch.tensor([1., 1.]), concentration0=torch.tensor([1.])),
(2,)),
(Kumaraswamy(concentration1=torch.tensor([1., 1.]), concentration0=torch.tensor([[1.], [1.]])),
(2, 2)),
(Kumaraswamy(concentration1=torch.tensor([1., 1.]), concentration0=torch.tensor([[1.]])),
(1, 2)),
(Kumaraswamy(concentration1=torch.tensor([1.]), concentration0=torch.tensor([[1.]])),
(1, 1)),
(Laplace(loc=torch.tensor([0., 0.]), scale=1),
(2,)),
(Laplace(loc=0, scale=torch.tensor([1., 1.])),
(2,)),
(Laplace(loc=torch.tensor([0., 0.]), scale=torch.tensor([1.])),
(2,)),
(Laplace(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.], [1.]])),
(2, 2)),
(Laplace(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.]])),
(1, 2)),
(Laplace(loc=torch.tensor([0.]), scale=torch.tensor([[1.]])),
(1, 1)),
(Pareto(scale=torch.tensor([1., 1.]), alpha=1),
(2,)),
(Pareto(scale=1, alpha=torch.tensor([1., 1.])),
(2,)),
(Pareto(scale=torch.tensor([1., 1.]), alpha=torch.tensor([1.])),
(2,)),
(Pareto(scale=torch.tensor([1., 1.]), alpha=torch.tensor([[1.], [1.]])),
(2, 2)),
(Pareto(scale=torch.tensor([1., 1.]), alpha=torch.tensor([[1.]])),
(1, 2)),
(Pareto(scale=torch.tensor([1.]), alpha=torch.tensor([[1.]])),
(1, 1)),
(StudentT(df=torch.tensor([1., 1.]), loc=1),
(2,)),
(StudentT(df=1, scale=torch.tensor([1., 1.])),
(2,)),
(StudentT(df=torch.tensor([1., 1.]), loc=torch.tensor([1.])),
(2,)),
(StudentT(df=torch.tensor([1., 1.]), scale=torch.tensor([[1.], [1.]])),
(2, 2)),
(StudentT(df=torch.tensor([1., 1.]), loc=torch.tensor([[1.]])),
(1, 2)),
(StudentT(df=torch.tensor([1.]), scale=torch.tensor([[1.]])),
(1, 1)),
(StudentT(df=1., loc=torch.zeros(5, 1), scale=torch.ones(3)),
(5, 3)),
]
for dist, expected_size in valid_examples:
actual_size = dist.sample().size()
self.assertEqual(actual_size, expected_size,
msg='{} actual size: {} != expected size: {}'.format(dist, actual_size, expected_size))
sample_shape = torch.Size((2,))
expected_size = sample_shape + expected_size
actual_size = dist.sample(sample_shape).size()
self.assertEqual(actual_size, expected_size,
msg='{} actual size: {} != expected size: {}'.format(dist, actual_size, expected_size))
def test_invalid_parameter_broadcasting(self):
# invalid broadcasting cases; should throw error
# example type (distribution class, distribution params)
invalid_examples = [
(Normal, {
'loc': torch.tensor([[0, 0]]),
'scale': torch.tensor([1, 1, 1, 1])
}),
(Normal, {
'loc': torch.tensor([[[0, 0, 0], [0, 0, 0]]]),
'scale': torch.tensor([1, 1])
}),
(FisherSnedecor, {
'df1': torch.tensor([1, 1]),
'df2': torch.tensor([1, 1, 1]),
}),
(Gumbel, {
'loc': torch.tensor([[0, 0]]),
'scale': torch.tensor([1, 1, 1, 1])
}),
(Gumbel, {
'loc': torch.tensor([[[0, 0, 0], [0, 0, 0]]]),
'scale': torch.tensor([1, 1])
}),
(Gamma, {
'concentration': torch.tensor([0, 0]),
'rate': torch.tensor([1, 1, 1])
}),
(Kumaraswamy, {
'concentration1': torch.tensor([[1, 1]]),
'concentration0': torch.tensor([1, 1, 1, 1])
}),
(Kumaraswamy, {
'concentration1': torch.tensor([[[1, 1, 1], [1, 1, 1]]]),
'concentration0': torch.tensor([1, 1])
}),
(Laplace, {
'loc': torch.tensor([0, 0]),
'scale': torch.tensor([1, 1, 1])
}),
(Pareto, {
'scale': torch.tensor([1, 1]),
'alpha': torch.tensor([1, 1, 1])
}),
(StudentT, {
'df': torch.tensor([1., 1.]),
'scale': torch.tensor([1., 1., 1.])
}),
(StudentT, {
'df': torch.tensor([1., 1.]),
'loc': torch.tensor([1., 1., 1.])
})
]
for dist, kwargs in invalid_examples:
self.assertRaises(RuntimeError, dist, **kwargs)
def _test_discrete_distribution_mode(self, dist, sanitized_mode, batch_isfinite):
# We cannot easily check the mode for discrete distributions, but we can look left and right
# to ensure the log probability is smaller than at the mode.
for step in [-1, 1]:
log_prob_mode = dist.log_prob(sanitized_mode)
if isinstance(dist, OneHotCategorical):
idx = (dist._categorical.mode + 1) % dist.probs.shape[-1]
other = torch.nn.functional.one_hot(idx, num_classes=dist.probs.shape[-1]).to(dist.mode)
else:
other = dist.mode + step
mask = batch_isfinite & dist.support.check(other)
self.assertTrue(mask.any() or dist.mode.unique().numel() == 1)
# Add a dimension to the right if the event shape is not a scalar, e.g. OneHotCategorical.
other = torch.where(mask[..., None] if mask.ndim < other.ndim else mask, other, dist.sample())
log_prob_other = dist.log_prob(other)
delta = log_prob_mode - log_prob_other
self.assertTrue((-1e-12 < delta[mask].detach()).all()) # Allow up to 1e-12 rounding error.
def _test_continuous_distribution_mode(self, dist, sanitized_mode, batch_isfinite):
if isinstance(dist, Wishart):
return
# We perturb the mode in the unconstrained space and expect the log probability to decrease.
num_points = 10
transform = transform_to(dist.support)
unconstrained_mode = transform.inv(sanitized_mode)
perturbation = 1e-5 * (torch.rand((num_points,) + unconstrained_mode.shape) - 0.5)
perturbed_mode = transform(perturbation + unconstrained_mode)
log_prob_mode = dist.log_prob(sanitized_mode)
log_prob_other = dist.log_prob(perturbed_mode)
delta = log_prob_mode - log_prob_other
# We pass the test with a small tolerance to allow for rounding and manually set the
# difference to zero if both log probs are infinite with the same sign.
both_infinite_with_same_sign = (log_prob_mode == log_prob_other) & (log_prob_mode.abs() == inf)
delta[both_infinite_with_same_sign] = 0.
ordering = (delta > -1e-12).all(axis=0)
self.assertTrue(ordering[batch_isfinite].all())
def test_mode(self):
discrete_distributions = (
Bernoulli, Binomial, Categorical, Geometric, NegativeBinomial, OneHotCategorical, Poisson,
)
no_mode_available = (
ContinuousBernoulli, LKJCholesky, LogisticNormal, MixtureSameFamily, Multinomial,
RelaxedBernoulli, RelaxedOneHotCategorical,
)
for dist_cls, params in EXAMPLES:
for param in params:
dist = dist_cls(**param)
if isinstance(dist, no_mode_available) or type(dist) is TransformedDistribution:
with self.assertRaises(NotImplementedError):
dist.mode
continue
# Check that either all or no elements in the event shape are nan: the mode cannot be
# defined for part of an event.
isfinite = dist.mode.isfinite().reshape(dist.batch_shape + (dist.event_shape.numel(),))
batch_isfinite = isfinite.all(axis=-1)
self.assertTrue((batch_isfinite | ~isfinite.any(axis=-1)).all())
# We sanitize undefined modes by sampling from the distribution.
sanitized_mode = torch.where(~dist.mode.isnan(), dist.mode, dist.sample())
if isinstance(dist, discrete_distributions):
self._test_discrete_distribution_mode(dist, sanitized_mode, batch_isfinite)
else:
self._test_continuous_distribution_mode(dist, sanitized_mode, batch_isfinite)
self.assertFalse(dist.log_prob(sanitized_mode).isnan().any())
# These tests are only needed for a few distributions that implement custom
# reparameterized gradients. Most .rsample() implementations simply rely on
# the reparameterization trick and do not need to be tested for accuracy.
class TestRsample(DistributionsTestCase):
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_gamma(self):
num_samples = 100
for alpha in [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]:
alphas = torch.tensor([alpha] * num_samples, dtype=torch.float, requires_grad=True)
betas = alphas.new_ones(num_samples)
x = Gamma(alphas, betas).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = alphas.grad[ind].numpy()
# Compare with expected gradient dx/dalpha along constant cdf(x,alpha).
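# By implicit differentiation of CDF(x(alpha), alpha) = const, dx/dalpha = -(dCDF/dalpha) / (dCDF/dx);
# the numerator is estimated with a central finite difference in alpha and the denominator is the pdf.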
cdf = scipy.stats.gamma.cdf
pdf = scipy.stats.gamma.pdf
eps = 0.01 * alpha / (1.0 + alpha ** 0.5)
cdf_alpha = (cdf(x, alpha + eps) - cdf(x, alpha - eps)) / (2 * eps)
cdf_x = pdf(x, alpha)
expected_grad = -cdf_alpha / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(np.max(rel_error), 0.0005, '\n'.join([
'Bad gradient dx/dalpha for x ~ Gamma({}, 1)'.format(alpha),
'x {}'.format(x),
'expected {}'.format(expected_grad),
'actual {}'.format(actual_grad),
'rel error {}'.format(rel_error),
'max error {}'.format(rel_error.max()),
'at alpha={}, x={}'.format(alpha, x[rel_error.argmax()]),
]))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_chi2(self):
num_samples = 100
for df in [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]:
dfs = torch.tensor([df] * num_samples, dtype=torch.float, requires_grad=True)
x = Chi2(dfs).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = dfs.grad[ind].numpy()
# Compare with expected gradient dx/ddf along constant cdf(x,df).
cdf = scipy.stats.chi2.cdf
pdf = scipy.stats.chi2.pdf
eps = 0.01 * df / (1.0 + df ** 0.5)
cdf_df = (cdf(x, df + eps) - cdf(x, df - eps)) / (2 * eps)
cdf_x = pdf(x, df)
expected_grad = -cdf_df / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(np.max(rel_error), 0.001, '\n'.join([
'Bad gradient dx/ddf for x ~ Chi2({})'.format(df),
'x {}'.format(x),
'expected {}'.format(expected_grad),
'actual {}'.format(actual_grad),
'rel error {}'.format(rel_error),
'max error {}'.format(rel_error.max()),
]))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_dirichlet_on_diagonal(self):
num_samples = 20
grid = [1e-1, 1e0, 1e1]
for a0, a1, a2 in product(grid, grid, grid):
alphas = torch.tensor([[a0, a1, a2]] * num_samples, dtype=torch.float, requires_grad=True)
x = Dirichlet(alphas).rsample()[:, 0]
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = alphas.grad[ind].numpy()[:, 0]
# Compare with expected gradient dx/dalpha0 along constant cdf(x,alpha).
# This reduces to a distribution Beta(alpha[0], alpha[1] + alpha[2]).
cdf = scipy.stats.beta.cdf
pdf = scipy.stats.beta.pdf
alpha, beta = a0, a1 + a2
eps = 0.01 * alpha / (1.0 + np.sqrt(alpha))
cdf_alpha = (cdf(x, alpha + eps, beta) - cdf(x, alpha - eps, beta)) / (2 * eps)
cdf_x = pdf(x, alpha, beta)
expected_grad = -cdf_alpha / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(np.max(rel_error), 0.001, '\n'.join([
'Bad gradient dx[0]/dalpha[0] for Dirichlet([{}, {}, {}])'.format(a0, a1, a2),
'x {}'.format(x),
'expected {}'.format(expected_grad),
'actual {}'.format(actual_grad),
'rel error {}'.format(rel_error),
'max error {}'.format(rel_error.max()),
'at x={}'.format(x[rel_error.argmax()]),
]))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_beta_wrt_alpha(self):
num_samples = 20
grid = [1e-2, 1e-1, 1e0, 1e1, 1e2]
for con1, con0 in product(grid, grid):
con1s = torch.tensor([con1] * num_samples, dtype=torch.float, requires_grad=True)
con0s = con1s.new_tensor([con0] * num_samples)
x = Beta(con1s, con0s).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = con1s.grad[ind].numpy()
# Compare with expected gradient dx/dcon1 along constant cdf(x,con1,con0).
cdf = scipy.stats.beta.cdf
pdf = scipy.stats.beta.pdf
eps = 0.01 * con1 / (1.0 + np.sqrt(con1))
cdf_alpha = (cdf(x, con1 + eps, con0) - cdf(x, con1 - eps, con0)) / (2 * eps)
cdf_x = pdf(x, con1, con0)
expected_grad = -cdf_alpha / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(np.max(rel_error), 0.005, '\n'.join([
'Bad gradient dx/dcon1 for x ~ Beta({}, {})'.format(con1, con0),
'x {}'.format(x),
'expected {}'.format(expected_grad),
'actual {}'.format(actual_grad),
'rel error {}'.format(rel_error),
'max error {}'.format(rel_error.max()),
'at x = {}'.format(x[rel_error.argmax()]),
]))
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_beta_wrt_beta(self):
num_samples = 20
grid = [1e-2, 1e-1, 1e0, 1e1, 1e2]
for con1, con0 in product(grid, grid):
con0s = torch.tensor([con0] * num_samples, dtype=torch.float, requires_grad=True)
con1s = con0s.new_tensor([con1] * num_samples)
x = Beta(con1s, con0s).rsample()
x.sum().backward()
x, ind = x.sort()
x = x.detach().numpy()
actual_grad = con0s.grad[ind].numpy()
# Compare with expected gradient dx/dcon0 along constant cdf(x,con1,con0).
cdf = scipy.stats.beta.cdf
pdf = scipy.stats.beta.pdf
eps = 0.01 * con0 / (1.0 + np.sqrt(con0))
cdf_beta = (cdf(x, con1, con0 + eps) - cdf(x, con1, con0 - eps)) / (2 * eps)
cdf_x = pdf(x, con1, con0)
expected_grad = -cdf_beta / cdf_x
rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)
self.assertLess(np.max(rel_error), 0.005, '\n'.join([
'Bad gradient dx/dcon0 for x ~ Beta({}, {})'.format(con1, con0),
'x {}'.format(x),
'expected {}'.format(expected_grad),
'actual {}'.format(actual_grad),
'rel error {}'.format(rel_error),
'max error {}'.format(rel_error.max()),
'at x = {!r}'.format(x[rel_error.argmax()]),
]))
def test_dirichlet_multivariate(self):
alpha_crit = 0.25 * (5.0 ** 0.5 - 1.0)
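# alpha_crit is the positive root of 1 - 2*alpha - 4*alpha**2, where the expected gradient below
# changes sign, so we probe alphas on both sides of it.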
num_samples = 100000
for shift in [-0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.10]:
alpha = alpha_crit + shift
alpha = torch.tensor([alpha], dtype=torch.float, requires_grad=True)
alpha_vec = torch.cat([alpha, alpha, alpha.new([1])])
z = Dirichlet(alpha_vec.expand(num_samples, 3)).rsample()
mean_z3 = 1.0 / (2.0 * alpha + 1.0)
loss = torch.pow(z[:, 2] - mean_z3, 2.0).mean()
actual_grad = grad(loss, [alpha])[0]
# Compute expected gradient by hand.
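# For z ~ Dirichlet([alpha, alpha, 1]) the loss estimates Var(z[2]) = alpha / ((1 + alpha) * (1 + 2*alpha)**2),
# and differentiating that expression w.r.t. alpha gives the closed form below.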
num = 1.0 - 2.0 * alpha - 4.0 * alpha**2
den = (1.0 + alpha)**2 * (1.0 + 2.0 * alpha)**3
expected_grad = num / den
self.assertEqual(actual_grad, expected_grad, atol=0.002, rtol=0, msg='\n'.join([
"alpha = alpha_c + %.2g" % shift,
"expected_grad: %.5g" % expected_grad,
"actual_grad: %.5g" % actual_grad,
"error = %.2g" % torch.abs(expected_grad - actual_grad).max(),
]))
def test_dirichlet_tangent_field(self):
num_samples = 20
alpha_grid = [0.5, 1.0, 2.0]
# v = dx/dalpha[0] is the reparameterized gradient aka tangent field.
def compute_v(x, alpha):
return torch.stack([
_Dirichlet_backward(x, alpha, torch.eye(3, 3)[i].expand_as(x))[:, 0]
for i in range(3)
], dim=-1)
for a1, a2, a3 in product(alpha_grid, alpha_grid, alpha_grid):
alpha = torch.tensor([a1, a2, a3], requires_grad=True).expand(num_samples, 3)
x = Dirichlet(alpha).rsample()
dlogp_da = grad([Dirichlet(alpha).log_prob(x.detach()).sum()],
[alpha], retain_graph=True)[0][:, 0]
dlogp_dx = grad([Dirichlet(alpha.detach()).log_prob(x).sum()],
[x], retain_graph=True)[0]
v = torch.stack([grad([x[:, i].sum()], [alpha], retain_graph=True)[0][:, 0]
for i in range(3)], dim=-1)
# Compute remaining properties by finite difference.
self.assertEqual(compute_v(x, alpha), v, msg='Bug in compute_v() helper')
# dx is an arbitrary orthonormal basis tangent to the simplex.
dx = torch.tensor([[2., -1., -1.], [0., 1., -1.]])
dx /= dx.norm(2, -1, True)
eps = 1e-2 * x.min(-1, True)[0] # avoid boundary
dv0 = (compute_v(x + eps * dx[0], alpha) - compute_v(x - eps * dx[0], alpha)) / (2 * eps)
dv1 = (compute_v(x + eps * dx[1], alpha) - compute_v(x - eps * dx[1], alpha)) / (2 * eps)
div_v = (dv0 * dx[0] + dv1 * dx[1]).sum(-1)
# This is a modification of the standard continuity equation, using the product rule to allow
# expression in terms of log_prob rather than the less numerically stable log_prob.exp().
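# In symbols: d(log p)/dalpha + v . d(log p)/dx + div(v) should vanish for a valid tangent field v.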
error = dlogp_da + (dlogp_dx * v).sum(-1) + div_v
self.assertLess(torch.abs(error).max(), 0.005, '\n'.join([
'Dirichlet([{}, {}, {}]) gradient violates continuity equation:'.format(a1, a2, a3),
'error = {}'.format(error),
]))
class TestDistributionShapes(DistributionsTestCase):
def setUp(self):
super(TestDistributionShapes, self).setUp()
self.scalar_sample = 1
self.tensor_sample_1 = torch.ones(3, 2)
self.tensor_sample_2 = torch.ones(3, 2, 3)
def tearDown(self):
super(TestDistributionShapes, self).tearDown()
def test_entropy_shape(self):
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(validate_args=False, **param)
try:
actual_shape = dist.entropy().size()
expected_shape = dist.batch_shape if dist.batch_shape else torch.Size()
message = '{} example {}/{}, shape mismatch. expected {}, actual {}'.format(
Dist.__name__, i + 1, len(params), expected_shape, actual_shape)
self.assertEqual(actual_shape, expected_shape, msg=message)
except NotImplementedError:
continue
def test_bernoulli_shape_scalar_params(self):
bernoulli = Bernoulli(0.3)
self.assertEqual(bernoulli._batch_shape, torch.Size())
self.assertEqual(bernoulli._event_shape, torch.Size())
self.assertEqual(bernoulli.sample().size(), torch.Size())
self.assertEqual(bernoulli.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, bernoulli.log_prob, self.scalar_sample)
self.assertEqual(bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(bernoulli.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_bernoulli_shape_tensor_params(self):
bernoulli = Bernoulli(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))
self.assertEqual(bernoulli._batch_shape, torch.Size((3, 2)))
self.assertEqual(bernoulli._event_shape, torch.Size(()))
self.assertEqual(bernoulli.sample().size(), torch.Size((3, 2)))
self.assertEqual(bernoulli.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))
self.assertEqual(bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, bernoulli.log_prob, self.tensor_sample_2)
self.assertEqual(bernoulli.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))
def test_geometric_shape_scalar_params(self):
geometric = Geometric(0.3)
self.assertEqual(geometric._batch_shape, torch.Size())
self.assertEqual(geometric._event_shape, torch.Size())
self.assertEqual(geometric.sample().size(), torch.Size())
self.assertEqual(geometric.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, geometric.log_prob, self.scalar_sample)
self.assertEqual(geometric.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(geometric.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_geometric_shape_tensor_params(self):
geometric = Geometric(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))
self.assertEqual(geometric._batch_shape, torch.Size((3, 2)))
self.assertEqual(geometric._event_shape, torch.Size(()))
self.assertEqual(geometric.sample().size(), torch.Size((3, 2)))
self.assertEqual(geometric.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))
self.assertEqual(geometric.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, geometric.log_prob, self.tensor_sample_2)
self.assertEqual(geometric.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))
def test_beta_shape_scalar_params(self):
dist = Beta(0.1, 0.1)
self.assertEqual(dist._batch_shape, torch.Size())
self.assertEqual(dist._event_shape, torch.Size())
self.assertEqual(dist.sample().size(), torch.Size())
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, dist.log_prob, self.scalar_sample)
self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_beta_shape_tensor_params(self):
dist = Beta(torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]))
self.assertEqual(dist._batch_shape, torch.Size((3, 2)))
self.assertEqual(dist._event_shape, torch.Size(()))
self.assertEqual(dist.sample().size(), torch.Size((3, 2)))
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))
self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)
self.assertEqual(dist.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))
def test_binomial_shape(self):
dist = Binomial(10, torch.tensor([0.6, 0.3]))
self.assertEqual(dist._batch_shape, torch.Size((2,)))
self.assertEqual(dist._event_shape, torch.Size(()))
self.assertEqual(dist.sample().size(), torch.Size((2,)))
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 2)))
self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)
def test_binomial_shape_vectorized_n(self):
dist = Binomial(torch.tensor([[10, 3, 1], [4, 8, 4]]), torch.tensor([0.6, 0.3, 0.1]))
self.assertEqual(dist._batch_shape, torch.Size((2, 3)))
self.assertEqual(dist._event_shape, torch.Size(()))
self.assertEqual(dist.sample().size(), torch.Size((2, 3)))
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 2, 3)))
self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1)
def test_multinomial_shape(self):
dist = Multinomial(10, torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))
self.assertEqual(dist._batch_shape, torch.Size((3,)))
self.assertEqual(dist._event_shape, torch.Size((2,)))
self.assertEqual(dist.sample().size(), torch.Size((3, 2)))
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))
self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3,)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)
self.assertEqual(dist.log_prob(torch.ones(3, 1, 2)).size(), torch.Size((3, 3)))
def test_categorical_shape(self):
# unbatched
dist = Categorical(torch.tensor([0.6, 0.3, 0.1]))
self.assertEqual(dist._batch_shape, torch.Size(()))
self.assertEqual(dist._event_shape, torch.Size(()))
self.assertEqual(dist.sample().size(), torch.Size())
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2,)))
self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
self.assertEqual(dist.log_prob(torch.ones(3, 1)).size(), torch.Size((3, 1)))
# batched
dist = Categorical(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))
self.assertEqual(dist._batch_shape, torch.Size((3,)))
self.assertEqual(dist._event_shape, torch.Size(()))
self.assertEqual(dist.sample().size(), torch.Size((3,)))
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3,)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1)
self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
self.assertEqual(dist.log_prob(torch.ones(3, 1)).size(), torch.Size((3, 3)))
def test_one_hot_categorical_shape(self):
# unbatched
dist = OneHotCategorical(torch.tensor([0.6, 0.3, 0.1]))
self.assertEqual(dist._batch_shape, torch.Size(()))
self.assertEqual(dist._event_shape, torch.Size((3,)))
self.assertEqual(dist.sample().size(), torch.Size((3,)))
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1)
sample = torch.tensor([0., 1., 0.]).expand(3, 2, 3)
self.assertEqual(dist.log_prob(sample).size(), torch.Size((3, 2,)))
self.assertEqual(dist.log_prob(dist.enumerate_support()).size(), torch.Size((3,)))
sample = torch.eye(3)
self.assertEqual(dist.log_prob(sample).size(), torch.Size((3,)))
# batched
dist = OneHotCategorical(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))
self.assertEqual(dist._batch_shape, torch.Size((3,)))
self.assertEqual(dist._event_shape, torch.Size((2,)))
self.assertEqual(dist.sample().size(), torch.Size((3, 2)))
self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))
sample = torch.tensor([0., 1.])
self.assertEqual(dist.log_prob(sample).size(), torch.Size((3,)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)
self.assertEqual(dist.log_prob(dist.enumerate_support()).size(), torch.Size((2, 3)))
sample = torch.tensor([0., 1.]).expand(3, 1, 2)
self.assertEqual(dist.log_prob(sample).size(), torch.Size((3, 3)))
def test_cauchy_shape_scalar_params(self):
cauchy = Cauchy(0, 1)
self.assertEqual(cauchy._batch_shape, torch.Size())
self.assertEqual(cauchy._event_shape, torch.Size())
self.assertEqual(cauchy.sample().size(), torch.Size())
self.assertEqual(cauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, cauchy.log_prob, self.scalar_sample)
self.assertEqual(cauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(cauchy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_cauchy_shape_tensor_params(self):
cauchy = Cauchy(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
self.assertEqual(cauchy._batch_shape, torch.Size((2,)))
self.assertEqual(cauchy._event_shape, torch.Size(()))
self.assertEqual(cauchy.sample().size(), torch.Size((2,)))
self.assertEqual(cauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))
self.assertEqual(cauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, cauchy.log_prob, self.tensor_sample_2)
self.assertEqual(cauchy.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_halfcauchy_shape_scalar_params(self):
halfcauchy = HalfCauchy(1)
self.assertEqual(halfcauchy._batch_shape, torch.Size())
self.assertEqual(halfcauchy._event_shape, torch.Size())
self.assertEqual(halfcauchy.sample().size(), torch.Size())
self.assertEqual(halfcauchy.sample(torch.Size((3, 2))).size(),
torch.Size((3, 2)))
self.assertRaises(ValueError, halfcauchy.log_prob, self.scalar_sample)
self.assertEqual(halfcauchy.log_prob(self.tensor_sample_1).size(),
torch.Size((3, 2)))
self.assertEqual(halfcauchy.log_prob(self.tensor_sample_2).size(),
torch.Size((3, 2, 3)))
def test_halfcauchy_shape_tensor_params(self):
halfcauchy = HalfCauchy(torch.tensor([1., 1.]))
self.assertEqual(halfcauchy._batch_shape, torch.Size((2,)))
self.assertEqual(halfcauchy._event_shape, torch.Size(()))
self.assertEqual(halfcauchy.sample().size(), torch.Size((2,)))
self.assertEqual(halfcauchy.sample(torch.Size((3, 2))).size(),
torch.Size((3, 2, 2)))
self.assertEqual(halfcauchy.log_prob(self.tensor_sample_1).size(),
torch.Size((3, 2)))
self.assertRaises(ValueError, halfcauchy.log_prob, self.tensor_sample_2)
self.assertEqual(halfcauchy.log_prob(torch.ones(2, 1)).size(),
torch.Size((2, 2)))
def test_dirichlet_shape(self):
dist = Dirichlet(torch.tensor([[0.6, 0.3], [1.6, 1.3], [2.6, 2.3]]))
self.assertEqual(dist._batch_shape, torch.Size((3,)))
self.assertEqual(dist._event_shape, torch.Size((2,)))
self.assertEqual(dist.sample().size(), torch.Size((3, 2)))
self.assertEqual(dist.sample((5, 4)).size(), torch.Size((5, 4, 3, 2)))
simplex_sample = self.tensor_sample_1 / self.tensor_sample_1.sum(-1, keepdim=True)
self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3,)))
self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)
simplex_sample = torch.ones(3, 1, 2)
simplex_sample = simplex_sample / simplex_sample.sum(-1).unsqueeze(-1)
self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3, 3)))
def test_mixture_same_family_shape(self):
dist = MixtureSameFamily(Categorical(torch.rand(5)),
Normal(torch.randn(5), torch.rand(5)))
self.assertEqual(dist._batch_shape, torch.Size())
self.assertEqual(dist._event_shape, torch.Size())
self.assertEqual(dist.sample().size(), torch.Size())
self.assertEqual(dist.sample((5, 4)).size(), torch.Size((5, 4)))
self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_gamma_shape_scalar_params(self):
gamma = Gamma(1, 1)
self.assertEqual(gamma._batch_shape, torch.Size())
self.assertEqual(gamma._event_shape, torch.Size())
self.assertEqual(gamma.sample().size(), torch.Size())
self.assertEqual(gamma.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertEqual(gamma.log_prob(self.scalar_sample).size(), torch.Size())
self.assertEqual(gamma.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(gamma.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_gamma_shape_tensor_params(self):
gamma = Gamma(torch.tensor([1., 1.]), torch.tensor([1., 1.]))
self.assertEqual(gamma._batch_shape, torch.Size((2,)))
self.assertEqual(gamma._event_shape, torch.Size(()))
self.assertEqual(gamma.sample().size(), torch.Size((2,)))
self.assertEqual(gamma.sample((3, 2)).size(), torch.Size((3, 2, 2)))
self.assertEqual(gamma.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, gamma.log_prob, self.tensor_sample_2)
self.assertEqual(gamma.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_chi2_shape_scalar_params(self):
chi2 = Chi2(1)
self.assertEqual(chi2._batch_shape, torch.Size())
self.assertEqual(chi2._event_shape, torch.Size())
self.assertEqual(chi2.sample().size(), torch.Size())
self.assertEqual(chi2.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertEqual(chi2.log_prob(self.scalar_sample).size(), torch.Size())
self.assertEqual(chi2.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(chi2.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_chi2_shape_tensor_params(self):
chi2 = Chi2(torch.tensor([1., 1.]))
self.assertEqual(chi2._batch_shape, torch.Size((2,)))
self.assertEqual(chi2._event_shape, torch.Size(()))
self.assertEqual(chi2.sample().size(), torch.Size((2,)))
self.assertEqual(chi2.sample((3, 2)).size(), torch.Size((3, 2, 2)))
self.assertEqual(chi2.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, chi2.log_prob, self.tensor_sample_2)
self.assertEqual(chi2.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_studentT_shape_scalar_params(self):
st = StudentT(1)
self.assertEqual(st._batch_shape, torch.Size())
self.assertEqual(st._event_shape, torch.Size())
self.assertEqual(st.sample().size(), torch.Size())
self.assertEqual(st.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, st.log_prob, self.scalar_sample)
self.assertEqual(st.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(st.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_studentT_shape_tensor_params(self):
st = StudentT(torch.tensor([1., 1.]))
self.assertEqual(st._batch_shape, torch.Size((2,)))
self.assertEqual(st._event_shape, torch.Size(()))
self.assertEqual(st.sample().size(), torch.Size((2,)))
self.assertEqual(st.sample((3, 2)).size(), torch.Size((3, 2, 2)))
self.assertEqual(st.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, st.log_prob, self.tensor_sample_2)
self.assertEqual(st.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_pareto_shape_scalar_params(self):
pareto = Pareto(1, 1)
self.assertEqual(pareto._batch_shape, torch.Size())
self.assertEqual(pareto._event_shape, torch.Size())
self.assertEqual(pareto.sample().size(), torch.Size())
self.assertEqual(pareto.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertEqual(pareto.log_prob(self.tensor_sample_1 + 1).size(), torch.Size((3, 2)))
self.assertEqual(pareto.log_prob(self.tensor_sample_2 + 1).size(), torch.Size((3, 2, 3)))
def test_gumbel_shape_scalar_params(self):
gumbel = Gumbel(1, 1)
self.assertEqual(gumbel._batch_shape, torch.Size())
self.assertEqual(gumbel._event_shape, torch.Size())
self.assertEqual(gumbel.sample().size(), torch.Size())
self.assertEqual(gumbel.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertEqual(gumbel.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(gumbel.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_kumaraswamy_shape_scalar_params(self):
kumaraswamy = Kumaraswamy(1, 1)
self.assertEqual(kumaraswamy._batch_shape, torch.Size())
self.assertEqual(kumaraswamy._event_shape, torch.Size())
self.assertEqual(kumaraswamy.sample().size(), torch.Size())
self.assertEqual(kumaraswamy.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertEqual(kumaraswamy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(kumaraswamy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_vonmises_shape_tensor_params(self):
von_mises = VonMises(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
self.assertEqual(von_mises._batch_shape, torch.Size((2,)))
self.assertEqual(von_mises._event_shape, torch.Size(()))
self.assertEqual(von_mises.sample().size(), torch.Size((2,)))
self.assertEqual(von_mises.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))
self.assertEqual(von_mises.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(von_mises.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_vonmises_shape_scalar_params(self):
von_mises = VonMises(0., 1.)
self.assertEqual(von_mises._batch_shape, torch.Size())
self.assertEqual(von_mises._event_shape, torch.Size())
self.assertEqual(von_mises.sample().size(), torch.Size())
self.assertEqual(von_mises.sample(torch.Size((3, 2))).size(),
torch.Size((3, 2)))
self.assertEqual(von_mises.log_prob(self.tensor_sample_1).size(),
torch.Size((3, 2)))
self.assertEqual(von_mises.log_prob(self.tensor_sample_2).size(),
torch.Size((3, 2, 3)))
def test_weibull_scale_scalar_params(self):
weibull = Weibull(1, 1)
self.assertEqual(weibull._batch_shape, torch.Size())
self.assertEqual(weibull._event_shape, torch.Size())
self.assertEqual(weibull.sample().size(), torch.Size())
self.assertEqual(weibull.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertEqual(weibull.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(weibull.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_wishart_shape_scalar_params(self):
wishart = Wishart(torch.tensor(1), torch.tensor([[1.]]))
self.assertEqual(wishart._batch_shape, torch.Size())
self.assertEqual(wishart._event_shape, torch.Size((1, 1)))
self.assertEqual(wishart.sample().size(), torch.Size((1, 1)))
self.assertEqual(wishart.sample((3, 2)).size(), torch.Size((3, 2, 1, 1)))
self.assertRaises(ValueError, wishart.log_prob, self.scalar_sample)
def test_wishart_shape_tensor_params(self):
wishart = Wishart(torch.tensor([1., 1.]), torch.tensor([[[1.]], [[1.]]]))
self.assertEqual(wishart._batch_shape, torch.Size((2,)))
self.assertEqual(wishart._event_shape, torch.Size((1, 1)))
self.assertEqual(wishart.sample().size(), torch.Size((2, 1, 1)))
self.assertEqual(wishart.sample((3, 2)).size(), torch.Size((3, 2, 2, 1, 1)))
self.assertRaises(ValueError, wishart.log_prob, self.tensor_sample_2)
self.assertEqual(wishart.log_prob(torch.ones(2, 1, 1)).size(), torch.Size((2,)))
def test_normal_shape_scalar_params(self):
normal = Normal(0, 1)
self.assertEqual(normal._batch_shape, torch.Size())
self.assertEqual(normal._event_shape, torch.Size())
self.assertEqual(normal.sample().size(), torch.Size())
self.assertEqual(normal.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, normal.log_prob, self.scalar_sample)
self.assertEqual(normal.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(normal.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_normal_shape_tensor_params(self):
normal = Normal(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
self.assertEqual(normal._batch_shape, torch.Size((2,)))
self.assertEqual(normal._event_shape, torch.Size(()))
self.assertEqual(normal.sample().size(), torch.Size((2,)))
self.assertEqual(normal.sample((3, 2)).size(), torch.Size((3, 2, 2)))
self.assertEqual(normal.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, normal.log_prob, self.tensor_sample_2)
self.assertEqual(normal.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_uniform_shape_scalar_params(self):
uniform = Uniform(0, 1)
self.assertEqual(uniform._batch_shape, torch.Size())
self.assertEqual(uniform._event_shape, torch.Size())
self.assertEqual(uniform.sample().size(), torch.Size())
self.assertEqual(uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, uniform.log_prob, self.scalar_sample)
self.assertEqual(uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(uniform.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_uniform_shape_tensor_params(self):
uniform = Uniform(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
self.assertEqual(uniform._batch_shape, torch.Size((2,)))
self.assertEqual(uniform._event_shape, torch.Size(()))
self.assertEqual(uniform.sample().size(), torch.Size((2,)))
self.assertEqual(uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))
self.assertEqual(uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, uniform.log_prob, self.tensor_sample_2)
self.assertEqual(uniform.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_exponential_shape_scalar_param(self):
expon = Exponential(1.)
self.assertEqual(expon._batch_shape, torch.Size())
self.assertEqual(expon._event_shape, torch.Size())
self.assertEqual(expon.sample().size(), torch.Size())
self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, expon.log_prob, self.scalar_sample)
self.assertEqual(expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(expon.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_exponential_shape_tensor_param(self):
expon = Exponential(torch.tensor([1., 1.]))
self.assertEqual(expon._batch_shape, torch.Size((2,)))
self.assertEqual(expon._event_shape, torch.Size(()))
self.assertEqual(expon.sample().size(), torch.Size((2,)))
self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2, 2)))
self.assertEqual(expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, expon.log_prob, self.tensor_sample_2)
self.assertEqual(expon.log_prob(torch.ones(2, 2)).size(), torch.Size((2, 2)))
def test_laplace_shape_scalar_params(self):
laplace = Laplace(0, 1)
self.assertEqual(laplace._batch_shape, torch.Size())
self.assertEqual(laplace._event_shape, torch.Size())
self.assertEqual(laplace.sample().size(), torch.Size())
self.assertEqual(laplace.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, laplace.log_prob, self.scalar_sample)
self.assertEqual(laplace.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(laplace.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_laplace_shape_tensor_params(self):
laplace = Laplace(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
self.assertEqual(laplace._batch_shape, torch.Size((2,)))
self.assertEqual(laplace._event_shape, torch.Size(()))
self.assertEqual(laplace.sample().size(), torch.Size((2,)))
self.assertEqual(laplace.sample((3, 2)).size(), torch.Size((3, 2, 2)))
self.assertEqual(laplace.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, laplace.log_prob, self.tensor_sample_2)
self.assertEqual(laplace.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))
def test_continuous_bernoulli_shape_scalar_params(self):
continuous_bernoulli = ContinuousBernoulli(0.3)
self.assertEqual(continuous_bernoulli._batch_shape, torch.Size())
self.assertEqual(continuous_bernoulli._event_shape, torch.Size())
self.assertEqual(continuous_bernoulli.sample().size(), torch.Size())
self.assertEqual(continuous_bernoulli.sample((3, 2)).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, continuous_bernoulli.log_prob, self.scalar_sample)
self.assertEqual(continuous_bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertEqual(continuous_bernoulli.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
def test_continuous_bernoulli_shape_tensor_params(self):
continuous_bernoulli = ContinuousBernoulli(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))
self.assertEqual(continuous_bernoulli._batch_shape, torch.Size((3, 2)))
self.assertEqual(continuous_bernoulli._event_shape, torch.Size(()))
self.assertEqual(continuous_bernoulli.sample().size(), torch.Size((3, 2)))
self.assertEqual(continuous_bernoulli.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))
self.assertEqual(continuous_bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
self.assertRaises(ValueError, continuous_bernoulli.log_prob, self.tensor_sample_2)
self.assertEqual(continuous_bernoulli.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))
class TestKL(DistributionsTestCase):
def setUp(self):
super(TestKL, self).setUp()
class Binomial30(Binomial):
def __init__(self, probs):
super(Binomial30, self).__init__(30, probs)
# These are pairs of distributions with 4 x 4 parameters as specified.
# The first of the pair, e.g. bernoulli[0], varies column-wise and the second, e.g. bernoulli[1],
# varies row-wise; that way we test all param pairs.
bernoulli = pairwise(Bernoulli, [0.1, 0.2, 0.6, 0.9])
binomial30 = pairwise(Binomial30, [0.1, 0.2, 0.6, 0.9])
binomial_vectorized_count = (Binomial(torch.tensor([3, 4]), torch.tensor([0.4, 0.6])),
Binomial(torch.tensor([3, 4]), torch.tensor([0.5, 0.8])))
beta = pairwise(Beta, [1.0, 2.5, 1.0, 2.5], [1.5, 1.5, 3.5, 3.5])
categorical = pairwise(Categorical, [[0.4, 0.3, 0.3],
[0.2, 0.7, 0.1],
[0.33, 0.33, 0.34],
[0.2, 0.2, 0.6]])
cauchy = pairwise(Cauchy, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
chi2 = pairwise(Chi2, [1.0, 2.0, 2.5, 5.0])
dirichlet = pairwise(Dirichlet, [[0.1, 0.2, 0.7],
[0.5, 0.4, 0.1],
[0.33, 0.33, 0.34],
[0.2, 0.2, 0.4]])
exponential = pairwise(Exponential, [1.0, 2.5, 5.0, 10.0])
gamma = pairwise(Gamma, [1.0, 2.5, 1.0, 2.5], [1.5, 1.5, 3.5, 3.5])
gumbel = pairwise(Gumbel, [-2.0, 4.0, -3.0, 6.0], [1.0, 2.5, 1.0, 2.5])
halfnormal = pairwise(HalfNormal, [1.0, 2.0, 1.0, 2.0])
laplace = pairwise(Laplace, [-2.0, 4.0, -3.0, 6.0], [1.0, 2.5, 1.0, 2.5])
lognormal = pairwise(LogNormal, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
normal = pairwise(Normal, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
independent = (Independent(normal[0], 1), Independent(normal[1], 1))
onehotcategorical = pairwise(OneHotCategorical, [[0.4, 0.3, 0.3],
[0.2, 0.7, 0.1],
[0.33, 0.33, 0.34],
[0.2, 0.2, 0.6]])
pareto = (Pareto(torch.tensor([2.5, 4.0, 2.5, 4.0]).expand(4, 4),
torch.tensor([2.25, 3.75, 2.25, 3.75]).expand(4, 4)),
Pareto(torch.tensor([2.25, 3.75, 2.25, 3.8]).expand(4, 4),
torch.tensor([2.25, 3.75, 2.25, 3.75]).expand(4, 4)))
poisson = pairwise(Poisson, [0.3, 1.0, 5.0, 10.0])
uniform_within_unit = pairwise(Uniform, [0.1, 0.9, 0.2, 0.75], [0.15, 0.95, 0.25, 0.8])
uniform_positive = pairwise(Uniform, [1, 1.5, 2, 4], [1.2, 2.0, 3, 7])
uniform_real = pairwise(Uniform, [-2., -1, 0, 2], [-1., 1, 1, 4])
uniform_pareto = pairwise(Uniform, [6.5, 7.5, 6.5, 8.5], [7.5, 8.5, 9.5, 9.5])
continuous_bernoulli = pairwise(ContinuousBernoulli, [0.1, 0.2, 0.5, 0.9])
# These tests should pass with precision = 0.01, but that makes tests very expensive.
# Instead, we test with precision = 0.1 and only test with higher precision locally
# when adding a new KL implementation.
# The following pairs are not tested due to the very high variance of the Monte Carlo
# estimator; their implementations have been reviewed with extra care:
# - (pareto, normal)
self.precision = 0.1 # Set this to 0.01 when testing a new KL implementation.
self.max_samples = int(1e07) # Increase this when testing at smaller precision.
self.samples_per_batch = int(1e04)
self.finite_examples = [
(bernoulli, bernoulli),
(bernoulli, poisson),
(beta, beta),
(beta, chi2),
(beta, exponential),
(beta, gamma),
(beta, normal),
(binomial30, binomial30),
(binomial_vectorized_count, binomial_vectorized_count),
(categorical, categorical),
(cauchy, cauchy),
(chi2, chi2),
(chi2, exponential),
(chi2, gamma),
(chi2, normal),
(dirichlet, dirichlet),
(exponential, chi2),
(exponential, exponential),
(exponential, gamma),
(exponential, gumbel),
(exponential, normal),
(gamma, chi2),
(gamma, exponential),
(gamma, gamma),
(gamma, gumbel),
(gamma, normal),
(gumbel, gumbel),
(gumbel, normal),
(halfnormal, halfnormal),
(independent, independent),
(laplace, laplace),
(lognormal, lognormal),
(laplace, normal),
(normal, gumbel),
(normal, laplace),
(normal, normal),
(onehotcategorical, onehotcategorical),
(pareto, chi2),
(pareto, pareto),
(pareto, exponential),
(pareto, gamma),
(poisson, poisson),
(uniform_within_unit, beta),
(uniform_positive, chi2),
(uniform_positive, exponential),
(uniform_positive, gamma),
(uniform_real, gumbel),
(uniform_real, normal),
(uniform_pareto, pareto),
(continuous_bernoulli, continuous_bernoulli),
(continuous_bernoulli, exponential),
(continuous_bernoulli, normal),
(beta, continuous_bernoulli)
]
self.infinite_examples = [
(Bernoulli(0), Bernoulli(1)),
(Bernoulli(1), Bernoulli(0)),
(Categorical(torch.tensor([0.9, 0.1])), Categorical(torch.tensor([1., 0.]))),
(Categorical(torch.tensor([[0.9, 0.1], [.9, .1]])), Categorical(torch.tensor([1., 0.]))),
(Beta(1, 2), Uniform(0.25, 1)),
(Beta(1, 2), Uniform(0, 0.75)),
(Beta(1, 2), Uniform(0.25, 0.75)),
(Beta(1, 2), Pareto(1, 2)),
(Binomial(31, 0.7), Binomial(30, 0.3)),
(Binomial(torch.tensor([3, 4]), torch.tensor([0.4, 0.6])),
Binomial(torch.tensor([2, 3]), torch.tensor([0.5, 0.8]))),
(Chi2(1), Beta(2, 3)),
(Chi2(1), Pareto(2, 3)),
(Chi2(1), Uniform(-2, 3)),
(Exponential(1), Beta(2, 3)),
(Exponential(1), Pareto(2, 3)),
(Exponential(1), Uniform(-2, 3)),
(Gamma(1, 2), Beta(3, 4)),
(Gamma(1, 2), Pareto(3, 4)),
(Gamma(1, 2), Uniform(-3, 4)),
(Gumbel(-1, 2), Beta(3, 4)),
(Gumbel(-1, 2), Chi2(3)),
(Gumbel(-1, 2), Exponential(3)),
(Gumbel(-1, 2), Gamma(3, 4)),
(Gumbel(-1, 2), Pareto(3, 4)),
(Gumbel(-1, 2), Uniform(-3, 4)),
(Laplace(-1, 2), Beta(3, 4)),
(Laplace(-1, 2), Chi2(3)),
(Laplace(-1, 2), Exponential(3)),
(Laplace(-1, 2), Gamma(3, 4)),
(Laplace(-1, 2), Pareto(3, 4)),
(Laplace(-1, 2), Uniform(-3, 4)),
(Normal(-1, 2), Beta(3, 4)),
(Normal(-1, 2), Chi2(3)),
(Normal(-1, 2), Exponential(3)),
(Normal(-1, 2), Gamma(3, 4)),
(Normal(-1, 2), Pareto(3, 4)),
(Normal(-1, 2), Uniform(-3, 4)),
(Pareto(2, 1), Chi2(3)),
(Pareto(2, 1), Exponential(3)),
(Pareto(2, 1), Gamma(3, 4)),
(Pareto(1, 2), Normal(-3, 4)),
(Pareto(1, 2), Pareto(3, 4)),
(Poisson(2), Bernoulli(0.5)),
(Poisson(2.3), Binomial(10, 0.2)),
(Uniform(-1, 1), Beta(2, 2)),
(Uniform(0, 2), Beta(3, 4)),
(Uniform(-1, 2), Beta(3, 4)),
(Uniform(-1, 2), Chi2(3)),
(Uniform(-1, 2), Exponential(3)),
(Uniform(-1, 2), Gamma(3, 4)),
(Uniform(-1, 2), Pareto(3, 4)),
(ContinuousBernoulli(0.25), Uniform(0.25, 1)),
(ContinuousBernoulli(0.25), Uniform(0, 0.75)),
(ContinuousBernoulli(0.25), Uniform(0.25, 0.75)),
(ContinuousBernoulli(0.25), Pareto(1, 2)),
(Exponential(1), ContinuousBernoulli(0.75)),
(Gamma(1, 2), ContinuousBernoulli(0.75)),
(Gumbel(-1, 2), ContinuousBernoulli(0.75)),
(Laplace(-1, 2), ContinuousBernoulli(0.75)),
(Normal(-1, 2), ContinuousBernoulli(0.75)),
(Uniform(-1, 1), ContinuousBernoulli(0.75)),
(Uniform(0, 2), ContinuousBernoulli(0.75)),
(Uniform(-1, 2), ContinuousBernoulli(0.75))
]
def test_kl_monte_carlo(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for (p, _), (_, q) in self.finite_examples:
actual = kl_divergence(p, q)
numerator = 0
denominator = 0
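# Accumulate a batched Monte Carlo estimate of KL(p || q) = E_p[log p(x) - log q(x)],
# stopping early once it agrees with the analytic value to within self.precision.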
while denominator < self.max_samples:
x = p.sample(sample_shape=(self.samples_per_batch,))
numerator += (p.log_prob(x) - q.log_prob(x)).sum(0)
denominator += x.size(0)
expected = numerator / denominator
error = torch.abs(expected - actual) / (1 + expected)
if error[error == error].max() < self.precision:
break
self.assertLess(error[error == error].max(), self.precision, '\n'.join([
'Incorrect KL({}, {}).'.format(type(p).__name__, type(q).__name__),
'Expected ({} Monte Carlo samples): {}'.format(denominator, expected),
'Actual (analytic): {}'.format(actual),
]))
# Multivariate normal has a separate Monte Carlo-based test because it requires randomly generated
# positive (semi-)definite matrices. n is set to 5, but can be increased during testing.
def test_kl_multivariate_normal(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
n = 5 # Number of tests for multivariate_normal
for i in range(0, n):
loc = [torch.randn(4) for _ in range(0, 2)]
scale_tril = [transform_to(constraints.lower_cholesky)(torch.randn(4, 4)) for _ in range(0, 2)]
p = MultivariateNormal(loc=loc[0], scale_tril=scale_tril[0])
q = MultivariateNormal(loc=loc[1], scale_tril=scale_tril[1])
actual = kl_divergence(p, q)
numerator = 0
denominator = 0
while denominator < self.max_samples:
x = p.sample(sample_shape=(self.samples_per_batch,))
numerator += (p.log_prob(x) - q.log_prob(x)).sum(0)
denominator += x.size(0)
expected = numerator / denominator
error = torch.abs(expected - actual) / (1 + expected)
if error[error == error].max() < self.precision:
break
self.assertLess(error[error == error].max(), self.precision, '\n'.join([
'Incorrect KL(MultivariateNormal, MultivariateNormal) instance {}/{}'.format(i + 1, n),
'Expected ({} Monte Carlo sample): {}'.format(denominator, expected),
'Actual (analytic): {}'.format(actual),
]))
def test_kl_multivariate_normal_batched(self):
b = 7 # Number of batches
loc = [torch.randn(b, 3) for _ in range(0, 2)]
scale_tril = [transform_to(constraints.lower_cholesky)(torch.randn(b, 3, 3)) for _ in range(0, 2)]
expected_kl = torch.stack([
kl_divergence(MultivariateNormal(loc[0][i], scale_tril=scale_tril[0][i]),
MultivariateNormal(loc[1][i], scale_tril=scale_tril[1][i])) for i in range(0, b)])
actual_kl = kl_divergence(MultivariateNormal(loc[0], scale_tril=scale_tril[0]),
MultivariateNormal(loc[1], scale_tril=scale_tril[1]))
self.assertEqual(expected_kl, actual_kl)
def test_kl_multivariate_normal_batched_broadcasted(self):
b = 7 # Number of batches
loc = [torch.randn(b, 3) for _ in range(0, 2)]
scale_tril = [transform_to(constraints.lower_cholesky)(torch.randn(b, 3, 3)),
transform_to(constraints.lower_cholesky)(torch.randn(3, 3))]
expected_kl = torch.stack([
kl_divergence(MultivariateNormal(loc[0][i], scale_tril=scale_tril[0][i]),
MultivariateNormal(loc[1][i], scale_tril=scale_tril[1])) for i in range(0, b)])
actual_kl = kl_divergence(MultivariateNormal(loc[0], scale_tril=scale_tril[0]),
MultivariateNormal(loc[1], scale_tril=scale_tril[1]))
self.assertEqual(expected_kl, actual_kl)
def test_kl_lowrank_multivariate_normal(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
n = 5 # Number of tests for lowrank_multivariate_normal
for i in range(0, n):
loc = [torch.randn(4) for _ in range(0, 2)]
cov_factor = [torch.randn(4, 3) for _ in range(0, 2)]
cov_diag = [transform_to(constraints.positive)(torch.randn(4)) for _ in range(0, 2)]
covariance_matrix = [cov_factor[i].matmul(cov_factor[i].t()) +
cov_diag[i].diag() for i in range(0, 2)]
p = LowRankMultivariateNormal(loc[0], cov_factor[0], cov_diag[0])
q = LowRankMultivariateNormal(loc[1], cov_factor[1], cov_diag[1])
p_full = MultivariateNormal(loc[0], covariance_matrix[0])
q_full = MultivariateNormal(loc[1], covariance_matrix[1])
expected = kl_divergence(p_full, q_full)
actual_lowrank_lowrank = kl_divergence(p, q)
actual_lowrank_full = kl_divergence(p, q_full)
actual_full_lowrank = kl_divergence(p_full, q)
error_lowrank_lowrank = torch.abs(actual_lowrank_lowrank - expected).max()
self.assertLess(error_lowrank_lowrank, self.precision, '\n'.join([
'Incorrect KL(LowRankMultivariateNormal, LowRankMultivariateNormal) instance {}/{}'.format(i + 1, n),
'Expected (from KL MultivariateNormal): {}'.format(expected),
'Actual (analytic): {}'.format(actual_lowrank_lowrank),
]))
error_lowrank_full = torch.abs(actual_lowrank_full - expected).max()
self.assertLess(error_lowrank_full, self.precision, '\n'.join([
'Incorrect KL(LowRankMultivariateNormal, MultivariateNormal) instance {}/{}'.format(i + 1, n),
'Expected (from KL MultivariateNormal): {}'.format(expected),
'Actual (analytic): {}'.format(actual_lowrank_full),
]))
error_full_lowrank = torch.abs(actual_full_lowrank - expected).max()
self.assertLess(error_full_lowrank, self.precision, '\n'.join([
'Incorrect KL(MultivariateNormal, LowRankMultivariateNormal) instance {}/{}'.format(i + 1, n),
'Expected (from KL MultivariateNormal): {}'.format(expected),
'Actual (analytic): {}'.format(actual_full_lowrank),
]))
def test_kl_lowrank_multivariate_normal_batched(self):
b = 7 # Number of batches
loc = [torch.randn(b, 3) for _ in range(0, 2)]
cov_factor = [torch.randn(b, 3, 2) for _ in range(0, 2)]
cov_diag = [transform_to(constraints.positive)(torch.randn(b, 3)) for _ in range(0, 2)]
expected_kl = torch.stack([
kl_divergence(LowRankMultivariateNormal(loc[0][i], cov_factor[0][i], cov_diag[0][i]),
LowRankMultivariateNormal(loc[1][i], cov_factor[1][i], cov_diag[1][i]))
for i in range(0, b)])
actual_kl = kl_divergence(LowRankMultivariateNormal(loc[0], cov_factor[0], cov_diag[0]),
LowRankMultivariateNormal(loc[1], cov_factor[1], cov_diag[1]))
self.assertEqual(expected_kl, actual_kl)
def test_kl_exponential_family(self):
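# For pairs within the same exponential family, the analytic KL must agree with the generic
# Bregman-divergence-of-the-log-normalizer formula computed by _kl_expfamily_expfamily.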
for (p, _), (_, q) in self.finite_examples:
if type(p) == type(q) and issubclass(type(p), ExponentialFamily):
actual = kl_divergence(p, q)
expected = _kl_expfamily_expfamily(p, q)
self.assertEqual(actual, expected, msg='\n'.join([
'Incorrect KL({}, {}).'.format(type(p).__name__, type(q).__name__),
'Expected (using Bregman Divergence) {}'.format(expected),
'Actual (analytic) {}'.format(actual),
'max error = {}'.format(torch.abs(actual - expected).max())
]))
def test_kl_infinite(self):
for p, q in self.infinite_examples:
self.assertTrue((kl_divergence(p, q) == inf).all(),
'Incorrect KL({}, {})'.format(type(p).__name__, type(q).__name__))
def test_kl_edgecases(self):
self.assertEqual(kl_divergence(Bernoulli(0), Bernoulli(0)), 0)
self.assertEqual(kl_divergence(Bernoulli(1), Bernoulli(1)), 0)
self.assertEqual(kl_divergence(Categorical(torch.tensor([0., 1.])), Categorical(torch.tensor([0., 1.]))), 0)
def test_kl_shape(self):
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(**param)
try:
kl = kl_divergence(dist, dist)
except NotImplementedError:
continue
expected_shape = dist.batch_shape if dist.batch_shape else torch.Size()
self.assertEqual(kl.shape, expected_shape, msg='\n'.join([
'{} example {}/{}'.format(Dist.__name__, i + 1, len(params)),
'Expected {}'.format(expected_shape),
'Actual {}'.format(kl.shape),
]))
def test_kl_transformed(self):
# Regression test for https://github.com/pytorch/pytorch/issues/34859
scale = torch.ones(2, 3)
loc = torch.zeros(2, 3)
normal = Normal(loc=loc, scale=scale)
diag_normal = Independent(normal, reinterpreted_batch_ndims=1)
trans_dist = TransformedDistribution(diag_normal, AffineTransform(loc=0., scale=2.))
self.assertEqual(kl_divergence(diag_normal, diag_normal).shape, (2,))
self.assertEqual(kl_divergence(trans_dist, trans_dist).shape, (2,))
def test_entropy_monte_carlo(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
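# Estimate entropy as -E[log_prob(x)] from 60000 samples and compare against the analytic .entropy().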
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(**param)
try:
actual = dist.entropy()
except NotImplementedError:
continue
x = dist.sample(sample_shape=(60000,))
expected = -dist.log_prob(x).mean(0)
ignore = (expected == inf) | (expected == -inf)
expected[ignore] = actual[ignore]
self.assertEqual(actual, expected, atol=0.2, rtol=0, msg='\n'.join([
'{} example {}/{}, incorrect .entropy().'.format(Dist.__name__, i + 1, len(params)),
'Expected (monte carlo) {}'.format(expected),
'Actual (analytic) {}'.format(actual),
'max error = {}'.format(torch.abs(actual - expected).max()),
]))
def test_entropy_exponential_family(self):
for Dist, params in EXAMPLES:
if not issubclass(Dist, ExponentialFamily):
continue
for i, param in enumerate(params):
dist = Dist(**param)
try:
actual = dist.entropy()
except NotImplementedError:
continue
try:
expected = ExponentialFamily.entropy(dist)
except NotImplementedError:
continue
self.assertEqual(actual, expected, msg='\n'.join([
'{} example {}/{}, incorrect .entropy().'.format(Dist.__name__, i + 1, len(params)),
'Expected (Bregman Divergence) {}'.format(expected),
'Actual (analytic) {}'.format(actual),
'max error = {}'.format(torch.abs(actual - expected).max())
]))
class TestConstraints(DistributionsTestCase):
def test_params_constraints(self):
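# Every constructor argument listed in arg_constraints must satisfy its declared constraint
# and have a batch shape broadcastable with the distribution's batch shape.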
normalize_probs_dists = (
Categorical,
Multinomial,
OneHotCategorical,
OneHotCategoricalStraightThrough,
RelaxedOneHotCategorical
)
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(**param)
for name, value in param.items():
if isinstance(value, numbers.Number):
value = torch.tensor([value])
if Dist in normalize_probs_dists and name == 'probs':
# These distributions accept unnormalized positive probs and normalize them internally,
# but the constraint checked here is the stricter simplex constraint, so normalize first.
value = value / value.sum(-1, True)
try:
constraint = dist.arg_constraints[name]
except KeyError:
continue # ignore optional parameters
# Check param shape is compatible with distribution shape.
self.assertGreaterEqual(value.dim(), constraint.event_dim)
value_batch_shape = value.shape[:value.dim() - constraint.event_dim]
torch.broadcast_shapes(dist.batch_shape, value_batch_shape)
if is_dependent(constraint):
continue
message = '{} example {}/{} parameter {} = {}'.format(
Dist.__name__, i + 1, len(params), name, value)
self.assertTrue(constraint.check(value).all(), msg=message)
def test_support_constraints(self):
for Dist, params in EXAMPLES:
self.assertIsInstance(Dist.support, Constraint)
for i, param in enumerate(params):
dist = Dist(**param)
value = dist.sample()
constraint = dist.support
message = '{} example {}/{} sample = {}'.format(
Dist.__name__, i + 1, len(params), value)
self.assertEqual(constraint.event_dim, len(dist.event_shape), msg=message)
ok = constraint.check(value)
self.assertEqual(ok.shape, dist.batch_shape, msg=message)
self.assertTrue(ok.all(), msg=message)
class TestNumericalStability(DistributionsTestCase):
def _test_pdf_score(self,
dist_class,
x,
expected_value,
probs=None,
logits=None,
expected_gradient=None,
atol=1e-5):
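# Build the distribution from either probs or logits, then check both log_prob(x) and its
# gradient w.r.t. the parameter against the expected closed-form values.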
if probs is not None:
p = probs.detach().requires_grad_()
dist = dist_class(p)
else:
p = logits.detach().requires_grad_()
dist = dist_class(logits=p)
log_pdf = dist.log_prob(x)
log_pdf.sum().backward()
self.assertEqual(log_pdf,
expected_value,
atol=atol,
rtol=0,
msg='Incorrect value for tensor type: {}. Expected = {}, Actual = {}'
.format(type(x), expected_value, log_pdf))
if expected_gradient is not None:
self.assertEqual(p.grad,
expected_gradient,
atol=atol,
rtol=0,
msg='Incorrect gradient for tensor type: {}. Expected = {}, Actual = {}'
.format(type(x), expected_gradient, p.grad))
def test_bernoulli_gradient(self):
for tensor_type in [torch.FloatTensor, torch.DoubleTensor]:
self._test_pdf_score(dist_class=Bernoulli,
probs=tensor_type([0]),
x=tensor_type([0]),
expected_value=tensor_type([0]),
expected_gradient=tensor_type([0]))
self._test_pdf_score(dist_class=Bernoulli,
probs=tensor_type([0]),
x=tensor_type([1]),
expected_value=tensor_type([torch.finfo(tensor_type([]).dtype).eps]).log(),
expected_gradient=tensor_type([0]))
self._test_pdf_score(dist_class=Bernoulli,
probs=tensor_type([1e-4]),
x=tensor_type([1]),
expected_value=tensor_type([math.log(1e-4)]),
expected_gradient=tensor_type([10000]))
# Lower precision due to:
# >>> 1 / (1 - torch.FloatTensor([0.9999]))
# 9998.3408
# [torch.FloatTensor of size 1]
self._test_pdf_score(dist_class=Bernoulli,
probs=tensor_type([1 - 1e-4]),
x=tensor_type([0]),
expected_value=tensor_type([math.log(1e-4)]),
expected_gradient=tensor_type([-10000]),
atol=2)
self._test_pdf_score(dist_class=Bernoulli,
logits=tensor_type([math.log(9999)]),
x=tensor_type([0]),
expected_value=tensor_type([math.log(1e-4)]),
expected_gradient=tensor_type([-1]),
atol=1e-3)
def test_bernoulli_with_logits_underflow(self):
for tensor_type, lim in ([(torch.FloatTensor, -1e38),
(torch.DoubleTensor, -1e308)]):
self._test_pdf_score(dist_class=Bernoulli,
logits=tensor_type([lim]),
x=tensor_type([0]),
expected_value=tensor_type([0]),
expected_gradient=tensor_type([0]))
def test_bernoulli_with_logits_overflow(self):
for tensor_type, lim in ([(torch.FloatTensor, 1e38),
(torch.DoubleTensor, 1e308)]):
self._test_pdf_score(dist_class=Bernoulli,
logits=tensor_type([lim]),
x=tensor_type([1]),
expected_value=tensor_type([0]),
expected_gradient=tensor_type([0]))
def test_categorical_log_prob(self):
for dtype in ([torch.float, torch.double]):
p = torch.tensor([0, 1], dtype=dtype, requires_grad=True)
categorical = OneHotCategorical(p)
log_pdf = categorical.log_prob(torch.tensor([0, 1], dtype=dtype))
self.assertEqual(log_pdf.item(), 0)
def test_categorical_log_prob_with_logits(self):
for dtype in ([torch.float, torch.double]):
p = torch.tensor([-inf, 0], dtype=dtype, requires_grad=True)
categorical = OneHotCategorical(logits=p)
log_pdf_prob_1 = categorical.log_prob(torch.tensor([0, 1], dtype=dtype))
self.assertEqual(log_pdf_prob_1.item(), 0)
log_pdf_prob_0 = categorical.log_prob(torch.tensor([1, 0], dtype=dtype))
self.assertEqual(log_pdf_prob_0.item(), -inf)
def test_multinomial_log_prob(self):
for dtype in ([torch.float, torch.double]):
p = torch.tensor([0, 1], dtype=dtype, requires_grad=True)
s = torch.tensor([0, 10], dtype=dtype)
multinomial = Multinomial(10, p)
log_pdf = multinomial.log_prob(s)
self.assertEqual(log_pdf.item(), 0)
def test_multinomial_log_prob_with_logits(self):
for dtype in ([torch.float, torch.double]):
p = torch.tensor([-inf, 0], dtype=dtype, requires_grad=True)
multinomial = Multinomial(10, logits=p)
log_pdf_prob_1 = multinomial.log_prob(torch.tensor([0, 10], dtype=dtype))
self.assertEqual(log_pdf_prob_1.item(), 0)
log_pdf_prob_0 = multinomial.log_prob(torch.tensor([10, 0], dtype=dtype))
self.assertEqual(log_pdf_prob_0.item(), -inf)
def test_continuous_bernoulli_gradient(self):
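# expec_val/expec_grad give the closed-form ContinuousBernoulli log-density and its gradient,
# including the Taylor expansion of the log normalizer near probs = 0.5 (mirroring the default lims).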
def expec_val(x, probs=None, logits=None):
assert not (probs is None and logits is None)
if logits is not None:
probs = 1. / (1. + math.exp(-logits))
bern_log_lik = x * math.log(probs) + (1. - x) * math.log1p(-probs)
if probs < 0.499 or probs > 0.501: # using default values of lims here
log_norm_const = math.log(
math.fabs(math.atanh(1. - 2. * probs))) - math.log(math.fabs(1. - 2. * probs)) + math.log(2.)
else:
aux = math.pow(probs - 0.5, 2)
log_norm_const = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * aux) * aux
log_lik = bern_log_lik + log_norm_const
return log_lik
def expec_grad(x, probs=None, logits=None):
assert not (probs is None and logits is None)
if logits is not None:
probs = 1. / (1. + math.exp(-logits))
grad_bern_log_lik = x / probs - (1. - x) / (1. - probs)
if probs < 0.499 or probs > 0.501: # using default values of lims here
grad_log_c = 2. * probs - 4. * (probs - 1.) * probs * math.atanh(1. - 2. * probs) - 1.
grad_log_c /= 2. * (probs - 1.) * probs * (2. * probs - 1.) * math.atanh(1. - 2. * probs)
else:
grad_log_c = 8. / 3. * (probs - 0.5) + 416. / 45. * math.pow(probs - 0.5, 3)
grad = grad_bern_log_lik + grad_log_c
if logits is not None:
grad *= 1. / (1. + math.exp(logits)) - 1. / math.pow(1. + math.exp(logits), 2)
return grad
for tensor_type in [torch.FloatTensor, torch.DoubleTensor]:
self._test_pdf_score(dist_class=ContinuousBernoulli,
probs=tensor_type([0.1]),
x=tensor_type([0.1]),
expected_value=tensor_type([expec_val(0.1, probs=0.1)]),
expected_gradient=tensor_type([expec_grad(0.1, probs=0.1)]))
self._test_pdf_score(dist_class=ContinuousBernoulli,
probs=tensor_type([0.1]),
x=tensor_type([1.]),
expected_value=tensor_type([expec_val(1., probs=0.1)]),
expected_gradient=tensor_type([expec_grad(1., probs=0.1)]))
self._test_pdf_score(dist_class=ContinuousBernoulli,
probs=tensor_type([0.4999]),
x=tensor_type([0.9]),
expected_value=tensor_type([expec_val(0.9, probs=0.4999)]),
expected_gradient=tensor_type([expec_grad(0.9, probs=0.4999)]))
self._test_pdf_score(dist_class=ContinuousBernoulli,
probs=tensor_type([1e-4]),
x=tensor_type([1]),
expected_value=tensor_type([expec_val(1, probs=1e-4)]),
expected_gradient=tensor_type([expec_grad(1, probs=1e-4)]),
atol=1e-3)
self._test_pdf_score(dist_class=ContinuousBernoulli,
probs=tensor_type([1 - 1e-4]),
x=tensor_type([0.1]),
expected_value=tensor_type([expec_val(0.1, probs=1 - 1e-4)]),
expected_gradient=tensor_type([expec_grad(0.1, probs=1 - 1e-4)]),
atol=2)
self._test_pdf_score(dist_class=ContinuousBernoulli,
logits=tensor_type([math.log(9999)]),
x=tensor_type([0]),
expected_value=tensor_type([expec_val(0, logits=math.log(9999))]),
expected_gradient=tensor_type([expec_grad(0, logits=math.log(9999))]),
atol=1e-3)
self._test_pdf_score(dist_class=ContinuousBernoulli,
logits=tensor_type([0.001]),
x=tensor_type([0.5]),
expected_value=tensor_type([expec_val(0.5, logits=0.001)]),
expected_gradient=tensor_type([expec_grad(0.5, logits=0.001)]))
def test_continuous_bernoulli_with_logits_underflow(self):
for tensor_type, lim, expected in ([(torch.FloatTensor, -1e38, 2.76898),
(torch.DoubleTensor, -1e308, 3.58473)]):
self._test_pdf_score(dist_class=ContinuousBernoulli,
logits=tensor_type([lim]),
x=tensor_type([0]),
expected_value=tensor_type([expected]),
expected_gradient=tensor_type([0.]))
def test_continuous_bernoulli_with_logits_overflow(self):
for tensor_type, lim, expected in ([(torch.FloatTensor, 1e38, 2.76898),
(torch.DoubleTensor, 1e308, 3.58473)]):
self._test_pdf_score(dist_class=ContinuousBernoulli,
logits=tensor_type([lim]),
x=tensor_type([1]),
expected_value=tensor_type([expected]),
expected_gradient=tensor_type([0.]))
# TODO: make this a pytest parameterized test
class TestLazyLogitsInitialization(DistributionsTestCase):
def setUp(self):
super(TestLazyLogitsInitialization, self).setUp()
# ContinuousBernoulli is not tested because log_prob is not computed simply
# from 'logits', but 'probs' is also needed
self.examples = [e for e in EXAMPLES if e.Dist in
(Categorical, OneHotCategorical, Bernoulli, Binomial, Multinomial)]
def test_lazy_logits_initialization(self):
for Dist, params in self.examples:
param = params[0].copy()
if 'probs' not in param:
continue
probs = param.pop('probs')
param['logits'] = probs_to_logits(probs)
dist = Dist(**param)
# Create new instance to generate a valid sample
dist.log_prob(Dist(**param).sample())
message = 'Failed for {} example 0/{}'.format(Dist.__name__, len(params))
self.assertNotIn('probs', dist.__dict__, msg=message)
try:
dist.enumerate_support()
except NotImplementedError:
pass
self.assertNotIn('probs', dist.__dict__, msg=message)
batch_shape, event_shape = dist.batch_shape, dist.event_shape
self.assertNotIn('probs', dist.__dict__, msg=message)
def test_lazy_probs_initialization(self):
for Dist, params in self.examples:
param = params[0].copy()
if 'probs' not in param:
continue
dist = Dist(**param)
dist.sample()
message = 'Failed for {} example 0/{}'.format(Dist.__name__, len(params))
self.assertNotIn('logits', dist.__dict__, msg=message)
try:
dist.enumerate_support()
except NotImplementedError:
pass
self.assertNotIn('logits', dist.__dict__, msg=message)
batch_shape, event_shape = dist.batch_shape, dist.event_shape
self.assertNotIn('logits', dist.__dict__, msg=message)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
class TestAgainstScipy(DistributionsTestCase):
def setUp(self):
super(TestAgainstScipy, self).setUp()
positive_var = torch.randn(20).exp()
positive_var2 = torch.randn(20).exp()
random_var = torch.randn(20)
simplex_tensor = softmax(torch.randn(20), dim=-1)
cov_tensor = torch.randn(20, 20)
cov_tensor = cov_tensor @ cov_tensor.mT
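        # A @ A.mT is symmetric positive semi-definite, so it is a valid covariance matrix.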
self.distribution_pairs = [
(
Bernoulli(simplex_tensor),
scipy.stats.bernoulli(simplex_tensor)
),
(
Beta(positive_var, positive_var2),
scipy.stats.beta(positive_var, positive_var2)
),
(
Binomial(10, simplex_tensor),
scipy.stats.binom(10 * np.ones(simplex_tensor.shape), simplex_tensor.numpy())
),
(
Cauchy(random_var, positive_var),
scipy.stats.cauchy(loc=random_var, scale=positive_var)
),
(
Dirichlet(positive_var),
scipy.stats.dirichlet(positive_var)
),
(
Exponential(positive_var),
scipy.stats.expon(scale=positive_var.reciprocal())
),
(
FisherSnedecor(positive_var, 4 + positive_var2), # var for df2<=4 is undefined
scipy.stats.f(positive_var, 4 + positive_var2)
),
(
Gamma(positive_var, positive_var2),
scipy.stats.gamma(positive_var, scale=positive_var2.reciprocal())
),
(
Geometric(simplex_tensor),
scipy.stats.geom(simplex_tensor, loc=-1)
),
(
Gumbel(random_var, positive_var2),
scipy.stats.gumbel_r(random_var, positive_var2)
),
(
HalfCauchy(positive_var),
scipy.stats.halfcauchy(scale=positive_var)
),
(
HalfNormal(positive_var2),
scipy.stats.halfnorm(scale=positive_var2)
),
(
Laplace(random_var, positive_var2),
scipy.stats.laplace(random_var, positive_var2)
),
(
# Tests fail 1e-5 threshold if scale > 3
LogNormal(random_var, positive_var.clamp(max=3)),
scipy.stats.lognorm(s=positive_var.clamp(max=3), scale=random_var.exp())
),
(
LowRankMultivariateNormal(random_var, torch.zeros(20, 1), positive_var2),
scipy.stats.multivariate_normal(random_var, torch.diag(positive_var2))
),
(
Multinomial(10, simplex_tensor),
scipy.stats.multinomial(10, simplex_tensor)
),
(
MultivariateNormal(random_var, torch.diag(positive_var2)),
scipy.stats.multivariate_normal(random_var, torch.diag(positive_var2))
),
(
MultivariateNormal(random_var, cov_tensor),
scipy.stats.multivariate_normal(random_var, cov_tensor)
),
(
Normal(random_var, positive_var2),
scipy.stats.norm(random_var, positive_var2)
),
(
OneHotCategorical(simplex_tensor),
scipy.stats.multinomial(1, simplex_tensor)
),
(
Pareto(positive_var, 2 + positive_var2),
scipy.stats.pareto(2 + positive_var2, scale=positive_var)
),
(
Poisson(positive_var),
scipy.stats.poisson(positive_var)
),
(
StudentT(2 + positive_var, random_var, positive_var2),
scipy.stats.t(2 + positive_var, random_var, positive_var2)
),
(
Uniform(random_var, random_var + positive_var),
scipy.stats.uniform(random_var, positive_var)
),
(
VonMises(random_var, positive_var),
scipy.stats.vonmises(positive_var, loc=random_var)
),
(
Weibull(positive_var[0], positive_var2[0]), # scipy var for Weibull only supports scalars
scipy.stats.weibull_min(c=positive_var2[0], scale=positive_var[0])
),
(
# scipy var for Wishart only supports scalars
                # SciPy allows ndim - 1 < df < ndim for the Wishart distribution since version 1.7.0
Wishart(
(20 if version.parse(scipy.__version__) < version.parse("1.7.0") else 19) + positive_var[0],
cov_tensor,
),
scipy.stats.wishart(
(20 if version.parse(scipy.__version__) < version.parse("1.7.0") else 19) + positive_var[0].item(),
cov_tensor,
),
),
]
def test_mean(self):
for pytorch_dist, scipy_dist in self.distribution_pairs:
if isinstance(pytorch_dist, (Cauchy, HalfCauchy)):
# Cauchy, HalfCauchy distributions' mean is nan, skipping check
continue
elif isinstance(pytorch_dist, (LowRankMultivariateNormal, MultivariateNormal)):
self.assertEqual(pytorch_dist.mean, scipy_dist.mean, msg=pytorch_dist)
else:
self.assertEqual(pytorch_dist.mean, scipy_dist.mean(), msg=pytorch_dist)
def test_variance_stddev(self):
for pytorch_dist, scipy_dist in self.distribution_pairs:
if isinstance(pytorch_dist, (Cauchy, HalfCauchy, VonMises)):
# Cauchy, HalfCauchy distributions' standard deviation is nan, skipping check
# VonMises variance is circular and scipy doesn't produce a correct result
continue
elif isinstance(pytorch_dist, (Multinomial, OneHotCategorical)):
self.assertEqual(pytorch_dist.variance, np.diag(scipy_dist.cov()), msg=pytorch_dist)
self.assertEqual(pytorch_dist.stddev, np.diag(scipy_dist.cov()) ** 0.5, msg=pytorch_dist)
elif isinstance(pytorch_dist, (LowRankMultivariateNormal, MultivariateNormal)):
self.assertEqual(pytorch_dist.variance, np.diag(scipy_dist.cov), msg=pytorch_dist)
self.assertEqual(pytorch_dist.stddev, np.diag(scipy_dist.cov) ** 0.5, msg=pytorch_dist)
else:
self.assertEqual(pytorch_dist.variance, scipy_dist.var(), msg=pytorch_dist)
self.assertEqual(pytorch_dist.stddev, scipy_dist.var() ** 0.5, msg=pytorch_dist)
def test_cdf(self):
for pytorch_dist, scipy_dist in self.distribution_pairs:
samples = pytorch_dist.sample((5,))
try:
cdf = pytorch_dist.cdf(samples)
except NotImplementedError:
continue
self.assertEqual(cdf, scipy_dist.cdf(samples), msg=pytorch_dist)
def test_icdf(self):
for pytorch_dist, scipy_dist in self.distribution_pairs:
samples = torch.rand((5,) + pytorch_dist.batch_shape)
try:
icdf = pytorch_dist.icdf(samples)
except NotImplementedError:
continue
self.assertEqual(icdf, scipy_dist.ppf(samples), msg=pytorch_dist)
class TestFunctors(DistributionsTestCase):
def test_cat_transform(self):
x1 = -1 * torch.arange(1, 101, dtype=torch.float).view(-1, 100)
x2 = (torch.arange(1, 101, dtype=torch.float).view(-1, 100) - 1) / 100
x3 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)
t1, t2, t3 = ExpTransform(), AffineTransform(1, 100), identity_transform
dim = 0
x = torch.cat([x1, x2, x3], dim=dim)
t = CatTransform([t1, t2, t3], dim=dim)
actual_dom_check = t.domain.check(x)
expected_dom_check = torch.cat([t1.domain.check(x1),
t2.domain.check(x2),
t3.domain.check(x3)], dim=dim)
self.assertEqual(expected_dom_check, actual_dom_check)
actual = t(x)
expected = torch.cat([t1(x1), t2(x2), t3(x3)], dim=dim)
self.assertEqual(expected, actual)
y1 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)
y2 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)
y3 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)
y = torch.cat([y1, y2, y3], dim=dim)
actual_cod_check = t.codomain.check(y)
expected_cod_check = torch.cat([t1.codomain.check(y1),
t2.codomain.check(y2),
t3.codomain.check(y3)], dim=dim)
self.assertEqual(actual_cod_check, expected_cod_check)
actual_inv = t.inv(y)
expected_inv = torch.cat([t1.inv(y1), t2.inv(y2), t3.inv(y3)], dim=dim)
self.assertEqual(expected_inv, actual_inv)
actual_jac = t.log_abs_det_jacobian(x, y)
expected_jac = torch.cat([t1.log_abs_det_jacobian(x1, y1),
t2.log_abs_det_jacobian(x2, y2),
t3.log_abs_det_jacobian(x3, y3)], dim=dim)
self.assertEqual(actual_jac, expected_jac)
def test_cat_transform_non_uniform(self):
x1 = -1 * torch.arange(1, 101, dtype=torch.float).view(-1, 100)
x2 = torch.cat([(torch.arange(1, 101, dtype=torch.float).view(-1, 100) - 1) / 100,
torch.arange(1, 101, dtype=torch.float).view(-1, 100)])
t1 = ExpTransform()
t2 = CatTransform([AffineTransform(1, 100), identity_transform], dim=0)
dim = 0
x = torch.cat([x1, x2], dim=dim)
t = CatTransform([t1, t2], dim=dim, lengths=[1, 2])
actual_dom_check = t.domain.check(x)
expected_dom_check = torch.cat([t1.domain.check(x1),
t2.domain.check(x2)], dim=dim)
self.assertEqual(expected_dom_check, actual_dom_check)
actual = t(x)
expected = torch.cat([t1(x1), t2(x2)], dim=dim)
self.assertEqual(expected, actual)
y1 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)
y2 = torch.cat([torch.arange(1, 101, dtype=torch.float).view(-1, 100),
torch.arange(1, 101, dtype=torch.float).view(-1, 100)])
y = torch.cat([y1, y2], dim=dim)
actual_cod_check = t.codomain.check(y)
expected_cod_check = torch.cat([t1.codomain.check(y1),
t2.codomain.check(y2)], dim=dim)
self.assertEqual(actual_cod_check, expected_cod_check)
actual_inv = t.inv(y)
expected_inv = torch.cat([t1.inv(y1), t2.inv(y2)], dim=dim)
self.assertEqual(expected_inv, actual_inv)
actual_jac = t.log_abs_det_jacobian(x, y)
expected_jac = torch.cat([t1.log_abs_det_jacobian(x1, y1),
t2.log_abs_det_jacobian(x2, y2)], dim=dim)
self.assertEqual(actual_jac, expected_jac)
def test_cat_event_dim(self):
t1 = AffineTransform(0, 2 * torch.ones(2), event_dim=1)
t2 = AffineTransform(0, 2 * torch.ones(2), event_dim=1)
dim = 1
bs = 16
x1 = torch.randn(bs, 2)
x2 = torch.randn(bs, 2)
x = torch.cat([x1, x2], dim=1)
t = CatTransform([t1, t2], dim=dim, lengths=[2, 2])
y1 = t1(x1)
y2 = t2(x2)
y = t(x)
actual_jac = t.log_abs_det_jacobian(x, y)
expected_jac = sum([t1.log_abs_det_jacobian(x1, y1),
t2.log_abs_det_jacobian(x2, y2)])
def test_stack_transform(self):
x1 = -1 * torch.arange(1, 101, dtype=torch.float)
x2 = (torch.arange(1, 101, dtype=torch.float) - 1) / 100
x3 = torch.arange(1, 101, dtype=torch.float)
t1, t2, t3 = ExpTransform(), AffineTransform(1, 100), identity_transform
dim = 0
x = torch.stack([x1, x2, x3], dim=dim)
t = StackTransform([t1, t2, t3], dim=dim)
actual_dom_check = t.domain.check(x)
expected_dom_check = torch.stack([t1.domain.check(x1),
t2.domain.check(x2),
t3.domain.check(x3)], dim=dim)
self.assertEqual(expected_dom_check, actual_dom_check)
actual = t(x)
expected = torch.stack([t1(x1), t2(x2), t3(x3)], dim=dim)
self.assertEqual(expected, actual)
y1 = torch.arange(1, 101, dtype=torch.float)
y2 = torch.arange(1, 101, dtype=torch.float)
y3 = torch.arange(1, 101, dtype=torch.float)
y = torch.stack([y1, y2, y3], dim=dim)
actual_cod_check = t.codomain.check(y)
expected_cod_check = torch.stack([t1.codomain.check(y1),
t2.codomain.check(y2),
t3.codomain.check(y3)], dim=dim)
self.assertEqual(actual_cod_check, expected_cod_check)
        actual_inv = t.inv(y)
        expected_inv = torch.stack([t1.inv(y1), t2.inv(y2), t3.inv(y3)], dim=dim)
self.assertEqual(expected_inv, actual_inv)
actual_jac = t.log_abs_det_jacobian(x, y)
expected_jac = torch.stack([t1.log_abs_det_jacobian(x1, y1),
t2.log_abs_det_jacobian(x2, y2),
t3.log_abs_det_jacobian(x3, y3)], dim=dim)
self.assertEqual(actual_jac, expected_jac)
class TestValidation(DistributionsTestCase):
def setUp(self):
super(TestValidation, self).setUp()
def test_valid(self):
for Dist, params in EXAMPLES:
for param in params:
Dist(validate_args=True, **param)
def test_invalid_log_probs_arg(self):
# Check that validation errors are indeed disabled,
# but they might raise another error
for Dist, params in EXAMPLES:
if Dist == TransformedDistribution:
# TransformedDistribution has a distribution instance
# as the argument, so we cannot do much about that
continue
for i, param in enumerate(params):
d_nonval = Dist(validate_args=False, **param)
d_val = Dist(validate_args=True, **param)
for v in torch.tensor([-2.0, -1.0, 0.0, 1.0, 2.0]):
# samples with incorrect shape must throw ValueError only
try:
log_prob = d_val.log_prob(v)
except ValueError:
pass
# get sample of correct shape
val = torch.full(d_val.batch_shape + d_val.event_shape, v)
# check samples with incorrect support
try:
log_prob = d_val.log_prob(val)
except ValueError as e:
if e.args and 'must be within the support' in e.args[0]:
try:
log_prob = d_nonval.log_prob(val)
except RuntimeError:
pass
# check correct samples are ok
valid_value = d_val.sample()
d_val.log_prob(valid_value)
# check invalid values raise ValueError
if valid_value.dtype == torch.long:
valid_value = valid_value.float()
invalid_value = torch.full_like(valid_value, math.nan)
try:
with self.assertRaisesRegex(
ValueError,
"Expected value argument .* to be within the support .*",
):
d_val.log_prob(invalid_value)
except AssertionError as e:
fail_string = "Support ValueError not raised for {} example {}/{}"
raise AssertionError(
fail_string.format(Dist.__name__, i + 1, len(params))
) from e
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_invalid(self):
for Dist, params in BAD_EXAMPLES:
for i, param in enumerate(params):
try:
with self.assertRaises(ValueError):
Dist(validate_args=True, **param)
except AssertionError as e:
fail_string = "ValueError not raised for {} example {}/{}"
raise AssertionError(
fail_string.format(Dist.__name__, i + 1, len(params))
) from e
def test_warning_unimplemented_constraints(self):
class Delta(Distribution):
def __init__(self, validate_args=True):
super().__init__(validate_args=validate_args)
def sample(self, sample_shape=torch.Size()):
return torch.tensor(0.).expand(sample_shape)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value[value != 0.] = -float('inf')
value[value == 0.] = 0.
return value
with self.assertWarns(UserWarning):
d = Delta()
sample = d.sample((2,))
with self.assertWarns(UserWarning):
d.log_prob(sample)
def tearDown(self):
super(TestValidation, self).tearDown()
class TestJit(DistributionsTestCase):
def _examples(self):
for Dist, params in EXAMPLES:
for param in params:
keys = param.keys()
values = tuple(param[key] for key in keys)
if not all(isinstance(x, torch.Tensor) for x in values):
continue
sample = Dist(**param).sample()
yield Dist, keys, values, sample
def _perturb_tensor(self, value, constraint):
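        # Produce a different value that still satisfies `constraint`: integer and (semi)definite
        # constraints are shifted directly, floats are perturbed in unconstrained space via
        # transform_to, and long tensors have their 0/1 entries swapped.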
if isinstance(constraint, constraints._IntegerGreaterThan):
return value + 1
if isinstance(constraint, constraints._PositiveDefinite) or isinstance(constraint, constraints._PositiveSemidefinite):
return value + torch.eye(value.shape[-1])
if value.dtype in [torch.float, torch.double]:
transform = transform_to(constraint)
delta = value.new(value.shape).normal_()
return transform(transform.inv(value) + delta)
if value.dtype == torch.long:
result = value.clone()
result[value == 0] = 1
result[value == 1] = 0
return result
raise NotImplementedError
def _perturb(self, Dist, keys, values, sample):
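        # Re-draw the parameters and a fresh sample so traced functions are exercised on inputs
        # different from the ones they were traced with.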
with torch.no_grad():
if Dist is Uniform:
param = dict(zip(keys, values))
param['low'] = param['low'] - torch.rand(param['low'].shape)
param['high'] = param['high'] + torch.rand(param['high'].shape)
values = [param[key] for key in keys]
else:
values = [self._perturb_tensor(value, Dist.arg_constraints.get(key, constraints.real))
for key, value in zip(keys, values)]
param = dict(zip(keys, values))
sample = Dist(**param).sample()
return values, sample
def test_sample(self):
for Dist, keys, values, sample in self._examples():
def f(*values):
param = dict(zip(keys, values))
dist = Dist(**param)
return dist.sample()
traced_f = torch.jit.trace(f, values, check_trace=False)
# FIXME Schema not found for node
xfail = [
Cauchy, # aten::cauchy(Double(2,1), float, float, Generator)
HalfCauchy, # aten::cauchy(Double(2, 1), float, float, Generator)
VonMises # Variance is not Euclidean
]
if Dist in xfail:
continue
with torch.random.fork_rng():
sample = f(*values)
traced_sample = traced_f(*values)
self.assertEqual(sample, traced_sample)
# FIXME no nondeterministic nodes found in trace
xfail = [Beta, Dirichlet]
if Dist not in xfail:
self.assertTrue(any(n.isNondeterministic() for n in traced_f.graph.nodes()))
def test_rsample(self):
for Dist, keys, values, sample in self._examples():
if not Dist.has_rsample:
continue
def f(*values):
param = dict(zip(keys, values))
dist = Dist(**param)
return dist.rsample()
traced_f = torch.jit.trace(f, values, check_trace=False)
# FIXME Schema not found for node
xfail = [
Cauchy, # aten::cauchy(Double(2,1), float, float, Generator)
HalfCauchy, # aten::cauchy(Double(2, 1), float, float, Generator)
]
if Dist in xfail:
continue
with torch.random.fork_rng():
sample = f(*values)
traced_sample = traced_f(*values)
self.assertEqual(sample, traced_sample)
# FIXME no nondeterministic nodes found in trace
xfail = [Beta, Dirichlet]
if Dist not in xfail:
self.assertTrue(any(n.isNondeterministic() for n in traced_f.graph.nodes()))
def test_log_prob(self):
for Dist, keys, values, sample in self._examples():
# FIXME traced functions produce incorrect results
xfail = [LowRankMultivariateNormal, MultivariateNormal]
if Dist in xfail:
continue
def f(sample, *values):
param = dict(zip(keys, values))
dist = Dist(**param)
return dist.log_prob(sample)
traced_f = torch.jit.trace(f, (sample,) + values)
# check on different data
values, sample = self._perturb(Dist, keys, values, sample)
expected = f(sample, *values)
actual = traced_f(sample, *values)
self.assertEqual(expected, actual,
msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
def test_enumerate_support(self):
for Dist, keys, values, sample in self._examples():
# FIXME traced functions produce incorrect results
xfail = [Binomial]
if Dist in xfail:
continue
def f(*values):
param = dict(zip(keys, values))
dist = Dist(**param)
return dist.enumerate_support()
try:
traced_f = torch.jit.trace(f, values)
except NotImplementedError:
continue
# check on different data
values, sample = self._perturb(Dist, keys, values, sample)
expected = f(*values)
actual = traced_f(*values)
self.assertEqual(expected, actual,
msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
def test_mean(self):
for Dist, keys, values, sample in self._examples():
def f(*values):
param = dict(zip(keys, values))
dist = Dist(**param)
return dist.mean
try:
traced_f = torch.jit.trace(f, values)
except NotImplementedError:
continue
# check on different data
values, sample = self._perturb(Dist, keys, values, sample)
expected = f(*values)
actual = traced_f(*values)
expected[expected == float('inf')] = 0.
actual[actual == float('inf')] = 0.
self.assertEqual(expected, actual,
msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
def test_variance(self):
for Dist, keys, values, sample in self._examples():
if Dist in [Cauchy, HalfCauchy]:
continue # infinite variance
def f(*values):
param = dict(zip(keys, values))
dist = Dist(**param)
return dist.variance
try:
traced_f = torch.jit.trace(f, values)
except NotImplementedError:
continue
# check on different data
values, sample = self._perturb(Dist, keys, values, sample)
expected = f(*values).clone()
actual = traced_f(*values).clone()
expected[expected == float('inf')] = 0.
actual[actual == float('inf')] = 0.
self.assertEqual(expected, actual,
msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
def test_entropy(self):
for Dist, keys, values, sample in self._examples():
# FIXME traced functions produce incorrect results
xfail = [LowRankMultivariateNormal, MultivariateNormal]
if Dist in xfail:
continue
def f(*values):
param = dict(zip(keys, values))
dist = Dist(**param)
return dist.entropy()
try:
traced_f = torch.jit.trace(f, values)
except NotImplementedError:
continue
# check on different data
values, sample = self._perturb(Dist, keys, values, sample)
expected = f(*values)
actual = traced_f(*values)
self.assertEqual(expected, actual,
msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
def test_cdf(self):
for Dist, keys, values, sample in self._examples():
def f(sample, *values):
param = dict(zip(keys, values))
dist = Dist(**param)
cdf = dist.cdf(sample)
return dist.icdf(cdf)
try:
traced_f = torch.jit.trace(f, (sample,) + values)
except NotImplementedError:
continue
# check on different data
values, sample = self._perturb(Dist, keys, values, sample)
expected = f(sample, *values)
actual = traced_f(sample, *values)
self.assertEqual(expected, actual,
msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
if __name__ == '__main__' and torch._C.has_lapack:
run_tests()
|
pytorch-master
|
test/distributions/test_distributions.py
|
# Owner(s): ["module: distributions"]
import io
from numbers import Number
import pytest
import torch
from torch.autograd.functional import jacobian
from torch.distributions import Dirichlet, Independent, Normal, TransformedDistribution, constraints
from torch.distributions.transforms import (AbsTransform, AffineTransform, ComposeTransform,
CorrCholeskyTransform, CumulativeDistributionTransform,
ExpTransform, IndependentTransform,
LowerCholeskyTransform, PowerTransform,
ReshapeTransform, SigmoidTransform, TanhTransform,
SoftmaxTransform, SoftplusTransform, StickBreakingTransform,
identity_transform, Transform, _InverseTransform)
from torch.distributions.utils import tril_matrix_to_vec, vec_to_tril_matrix
def get_transforms(cache_size):
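    # One instance of each transform type, built with the requested cache_size; the inverse of
    # every transform is appended at the end of the list.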
transforms = [
AbsTransform(cache_size=cache_size),
ExpTransform(cache_size=cache_size),
PowerTransform(exponent=2,
cache_size=cache_size),
PowerTransform(exponent=torch.tensor(5.).normal_(),
cache_size=cache_size),
PowerTransform(exponent=torch.tensor(5.).normal_(),
cache_size=cache_size),
SigmoidTransform(cache_size=cache_size),
TanhTransform(cache_size=cache_size),
AffineTransform(0, 1, cache_size=cache_size),
AffineTransform(1, -2, cache_size=cache_size),
AffineTransform(torch.randn(5),
torch.randn(5),
cache_size=cache_size),
AffineTransform(torch.randn(4, 5),
torch.randn(4, 5),
cache_size=cache_size),
SoftmaxTransform(cache_size=cache_size),
SoftplusTransform(cache_size=cache_size),
StickBreakingTransform(cache_size=cache_size),
LowerCholeskyTransform(cache_size=cache_size),
CorrCholeskyTransform(cache_size=cache_size),
ComposeTransform([
AffineTransform(torch.randn(4, 5),
torch.randn(4, 5),
cache_size=cache_size),
]),
ComposeTransform([
AffineTransform(torch.randn(4, 5),
torch.randn(4, 5),
cache_size=cache_size),
ExpTransform(cache_size=cache_size),
]),
ComposeTransform([
AffineTransform(0, 1, cache_size=cache_size),
AffineTransform(torch.randn(4, 5),
torch.randn(4, 5),
cache_size=cache_size),
AffineTransform(1, -2, cache_size=cache_size),
AffineTransform(torch.randn(4, 5),
torch.randn(4, 5),
cache_size=cache_size),
]),
ReshapeTransform((4, 5), (2, 5, 2)),
IndependentTransform(
AffineTransform(torch.randn(5),
torch.randn(5),
cache_size=cache_size),
1),
CumulativeDistributionTransform(Normal(0, 1)),
]
transforms += [t.inv for t in transforms]
return transforms
def reshape_transform(transform, shape):
# Needed to squash batch dims for testing jacobian
if isinstance(transform, AffineTransform):
if isinstance(transform.loc, Number):
return transform
try:
return AffineTransform(transform.loc.expand(shape), transform.scale.expand(shape), cache_size=transform._cache_size)
except RuntimeError:
return AffineTransform(transform.loc.reshape(shape), transform.scale.reshape(shape), cache_size=transform._cache_size)
if isinstance(transform, ComposeTransform):
reshaped_parts = []
for p in transform.parts:
reshaped_parts.append(reshape_transform(p, shape))
return ComposeTransform(reshaped_parts, cache_size=transform._cache_size)
if isinstance(transform.inv, AffineTransform):
return reshape_transform(transform.inv, shape).inv
if isinstance(transform.inv, ComposeTransform):
return reshape_transform(transform.inv, shape).inv
return transform
# Generate pytest ids
def transform_id(x):
assert isinstance(x, Transform)
name = f'Inv({type(x._inv).__name__})' if isinstance(x, _InverseTransform) else f'{type(x).__name__}'
return f'{name}(cache_size={x._cache_size})'
def generate_data(transform):
torch.manual_seed(1)
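    # Unwrap IndependentTransform so data is generated for the base transform's domain.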
while isinstance(transform, IndependentTransform):
transform = transform.base_transform
if isinstance(transform, ReshapeTransform):
return torch.randn(transform.in_shape)
if isinstance(transform.inv, ReshapeTransform):
return torch.randn(transform.inv.out_shape)
domain = transform.domain
while (isinstance(domain, constraints.independent) and
domain is not constraints.real_vector):
domain = domain.base_constraint
codomain = transform.codomain
x = torch.empty(4, 5)
if domain is constraints.lower_cholesky or codomain is constraints.lower_cholesky:
x = torch.empty(6, 6)
x = x.normal_()
return x
elif domain is constraints.real:
return x.normal_()
elif domain is constraints.real_vector:
        # For corr_cholesky the last dim in the vector
        # must be of size dim * (dim - 1) // 2
x = torch.empty(3, 6)
x = x.normal_()
return x
elif domain is constraints.positive:
return x.normal_().exp()
elif domain is constraints.unit_interval:
return x.uniform_()
elif isinstance(domain, constraints.interval):
x = x.uniform_()
x = x.mul_(domain.upper_bound - domain.lower_bound).add_(domain.lower_bound)
return x
elif domain is constraints.simplex:
x = x.normal_().exp()
x /= x.sum(-1, True)
return x
elif domain is constraints.corr_cholesky:
x = torch.empty(4, 5, 5)
x = x.normal_().tril()
x /= x.norm(dim=-1, keepdim=True)
x.diagonal(dim1=-1).copy_(x.diagonal(dim1=-1).abs())
return x
raise ValueError('Unsupported domain: {}'.format(domain))
TRANSFORMS_CACHE_ACTIVE = get_transforms(cache_size=1)
TRANSFORMS_CACHE_INACTIVE = get_transforms(cache_size=0)
ALL_TRANSFORMS = TRANSFORMS_CACHE_ACTIVE + TRANSFORMS_CACHE_INACTIVE + [identity_transform]
@pytest.mark.parametrize('transform', ALL_TRANSFORMS, ids=transform_id)
def test_inv_inv(transform):
assert transform.inv.inv is transform
@pytest.mark.parametrize('x', TRANSFORMS_CACHE_INACTIVE, ids=transform_id)
@pytest.mark.parametrize('y', TRANSFORMS_CACHE_INACTIVE, ids=transform_id)
def test_equality(x, y):
if x is y:
assert x == y
else:
assert x != y
assert identity_transform == identity_transform.inv
@pytest.mark.parametrize('transform', ALL_TRANSFORMS, ids=transform_id)
def test_with_cache(transform):
if transform._cache_size == 0:
transform = transform.with_cache(1)
assert transform._cache_size == 1
x = generate_data(transform).requires_grad_()
try:
y = transform(x)
except NotImplementedError:
pytest.skip('Not implemented.')
y2 = transform(x)
assert y2 is y
@pytest.mark.parametrize('transform', ALL_TRANSFORMS, ids=transform_id)
@pytest.mark.parametrize('test_cached', [True, False])
def test_forward_inverse(transform, test_cached):
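    # Round-trip x -> y = t(x) -> x2 = t.inv(y) -> y2 = t(x2); bijective transforms must recover x,
    # other transforms only need the weaker pseudo-inverse property t(t.inv(y)) == y.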
x = generate_data(transform).requires_grad_()
try:
y = transform(x)
except NotImplementedError:
pytest.skip('Not implemented.')
assert y.shape == transform.forward_shape(x.shape)
if test_cached:
x2 = transform.inv(y) # should be implemented at least by caching
else:
try:
x2 = transform.inv(y.clone()) # bypass cache
except NotImplementedError:
pytest.skip('Not implemented.')
assert x2.shape == transform.inverse_shape(y.shape)
y2 = transform(x2)
if transform.bijective:
# verify function inverse
assert torch.allclose(x2, x, atol=1e-4, equal_nan=True), '\n'.join([
'{} t.inv(t(-)) error'.format(transform),
'x = {}'.format(x),
'y = t(x) = {}'.format(y),
'x2 = t.inv(y) = {}'.format(x2),
])
else:
# verify weaker function pseudo-inverse
assert torch.allclose(y2, y, atol=1e-4, equal_nan=True), '\n'.join([
'{} t(t.inv(t(-))) error'.format(transform),
'x = {}'.format(x),
'y = t(x) = {}'.format(y),
'x2 = t.inv(y) = {}'.format(x2),
'y2 = t(x2) = {}'.format(y2),
])
def test_compose_transform_shapes():
transform0 = ExpTransform()
transform1 = SoftmaxTransform()
transform2 = LowerCholeskyTransform()
assert transform0.event_dim == 0
assert transform1.event_dim == 1
assert transform2.event_dim == 2
assert ComposeTransform([transform0, transform1]).event_dim == 1
assert ComposeTransform([transform0, transform2]).event_dim == 2
assert ComposeTransform([transform1, transform2]).event_dim == 2
transform0 = ExpTransform()
transform1 = SoftmaxTransform()
transform2 = LowerCholeskyTransform()
base_dist0 = Normal(torch.zeros(4, 4), torch.ones(4, 4))
base_dist1 = Dirichlet(torch.ones(4, 4))
base_dist2 = Normal(torch.zeros(3, 4, 4), torch.ones(3, 4, 4))
@pytest.mark.parametrize('batch_shape, event_shape, dist', [
((4, 4), (), base_dist0),
((4,), (4,), base_dist1),
((4, 4), (), TransformedDistribution(base_dist0, [transform0])),
((4,), (4,), TransformedDistribution(base_dist0, [transform1])),
((4,), (4,), TransformedDistribution(base_dist0, [transform0, transform1])),
((), (4, 4), TransformedDistribution(base_dist0, [transform0, transform2])),
((4,), (4,), TransformedDistribution(base_dist0, [transform1, transform0])),
((), (4, 4), TransformedDistribution(base_dist0, [transform1, transform2])),
((), (4, 4), TransformedDistribution(base_dist0, [transform2, transform0])),
((), (4, 4), TransformedDistribution(base_dist0, [transform2, transform1])),
((4,), (4,), TransformedDistribution(base_dist1, [transform0])),
((4,), (4,), TransformedDistribution(base_dist1, [transform1])),
((), (4, 4), TransformedDistribution(base_dist1, [transform2])),
((4,), (4,), TransformedDistribution(base_dist1, [transform0, transform1])),
((), (4, 4), TransformedDistribution(base_dist1, [transform0, transform2])),
((4,), (4,), TransformedDistribution(base_dist1, [transform1, transform0])),
((), (4, 4), TransformedDistribution(base_dist1, [transform1, transform2])),
((), (4, 4), TransformedDistribution(base_dist1, [transform2, transform0])),
((), (4, 4), TransformedDistribution(base_dist1, [transform2, transform1])),
((3, 4, 4), (), base_dist2),
((3,), (4, 4), TransformedDistribution(base_dist2, [transform2])),
((3,), (4, 4), TransformedDistribution(base_dist2, [transform0, transform2])),
((3,), (4, 4), TransformedDistribution(base_dist2, [transform1, transform2])),
((3,), (4, 4), TransformedDistribution(base_dist2, [transform2, transform0])),
((3,), (4, 4), TransformedDistribution(base_dist2, [transform2, transform1])),
])
def test_transformed_distribution_shapes(batch_shape, event_shape, dist):
assert dist.batch_shape == batch_shape
assert dist.event_shape == event_shape
x = dist.rsample()
try:
dist.log_prob(x) # this should not crash
except NotImplementedError:
pytest.skip('Not implemented.')
@pytest.mark.parametrize('transform', TRANSFORMS_CACHE_INACTIVE, ids=transform_id)
def test_jit_fwd(transform):
x = generate_data(transform).requires_grad_()
def f(x):
return transform(x)
try:
traced_f = torch.jit.trace(f, (x,))
except NotImplementedError:
pytest.skip('Not implemented.')
# check on different inputs
x = generate_data(transform).requires_grad_()
assert torch.allclose(f(x), traced_f(x), atol=1e-5, equal_nan=True)
@pytest.mark.parametrize('transform', TRANSFORMS_CACHE_INACTIVE, ids=transform_id)
def test_jit_inv(transform):
y = generate_data(transform.inv).requires_grad_()
def f(y):
return transform.inv(y)
try:
traced_f = torch.jit.trace(f, (y,))
except NotImplementedError:
pytest.skip('Not implemented.')
# check on different inputs
y = generate_data(transform.inv).requires_grad_()
assert torch.allclose(f(y), traced_f(y), atol=1e-5, equal_nan=True)
@pytest.mark.parametrize('transform', TRANSFORMS_CACHE_INACTIVE, ids=transform_id)
def test_jit_jacobian(transform):
x = generate_data(transform).requires_grad_()
def f(x):
y = transform(x)
return transform.log_abs_det_jacobian(x, y)
try:
traced_f = torch.jit.trace(f, (x,))
except NotImplementedError:
pytest.skip('Not implemented.')
# check on different inputs
x = generate_data(transform).requires_grad_()
assert torch.allclose(f(x), traced_f(x), atol=1e-5, equal_nan=True)
@pytest.mark.parametrize('transform', ALL_TRANSFORMS, ids=transform_id)
def test_jacobian(transform):
x = generate_data(transform)
try:
y = transform(x)
actual = transform.log_abs_det_jacobian(x, y)
except NotImplementedError:
pytest.skip('Not implemented.')
# Test shape
target_shape = x.shape[:x.dim() - transform.domain.event_dim]
assert actual.shape == target_shape
# Expand if required
transform = reshape_transform(transform, x.shape)
ndims = len(x.shape)
event_dim = ndims - transform.domain.event_dim
x_ = x.view((-1,) + x.shape[event_dim:])
n = x_.shape[0]
# Reshape to squash batch dims to a single batch dim
transform = reshape_transform(transform, x_.shape)
# 1. Transforms with unit jacobian
if isinstance(transform, ReshapeTransform) or isinstance(transform.inv, ReshapeTransform):
        expected = x.new_zeros(x.shape[x.dim() - transform.domain.event_dim])
# 2. Transforms with 0 off-diagonal elements
elif transform.domain.event_dim == 0:
jac = jacobian(transform, x_)
# assert off-diagonal elements are zero
assert torch.allclose(jac, jac.diagonal().diag_embed())
expected = jac.diagonal().abs().log().reshape(x.shape)
# 3. Transforms with non-0 off-diagonal elements
else:
if isinstance(transform, CorrCholeskyTransform):
jac = jacobian(lambda x: tril_matrix_to_vec(transform(x), diag=-1), x_)
elif isinstance(transform.inv, CorrCholeskyTransform):
jac = jacobian(lambda x: transform(vec_to_tril_matrix(x, diag=-1)),
tril_matrix_to_vec(x_, diag=-1))
elif isinstance(transform, StickBreakingTransform):
jac = jacobian(lambda x: transform(x)[..., :-1], x_)
else:
jac = jacobian(transform, x_)
# Note that jacobian will have shape (batch_dims, y_event_dims, batch_dims, x_event_dims)
# However, batches are independent so this can be converted into a (batch_dims, event_dims, event_dims)
# after reshaping the event dims (see above) to give a batched square matrix whose determinant
# can be computed.
gather_idx_shape = list(jac.shape)
gather_idx_shape[-2] = 1
gather_idxs = torch.arange(n).reshape((n,) + (1,) * (len(jac.shape) - 1)).expand(gather_idx_shape)
jac = jac.gather(-2, gather_idxs).squeeze(-2)
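        # After the gather, jac[i] is the square jacobian of batch element i alone.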
out_ndims = jac.shape[-2]
jac = jac[..., :out_ndims] # Remove extra zero-valued dims (for inverse stick-breaking).
expected = torch.slogdet(jac).logabsdet
assert torch.allclose(actual, expected, atol=1e-5)
@pytest.mark.parametrize("event_dims",
[(0,), (1,), (2, 3), (0, 1, 2), (1, 2, 0), (2, 0, 1)],
ids=str)
def test_compose_affine(event_dims):
transforms = [AffineTransform(torch.zeros((1,) * e), 1, event_dim=e) for e in event_dims]
transform = ComposeTransform(transforms)
assert transform.codomain.event_dim == max(event_dims)
assert transform.domain.event_dim == max(event_dims)
base_dist = Normal(0, 1)
if transform.domain.event_dim:
base_dist = base_dist.expand((1,) * transform.domain.event_dim)
dist = TransformedDistribution(base_dist, transform.parts)
assert dist.support.event_dim == max(event_dims)
base_dist = Dirichlet(torch.ones(5))
if transform.domain.event_dim > 1:
base_dist = base_dist.expand((1,) * (transform.domain.event_dim - 1))
dist = TransformedDistribution(base_dist, transforms)
assert dist.support.event_dim == max(1, max(event_dims))
@pytest.mark.parametrize("batch_shape", [(), (6,), (5, 4)], ids=str)
def test_compose_reshape(batch_shape):
transforms = [ReshapeTransform((), ()),
ReshapeTransform((2,), (1, 2)),
ReshapeTransform((3, 1, 2), (6,)),
ReshapeTransform((6,), (2, 3))]
transform = ComposeTransform(transforms)
assert transform.codomain.event_dim == 2
assert transform.domain.event_dim == 2
data = torch.randn(batch_shape + (3, 2))
assert transform(data).shape == batch_shape + (2, 3)
dist = TransformedDistribution(Normal(data, 1), transforms)
assert dist.batch_shape == batch_shape
assert dist.event_shape == (2, 3)
assert dist.support.event_dim == 2
@pytest.mark.parametrize("sample_shape", [(), (7,)], ids=str)
@pytest.mark.parametrize("transform_dim", [0, 1, 2])
@pytest.mark.parametrize("base_batch_dim", [0, 1, 2])
@pytest.mark.parametrize("base_event_dim", [0, 1, 2])
@pytest.mark.parametrize("num_transforms", [0, 1, 2, 3])
def test_transformed_distribution(base_batch_dim, base_event_dim, transform_dim,
num_transforms, sample_shape):
shape = torch.Size([2, 3, 4, 5])
base_dist = Normal(0, 1)
base_dist = base_dist.expand(shape[4 - base_batch_dim - base_event_dim:])
if base_event_dim:
base_dist = Independent(base_dist, base_event_dim)
transforms = [AffineTransform(torch.zeros(shape[4 - transform_dim:]), 1),
ReshapeTransform((4, 5), (20,)),
ReshapeTransform((3, 20), (6, 10))]
transforms = transforms[:num_transforms]
transform = ComposeTransform(transforms)
# Check validation in .__init__().
if base_batch_dim + base_event_dim < transform.domain.event_dim:
with pytest.raises(ValueError):
TransformedDistribution(base_dist, transforms)
return
d = TransformedDistribution(base_dist, transforms)
# Check sampling is sufficiently expanded.
x = d.sample(sample_shape)
assert x.shape == sample_shape + d.batch_shape + d.event_shape
num_unique = len(set(x.reshape(-1).tolist()))
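    # Samples from a continuous base distribution should be (almost) all distinct; heavy duplication
    # would suggest values being reused across the expanded batch.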
assert num_unique >= 0.9 * x.numel()
# Check log_prob shape on full samples.
log_prob = d.log_prob(x)
assert log_prob.shape == sample_shape + d.batch_shape
# Check log_prob shape on partial samples.
y = x
while y.dim() > len(d.event_shape):
y = y[0]
log_prob = d.log_prob(y)
assert log_prob.shape == d.batch_shape
def test_save_load_transform():
# Evaluating `log_prob` will create a weakref `_inv` which cannot be pickled. Here, we check
# that `__getstate__` correctly handles the weakref, and that we can evaluate the density after.
dist = TransformedDistribution(Normal(0, 1), [AffineTransform(2, 3)])
x = torch.linspace(0, 1, 10)
log_prob = dist.log_prob(x)
stream = io.BytesIO()
torch.save(dist, stream)
stream.seek(0)
other = torch.load(stream)
assert torch.allclose(log_prob, other.log_prob(x))
if __name__ == '__main__':
pytest.main([__file__])
|
pytorch-master
|
test/distributions/test_transforms.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
from sys import version_info
from textwrap import dedent
from unittest import skipIf
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
class TestResources(PackageTestCase):
"""Tests for access APIs for packaged resources."""
def test_resource_reader(self):
"""Test compliance with the get_resource_reader importlib API."""
buffer = BytesIO()
with PackageExporter(buffer) as pe:
# Layout looks like:
# package
# ├── one/
# │ ├── a.txt
# │ ├── b.txt
# │ ├── c.txt
# │ └── three/
# │ ├── d.txt
# │ └── e.txt
# └── two/
# ├── f.txt
# └── g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
pe.save_text("one.three", "d.txt", "hello, d!")
pe.save_text("one.three", "e.txt", "hello, e!")
pe.save_text("two", "f.txt", "hello, f!")
pe.save_text("two", "g.txt", "hello, g!")
buffer.seek(0)
importer = PackageImporter(buffer)
reader_one = importer.get_resource_reader("one")
with self.assertRaises(FileNotFoundError):
reader_one.resource_path("a.txt")
self.assertTrue(reader_one.is_resource("a.txt"))
self.assertEqual(reader_one.open_resource("a.txt").getbuffer(), b"hello, a!")
self.assertFalse(reader_one.is_resource("three"))
reader_one_contents = list(reader_one.contents())
self.assertSequenceEqual(
reader_one_contents, ["a.txt", "b.txt", "c.txt", "three"]
)
reader_two = importer.get_resource_reader("two")
self.assertTrue(reader_two.is_resource("f.txt"))
self.assertEqual(reader_two.open_resource("f.txt").getbuffer(), b"hello, f!")
reader_two_contents = list(reader_two.contents())
self.assertSequenceEqual(reader_two_contents, ["f.txt", "g.txt"])
reader_one_three = importer.get_resource_reader("one.three")
self.assertTrue(reader_one_three.is_resource("d.txt"))
self.assertEqual(
reader_one_three.open_resource("d.txt").getbuffer(), b"hello, d!"
)
        reader_one_three_contents = list(reader_one_three.contents())
        self.assertSequenceEqual(reader_one_three_contents, ["d.txt", "e.txt"])
self.assertIsNone(importer.get_resource_reader("nonexistent_package"))
def test_package_resource_access(self):
"""Packaged modules should be able to use the importlib.resources API to access
resources saved in the package.
"""
mod_src = dedent(
"""\
import importlib.resources
import my_cool_resources
def secret_message():
return importlib.resources.read_text(my_cool_resources, 'sekrit.txt')
"""
)
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.save_source_string("foo.bar", mod_src)
pe.save_text("my_cool_resources", "sekrit.txt", "my sekrit plays")
buffer.seek(0)
importer = PackageImporter(buffer)
self.assertEqual(
importer.import_module("foo.bar").secret_message(), "my sekrit plays"
)
def test_importer_access(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.save_text("main", "main", "my string")
he.save_binary("main", "main_binary", "my string".encode("utf-8"))
src = dedent(
"""\
import importlib
import torch_package_importer as resources
t = resources.load_text('main', 'main')
b = resources.load_binary('main', 'main_binary')
"""
)
he.save_source_string("main", src, is_package=True)
buffer.seek(0)
hi = PackageImporter(buffer)
m = hi.import_module("main")
self.assertEqual(m.t, "my string")
self.assertEqual(m.b, "my string".encode("utf-8"))
def test_resource_access_by_path(self):
"""
        Tests that packaged code can use importlib.resources.path.
"""
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.save_binary("string_module", "my_string", "my string".encode("utf-8"))
src = dedent(
"""\
import importlib.resources
import string_module
with importlib.resources.path(string_module, 'my_string') as path:
with open(path, mode='r', encoding='utf-8') as f:
s = f.read()
"""
)
he.save_source_string("main", src, is_package=True)
buffer.seek(0)
hi = PackageImporter(buffer)
m = hi.import_module("main")
self.assertEqual(m.s, "my string")
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_resources.py
|
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
from textwrap import dedent
from unittest import skipIf
import torch
from torch.package import PackageExporter, PackageImporter, sys_importer
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests
try:
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = skipIf(not HAS_TORCHVISION, "no torchvision")
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
@skipIf(True, "Does not work with recent torchvision, see https://github.com/pytorch/pytorch/issues/81115")
@skipIfNoTorchVision
class ModelTest(PackageTestCase):
"""End-to-end tests packaging an entire model."""
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_resnet(self):
resnet = resnet18()
f1 = self.temp()
# create a package that will save it along with its code
with PackageExporter(f1) as e:
# put the pickled resnet in the package, by default
            # this will also save all the code files referenced by
# the objects in the pickle
e.intern("**")
e.save_pickle("model", "model.pkl", resnet)
# we can now load the saved model
i = PackageImporter(f1)
r2 = i.load_pickle("model", "model.pkl")
# test that it works
input = torch.rand(1, 3, 224, 224)
ref = resnet(input)
self.assertEqual(r2(input), ref)
# functions exist also to get at the private modules in each package
torchvision = i.import_module("torchvision")
f2 = BytesIO()
# if we are doing transfer learning we might want to re-save
# things that were loaded from a package.
# We need to tell the exporter about any modules that
# came from imported packages so that it can resolve
# class names like torchvision.models.resnet.ResNet
# to their source code.
with PackageExporter(f2, importer=(i, sys_importer)) as e:
# e.importers is a list of module importing functions
# that by default contains importlib.import_module.
            # It is searched in order until the first success, and
# that module is taken to be what torchvision.models.resnet
# should be in this code package. In the case of name collisions,
# such as trying to save a ResNet from two different packages,
# we take the first thing found in the path, so only ResNet objects from
# one importer will work. This avoids a bunch of name mangling in
# the source code. If you need to actually mix ResNet objects,
# we suggest reconstructing the model objects using code from a single package
# using functions like save_state_dict and load_state_dict to transfer state
# to the correct code objects.
e.intern("**")
e.save_pickle("model", "model.pkl", r2)
f2.seek(0)
i2 = PackageImporter(f2)
r3 = i2.load_pickle("model", "model.pkl")
self.assertEqual(r3(input), ref)
@skipIfNoTorchVision
def test_model_save(self):
# This example shows how you might package a model
# so that the creator of the model has flexibility about
# how they want to save it but the 'server' can always
# use the same API to load the package.
        # The convention is for each model to provide a
        # 'model' package with a 'load' function that actually
        # reads the model out of the archive.
        # How the load function is implemented is up to
        # the packager.
# get our normal torchvision resnet
resnet = resnet18()
f1 = BytesIO()
# Option 1: save by pickling the whole model
# + single-line, similar to torch.jit.save
# - more difficult to edit the code after the model is created
with PackageExporter(f1) as e:
e.intern("**")
e.save_pickle("model", "pickled", resnet)
# note that this source is the same for all models in this approach
# so it can be made part of an API that just takes the model and
# packages it with this source.
src = dedent(
"""\
import importlib
import torch_package_importer as resources
# server knows to call model.load() to get the model,
            # maybe in the future it passes options as arguments by convention
def load():
return resources.load_pickle('model', 'pickled')
"""
)
e.save_source_string("model", src, is_package=True)
f2 = BytesIO()
# Option 2: save with state dict
# - more code to write to save/load the model
        # + but this code can be edited later to adapt the model
with PackageExporter(f2) as e:
e.intern("**")
e.save_pickle("model", "state_dict", resnet.state_dict())
src = dedent(
"""\
import importlib
import torch_package_importer as resources
from torchvision.models.resnet import resnet18
def load():
# if you want, you can later edit how resnet is constructed here
# to edit the model in the package, while still loading the original
# state dict weights
r = resnet18()
state_dict = resources.load_pickle('model', 'state_dict')
r.load_state_dict(state_dict)
return r
"""
)
e.save_source_string("model", src, is_package=True)
# regardless of how we chose to package, we can now use the model in a server in the same way
input = torch.rand(1, 3, 224, 224)
results = []
for m in [f1, f2]:
m.seek(0)
importer = PackageImporter(m)
the_model = importer.import_module("model").load()
r = the_model(input)
results.append(r)
self.assertEqual(*results)
@skipIfNoTorchVision
def test_script_resnet(self):
resnet = resnet18()
f1 = BytesIO()
# Option 1: save by pickling the whole model
# + single-line, similar to torch.jit.save
# - more difficult to edit the code after the model is created
with PackageExporter(f1) as e:
e.intern("**")
e.save_pickle("model", "pickled", resnet)
f1.seek(0)
i = PackageImporter(f1)
loaded = i.load_pickle("model", "pickled")
# Model should script successfully.
scripted = torch.jit.script(loaded)
# Scripted model should save and load successfully.
f2 = BytesIO()
torch.jit.save(scripted, f2)
f2.seek(0)
loaded = torch.jit.load(f2)
input = torch.rand(1, 3, 224, 224)
self.assertEqual(loaded(input), resnet(input))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_model.py
|
# Owner(s): ["oncall: package/deploy"]
import importlib
from io import BytesIO
from sys import version_info
from textwrap import dedent
from unittest import skipIf
import torch.nn
from torch.package import EmptyMatchError, Importer, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestDependencyAPI(PackageTestCase):
"""Dependency management API tests.
- mock()
- extern()
- deny()
"""
def test_extern(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.extern(["package_a.subpackage", "module_a"])
he.save_source_string("foo", "import package_a.subpackage; import module_a")
buffer.seek(0)
hi = PackageImporter(buffer)
import module_a
import package_a.subpackage
module_a_im = hi.import_module("module_a")
hi.import_module("package_a.subpackage")
package_a_im = hi.import_module("package_a")
self.assertIs(module_a, module_a_im)
self.assertIsNot(package_a, package_a_im)
self.assertIs(package_a.subpackage, package_a_im.subpackage)
def test_extern_glob(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.extern(["package_a.*", "module_*"])
he.save_module("package_a")
he.save_source_string(
"test_module",
dedent(
"""\
import package_a.subpackage
import module_a
"""
),
)
buffer.seek(0)
hi = PackageImporter(buffer)
import module_a
import package_a.subpackage
module_a_im = hi.import_module("module_a")
hi.import_module("package_a.subpackage")
package_a_im = hi.import_module("package_a")
self.assertIs(module_a, module_a_im)
self.assertIsNot(package_a, package_a_im)
self.assertIs(package_a.subpackage, package_a_im.subpackage)
def test_extern_glob_allow_empty(self):
"""
        Test that an error is thrown when an extern glob is specified with allow_empty=False
and no matching module is required during packaging.
"""
import package_a.subpackage # noqa: F401
buffer = BytesIO()
with self.assertRaisesRegex(EmptyMatchError, r"did not match any modules"):
with PackageExporter(buffer) as exporter:
exporter.extern(include=["package_b.*"], allow_empty=False)
exporter.save_module("package_a.subpackage")
def test_deny(self):
"""
Test marking packages as "deny" during export.
"""
buffer = BytesIO()
with self.assertRaisesRegex(PackagingError, "denied"):
with PackageExporter(buffer) as exporter:
exporter.deny(["package_a.subpackage", "module_a"])
exporter.save_source_string("foo", "import package_a.subpackage")
def test_deny_glob(self):
"""
Test marking packages as "deny" using globs instead of package names.
"""
buffer = BytesIO()
with self.assertRaises(PackagingError):
with PackageExporter(buffer) as exporter:
exporter.deny(["package_a.*", "module_*"])
exporter.save_source_string(
"test_module",
dedent(
"""\
import package_a.subpackage
import module_a
"""
),
)
    @skipIf(version_info < (3, 7), "mock uses __getattr__, a 3.7 feature")
def test_mock(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.mock(["package_a.subpackage", "module_a"])
            # Import something that depends on package_a.subpackage
he.save_source_string("foo", "import package_a.subpackage")
buffer.seek(0)
hi = PackageImporter(buffer)
import package_a.subpackage
_ = package_a.subpackage
import module_a
_ = module_a
m = hi.import_module("package_a.subpackage")
r = m.result
with self.assertRaisesRegex(NotImplementedError, "was mocked out"):
r()
    @skipIf(version_info < (3, 7), "mock uses __getattr__, a 3.7 feature")
def test_mock_glob(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.mock(["package_a.*", "module*"])
he.save_module("package_a")
he.save_source_string(
"test_module",
dedent(
"""\
import package_a.subpackage
import module_a
"""
),
)
buffer.seek(0)
hi = PackageImporter(buffer)
import package_a.subpackage
_ = package_a.subpackage
import module_a
_ = module_a
m = hi.import_module("package_a.subpackage")
r = m.result
with self.assertRaisesRegex(NotImplementedError, "was mocked out"):
r()
def test_mock_glob_allow_empty(self):
"""
        Test that an error is thrown when a mock glob is specified with allow_empty=False
and no matching module is required during packaging.
"""
import package_a.subpackage # noqa: F401
buffer = BytesIO()
with self.assertRaisesRegex(EmptyMatchError, r"did not match any modules"):
with PackageExporter(buffer) as exporter:
exporter.mock(include=["package_b.*"], allow_empty=False)
exporter.save_module("package_a.subpackage")
    @skipIf(version_info < (3, 7), "mock uses __getattr__, a 3.7 feature")
def test_pickle_mocked(self):
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
buffer = BytesIO()
with self.assertRaises(PackagingError):
with PackageExporter(buffer) as he:
he.mock(include="package_a.subpackage")
he.intern("**")
he.save_pickle("obj", "obj.pkl", obj2)
    @skipIf(version_info < (3, 7), "mock uses __getattr__, a 3.7 feature")
def test_pickle_mocked_all(self):
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.intern(include="package_a.**")
he.mock("**")
he.save_pickle("obj", "obj.pkl", obj2)
def test_allow_empty_with_error(self):
"""If an error occurs during packaging, it should not be shadowed by the allow_empty error."""
buffer = BytesIO()
with self.assertRaises(ModuleNotFoundError):
with PackageExporter(buffer) as pe:
# Even though we did not extern a module that matches this
# pattern, we want to show the save_module error, not the allow_empty error.
pe.extern("foo", allow_empty=False)
pe.save_module("aodoifjodisfj") # will error
# we never get here, so technically the allow_empty check
# should raise an error. However, the error above is more
                # informative about what's actually going wrong with packaging.
pe.save_source_string("bar", "import foo\n")
def test_implicit_intern(self):
"""The save_module APIs should implicitly intern the module being saved."""
import package_a # noqa: F401
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.save_module("package_a")
def test_intern_error(self):
"""Failure to handle all dependencies should lead to an error."""
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
buffer = BytesIO()
with self.assertRaises(PackagingError) as e:
with PackageExporter(buffer) as he:
he.save_pickle("obj", "obj.pkl", obj2)
self.assertEqual(
str(e.exception),
dedent(
"""
* Module did not match against any action pattern. Extern, mock, or intern it.
package_a
package_a.subpackage
"""
),
)
# Interning all dependencies should work
with PackageExporter(buffer) as he:
he.intern(["package_a", "package_a.subpackage"])
he.save_pickle("obj", "obj.pkl", obj2)
@skipIf(IS_WINDOWS, "extension modules have a different file extension on windows")
    def test_broken_dependency(self):
        """An unpackageable dependency should raise a PackagingError."""
def create_module(name):
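            # Fabricate a module whose __file__ ends in ".so" so it looks like a C extension module.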
spec = importlib.machinery.ModuleSpec(name, self, is_package=False) # type: ignore[arg-type]
module = importlib.util.module_from_spec(spec)
ns = module.__dict__
ns["__spec__"] = spec
ns["__loader__"] = self
ns["__file__"] = f"{name}.so"
ns["__cached__"] = None
return module
class BrokenImporter(Importer):
def __init__(self):
self.modules = {
"foo": create_module("foo"),
"bar": create_module("bar"),
}
def import_module(self, module_name):
return self.modules[module_name]
buffer = BytesIO()
with self.assertRaises(PackagingError) as e:
with PackageExporter(buffer, importer=BrokenImporter()) as exporter:
exporter.intern(["foo", "bar"])
exporter.save_source_string("my_module", "import foo; import bar")
self.assertEqual(
str(e.exception),
dedent(
"""
* Module is a C extension module. torch.package supports Python modules only.
foo
bar
"""
),
)
def test_invalid_import(self):
"""An incorrectly-formed import should raise a PackagingError."""
buffer = BytesIO()
with self.assertRaises(PackagingError) as e:
with PackageExporter(buffer) as exporter:
# This import will fail to load.
exporter.save_source_string("foo", "from ........ import lol")
self.assertEqual(
str(e.exception),
dedent(
"""
* Dependency resolution failed.
foo
Context: attempted relative import beyond top-level package
"""
),
)
@skipIf(version_info < (3, 7), "mock uses __getattr__, a 3.7 feature")
def test_repackage_mocked_module(self):
"""Re-packaging a package that contains a mocked module should work correctly."""
buffer = BytesIO()
with PackageExporter(buffer) as exporter:
exporter.mock("package_a")
exporter.save_source_string("foo", "import package_a")
buffer.seek(0)
importer = PackageImporter(buffer)
foo = importer.import_module("foo")
# "package_a" should be mocked out.
with self.assertRaises(NotImplementedError):
foo.package_a.get_something()
# Re-package the model, but intern the previously-mocked module and mock
# everything else.
buffer2 = BytesIO()
with PackageExporter(buffer2, importer=importer) as exporter:
exporter.intern("package_a")
exporter.mock("**")
exporter.save_source_string("foo", "import package_a")
buffer2.seek(0)
importer2 = PackageImporter(buffer2)
foo2 = importer2.import_module("foo")
# "package_a" should still be mocked out.
with self.assertRaises(NotImplementedError):
foo2.package_a.get_something()
def test_externing_c_extension(self):
"""Externing c extensions modules should allow us to still access them especially those found in torch._C."""
buffer = BytesIO()
# The C extension module in question is F.gelu which comes from torch._C._nn
model = torch.nn.TransformerEncoderLayer(
d_model=64,
nhead=2,
dim_feedforward=64,
dropout=1.0,
batch_first=True,
activation="gelu",
norm_first=True,
)
with PackageExporter(buffer) as e:
e.extern("torch.**")
e.intern("**")
e.save_pickle("model", "model.pkl", model)
buffer.seek(0)
imp = PackageImporter(buffer)
imp.load_pickle("model", "model.pkl")
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_dependency_api.py
|
result = "module_a"
|
pytorch-master
|
test/package/module_a.py
|
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
from torch.package import PackageExporter, PackageImporter
from torch.package._mangling import (
demangle,
get_mangle_prefix,
is_mangled,
PackageMangler,
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestMangling(PackageTestCase):
def test_unique_manglers(self):
"""
Each mangler instance should generate a unique mangled name for a given input.
"""
a = PackageMangler()
b = PackageMangler()
self.assertNotEqual(a.mangle("foo.bar"), b.mangle("foo.bar"))
def test_mangler_is_consistent(self):
"""
Mangling the same name twice should produce the same result.
"""
a = PackageMangler()
self.assertEqual(a.mangle("abc.def"), a.mangle("abc.def"))
def test_roundtrip_mangling(self):
a = PackageMangler()
self.assertEqual("foo", demangle(a.mangle("foo")))
def test_is_mangled(self):
a = PackageMangler()
b = PackageMangler()
self.assertTrue(is_mangled(a.mangle("foo.bar")))
self.assertTrue(is_mangled(b.mangle("foo.bar")))
self.assertFalse(is_mangled("foo.bar"))
self.assertFalse(is_mangled(demangle(a.mangle("foo.bar"))))
def test_demangler_multiple_manglers(self):
"""
demangle() should be able to demangle names generated by any PackageMangler.
"""
a = PackageMangler()
b = PackageMangler()
self.assertEqual("foo.bar", demangle(a.mangle("foo.bar")))
self.assertEqual("bar.foo", demangle(b.mangle("bar.foo")))
def test_mangle_empty_errors(self):
a = PackageMangler()
with self.assertRaises(AssertionError):
a.mangle("")
def test_demangle_base(self):
"""
Demangling a mangle parent directly should currently return an empty string.
"""
a = PackageMangler()
mangled = a.mangle("foo")
mangle_parent = mangled.partition(".")[0]
self.assertEqual("", demangle(mangle_parent))
def test_mangle_prefix(self):
a = PackageMangler()
mangled = a.mangle("foo.bar")
mangle_prefix = get_mangle_prefix(mangled)
self.assertEqual(mangle_prefix + "." + "foo.bar", mangled)
def test_unique_module_names(self):
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
f1 = BytesIO()
with PackageExporter(f1) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj2)
f1.seek(0)
importer1 = PackageImporter(f1)
loaded1 = importer1.load_pickle("obj", "obj.pkl")
f1.seek(0)
importer2 = PackageImporter(f1)
loaded2 = importer2.load_pickle("obj", "obj.pkl")
# Modules from loaded packages should not shadow the names of modules.
# See mangling.md for more info.
self.assertNotEqual(type(obj2).__module__, type(loaded1).__module__)
self.assertNotEqual(type(loaded1).__module__, type(loaded2).__module__)
def test_package_mangler(self):
a = PackageMangler()
b = PackageMangler()
a_mangled = a.mangle("foo.bar")
# Since `a` mangled this string, it should demangle properly.
self.assertEqual(a.demangle(a_mangled), "foo.bar")
# Since `b` did not mangle this string, demangling should leave it alone.
self.assertEqual(b.demangle(a_mangled), a_mangled)
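# Illustrative sketch (not part of the original tests): the PackageMangler invariants the
# tests above check, in one place. The exact prefix string is an implementation detail,
# so only the roundtrip and prefix relationships are asserted here.
def _example_mangling_invariants():
    from torch.package._mangling import (
        PackageMangler,
        demangle,
        get_mangle_prefix,
        is_mangled,
    )

    mangler = PackageMangler()
    mangled = mangler.mangle("foo.bar")
    assert is_mangled(mangled)                                  # carries a mangle prefix
    assert demangle(mangled) == "foo.bar"                       # demangling recovers the name
    assert mangled == get_mangle_prefix(mangled) + ".foo.bar"   # prefix + "." + original name
    return mangled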
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_mangling.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: package/deploy"]
import inspect
import platform
from io import BytesIO
from pathlib import Path
from textwrap import dedent
from unittest import skipIf
from torch.package import is_from_package, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests, skipIfTorchDynamo
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestMisc(PackageTestCase):
"""Tests for one-off or random functionality. Try not to add to this!"""
def test_file_structure(self):
"""
Tests the package's Directory structure representation of a zip file. Ensures
that the returned Directory prints what is expected and filters
include/exclude patterns correctly.
"""
buffer = BytesIO()
export_plain = dedent(
"""\
├── .data
│ ├── extern_modules
│ ├── python_version
│ └── version
├── main
│ └── main
├── obj
│ └── obj.pkl
├── package_a
│ ├── __init__.py
│ └── subpackage.py
└── module_a.py
"""
)
export_include = dedent(
"""\
├── obj
│ └── obj.pkl
└── package_a
└── subpackage.py
"""
)
import_exclude = dedent(
"""\
├── .data
│ ├── extern_modules
│ ├── python_version
│ └── version
├── main
│ └── main
├── obj
│ └── obj.pkl
├── package_a
│ ├── __init__.py
│ └── subpackage.py
└── module_a.py
"""
)
with PackageExporter(buffer) as he:
import module_a
import package_a
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
he.intern("**")
he.save_module(module_a.__name__)
he.save_module(package_a.__name__)
he.save_pickle("obj", "obj.pkl", obj)
he.save_text("main", "main", "my string")
buffer.seek(0)
hi = PackageImporter(buffer)
file_structure = hi.file_structure()
# remove the first line from the comparison because Windows/iOS/Unix treat the buffer differently
self.assertEqual(
dedent("\n".join(str(file_structure).split("\n")[1:])),
export_plain,
)
file_structure = hi.file_structure(include=["**/subpackage.py", "**/*.pkl"])
self.assertEqual(
dedent("\n".join(str(file_structure).split("\n")[1:])),
export_include,
)
file_structure = hi.file_structure(exclude="**/*.storage")
self.assertEqual(
dedent("\n".join(str(file_structure).split("\n")[1:])),
import_exclude,
)
def test_python_version(self):
"""
Tests that the current python version is stored in the package and is available
via PackageImporter's python_version() method.
"""
buffer = BytesIO()
with PackageExporter(buffer) as he:
from package_a.test_module import SimpleTest
he.intern("**")
obj = SimpleTest()
he.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
hi = PackageImporter(buffer)
self.assertEqual(hi.python_version(), platform.python_version())
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_python_version_from_package(self):
"""Tests loading a package with a python version embdded"""
importer1 = PackageImporter(
f"{Path(__file__).parent}/package_e/test_nn_module.pt"
)
self.assertEqual(importer1.python_version(), "3.9.7")
def test_file_structure_has_file(self):
"""
Test Directory's has_file() method.
"""
buffer = BytesIO()
with PackageExporter(buffer) as he:
import package_a.subpackage
he.intern("**")
obj = package_a.subpackage.PackageASubpackageObject()
he.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
importer = PackageImporter(buffer)
file_structure = importer.file_structure()
self.assertTrue(file_structure.has_file("package_a/subpackage.py"))
self.assertFalse(file_structure.has_file("package_a/subpackage"))
def test_exporter_content_lists(self):
"""
Test content list API for PackageExporter's contained modules.
"""
with PackageExporter(BytesIO()) as he:
import package_b
he.extern("package_b.subpackage_1")
he.mock("package_b.subpackage_2")
he.intern("**")
he.save_pickle("obj", "obj.pkl", package_b.PackageBObject(["a"]))
self.assertEqual(he.externed_modules(), ["package_b.subpackage_1"])
self.assertEqual(he.mocked_modules(), ["package_b.subpackage_2"])
self.assertEqual(
he.interned_modules(),
["package_b", "package_b.subpackage_0.subsubpackage_0"],
)
self.assertEqual(he.get_rdeps("package_b.subpackage_2"), ["package_b"])
with self.assertRaises(PackagingError) as e:
with PackageExporter(BytesIO()) as he:
import package_b
he.deny("package_b")
he.save_pickle("obj", "obj.pkl", package_b.PackageBObject(["a"]))
self.assertEqual(he.denied_modules(), ["package_b"])
def test_is_from_package(self):
"""is_from_package should work for objects and modules"""
import package_a.subpackage
buffer = BytesIO()
obj = package_a.subpackage.PackageASubpackageObject()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
pi = PackageImporter(buffer)
mod = pi.import_module("package_a.subpackage")
loaded_obj = pi.load_pickle("obj", "obj.pkl")
self.assertFalse(is_from_package(package_a.subpackage))
self.assertTrue(is_from_package(mod))
self.assertFalse(is_from_package(obj))
self.assertTrue(is_from_package(loaded_obj))
def test_inspect_class(self):
"""Should be able to retrieve source for a packaged class."""
import package_a.subpackage
buffer = BytesIO()
obj = package_a.subpackage.PackageASubpackageObject()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
pi = PackageImporter(buffer)
packaged_class = pi.import_module(
"package_a.subpackage"
).PackageASubpackageObject
regular_class = package_a.subpackage.PackageASubpackageObject
packaged_src = inspect.getsourcelines(packaged_class)
regular_src = inspect.getsourcelines(regular_class)
self.assertEqual(packaged_src, regular_src)
def test_dunder_package_present(self):
"""
The attribute '__torch_package__' should be populated on imported modules.
"""
import package_a.subpackage
buffer = BytesIO()
obj = package_a.subpackage.PackageASubpackageObject()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
pi = PackageImporter(buffer)
mod = pi.import_module("package_a.subpackage")
self.assertTrue(hasattr(mod, "__torch_package__"))
def test_dunder_package_works_from_package(self):
"""
The attribute '__torch_package__' should be accessible from within
the module itself, so that packaged code can detect whether it's
being used in a packaged context or not.
"""
import package_a.use_dunder_package as mod
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_module(mod.__name__)
buffer.seek(0)
pi = PackageImporter(buffer)
imported_mod = pi.import_module(mod.__name__)
self.assertTrue(imported_mod.is_from_package())
self.assertFalse(mod.is_from_package())
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_std_lib_sys_hackery_checks(self):
"""
The standard library performs sys.module assignment hackery which
causes modules who do this hackery to fail on import. See
https://github.com/pytorch/pytorch/issues/57490 for more information.
"""
import package_a.std_sys_module_hacks
buffer = BytesIO()
mod = package_a.std_sys_module_hacks.Module()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", mod)
buffer.seek(0)
pi = PackageImporter(buffer)
mod = pi.load_pickle("obj", "obj.pkl")
mod()
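# Illustrative sketch (not part of the original tests): filtering a package's Directory
# view with include/exclude globs, as exercised by test_file_structure above. Assumes
# package_a.subpackage is importable, as elsewhere in these tests; the exclude pattern
# ".data/**" is an assumed example glob.
def _example_file_structure_filters():
    from io import BytesIO

    import package_a.subpackage
    from torch.package import PackageExporter, PackageImporter

    buffer = BytesIO()
    with PackageExporter(buffer) as exporter:
        exporter.intern("**")
        exporter.save_pickle(
            "obj", "obj.pkl", package_a.subpackage.PackageASubpackageObject()
        )
    buffer.seek(0)
    importer = PackageImporter(buffer)
    everything = importer.file_structure()                      # full archive contents
    only_pickles = importer.file_structure(include="**/*.pkl")  # just the pickled objects
    no_metadata = importer.file_structure(exclude=".data/**")   # drop the metadata folder
    return everything.has_file("obj/obj.pkl"), str(only_pickles), str(no_metadata)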
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_misc.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: package/deploy"]
import os
import zipfile
from sys import version_info
from tempfile import TemporaryDirectory
from textwrap import dedent
from unittest import skipIf
import torch
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
)
try:
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = skipIf(not HAS_TORCHVISION, "no torchvision")
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
from pathlib import Path
packaging_directory = Path(__file__).parent
@skipIf(
IS_FBCODE or IS_SANDCASTLE or IS_WINDOWS,
"Tests that use temporary files are disabled in fbcode",
)
class DirectoryReaderTest(PackageTestCase):
"""Tests use of DirectoryReader as accessor for opened packages."""
@skipIfNoTorchVision
@skipIf(True, "Does not work with latest TorchVision, see https://github.com/pytorch/pytorch/issues/81115")
def test_loading_pickle(self):
"""
Test basic saving and loading of modules and pickles from a DirectoryReader.
"""
resnet = resnet18()
filename = self.temp()
with PackageExporter(filename) as e:
e.intern("**")
e.save_pickle("model", "model.pkl", resnet)
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = importer.load_pickle("model", "model.pkl")
input = torch.rand(1, 3, 224, 224)
self.assertEqual(dir_mod(input), resnet(input))
def test_loading_module(self):
"""
Test basic saving and loading of a package from a DirectoryReader.
"""
import package_a
filename = self.temp()
with PackageExporter(filename) as e:
e.save_module("package_a")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = dir_importer.import_module("package_a")
self.assertEqual(dir_mod.result, package_a.result)
def test_loading_has_record(self):
"""
Test DirectoryReader's has_record().
"""
import package_a # noqa: F401
filename = self.temp()
with PackageExporter(filename) as e:
e.save_module("package_a")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
self.assertTrue(dir_importer.zip_reader.has_record("package_a/__init__.py"))
self.assertFalse(dir_importer.zip_reader.has_record("package_a"))
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_resource_reader(self):
"""Tests DirectoryReader as the base for get_resource_reader."""
filename = self.temp()
with PackageExporter(filename) as pe:
# Layout looks like:
# package
# ├── one/
# │ ├── a.txt
# │ ├── b.txt
# │ ├── c.txt
# │ └── three/
# │ ├── d.txt
# │ └── e.txt
# └── two/
# ├── f.txt
# └── g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
pe.save_text("one.three", "d.txt", "hello, d!")
pe.save_text("one.three", "e.txt", "hello, e!")
pe.save_text("two", "f.txt", "hello, f!")
pe.save_text("two", "g.txt", "hello, g!")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
importer = PackageImporter(Path(temp_dir) / Path(filename).name)
reader_one = importer.get_resource_reader("one")
# Different behavior from still zipped archives
resource_path = os.path.join(
Path(temp_dir), Path(filename).name, "one", "a.txt"
)
self.assertEqual(reader_one.resource_path("a.txt"), resource_path)
self.assertTrue(reader_one.is_resource("a.txt"))
self.assertEqual(
reader_one.open_resource("a.txt").getbuffer(), b"hello, a!"
)
self.assertFalse(reader_one.is_resource("three"))
reader_one_contents = list(reader_one.contents())
reader_one_contents.sort()
self.assertSequenceEqual(
reader_one_contents, ["a.txt", "b.txt", "c.txt", "three"]
)
reader_two = importer.get_resource_reader("two")
self.assertTrue(reader_two.is_resource("f.txt"))
self.assertEqual(
reader_two.open_resource("f.txt").getbuffer(), b"hello, f!"
)
reader_two_contents = list(reader_two.contents())
reader_two_contents.sort()
self.assertSequenceEqual(reader_two_contents, ["f.txt", "g.txt"])
reader_one_three = importer.get_resource_reader("one.three")
self.assertTrue(reader_one_three.is_resource("d.txt"))
self.assertEqual(
reader_one_three.open_resource("d.txt").getbuffer(), b"hello, d!"
)
reader_one_three_contents = list(reader_one_three.contents())
reader_one_three_contents.sort()
self.assertSequenceEqual(reader_one_three_contents, ["d.txt", "e.txt"])
self.assertIsNone(importer.get_resource_reader("nonexistent_package"))
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_package_resource_access(self):
"""Packaged modules should be able to use the importlib.resources API to access
resources saved in the package.
"""
mod_src = dedent(
"""\
import importlib.resources
import my_cool_resources
def secret_message():
return importlib.resources.read_text(my_cool_resources, 'sekrit.txt')
"""
)
filename = self.temp()
with PackageExporter(filename) as pe:
pe.save_source_string("foo.bar", mod_src)
pe.save_text("my_cool_resources", "sekrit.txt", "my sekrit plays")
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
self.assertEqual(
dir_importer.import_module("foo.bar").secret_message(),
"my sekrit plays",
)
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_importer_access(self):
filename = self.temp()
with PackageExporter(filename) as he:
he.save_text("main", "main", "my string")
he.save_binary("main", "main_binary", "my string".encode("utf-8"))
src = dedent(
"""\
import importlib
import torch_package_importer as resources
t = resources.load_text('main', 'main')
b = resources.load_binary('main', 'main_binary')
"""
)
he.save_source_string("main", src, is_package=True)
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
m = dir_importer.import_module("main")
self.assertEqual(m.t, "my string")
self.assertEqual(m.b, "my string".encode("utf-8"))
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
def test_resource_access_by_path(self):
"""
Tests that packaged code can use importlib.resources.path.
"""
filename = self.temp()
with PackageExporter(filename) as e:
e.save_binary("string_module", "my_string", "my string".encode("utf-8"))
src = dedent(
"""\
import importlib.resources
import string_module
with importlib.resources.path(string_module, 'my_string') as path:
with open(path, mode='r', encoding='utf-8') as f:
s = f.read()
"""
)
e.save_source_string("main", src, is_package=True)
zip_file = zipfile.ZipFile(filename, "r")
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
m = dir_importer.import_module("main")
self.assertEqual(m.s, "my string")
def test_scriptobject_failure_message(self):
"""
Test basic saving and loading of a ScriptModule in a directory.
Currently not supported.
"""
from package_a.test_module import ModWithTensor
scripted_mod = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
filename = self.temp()
with PackageExporter(filename) as e:
e.save_pickle("res", "mod.pkl", scripted_mod)
zip_file = zipfile.ZipFile(filename, "r")
with self.assertRaisesRegex(
RuntimeError,
"Loading ScriptObjects from a PackageImporter created from a "
"directory is not supported. Use a package archive file instead.",
):
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = dir_importer.load_pickle("res", "mod.pkl")
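# Illustrative sketch (not part of the original tests): the DirectoryReader workflow these
# tests exercise, in one place — write a package archive to disk, extract the zip, and
# point PackageImporter at the extracted directory. `archive_path` is a hypothetical
# writable file path; package_a is assumed importable, as elsewhere in these tests.
def _example_directory_reader(archive_path):
    import zipfile
    from pathlib import Path
    from tempfile import TemporaryDirectory

    from torch.package import PackageExporter, PackageImporter

    with PackageExporter(archive_path) as exporter:
        exporter.save_module("package_a")
    with TemporaryDirectory() as temp_dir:
        with zipfile.ZipFile(archive_path, "r") as zip_file:
            zip_file.extractall(path=temp_dir)
        # PackageImporter also accepts the directory produced by extracting the archive.
        dir_importer = PackageImporter(Path(temp_dir) / Path(archive_path).name)
        return dir_importer.import_module("package_a").result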
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_directory_reader.py
|
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
import torch
from torch.package import (
Importer,
OrderedImporter,
PackageExporter,
PackageImporter,
sys_importer,
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestImporter(PackageTestCase):
"""Tests for Importer and derived classes."""
def test_sys_importer(self):
import package_a
import package_a.subpackage
self.assertIs(sys_importer.import_module("package_a"), package_a)
self.assertIs(
sys_importer.import_module("package_a.subpackage"), package_a.subpackage
)
def test_sys_importer_roundtrip(self):
import package_a
import package_a.subpackage
importer = sys_importer
type_ = package_a.subpackage.PackageASubpackageObject
module_name, type_name = importer.get_name(type_)
module = importer.import_module(module_name)
self.assertIs(getattr(module, type_name), type_)
def test_single_ordered_importer(self):
import module_a # noqa: F401
import package_a
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.save_module(package_a.__name__)
buffer.seek(0)
importer = PackageImporter(buffer)
# Construct an importer-only environment.
ordered_importer = OrderedImporter(importer)
# The module returned by this environment should be the same one that's
# in the importer.
self.assertIs(
ordered_importer.import_module("package_a"),
importer.import_module("package_a"),
)
# It should not be the one available in the outer Python environment.
self.assertIsNot(ordered_importer.import_module("package_a"), package_a)
# We didn't package this module, so it should not be available.
with self.assertRaises(ModuleNotFoundError):
ordered_importer.import_module("module_a")
def test_ordered_importer_basic(self):
import package_a
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.save_module(package_a.__name__)
buffer.seek(0)
importer = PackageImporter(buffer)
ordered_importer_sys_first = OrderedImporter(sys_importer, importer)
self.assertIs(ordered_importer_sys_first.import_module("package_a"), package_a)
ordered_importer_package_first = OrderedImporter(importer, sys_importer)
self.assertIs(
ordered_importer_package_first.import_module("package_a"),
importer.import_module("package_a"),
)
def test_ordered_importer_whichmodule(self):
"""OrderedImporter's implementation of whichmodule should try each
underlying importer's whichmodule in order.
"""
class DummyImporter(Importer):
def __init__(self, whichmodule_return):
self._whichmodule_return = whichmodule_return
def import_module(self, module_name):
raise NotImplementedError()
def whichmodule(self, obj, name):
return self._whichmodule_return
class DummyClass:
pass
dummy_importer_foo = DummyImporter("foo")
dummy_importer_bar = DummyImporter("bar")
dummy_importer_not_found = DummyImporter(
"__main__"
) # __main__ is used as a proxy for "not found" by CPython
foo_then_bar = OrderedImporter(dummy_importer_foo, dummy_importer_bar)
self.assertEqual(foo_then_bar.whichmodule(DummyClass(), ""), "foo")
bar_then_foo = OrderedImporter(dummy_importer_bar, dummy_importer_foo)
self.assertEqual(bar_then_foo.whichmodule(DummyClass(), ""), "bar")
notfound_then_foo = OrderedImporter(
dummy_importer_not_found, dummy_importer_foo
)
self.assertEqual(notfound_then_foo.whichmodule(DummyClass(), ""), "foo")
def test_package_importer_whichmodule_no_dunder_module(self):
"""Exercise corner case where we try to pickle an object whose
__module__ doesn't exist because it's from a C extension.
"""
# torch.float16 is an example of such an object: it is a C extension
# type for which there is no __module__ defined. The default pickler
# finds it using special logic to traverse sys.modules and look up
# `float16` on each module (see pickle.py:whichmodule).
#
# We must ensure that we emulate the same behavior from PackageImporter.
my_dtype = torch.float16
# Set up a PackageImporter which has a torch.float16 object pickled:
buffer = BytesIO()
with PackageExporter(buffer) as exporter:
exporter.save_pickle("foo", "foo.pkl", my_dtype)
buffer.seek(0)
importer = PackageImporter(buffer)
my_loaded_dtype = importer.load_pickle("foo", "foo.pkl")
# Re-save a package with only our PackageImporter as the importer
buffer2 = BytesIO()
with PackageExporter(buffer2, importer=importer) as exporter:
exporter.save_pickle("foo", "foo.pkl", my_loaded_dtype)
buffer2.seek(0)
importer2 = PackageImporter(buffer2)
my_loaded_dtype2 = importer2.load_pickle("foo", "foo.pkl")
self.assertIs(my_dtype, my_loaded_dtype)
self.assertIs(my_dtype, my_loaded_dtype2)
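# Illustrative sketch (not part of the original tests): OrderedImporter consults its
# importers in order, so whichever importer is listed first wins for modules that both
# can provide. Assumes package_a is importable, as elsewhere in these tests.
def _example_ordered_importer_precedence():
    from io import BytesIO

    import package_a
    from torch.package import (
        OrderedImporter,
        PackageExporter,
        PackageImporter,
        sys_importer,
    )

    buffer = BytesIO()
    with PackageExporter(buffer) as exporter:
        exporter.save_module("package_a")
    buffer.seek(0)
    package_importer = PackageImporter(buffer)
    # The packaged copy wins when the PackageImporter comes first ...
    package_first = OrderedImporter(package_importer, sys_importer)
    assert package_first.import_module("package_a") is not package_a
    # ... and the environment's copy wins when sys_importer comes first.
    sys_first = OrderedImporter(sys_importer, package_importer)
    assert sys_first.import_module("package_a") is package_a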
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_importer.py
|
pytorch-master
|
test/package/__init__.py
|
|
# Owner(s): ["oncall: package/deploy"]
from typing import Iterable
from torch.package import GlobGroup
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestGlobGroup(PackageTestCase):
def assertMatchesGlob(self, glob: GlobGroup, candidates: Iterable[str]):
for candidate in candidates:
self.assertTrue(glob.matches(candidate))
def assertNotMatchesGlob(self, glob: GlobGroup, candidates: Iterable[str]):
for candidate in candidates:
self.assertFalse(glob.matches(candidate))
def test_one_star(self):
glob_group = GlobGroup("torch.*")
self.assertMatchesGlob(glob_group, ["torch.foo", "torch.bar"])
self.assertNotMatchesGlob(glob_group, ["tor.foo", "torch.foo.bar", "torch"])
def test_one_star_middle(self):
glob_group = GlobGroup("foo.*.bar")
self.assertMatchesGlob(glob_group, ["foo.q.bar", "foo.foo.bar"])
self.assertNotMatchesGlob(
glob_group,
[
"foo.bar",
"foo.foo",
"outer.foo.inner.bar",
"foo.q.bar.more",
"foo.one.two.bar",
],
)
def test_one_star_partial(self):
glob_group = GlobGroup("fo*.bar")
self.assertMatchesGlob(glob_group, ["fo.bar", "foo.bar", "foobar.bar"])
self.assertNotMatchesGlob(glob_group, ["oij.bar", "f.bar", "foo"])
def test_one_star_multiple_in_component(self):
glob_group = GlobGroup("foo/a*.htm*", separator="/")
self.assertMatchesGlob(glob_group, ["foo/a.html", "foo/a.htm", "foo/abc.html"])
def test_one_star_partial_extension(self):
glob_group = GlobGroup("foo/*.txt", separator="/")
self.assertMatchesGlob(
glob_group, ["foo/hello.txt", "foo/goodbye.txt", "foo/.txt"]
)
self.assertNotMatchesGlob(
glob_group, ["foo/bar/hello.txt", "bar/foo/hello.txt"]
)
def test_two_star(self):
glob_group = GlobGroup("torch.**")
self.assertMatchesGlob(
glob_group, ["torch.foo", "torch.bar", "torch.foo.bar", "torch"]
)
self.assertNotMatchesGlob(glob_group, ["what.torch", "torchvision"])
def test_two_star_end(self):
glob_group = GlobGroup("**.torch")
self.assertMatchesGlob(glob_group, ["torch", "bar.torch"])
self.assertNotMatchesGlob(glob_group, ["visiontorch"])
def test_two_star_middle(self):
glob_group = GlobGroup("foo.**.baz")
self.assertMatchesGlob(
glob_group, ["foo.baz", "foo.bar.baz", "foo.bar1.bar2.baz"]
)
self.assertNotMatchesGlob(glob_group, ["foobaz", "foo.bar.baz.z"])
def test_two_star_multiple(self):
glob_group = GlobGroup("**/bar/**/*.txt", separator="/")
self.assertMatchesGlob(
glob_group, ["bar/baz.txt", "a/bar/b.txt", "bar/foo/c.txt"]
)
self.assertNotMatchesGlob(glob_group, ["baz.txt", "a/b.txt"])
def test_raw_two_star(self):
glob_group = GlobGroup("**")
self.assertMatchesGlob(glob_group, ["bar", "foo.bar", "ab.c.d.e"])
self.assertNotMatchesGlob(glob_group, [""])
def test_invalid_raw(self):
with self.assertRaises(ValueError):
GlobGroup("a.**b")
def test_exclude(self):
glob_group = GlobGroup("torch.**", exclude=["torch.**.foo"])
self.assertMatchesGlob(
glob_group,
["torch", "torch.bar", "torch.barfoo"],
)
self.assertNotMatchesGlob(
glob_group,
["torch.foo", "torch.some.foo"],
)
def test_exclude_from_all(self):
glob_group = GlobGroup("**", exclude=["foo.**", "bar.**"])
self.assertMatchesGlob(glob_group, ["a", "hello", "anything.really"])
self.assertNotMatchesGlob(glob_group, ["foo.bar", "foo.bar.baz"])
def test_list_include_exclude(self):
glob_group = GlobGroup(["foo", "bar.**"], exclude=["bar.baz", "bar.qux"])
self.assertMatchesGlob(glob_group, ["foo", "bar.other", "bar.bazother"])
self.assertNotMatchesGlob(glob_group, ["bar.baz", "bar.qux"])
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_glob_group.py
|
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
from textwrap import dedent
from unittest import skipIf
import torch
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
try:
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = skipIf(not HAS_TORCHVISION, "no torchvision")
class TestPackageScript(PackageTestCase):
"""Tests for compatibility with TorchScript."""
def test_package_interface(self):
"""Packaging an interface class should work correctly."""
import package_a.fake_interface as fake
uses_interface = fake.UsesInterface()
scripted = torch.jit.script(uses_interface)
scripted.proxy_mod = torch.jit.script(fake.NewModule())
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("model", "model.pkl", uses_interface)
buffer.seek(0)
package_importer = PackageImporter(buffer)
loaded = package_importer.load_pickle("model", "model.pkl")
scripted_loaded = torch.jit.script(loaded)
scripted_loaded.proxy_mod = torch.jit.script(fake.NewModule())
input = torch.tensor(1)
self.assertEqual(scripted(input), scripted_loaded(input))
def test_different_package_interface(self):
"""Test a case where the interface defined in the package is
different than the one defined in the loading environment, to make
sure TorchScript can distinguish between the two.
"""
# Import one version of the interface
import package_a.fake_interface as fake
# Simulate a package that contains a different version of the
# interface, with the exact same name.
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.save_source_string(
fake.__name__,
dedent(
"""\
import torch
from torch import Tensor
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def one(self, inp1: Tensor) -> Tensor:
pass
class ImplementsInterface(torch.nn.Module):
def one(self, inp1: Tensor) -> Tensor:
return inp1 + 1
class UsesInterface(torch.nn.Module):
proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
self.proxy_mod = ImplementsInterface()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.one(input)
"""
),
)
buffer.seek(0)
package_importer = PackageImporter(buffer)
diff_fake = package_importer.import_module(fake.__name__)
# We should be able to script successfully.
torch.jit.script(diff_fake.UsesInterface())
def test_package_script_class(self):
import package_a.fake_script_class as fake
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.save_module(fake.__name__)
buffer.seek(0)
package_importer = PackageImporter(buffer)
loaded = package_importer.import_module(fake.__name__)
input = torch.tensor(1)
self.assertTrue(
torch.allclose(
fake.uses_script_class(input), loaded.uses_script_class(input)
)
)
def test_package_script_class_referencing_self(self):
import package_a.fake_script_class as fake
obj = fake.UsesIdListFeature()
# intentionally script here to fill the compilation cache, to make sure
# there is no false sharing between scripted types coming from the
# package vs. outside environment.
torch.jit.script(obj)
buffer = BytesIO()
with PackageExporter(buffer) as exporter:
exporter.intern("**")
exporter.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
importer = PackageImporter(buffer)
obj_loaded = importer.load_pickle("obj", "obj.pkl")
scripted_obj_loaded = torch.jit.script(obj_loaded)
# Make sure the scripted object can be serialized without error.
buffer2 = scripted_obj_loaded.save_to_buffer()
torch.jit.load(BytesIO(buffer2))
def test_different_package_script_class(self):
"""Test a case where the script class defined in the package is
different than the one defined in the loading environment, to make
sure TorchScript can distinguish between the two.
"""
import package_a.fake_script_class as fake
# Simulate a package that contains a different version of the
# script class, with the attribute `bar` instead of `foo`
buffer = BytesIO()
with PackageExporter(buffer) as pe2:
pe2.save_source_string(
fake.__name__,
dedent(
"""\
import torch
@torch.jit.script
class MyScriptClass:
def __init__(self, x):
self.bar = x
"""
),
)
buffer.seek(0)
package_importer = PackageImporter(buffer)
diff_fake = package_importer.import_module(fake.__name__)
input = torch.rand(2, 3)
loaded_script_class = diff_fake.MyScriptClass(input)
orig_script_class = fake.MyScriptClass(input)
self.assertEqual(loaded_script_class.bar, orig_script_class.foo)
def test_save_scriptmodule(self):
"""
Test basic saving of ScriptModule.
"""
from package_a.test_module import ModWithTensor
scripted_mod = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("res", "mod.pkl", scripted_mod)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod = importer.load_pickle("res", "mod.pkl", map_location="cpu")
input = torch.rand(1, 2, 3)
self.assertEqual(loaded_mod(input), scripted_mod(input))
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_save_scriptmodule_file(self):
"""
Test basic saving of ScriptModule in file.
"""
from package_a.test_module import ModWithTensor
scripted_mod = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
filename = self.temp()
with PackageExporter(filename) as e:
e.save_pickle("res", "mod.pkl", scripted_mod)
importer = PackageImporter(filename)
loaded_mod = importer.load_pickle("res", "mod.pkl")
input = torch.rand(1, 2, 3)
self.assertEqual(loaded_mod(input), scripted_mod(input))
def test_save_scriptmodule_with_submods(self):
"""
Test basic saving of ScriptModule with submodule.
"""
from package_a.test_module import ModWithSubmod, ModWithTensor
scripted_mod = torch.jit.script(
ModWithSubmod(ModWithTensor(torch.rand(1, 2, 3)))
)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("res", "mod.pkl", scripted_mod)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod = importer.load_pickle("res", "mod.pkl", map_location="cpu")
input = torch.rand(1, 2, 3)
self.assertEqual(loaded_mod(input), scripted_mod(input))
def test_save_scriptmodules_submod_redefinition(self):
"""
Test to verify that saving multiple ScriptModules with the same top module
but different submodules works. The submodule is redefined between the
definitions of the top module to check that the different concrete
types of the modules are properly recognized by the serialization code.
"""
class Submod(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: str):
input = input + "_submod"
return input
class TopMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.modB = Submod()
def forward(self, input: str):
return self.modB(input)
scripted_mod_0 = torch.jit.script(TopMod())
# The redefinition is intentional: changing a single inner string
# attribute should trigger a new module type.
class Submod(torch.nn.Module): # noqa: F811
def __init__(self):
super().__init__()
def forward(self, input: str):
input = input + "_submod(changed)"
return input
scripted_mod_1 = torch.jit.script(TopMod())
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("res", "mod1.pkl", scripted_mod_0)
e.save_pickle("res", "mod2.pkl", scripted_mod_1)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod_0 = importer.load_pickle("res", "mod1.pkl")
loaded_mod_1 = importer.load_pickle("res", "mod2.pkl")
self.assertEqual(loaded_mod_0("input"), scripted_mod_0("input"))
self.assertEqual(loaded_mod_1("input"), scripted_mod_1("input"))
self.assertNotEqual(loaded_mod_0("input"), loaded_mod_1("input"))
def test_save_independent_scriptmodules(self):
"""
Test to verify saving multiple ScriptModules with completely
separate code works.
"""
from package_a.test_module import ModWithTensor, SimpleTest
scripted_mod_0 = torch.jit.script(SimpleTest())
scripted_mod_1 = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("res", "mod1.pkl", scripted_mod_0)
e.save_pickle("res", "mod2.pkl", scripted_mod_1)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod_0 = importer.load_pickle("res", "mod1.pkl")
loaded_mod_1 = importer.load_pickle("res", "mod2.pkl")
input = torch.rand(1, 2, 3)
self.assertEqual(loaded_mod_0(input), scripted_mod_0(input))
self.assertEqual(loaded_mod_1(input), scripted_mod_1(input))
def test_save_repeat_scriptmodules(self):
"""
Test to verify that saving multiple different modules and
repeats of the same ScriptModule in one package works. Also tests that
PyTorchStreamReader does not have code hidden from it by
PyTorchStreamWriter writing ScriptModule code files multiple times.
"""
from package_a.test_module import (
ModWithSubmodAndTensor,
ModWithTensor,
SimpleTest,
)
scripted_mod_0 = torch.jit.script(SimpleTest())
scripted_mod_1 = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
scripted_mod_2 = torch.jit.script(
ModWithSubmodAndTensor(
torch.rand(1, 2, 3), ModWithTensor(torch.rand(1, 2, 3))
)
)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("res", "mod0.pkl", scripted_mod_0)
e.save_pickle("res", "mod1.pkl", scripted_mod_1)
e.save_pickle("res", "mod2.pkl", scripted_mod_0)
e.save_pickle("res", "mod3.pkl", scripted_mod_1)
e.save_pickle("res", "mod4.pkl", scripted_mod_2)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod_0 = importer.load_pickle("res", "mod0.pkl")
loaded_mod_1 = importer.load_pickle("res", "mod3.pkl")
loaded_mod_2 = importer.load_pickle("res", "mod4.pkl")
input = torch.rand(1, 2, 3)
self.assertEqual(loaded_mod_0(input), scripted_mod_0(input))
self.assertEqual(loaded_mod_1(input), scripted_mod_1(input))
self.assertEqual(loaded_mod_2(input), scripted_mod_2(input))
def test_scriptmodules_repeat_save(self):
"""
Test to verify saving and loading same ScriptModule object works
across multiple packages.
"""
from package_a.test_module import ModWithSubmodAndTensor, ModWithTensor
scripted_mod_0 = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
scripted_mod_1 = torch.jit.script(
ModWithSubmodAndTensor(
torch.rand(1, 2, 3), ModWithTensor(torch.rand(1, 2, 3))
)
)
buffer_0 = BytesIO()
with PackageExporter(buffer_0) as e:
e.save_pickle("res", "mod1.pkl", scripted_mod_0)
buffer_0.seek(0)
importer_0 = PackageImporter(buffer_0)
loaded_module_0 = importer_0.load_pickle("res", "mod1.pkl")
buffer_1 = BytesIO()
with PackageExporter(buffer_1) as e:
e.save_pickle("res", "mod1.pkl", scripted_mod_1)
e.save_pickle("res", "mod2.pkl", loaded_module_0)
buffer_1.seek(0)
importer_1 = PackageImporter(buffer_1)
loaded_module_1 = importer_1.load_pickle("res", "mod1.pkl")
reloaded_module_0 = importer_1.load_pickle("res", "mod2.pkl")
input = torch.rand(1, 2, 3)
self.assertEqual(loaded_module_0(input), scripted_mod_0(input))
self.assertEqual(loaded_module_0(input), reloaded_module_0(input))
self.assertEqual(loaded_module_1(input), scripted_mod_1(input))
@skipIfNoTorchVision
def test_save_scriptmodule_only_necessary_code(self):
"""
Test to verify that, when saving multiple packages with the same CU (compilation unit),
packages don't include unnecessary TorchScript code files.
The TorchVision code should only be saved in the package that
relies on it.
"""
from package_a.test_module import ModWithTensor
class ModWithTorchVision(torch.nn.Module):
def __init__(self, name: str):
super().__init__()
self.tvmod = resnet18()
def forward(self, input):
return input * 4
scripted_mod_0 = torch.jit.script(ModWithTorchVision("foo"))
scripted_mod_1 = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
buffer_0 = BytesIO()
with PackageExporter(buffer_0) as e:
e.save_pickle("res", "mod1.pkl", scripted_mod_0)
buffer_0.seek(0)
importer_0 = importer = PackageImporter(buffer_0)
buffer_1 = BytesIO()
with PackageExporter(buffer_1) as e:
e.save_pickle("res", "mod1.pkl", scripted_mod_1)
buffer_1.seek(0)
importer_1 = PackageImporter(buffer_1)
self.assertTrue("torchvision" in str(importer_0.file_structure()))
self.assertFalse("torchvision" in str(importer_1.file_structure()))
def test_save_scriptmodules_in_container(self):
"""
Test saving of ScriptModules inside a container. Checks that relations
between shared modules are upheld.
"""
from package_a.test_module import ModWithSubmodAndTensor, ModWithTensor
scripted_mod_a = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
scripted_mod_b = torch.jit.script(
ModWithSubmodAndTensor(torch.rand(1, 2, 3), scripted_mod_a)
)
script_mods_list = [scripted_mod_a, scripted_mod_b]
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("res", "list.pkl", script_mods_list)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod_list = importer.load_pickle("res", "list.pkl")
input = torch.rand(1, 2, 3)
self.assertEqual(loaded_mod_list[0](input), scripted_mod_a(input))
self.assertEqual(loaded_mod_list[1](input), scripted_mod_b(input))
def test_save_eager_mods_sharing_scriptmodule(self):
"""
Test saving of a single ScriptModule shared by multiple
eager modules (the ScriptModule should be saved just once
even though it is contained in multiple pickles).
"""
from package_a.test_module import ModWithSubmod, SimpleTest
scripted_mod = torch.jit.script(SimpleTest())
mod1 = ModWithSubmod(scripted_mod)
mod2 = ModWithSubmod(scripted_mod)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.intern("**")
e.save_pickle("res", "mod1.pkl", mod1)
e.save_pickle("res", "mod2.pkl", mod2)
buffer.seek(0)
importer = PackageImporter(buffer)
file_structure = importer.file_structure()
self.assertTrue(file_structure.has_file(".data/ts_code/0"))
self.assertFalse(file_structure.has_file(".data/ts_code/1"))
def test_load_shared_scriptmodules(self):
"""
Test loading of a single ScriptModule shared by multiple eager
modules in a single pickle (the loaded ScriptModule objects should be the same).
"""
from package_a.test_module import (
ModWithMultipleSubmods,
ModWithSubmod,
SimpleTest,
)
scripted_mod = torch.jit.script(SimpleTest())
mod1 = ModWithSubmod(scripted_mod)
mod2 = ModWithSubmod(scripted_mod)
mod_parent = ModWithMultipleSubmods(mod1, mod2)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.intern("**")
e.save_pickle("res", "mod.pkl", mod_parent)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod = importer.load_pickle("res", "mod.pkl")
self.assertTrue(
id(loaded_mod.mod1.script_mod) == id(loaded_mod.mod2.script_mod)
)
def test_save_shared_tensors(self):
"""
Test tensors shared across eager and ScriptModules are serialized once.
"""
from package_a.test_module import ModWithSubmodAndTensor, ModWithTensor
shared_tensor = torch.rand(2, 3, 4)
scripted_mod = torch.jit.script(ModWithTensor(shared_tensor))
mod1 = ModWithSubmodAndTensor(shared_tensor, scripted_mod)
mod2 = ModWithSubmodAndTensor(shared_tensor, scripted_mod)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.intern("**")
e.save_pickle("res", "tensor", shared_tensor)
e.save_pickle("res", "mod1.pkl", mod1)
e.save_pickle("res", "mod2.pkl", mod2)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod_1 = importer.load_pickle("res", "mod1.pkl")
# assert that there is only one storage stored in package
file_structure = importer.file_structure(include=".data/*.storage")
self.assertTrue(len(file_structure.children[".data"].children) == 1)
input = torch.rand(2, 3, 4)
self.assertEqual(loaded_mod_1(input), mod1(input))
def test_load_shared_tensors(self):
"""
Test tensors shared across eager and ScriptModules on load
are the same.
"""
from package_a.test_module import ModWithTensor, ModWithTwoSubmodsAndTensor
shared_tensor = torch.ones(3, 3)
scripted_mod_0 = torch.jit.script(ModWithTensor(shared_tensor))
scripted_mod_1 = torch.jit.script(ModWithTensor(shared_tensor))
mod1 = ModWithTwoSubmodsAndTensor(shared_tensor, scripted_mod_0, scripted_mod_1)
self.assertEqual(
shared_tensor.storage()._cdata,
scripted_mod_0.tensor.storage()._cdata,
)
self.assertEqual(
shared_tensor.storage()._cdata,
scripted_mod_1.tensor.storage()._cdata,
)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.intern("**")
e.save_pickle("res", "mod1.pkl", mod1)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_mod_1 = importer.load_pickle("res", "mod1.pkl")
self.assertEqual(
loaded_mod_1.tensor.storage()._cdata,
loaded_mod_1.sub_mod_0.tensor.storage()._cdata,
)
self.assertEqual(
loaded_mod_1.tensor.storage()._cdata,
loaded_mod_1.sub_mod_1.tensor.storage()._cdata,
)
loaded_mod_1.tensor.add_(torch.ones(3, 3))
self.assertTrue(
torch.allclose(loaded_mod_1.tensor, loaded_mod_1.sub_mod_0.tensor)
)
self.assertTrue(
torch.allclose(loaded_mod_1.tensor, loaded_mod_1.sub_mod_1.tensor)
)
def test_load_shared_tensors_repackaged(self):
"""
Test tensors shared across eager and ScriptModules on load
are the same across multiple package saves and loads. This is
an important test because not all of the tensor information is restored
in python between packages. The python identity is not maintained, but
the backing cpp TensorImpl is. We load/save storages based off of this
cpp TensorImpl and not the python identity.
"""
from package_a.test_module import ModWithTensor, ModWithTwoSubmodsAndTensor
shared_tensor = torch.ones(3, 3)
scripted_mod_0 = torch.jit.script(ModWithTensor(shared_tensor))
scripted_mod_1 = torch.jit.script(ModWithTensor(shared_tensor))
mod1 = ModWithTwoSubmodsAndTensor(shared_tensor, scripted_mod_0, scripted_mod_1)
buffer_0 = BytesIO()
with PackageExporter(buffer_0) as e:
e.intern("**")
e.save_pickle("res", "mod1.pkl", mod1)
buffer_0.seek(0)
importer_0 = PackageImporter(buffer_0)
loaded_mod_0 = importer_0.load_pickle("res", "mod1.pkl")
buffer_1 = BytesIO()
with PackageExporter(buffer_1, importer=importer_0) as e:
e.intern("**")
e.save_pickle("res", "mod1.pkl", loaded_mod_0)
buffer_1.seek(0)
importer = PackageImporter(buffer_1)
loaded_mod_1 = importer.load_pickle("res", "mod1.pkl")
self.assertEqual(
loaded_mod_1.tensor.storage()._cdata,
loaded_mod_1.sub_mod_0.tensor.storage()._cdata,
)
self.assertEqual(
loaded_mod_1.tensor.storage()._cdata,
loaded_mod_1.sub_mod_1.tensor.storage()._cdata,
)
loaded_mod_1.tensor.add_(
torch.ones(3, 3)
) # all tensors should reflect this change
self.assertTrue(
torch.allclose(loaded_mod_1.tensor, loaded_mod_1.sub_mod_0.tensor)
)
self.assertTrue(
torch.allclose(loaded_mod_1.tensor, loaded_mod_1.sub_mod_1.tensor)
)
def test_saving_and_scripting_packaged_mod(self):
"""
Test scripting a module loaded from a package
and saving it in a new package as a script object.
"""
from package_a.test_module import SimpleTest
orig_mod = SimpleTest()
buffer_0 = BytesIO()
with PackageExporter(buffer_0) as e:
e.intern("**")
e.save_pickle("model", "model.pkl", orig_mod)
buffer_0.seek(0)
importer_0 = PackageImporter(buffer_0)
loaded_mod = importer_0.load_pickle("model", "model.pkl")
input = torch.rand(2, 3)
self.assertEqual(loaded_mod(input), orig_mod(input))
scripted_mod = torch.jit.script(loaded_mod)
buffer_1 = BytesIO()
with PackageExporter(buffer_1, importer=importer_0) as e:
e.intern("**")
e.save_pickle("res", "scripted_mod.pkl", scripted_mod)
buffer_1.seek(0)
importer_1 = PackageImporter(buffer_1)
loaded_mod_scripted = importer_1.load_pickle("res", "scripted_mod.pkl")
self.assertEqual(loaded_mod_scripted(input), orig_mod(input))
def test_mixing_packaged_and_inline_modules(self):
"""
Test saving inline and imported modules in same package with
independent code.
"""
class InlineMod(torch.nn.Module):
def __init__(self, name: str):
super().__init__()
self.name = name
self.tensor = torch.rand(1, 2, 3)
def forward(self, input: str):
input = input + "_modInline:" + self.name
return input, (self.tensor * 4)
inline_mod = InlineMod("inline")
scripted_inline = torch.jit.script(inline_mod)
from package_a.test_module import SimpleTest
imported_mod = SimpleTest()
scripted_imported = torch.jit.script(imported_mod)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("model", "inline.pkl", scripted_inline)
e.save_pickle("model", "imported.pkl", scripted_imported)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_inline = importer.load_pickle("model", "inline.pkl")
loaded_imported = importer.load_pickle("model", "imported.pkl")
input = torch.rand(2, 3)
self.assertEqual(loaded_imported(input), imported_mod(input))
self.assertEqual(loaded_inline("input"), inline_mod("input"))
@skipIfNoTorchVision
def test_mixing_packaged_and_inline_modules_shared_code(self):
"""
Test saving inline and imported modules in same package that
share code.
"""
class TorchVisionTestInline(torch.nn.Module):
def __init__(self):
super().__init__()
self.tvmod = resnet18()
def forward(self, x):
x = a_non_torch_leaf(x, x)
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
inline_mod = TorchVisionTestInline()
scripted_inline = torch.jit.script(inline_mod)
from package_c.test_module import TorchVisionTest
imported_mod = TorchVisionTest()
scripted_imported = torch.jit.script(imported_mod)
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_pickle("model", "inline.pkl", scripted_inline)
e.save_pickle("model", "imported.pkl", scripted_imported)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_inline = importer.load_pickle("model", "inline.pkl")
loaded_imported = importer.load_pickle("model", "imported.pkl")
input = torch.rand(2, 3)
self.assertEqual(loaded_imported(input), imported_mod(input))
self.assertEqual(loaded_inline(input), inline_mod(input))
def test_tensor_sharing_pickle(self):
"""Test that saving a ScriptModule and a separately saving a tensor
object causes no issues.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.foo = torch.ones(2, 3)
def forward(self):
return self.foo
scripted_m = torch.jit.script(M())
original_tensor = torch.ones(0)
f = BytesIO()
with torch.package.PackageExporter(f) as exporter:
exporter.save_pickle("model", "model.pkl", scripted_m)
exporter.save_pickle("model", "input.pkl", original_tensor)
f.seek(0)
# Should be able to load correctly
importer = PackageImporter(f)
loaded_m = importer.load_pickle("model", "model.pkl")
loaded_tensor = importer.load_pickle("model", "input.pkl")
self.assertEqual(scripted_m.foo, loaded_m.foo)
self.assertEqual(original_tensor, loaded_tensor)
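# Illustrative sketch (not part of the original tests): the basic TorchScript +
# torch.package roundtrip this file builds on — script a module, store it with
# save_pickle, and load it back from the archive. The tiny Doubler module here is made
# up for the example; the real tests use modules from package_a.test_module.
def _example_scripted_module_roundtrip():
    from io import BytesIO

    import torch
    from torch.package import PackageExporter, PackageImporter

    class Doubler(torch.nn.Module):
        def forward(self, x):
            return x * 2

    scripted = torch.jit.script(Doubler())
    buffer = BytesIO()
    with PackageExporter(buffer) as exporter:
        # ScriptModules are serialized with TorchScript's own format inside the archive,
        # so no intern/extern patterns are needed for this pickle.
        exporter.save_pickle("res", "mod.pkl", scripted)
    buffer.seek(0)
    loaded = PackageImporter(buffer).load_pickle("res", "mod.pkl")
    return torch.allclose(loaded(torch.ones(2)), torch.full((2,), 2.0))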
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_package_script.py
|
# Owner(s): ["oncall: package/deploy"]
import pickle
from io import BytesIO
from textwrap import dedent
from unittest import skipIf
from torch.package import PackageExporter, PackageImporter, sys_importer
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
from pathlib import Path
packaging_directory = Path(__file__).parent
class TestSaveLoad(PackageTestCase):
"""Core save_* and loading API tests."""
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_saving_source(self):
filename = self.temp()
with PackageExporter(filename) as he:
he.save_source_file("foo", str(packaging_directory / "module_a.py"))
he.save_source_file("foodir", str(packaging_directory / "package_a"))
hi = PackageImporter(filename)
foo = hi.import_module("foo")
s = hi.import_module("foodir.subpackage")
self.assertEqual(foo.result, "module_a")
self.assertEqual(s.result, "package_a.subpackage")
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_saving_string(self):
filename = self.temp()
with PackageExporter(filename) as he:
src = dedent(
"""\
import math
the_math = math
"""
)
he.save_source_string("my_mod", src)
hi = PackageImporter(filename)
m = hi.import_module("math")
import math
self.assertIs(m, math)
my_mod = hi.import_module("my_mod")
self.assertIs(my_mod.math, math)
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_save_module(self):
filename = self.temp()
with PackageExporter(filename) as he:
import module_a
import package_a
he.save_module(module_a.__name__)
he.save_module(package_a.__name__)
hi = PackageImporter(filename)
module_a_i = hi.import_module("module_a")
self.assertEqual(module_a_i.result, "module_a")
self.assertIsNot(module_a, module_a_i)
package_a_i = hi.import_module("package_a")
self.assertEqual(package_a_i.result, "package_a")
self.assertIsNot(package_a_i, package_a)
def test_dunder_imports(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
import package_b
obj = package_b.PackageBObject
he.intern("**")
he.save_pickle("res", "obj.pkl", obj)
buffer.seek(0)
hi = PackageImporter(buffer)
loaded_obj = hi.load_pickle("res", "obj.pkl")
package_b = hi.import_module("package_b")
self.assertEqual(package_b.result, "package_b")
math = hi.import_module("math")
self.assertEqual(math.__name__, "math")
xml_sub_sub_package = hi.import_module("xml.sax.xmlreader")
self.assertEqual(xml_sub_sub_package.__name__, "xml.sax.xmlreader")
subpackage_1 = hi.import_module("package_b.subpackage_1")
self.assertEqual(subpackage_1.result, "subpackage_1")
subpackage_2 = hi.import_module("package_b.subpackage_2")
self.assertEqual(subpackage_2.result, "subpackage_2")
subsubpackage_0 = hi.import_module("package_b.subpackage_0.subsubpackage_0")
self.assertEqual(subsubpackage_0.result, "subsubpackage_0")
def test_bad_dunder_imports(self):
"""Test to ensure bad __imports__ don't cause PackageExporter to fail."""
buffer = BytesIO()
with PackageExporter(buffer) as e:
e.save_source_string(
"m", '__import__(these, unresolvable, "things", wont, crash, me)'
)
def test_save_module_binary(self):
f = BytesIO()
with PackageExporter(f) as he:
import module_a
import package_a
he.save_module(module_a.__name__)
he.save_module(package_a.__name__)
f.seek(0)
hi = PackageImporter(f)
module_a_i = hi.import_module("module_a")
self.assertEqual(module_a_i.result, "module_a")
self.assertIsNot(module_a, module_a_i)
package_a_i = hi.import_module("package_a")
self.assertEqual(package_a_i.result, "package_a")
self.assertIsNot(package_a_i, package_a)
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_pickle(self):
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
filename = self.temp()
with PackageExporter(filename) as he:
he.intern("**")
he.save_pickle("obj", "obj.pkl", obj2)
hi = PackageImporter(filename)
# check we got dependencies
sp = hi.import_module("package_a.subpackage")
# check we didn't get other stuff
with self.assertRaises(ImportError):
hi.import_module("module_a")
obj_loaded = hi.load_pickle("obj", "obj.pkl")
self.assertIsNot(obj2, obj_loaded)
self.assertIsInstance(obj_loaded.obj, sp.PackageASubpackageObject)
self.assertIsNot(
package_a.subpackage.PackageASubpackageObject, sp.PackageASubpackageObject
)
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_exporting_mismatched_code(self):
"""
If an object with the same qualified name is loaded from different
packages, the user should get an error if they try to re-save the
object with the wrong package's source code.
"""
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
f1 = self.temp()
with PackageExporter(f1) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj2)
importer1 = PackageImporter(f1)
loaded1 = importer1.load_pickle("obj", "obj.pkl")
importer2 = PackageImporter(f1)
loaded2 = importer2.load_pickle("obj", "obj.pkl")
f2 = self.temp()
def make_exporter():
pe = PackageExporter(f2, importer=[importer1, sys_importer])
# Ensure that the importer finds the 'PackageAObject' defined in 'importer1' first.
return pe
# This should fail. The 'PackageAObject' type defined from 'importer1'
# is not necessarily the same 'obj2's version of 'PackageAObject'.
pe = make_exporter()
with self.assertRaises(pickle.PicklingError):
pe.save_pickle("obj", "obj.pkl", obj2)
# This should also fail. The 'PackageAObject' type defined from 'importer1'
# is not necessarily the same as the one defined from 'importer2'
pe = make_exporter()
with self.assertRaises(pickle.PicklingError):
pe.save_pickle("obj", "obj.pkl", loaded2)
# This should succeed. The 'PackageAObject' type defined from
# 'importer1' is a match for the one used by loaded1.
pe = make_exporter()
pe.save_pickle("obj", "obj.pkl", loaded1)
def test_save_imported_module(self):
"""Saving a module that came from another PackageImporter should work."""
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
obj2 = package_a.PackageAObject(obj)
buffer = BytesIO()
with PackageExporter(buffer) as exporter:
exporter.intern("**")
exporter.save_pickle("model", "model.pkl", obj2)
buffer.seek(0)
importer = PackageImporter(buffer)
imported_obj2 = importer.load_pickle("model", "model.pkl")
imported_obj2_module = imported_obj2.__class__.__module__
# Should export without error.
buffer2 = BytesIO()
with PackageExporter(buffer2, importer=(importer, sys_importer)) as exporter:
exporter.intern("**")
exporter.save_module(imported_obj2_module)
def test_save_imported_module_using_package_importer(self):
"""Exercise a corner case: re-packaging a module that uses `torch_package_importer`"""
import package_a.use_torch_package_importer # noqa: F401
buffer = BytesIO()
with PackageExporter(buffer) as exporter:
exporter.intern("**")
exporter.save_module("package_a.use_torch_package_importer")
buffer.seek(0)
importer = PackageImporter(buffer)
# Should export without error.
buffer2 = BytesIO()
with PackageExporter(buffer2, importer=(importer, sys_importer)) as exporter:
exporter.intern("**")
exporter.save_module("package_a.use_torch_package_importer")
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_save_load.py
|
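A minimal sketch of the export/import round trip exercised by the tests above: write a pickled object into an in-memory package with PackageExporter and read it back with PackageImporter. The plain dict payload is a stand-in chosen so the example has no module source to package; the tests pickle objects from the `package_a` fixtures instead.

from io import BytesIO

from torch.package import PackageExporter, PackageImporter

buffer = BytesIO()
with PackageExporter(buffer) as exporter:
    exporter.intern("**")  # same catch-all intern pattern the tests use
    exporter.save_pickle("res", "obj.pkl", {"answer": 42})

buffer.seek(0)
importer = PackageImporter(buffer)
loaded = importer.load_pickle("res", "obj.pkl")
assert loaded == {"answer": 42}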
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
from torch.package import PackageExporter, PackageImporter, sys_importer
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestRepackage(PackageTestCase):
"""Tests for repackaging."""
def test_repackage_import_indirectly_via_parent_module(self):
from package_d.imports_directly import ImportsDirectlyFromSubSubPackage
from package_d.imports_indirectly import ImportsIndirectlyFromSubPackage
model_a = ImportsDirectlyFromSubSubPackage()
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("default", "model.py", model_a)
buffer.seek(0)
pi = PackageImporter(buffer)
loaded_model = pi.load_pickle("default", "model.py")
model_b = ImportsIndirectlyFromSubPackage()
buffer = BytesIO()
with PackageExporter(
buffer,
importer=(
pi,
sys_importer,
),
) as pe:
pe.intern("**")
pe.save_pickle("default", "model_b.py", model_b)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_repackage.py
|
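A compact sketch of the repackaging pattern from test_repackage above, using only the public torch.package API: load an object out of one package, then hand that PackageImporter to a new PackageExporter ahead of sys_importer so anything defined inside the first package is resolved from it when re-saving. A plain dict stands in for the `package_d` model fixtures.

from io import BytesIO

from torch.package import PackageExporter, PackageImporter, sys_importer

buf1 = BytesIO()
with PackageExporter(buf1) as pe:
    pe.save_pickle("default", "model.pkl", {"weights": [1.0, 2.0, 3.0]})
buf1.seek(0)

pi = PackageImporter(buf1)
loaded = pi.load_pickle("default", "model.pkl")

# Re-export: importer=(pi, sys_importer) makes the new exporter consult the
# original package first, then fall back to the regular Python environment.
buf2 = BytesIO()
with PackageExporter(buf2, importer=(pi, sys_importer)) as pe:
    pe.save_pickle("default", "model.pkl", loaded)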
import os
import sys
from tempfile import NamedTemporaryFile
import torch.package.package_exporter
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase
class PackageTestCase(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._temporary_files = []
def temp(self):
t = NamedTemporaryFile()
name = t.name
if IS_WINDOWS:
t.close() # can't read an open file in windows
else:
self._temporary_files.append(t)
return name
def setUp(self):
"""Add test/package/ to module search path. This ensures that
importing our fake packages via, e.g. `import package_a` will always
work regardless of how we invoke the test.
"""
super().setUp()
self.package_test_dir = os.path.dirname(os.path.realpath(__file__))
self.orig_sys_path = sys.path.copy()
sys.path.append(self.package_test_dir)
torch.package.package_exporter._gate_torchscript_serialization = False
def tearDown(self):
super().tearDown()
sys.path = self.orig_sys_path
# remove any temporary files
for t in self._temporary_files:
t.close()
self._temporary_files = []
|
pytorch-master
|
test/package/common.py
|
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
import torch
from torch.fx import Graph, GraphModule, symbolic_trace
from torch.package import (
ObjMismatchError,
PackageExporter,
PackageImporter,
sys_importer,
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestPackageFX(PackageTestCase):
"""Tests for compatibility with FX."""
def test_package_fx_simple(self):
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
st = SimpleTest()
traced = symbolic_trace(st)
f = BytesIO()
with PackageExporter(f) as pe:
pe.save_pickle("model", "model.pkl", traced)
f.seek(0)
pi = PackageImporter(f)
loaded_traced = pi.load_pickle("model", "model.pkl")
input = torch.rand(2, 3)
self.assertEqual(loaded_traced(input), traced(input))
def test_package_then_fx(self):
from package_a.test_module import SimpleTest
model = SimpleTest()
f = BytesIO()
with PackageExporter(f) as pe:
pe.intern("**")
pe.save_pickle("model", "model.pkl", model)
f.seek(0)
pi = PackageImporter(f)
loaded = pi.load_pickle("model", "model.pkl")
traced = symbolic_trace(loaded)
input = torch.rand(2, 3)
self.assertEqual(loaded(input), traced(input))
def test_package_fx_package(self):
from package_a.test_module import SimpleTest
model = SimpleTest()
f = BytesIO()
with PackageExporter(f) as pe:
pe.intern("**")
pe.save_pickle("model", "model.pkl", model)
f.seek(0)
pi = PackageImporter(f)
loaded = pi.load_pickle("model", "model.pkl")
traced = symbolic_trace(loaded)
# re-save the package exporter
f2 = BytesIO()
# This should fail, because we are referencing some globals that are
# only in the package.
with self.assertRaises(ObjMismatchError):
with PackageExporter(f2) as pe:
pe.intern("**")
pe.save_pickle("model", "model.pkl", traced)
f2.seek(0)
with PackageExporter(f2, importer=(pi, sys_importer)) as pe:
# Make the package available to the exporter's environment.
pe.intern("**")
pe.save_pickle("model", "model.pkl", traced)
f2.seek(0)
pi2 = PackageImporter(f2)
loaded2 = pi2.load_pickle("model", "model.pkl")
input = torch.rand(2, 3)
self.assertEqual(loaded(input), loaded2(input))
def test_package_fx_with_imports(self):
import package_a.subpackage
# Manually construct a graph that invokes a leaf function
graph = Graph()
a = graph.placeholder("x")
b = graph.placeholder("y")
c = graph.call_function(package_a.subpackage.leaf_function, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
f = BytesIO()
with PackageExporter(f) as pe:
pe.intern("**")
pe.save_pickle("model", "model.pkl", gm)
f.seek(0)
pi = PackageImporter(f)
loaded_gm = pi.load_pickle("model", "model.pkl")
input_x = torch.rand(2, 3)
input_y = torch.rand(2, 3)
self.assertTrue(
torch.allclose(loaded_gm(input_x, input_y), gm(input_x, input_y))
)
# Check that the packaged version of the leaf_function dependency is
# not the same as in the outer env.
packaged_dependency = pi.import_module("package_a.subpackage")
self.assertTrue(packaged_dependency is not package_a.subpackage)
def test_package_fx_custom_tracer(self):
from package_a.test_all_leaf_modules_tracer import TestAllLeafModulesTracer
from package_a.test_module import ModWithTwoSubmodsAndTensor, SimpleTest
class SpecialGraphModule(torch.fx.GraphModule):
def __init__(self, root, graph, info):
super().__init__(root, graph)
self.info = info
sub_module = SimpleTest()
module = ModWithTwoSubmodsAndTensor(
torch.ones(3),
sub_module,
sub_module,
)
tracer = TestAllLeafModulesTracer()
graph = tracer.trace(module)
self.assertEqual(graph._tracer_cls, TestAllLeafModulesTracer)
gm = SpecialGraphModule(module, graph, "secret")
self.assertEqual(gm._tracer_cls, TestAllLeafModulesTracer)
f = BytesIO()
with PackageExporter(f) as pe:
pe.intern("**")
pe.save_pickle("model", "model.pkl", gm)
f.seek(0)
pi = PackageImporter(f)
loaded_gm = pi.load_pickle("model", "model.pkl")
self.assertEqual(
type(loaded_gm).__class__.__name__, SpecialGraphModule.__class__.__name__
)
self.assertEqual(loaded_gm.info, "secret")
input_x = torch.randn(3)
self.assertEqual(loaded_gm(input_x), gm(input_x))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_package_fx.py
|
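As a small, self-contained illustration of the Graph/GraphModule calls used in test_package_fx_with_imports above, the sketch below hand-builds a graph for sin(x + y); torch.add takes the place of the `package_a.subpackage.leaf_function` fixture so it runs without the test packages.

import torch
from torch.fx import Graph, GraphModule

graph = Graph()
a = graph.placeholder("x")
b = graph.placeholder("y")
c = graph.call_function(torch.add, (a, b))  # stand-in for the package_a leaf function
d = graph.call_function(torch.sin, (c,))
graph.output(d)

gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(2, 3), torch.rand(2, 3)
assert torch.allclose(gm(x, y), torch.sin(x + y))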
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
from torch.package import PackageExporter
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestDependencyHooks(PackageTestCase):
"""Dependency management hooks API tests.
- register_mock_hook()
- register_extern_hook()
"""
def test_single_hook(self):
buffer = BytesIO()
my_externs = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
exporter.register_extern_hook(my_extern_hook)
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_externs, set(["module_a"]))
def test_multiple_extern_hooks(self):
buffer = BytesIO()
my_externs = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_extern_hook2(package_exporter, module_name):
my_externs.remove(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
exporter.register_extern_hook(my_extern_hook)
exporter.register_extern_hook(my_extern_hook2)
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_externs, set())
def test_multiple_mock_hooks(self):
buffer = BytesIO()
my_mocks = set()
def my_mock_hook(package_exporter, module_name):
my_mocks.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_mock_hook2(package_exporter, module_name):
my_mocks.remove(module_name)
with PackageExporter(buffer) as exporter:
exporter.mock(["package_a.subpackage", "module_a"])
exporter.register_mock_hook(my_mock_hook)
exporter.register_mock_hook(my_mock_hook2)
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_mocks, set())
def test_remove_hooks(self):
buffer = BytesIO()
my_externs = set()
my_externs2 = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
def my_extern_hook2(package_exporter, module_name):
my_externs2.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
handle = exporter.register_extern_hook(my_extern_hook)
exporter.register_extern_hook(my_extern_hook2)
handle.remove()
exporter.save_source_string("foo", "import module_a")
self.assertEqual(my_externs, set())
self.assertEqual(my_externs2, set(["module_a"]))
def test_extern_and_mock_hook(self):
buffer = BytesIO()
my_externs = set()
my_mocks = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
def my_mock_hook(package_exporter, module_name):
my_mocks.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern("module_a")
exporter.mock("package_a")
exporter.register_extern_hook(my_extern_hook)
exporter.register_mock_hook(my_mock_hook)
exporter.save_source_string("foo", "import module_a; import package_a")
self.assertEqual(my_externs, set(["module_a"]))
self.assertEqual(my_mocks, set(["package_a"]))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_dependency_hooks.py
|
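A sketch of the extern-hook API exercised above, with the stdlib module json substituted for the `module_a` fixture so it is self-contained: the hook fires once per module that ends up extern in the package, and register_extern_hook returns a handle whose remove() unregisters it, as test_remove_hooks shows.

from io import BytesIO

from torch.package import PackageExporter

seen = set()

def record_extern(package_exporter, module_name):
    # Invoked for every module marked extern while the package is built.
    seen.add(module_name)

buffer = BytesIO()
with PackageExporter(buffer) as exporter:
    exporter.extern("json")  # json stays outside the package, recorded as a dependency
    exporter.register_extern_hook(record_extern)
    exporter.save_source_string("foo", "import json")

assert seen == {"json"}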
from pathlib import Path
import torch
from torch.fx import symbolic_trace
from torch.package import PackageExporter
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE
packaging_directory = f"{Path(__file__).parent}/package_bc"
torch.package.package_exporter._gate_torchscript_serialization = False
def generate_bc_packages():
"""Function to create packages for testing backwards compatiblity"""
if not IS_FBCODE or IS_SANDCASTLE:
from package_a.test_nn_module import TestNnModule
test_nn_module = TestNnModule()
test_torchscript_module = torch.jit.script(TestNnModule())
test_fx_module: torch.fx.GraphModule = symbolic_trace(TestNnModule())
with PackageExporter(f"{packaging_directory}/test_nn_module.pt") as pe1:
pe1.intern("**")
pe1.save_pickle("nn_module", "nn_module.pkl", test_nn_module)
with PackageExporter(
f"{packaging_directory}/test_torchscript_module.pt"
) as pe2:
pe2.intern("**")
pe2.save_pickle(
"torchscript_module", "torchscript_module.pkl", test_torchscript_module
)
with PackageExporter(f"{packaging_directory}/test_fx_module.pt") as pe3:
pe3.intern("**")
pe3.save_pickle("fx_module", "fx_module.pkl", test_fx_module)
if __name__ == "__main__":
generate_bc_packages()
|
pytorch-master
|
test/package/generate_bc_packages.py
|
# Owner(s): ["oncall: package/deploy"]
from pathlib import Path
from unittest import skipIf
from torch.package import PackageImporter
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
packaging_directory = f"{Path(__file__).parent}/package_bc"
class TestLoadBCPackages(PackageTestCase):
"""Tests for checking loading has backwards compatiblity"""
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_bc_packages_nn_module(self):
"""Tests for backwards compatible nn module"""
importer1 = PackageImporter(f"{packaging_directory}/test_nn_module.pt")
loaded1 = importer1.load_pickle("nn_module", "nn_module.pkl")
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_bc_packages_torchscript_module(self):
"""Tests for backwards compatible torchscript module"""
importer2 = PackageImporter(f"{packaging_directory}/test_torchscript_module.pt")
loaded2 = importer2.load_pickle("torchscript_module", "torchscript_module.pkl")
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_bc_packages_fx_module(self):
"""Tests for backwards compatible fx module"""
importer3 = PackageImporter(f"{packaging_directory}/test_fx_module.pt")
loaded3 = importer3.load_pickle("fx_module", "fx_module.pkl")
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_load_bc_packages.py
|
# Owner(s): ["oncall: package/deploy"]
import torch
from torch.package import analyze
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestAnalyze(PackageTestCase):
"""Dependency analysis API tests."""
def test_trace_dependencies(self):
import test_trace_dep
obj = test_trace_dep.SumMod()
used_modules = analyze.trace_dependencies(obj, [(torch.randn(4),)])
self.assertNotIn("yaml", used_modules)
self.assertIn("test_trace_dep", used_modules)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_analyze.py
|
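A sketch of the analyze.trace_dependencies call from the test above, with the SumMod fixture inlined so it runs standalone: the object is executed once per sample input tuple and the modules it actually used during those calls come back as a collection of module names.

import torch
from torch.package import analyze

class SumMod(torch.nn.Module):
    def forward(self, inp):
        return torch.sum(inp)

# One (args,) tuple per sample call; the forward pass is traced while it runs.
used_modules = analyze.trace_dependencies(SumMod(), [(torch.randn(4),)])
print(sorted(used_modules))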
# Owner(s): ["oncall: package/deploy"]
from torch.package._digraph import DiGraph
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestDiGraph(PackageTestCase):
"""Test the DiGraph structure we use to represent dependencies in PackageExporter"""
def test_successors(self):
g = DiGraph()
g.add_edge("foo", "bar")
g.add_edge("foo", "baz")
g.add_node("qux")
self.assertIn("bar", list(g.successors("foo")))
self.assertIn("baz", list(g.successors("foo")))
self.assertEqual(len(list(g.successors("qux"))), 0)
def test_predecessors(self):
g = DiGraph()
g.add_edge("foo", "bar")
g.add_edge("foo", "baz")
g.add_node("qux")
self.assertIn("foo", list(g.predecessors("bar")))
self.assertIn("foo", list(g.predecessors("baz")))
self.assertEqual(len(list(g.predecessors("qux"))), 0)
def test_successor_not_in_graph(self):
g = DiGraph()
with self.assertRaises(ValueError):
g.successors("not in graph")
def test_predecessor_not_in_graph(self):
g = DiGraph()
with self.assertRaises(ValueError):
g.predecessors("not in graph")
def test_node_attrs(self):
g = DiGraph()
g.add_node("foo", my_attr=1, other_attr=2)
self.assertEqual(g.nodes["foo"]["my_attr"], 1)
self.assertEqual(g.nodes["foo"]["other_attr"], 2)
def test_node_attr_update(self):
g = DiGraph()
g.add_node("foo", my_attr=1)
self.assertEqual(g.nodes["foo"]["my_attr"], 1)
g.add_node("foo", my_attr="different")
self.assertEqual(g.nodes["foo"]["my_attr"], "different")
def test_edges(self):
g = DiGraph()
g.add_edge(1, 2)
g.add_edge(2, 3)
g.add_edge(1, 3)
g.add_edge(4, 5)
edge_list = list(g.edges)
self.assertEqual(len(edge_list), 4)
self.assertIn((1, 2), edge_list)
self.assertIn((2, 3), edge_list)
self.assertIn((1, 3), edge_list)
self.assertIn((4, 5), edge_list)
def test_iter(self):
g = DiGraph()
g.add_node(1)
g.add_node(2)
g.add_node(3)
nodes = set()
for n in g:
nodes.add(n)
self.assertEqual(nodes, set([1, 2, 3]))
def test_contains(self):
g = DiGraph()
g.add_node("yup")
self.assertTrue("yup" in g)
self.assertFalse("nup" in g)
def test_contains_non_hashable(self):
g = DiGraph()
self.assertFalse([1, 2, 3] in g)
def test_forward_closure(self):
g = DiGraph()
g.add_edge("1", "2")
g.add_edge("2", "3")
g.add_edge("5", "4")
g.add_edge("4", "3")
self.assertTrue(g.forward_transitive_closure("1") == set(["1", "2", "3"]))
self.assertTrue(g.forward_transitive_closure("4") == set(["4", "3"]))
def test_all_paths(self):
g = DiGraph()
g.add_edge("1", "2")
g.add_edge("1", "7")
g.add_edge("7", "8")
g.add_edge("8", "3")
g.add_edge("2", "3")
g.add_edge("5", "4")
g.add_edge("4", "3")
result = g.all_paths("1", "3")
# use a set to remove non-determinism in the edge ordering
actual = set([i.strip("\n") for i in result.split(";")[2:-1]])
expected = {
'"2" -> "3"',
'"1" -> "7"',
'"7" -> "8"',
'"1" -> "2"',
'"8" -> "3"',
}
self.assertEqual(actual, expected)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/package/test_digraph.py
|
result = "subpackage_1"
class PackageBSubpackage1Object_0:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
|
pytorch-master
|
test/package/package_b/subpackage_1.py
|
__import__("subpackage_1", globals(), fromlist=["PackageBSubpackage1Object_0"], level=1)
__import__("subpackage_0.subsubpackage_0", globals(), fromlist=[""], level=1)
__import__("subpackage_2", globals=globals(), locals=locals(), fromlist=["*"], level=1)
result = "package_b"
class PackageBObject:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
|
pytorch-master
|
test/package/package_b/__init__.py
|
__import__("math", fromlist=[])
__import__("xml.sax.xmlreader")
result = "subpackage_2"
class PackageBSubpackage2Object_0:
pass
def dynamic_import_test(name: str):
__import__(name)
|
pytorch-master
|
test/package/package_b/subpackage_2.py
|
result = "subpackage_0"
|
pytorch-master
|
test/package/package_b/subpackage_0/__init__.py
|
__import__("subpackage_1", globals(), locals(), ["PackageBSubpackage1Object_0"], 3)
result = "subsubpackage_0"
class PackageBSubsubpackage0Object_0:
pass
|
pytorch-master
|
test/package/package_b/subpackage_0/subsubpackage_0/__init__.py
|
import torch
import yaml
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
|
pytorch-master
|
test/package/test_trace_dep/__init__.py
|
# Owner(s): ["oncall: package/deploy"]
import torch
try:
from torchvision.models import resnet18
class TorchVisionTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.tvmod = resnet18()
def forward(self, x):
x = a_non_torch_leaf(x, x)
return torch.relu(x + 3.0)
except ImportError:
pass
def a_non_torch_leaf(a, b):
return a + b
|
pytorch-master
|
test/package/package_c/test_module.py
|
result = "package_c"
class PackageCObject:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
|
pytorch-master
|
test/package/package_c/__init__.py
|
import torch
from .subpackage_0 import important_string
class ImportsIndirectlyFromSubPackage(torch.nn.Module):
key = important_string
def forward(self, inp):
return torch.sum(inp)
|
pytorch-master
|
test/package/package_d/imports_indirectly.py
|
pytorch-master
|
test/package/package_d/__init__.py
|
|
import torch
from .subpackage_0.subsubpackage_0 import important_string
class ImportsDirectlyFromSubSubPackage(torch.nn.Module):
key = important_string
def forward(self, inp):
return torch.sum(inp)
|
pytorch-master
|
test/package/package_d/imports_directly.py
|
from .subsubpackage_0 import important_string
|
pytorch-master
|
test/package/package_d/subpackage_0/__init__.py
|
important_string = "subsubpackage_0"
|
pytorch-master
|
test/package/package_d/subpackage_0/subsubpackage_0/__init__.py
|
if "__torch_package__" in dir():
def is_from_package():
return True
else:
def is_from_package():
return False
|
pytorch-master
|
test/package/package_a/use_dunder_package.py
|
# Owner(s): ["oncall: package/deploy"]
import torch
from torch.fx import wrap
wrap("a_non_torch_leaf")
class ModWithSubmod(torch.nn.Module):
def __init__(self, script_mod):
super().__init__()
self.script_mod = script_mod
def forward(self, x):
return self.script_mod(x)
class ModWithTensor(torch.nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
return self.tensor * x
class ModWithSubmodAndTensor(torch.nn.Module):
def __init__(self, tensor, sub_mod):
super().__init__()
self.tensor = tensor
self.sub_mod = sub_mod
def forward(self, x):
return self.sub_mod(x) + self.tensor
class ModWithTwoSubmodsAndTensor(torch.nn.Module):
def __init__(self, tensor, sub_mod_0, sub_mod_1):
super().__init__()
self.tensor = tensor
self.sub_mod_0 = sub_mod_0
self.sub_mod_1 = sub_mod_1
def forward(self, x):
return self.sub_mod_0(x) + self.sub_mod_1(x) + self.tensor
class ModWithMultipleSubmods(torch.nn.Module):
def __init__(self, mod1, mod2):
super().__init__()
self.mod1 = mod1
self.mod2 = mod2
def forward(self, x):
return self.mod1(x) + self.mod2(x)
class SimpleTest(torch.nn.Module):
def forward(self, x):
x = a_non_torch_leaf(x, x)
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
|
pytorch-master
|
test/package/package_a/test_module.py
|
# Owner(s): ["oncall: package/deploy"]
from torch.fx import Tracer
class TestAllLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return True
|
pytorch-master
|
test/package/package_a/test_all_leaf_modules_tracer.py
|
result = "package_a"
class PackageAObject:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
|
pytorch-master
|
test/package/package_a/__init__.py
|
import torch
from torch import Tensor
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
class OrigModule(torch.nn.Module):
"""A module that implements ModuleInterface."""
def __init__(self):
super(OrigModule, self).__init__()
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 + inp2 + 1
def two(self, input: Tensor) -> Tensor:
return input + 2
def forward(self, input: Tensor) -> Tensor:
return input + self.one(input, input) + 1
class NewModule(torch.nn.Module):
"""A *different* module that implements ModuleInterface."""
def __init__(self):
super(NewModule, self).__init__()
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
def forward(self, input: Tensor) -> Tensor:
return self.one(input, input + 1)
class UsesInterface(torch.nn.Module):
proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.one(input, input)
|
pytorch-master
|
test/package/package_a/fake_interface.py
|
from typing import Any
import torch
@torch.jit.script
class MyScriptClass:
"""Intended to be scripted."""
def __init__(self, x):
self.foo = x
def set_foo(self, x):
self.foo = x
@torch.jit.script
def uses_script_class(x):
"""Intended to be scripted."""
foo = MyScriptClass(x)
return foo.foo
class IdListFeature:
def __init__(self):
self.id_list = torch.ones(1, 1)
def returns_self(self) -> "IdListFeature":
return IdListFeature()
class UsesIdListFeature(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, feature: Any):
if isinstance(feature, IdListFeature):
return feature.id_list
else:
return feature
|
pytorch-master
|
test/package/package_a/fake_script_class.py
|
try:
import torch_package_importer # noqa: F401
except ImportError:
pass
|
pytorch-master
|
test/package/package_a/use_torch_package_importer.py
|
import os # noqa: F401
import os.path # noqa: F401
import typing # noqa: F401
import typing.io # noqa: F401
import typing.re # noqa: F401
import torch
class Module(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return os.path.abspath("test")
|
pytorch-master
|
test/package/package_a/std_sys_module_hacks.py
|
# Owner(s): ["oncall: package/deploy"]
import torch
class TestNnModule(torch.nn.Module):
def __init__(self, nz=6, ngf=9, nc=3):
super(TestNnModule, self).__init__()
self.main = torch.nn.Sequential(
# input is Z, going into a convolution
torch.nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
torch.nn.BatchNorm2d(ngf * 8),
torch.nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
torch.nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
torch.nn.BatchNorm2d(ngf * 4),
torch.nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
torch.nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
torch.nn.BatchNorm2d(ngf * 2),
torch.nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
torch.nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
torch.nn.BatchNorm2d(ngf),
torch.nn.ReLU(True),
# state size. (ngf) x 32 x 32
torch.nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
torch.nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
return self.main(input)
|
pytorch-master
|
test/package/package_a/test_nn_module.py
|
result = "package_a.subpackage"
class PackageASubpackageObject:
pass
def leaf_function(a, b):
return a + b
|
pytorch-master
|
test/package/package_a/subpackage.py
|
pytorch-master
|
test/expect/__init__.py
|
|
import warnings
from torch.onnx import _CAFFE2_ATEN_FALLBACK
if not _CAFFE2_ATEN_FALLBACK:
warnings.warn("Caffe2 support is not fully enabled in this PyTorch build. "
"Please enable Caffe2 by building PyTorch from source with `BUILD_CAFFE2=1` flag.")
|
pytorch-master
|
caffe2/__init__.py
|
pytorch-master
|
caffe2/core/__init__.py
|
|
pytorch-master
|
caffe2/core/nomnigraph/__init__.py
|
|
#!/usr/bin/env python3
import argparse
from textwrap import dedent
from subprocess import call
def parse_lines(lines):
# States
EMPTY = 0
OP = 1
MACRO = 2
parse_state = EMPTY
# Preprocess the macros
curr_macro = ""
macros = {}
index = 0
while index < len(lines):
line = lines[index]
if line.lower().startswith("macro"):
assert parse_state == EMPTY
macro_line = line.split(" ")
# Support macros that look like attributes
# e.g. macro - CONV_LIKE
curr_macro = " ".join(macro_line[1:])
assert curr_macro not in macros, 'Macro "{}" defined twice.'.format(
curr_macro
)
macros[curr_macro] = []
parse_state = MACRO
lines = lines[:index] + lines[index + 1 :]
continue
elif line.lower().startswith("endmacro"):
assert parse_state == MACRO
parse_state = EMPTY
lines = lines[:index] + lines[index + 1 :]
continue
elif parse_state == MACRO:
macros[curr_macro].append(line)
lines = lines[:index] + lines[index + 1 :]
continue
index += 1
index = 0
while index < len(lines):
line = lines[index]
if line in macros:
lines = lines[:index] + macros[line] + lines[index + 1 :]
index += len(macros[line]) - 1
index += 1
# Now parse the file
curr_op = ""
# dict of the form
# opName : { attributes: [], ... }
ops = {}
# To preserve parsing order for dependencies (for things like init_from)
op_list = []
for line in lines:
if not len(line):
continue
if line[0] == "-":
assert parse_state is OP
attr = [_.strip() for _ in line[1:].split(":")]
assert attr[0][0].isupper()
if len(attr) == 2: # attribute : type
ops[curr_op]["attributes"].append((attr[0], attr[1]))
elif len(attr) == 3:  # attribute : type : default
ops[curr_op]["attributes"].append((attr[0], attr[1], attr[2]))
else:
op = [l.strip() for l in line.split(":")]
assert len(op[0].split(" ")) == 1
parse_state = OP
curr_op = op[0]
assert curr_op not in ops
ops[curr_op] = {}
op_list.append(curr_op)
if len(op) > 1:
ops[curr_op]["init_from"] = [op[1]]
ops[curr_op]["attributes"] = []
return ops, op_list
def gen_class(op, op_def):
attributes = op_def["attributes"]
attribute_args = []
default_init = "NeuralNetOperator(NNKind::{op})".format(op=op)
attribute_init = [default_init]
attribute_declarations = []
attribute_getters = []
attribute_setters = []
for attr in attributes:
lower_name = attr[0][0].lower() + attr[0][1:]
private_name = lower_name + "_"
default_arg = "" if len(attr) < 3 else " = {}".format(attr[2])
name = attr[0]
t = attr[1]
attr_arg = "{type} {lower_name}".format(
type=t, lower_name=lower_name + default_arg
)
attr_init = "{private_name}({lower_name})".format(
private_name=private_name, lower_name=lower_name)
attr_declare = "{type} {private_name};".format(
type=t, private_name=private_name)
attr_get = dedent(
"""
{type} get{name}() const {{
return {private_name};
}}
""".format(
type=t, name=name, private_name=private_name
)
)
attr_set = dedent(
"""
void set{name}({type} {lower_name}) {{
{private_name} = {lower_name};
}}
""".format(
type=t, name=name, private_name=private_name, lower_name=lower_name
)
)
attribute_args.append(attr_arg)
attribute_init.append(attr_init)
attribute_declarations.append(attr_declare)
attribute_getters.append(attr_get)
attribute_setters.append(attr_set)
extra_init = ""
if "init_from" in op_def:
for other_op in op_def["init_from"]:
lower_other_op = other_op[0].lower() + other_op[1:]
other_init = [default_init]
for attr in attributes:
lower_name = attr[0][0].lower() + attr[0][1:]
private_name = lower_name + "_"
other_init.append(
"{private_name}({other_op}.get{name}())".format(
name=attr[0], private_name=private_name, other_op=lower_other_op
)
)
init = dedent(
"""
{op}(const {other_op}& {lower_other_op}) :
{other_init} {{}}
""".format(
op=op,
other_op=other_op,
lower_other_op=lower_other_op,
other_init=",\n ".join(other_init),
)
)
extra_init += init
return dedent(
"""
class {op} : public NeuralNetOperator {{
public:
{op}({attribute_args}) :
{attribute_init} {{}}
{extra_init}
~{op}() {{}}
NOMNIGRAPH_DEFINE_NN_RTTI({op});
{getters}{setters}
private:
{attribute_declarations}
}};
""".format(
op=op,
extra_init=extra_init,
getters="".join(attribute_getters),
setters="".join(attribute_setters),
attribute_args=",\n".join(attribute_args),
attribute_init=",\n".join(attribute_init),
attribute_declarations="\n".join(attribute_declarations),
)
)
def gen_classes(ops, op_list):
f = ""
for op in op_list:
f += gen_class(op, ops[op])
return f
def gen_enum(op_list):
return ",\n".join([op for op in op_list]) + "\n"
def gen_names(op_list):
f = ""
for op in op_list:
f += dedent(
"""
case NNKind::{name}:
return \"{name}\";
""".format(
name=op
)
)
return f
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate op files.")
parser.add_argument("--install_dir", help="installation directory")
parser.add_argument("--source_def", help="ops.def", action="append")
args = parser.parse_args()
install_dir = args.install_dir
sources = args.source_def
lines = []
for source in sources:
with open(source, "rb") as f:
lines_tmp = f.readlines()
lines += [l.strip().decode("utf-8") for l in lines_tmp]
ops, op_list = parse_lines(lines)
with open(install_dir + "/OpClasses.h", "wb") as f:
f.write(gen_classes(ops, op_list).encode("utf-8"))
with open(install_dir + "/OpNames.h", "wb") as f:
f.write(gen_names(op_list).encode("utf-8"))
with open(install_dir + "/OpEnum.h", "wb") as f:
f.write(gen_enum(op_list).encode("utf-8"))
try:
cmd = ["clang-format", "-i", install_dir + "/OpClasses.h"]
call(cmd)
cmd = ["clang-format", "-i", install_dir + "/OpNames.h"]
call(cmd)
cmd = ["clang-format", "-i", install_dir + "/OpEnum.h"]
call(cmd)
except Exception:
pass
|
pytorch-master
|
caffe2/core/nomnigraph/op_gen.py
|
import warnings
# NOTE: we have to import python protobuf here **before** we load cpp extension.
# Otherwise it breaks under certain build conditions if cpp implementation of
# protobuf is used. Presumably there's some registry in protobuf library and
# python side has to initialize the dictionary first, before static
# initialization in python extension does so. Otherwise, duplicated protobuf
# descriptors will be created and it can lead to obscure errors like
# "Parameter to MergeFrom() must be instance of same class:
# expected caffe2.NetDef got caffe2.NetDef."
#
# This has to be done for all python targets, so listing them here
try:
from caffe2.proto import caffe2_pb2, metanet_pb2, torch_pb2
except ImportError:
warnings.warn('Caffe2 support is not enabled in this PyTorch build. '
'Please enable Caffe2 by building PyTorch from source with `BUILD_CAFFE2=1` flag.')
raise
try:
from caffe2.caffe2.fb.session.proto import session_pb2
except ImportError:
pass
|
pytorch-master
|
caffe2/proto/__init__.py
|