|
|
|
|
|
import abc |
|
import collections |
|
import contextlib |
|
import dataclasses |
|
import enum |
|
import functools |
|
import inspect |
|
import itertools |
|
import logging |
|
import math |
|
import operator |
|
import re |
|
import sys |
|
import types |
|
from typing import Any, List, NamedTuple, Optional, Union |
|
|
|
from torch.utils._sympy.value_ranges import ValueRanges |
|
|
|
try: |
|
import numpy as np |
|
except ModuleNotFoundError: |
|
np = None |
|
|
|
import torch |
|
|
|
from torch import SymInt |
|
from torch._guards import GuardSource, TracingContext |
|
from torch._higher_order_ops.torchbind import call_torchbind |
|
from torch._ops import HigherOrderOperator |
|
from torch._streambase import _EventBase, _StreamBase |
|
from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode |
|
from torch._subclasses.meta_utils import is_sparse_any |
|
from torch.fx.experimental._backward_state import BackwardState |
|
from torch.fx.experimental.symbolic_shapes import ( |
|
_constrain_range_for_size, |
|
DimDynamic, |
|
RelaxedUnspecConstraint, |
|
StatefulSymbolicContext, |
|
SubclassSymbolicContext, |
|
SymbolicContext, |
|
) |
|
from torch.fx.immutable_collections import immutable_dict, immutable_list |
|
from torch.utils._python_dispatch import is_traceable_wrapper_subclass |
|
from torch.utils.weak import TensorWeakRef |
|
from .. import config, mutation_guard, replay_record, trace_rules |
|
|
|
from ..device_interface import get_registered_device_interfaces |
|
from ..exc import InternalTorchDynamoError, unimplemented |
|
from ..guards import GuardBuilder, install_guard, make_dupe_guard |
|
from ..side_effects import SideEffects |
|
from ..source import ( |
|
AttrSource, |
|
CallMethodItemSource, |
|
ConstantSource, |
|
ConstDictKeySource, |
|
ConvertIntSource, |
|
FloatTensorSource, |
|
GetItemSource, |
|
GradSource, |
|
is_cell_contents, |
|
is_constant_source, |
|
is_from_defaults, |
|
is_from_optimizer_source, |
|
LocalSource, |
|
NumpyTensorSource, |
|
OptimizerSource, |
|
RandomValueSource, |
|
Source, |
|
TupleIteratorGetItemSource, |
|
) |
|
from ..trace_rules import ( |
|
is_callable_allowed, |
|
is_numpy, |
|
is_numpy_dtype, |
|
is_numpy_type_info, |
|
) |
|
from ..utils import ( |
|
build_checkpoint_variable, |
|
clone_input, |
|
common_constant_types, |
|
get_fake_value, |
|
get_locals_to_steal, |
|
get_static_address_type, |
|
is_function_or_wrapper, |
|
is_namedtuple, |
|
is_typing, |
|
is_utils_checkpoint, |
|
istype, |
|
odict_values, |
|
proxy_args_kwargs, |
|
set_example_value, |
|
tensor_always_has_static_shape, |
|
tuple_iterator, |
|
tuple_iterator_getitem, |
|
tuple_iterator_len, |
|
unwrap_with_attr_name_if_wrapper, |
|
wrap_fake_exception, |
|
) |
|
|
|
from .base import MutableLocal, typestr, VariableTracker, VariableTrackerMeta |
|
from .constant import ConstantVariable, EnumVariable |
|
from .ctx_manager import ( |
|
AutocastModeVariable, |
|
EventVariable, |
|
NullContextVariable, |
|
PreserveVersionContextVariable, |
|
StreamContextVariable, |
|
StreamVariable, |
|
) |
|
from .dicts import ( |
|
ConstDictVariable, |
|
DataClassVariable, |
|
DefaultDictVariable, |
|
HFPretrainedConfigVariable, |
|
PythonSysModulesVariable, |
|
SetVariable, |
|
) |
|
from .distributed import ( |
|
DeviceMeshVariable, |
|
PlacementClassVariable, |
|
PlacementVariable, |
|
ProcessGroupVariable, |
|
WorldMetaClassVariable, |
|
) |
|
from .functions import ( |
|
CollectiveFunctionRewriteVariable, |
|
FunctoolsPartialVariable, |
|
TritonKernelVariable, |
|
UserMethodVariable, |
|
) |
|
from .higher_order_ops import TorchHigherOrderOperatorVariable |
|
from .iter import ItertoolsVariable |
|
from .lazy import LazyVariableTracker |
|
from .lists import ( |
|
BaseListVariable, |
|
ListVariable, |
|
NamedTupleVariable, |
|
RangeVariable, |
|
RestrictedListSubclassVariable, |
|
SizeVariable, |
|
SliceVariable, |
|
TupleIteratorVariable, |
|
TupleVariable, |
|
) |
|
from .misc import ( |
|
AutogradFunctionContextVariable, |
|
AutogradFunctionVariable, |
|
ComptimeVariable, |
|
DebuggingVariable, |
|
DelayGraphBreakVariable, |
|
GetAttrVariable, |
|
GetSetDescriptorVariable, |
|
InspectSignatureVariable, |
|
LambdaVariable, |
|
LoggingLoggerVariable, |
|
MethodWrapperVariable, |
|
NumpyDTypeVariable, |
|
NumpyTypeInfoVariable, |
|
NumpyVariable, |
|
PythonModuleVariable, |
|
RegexPatternVariable, |
|
SavedTensorBox, |
|
TorchVersionVariable, |
|
TypingVariable, |
|
) |
|
from .nn_module import FSDPManagedNNModuleVariable, UnspecializedNNModuleVariable |
|
from .optimizer import OptimizerVariable |
|
from .script_object import TorchScriptObjectVariable |
|
|
|
from .sdpa import SDPAParamsVariable |
|
from .tensor import ( |
|
NumpyNdarrayVariable, |
|
SymNodeVariable, |
|
TensorSubclassVariable, |
|
TensorVariable, |
|
UnspecializedPythonVariable, |
|
) |
|
from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable |
|
from .torch_function import build_torch_function_fn, TensorWithTFOverrideVariable |
|
from .user_defined import ( |
|
KeyedJaggedTensorVariable, |
|
SourcelessGraphModuleVariable, |
|
UserDefinedClassVariable, |
|
UserDefinedObjectVariable, |
|
) |
|
|
|
|
|
log = logging.getLogger(__name__) |
|
|
|
|
|
DimList = List |
|
|
|
|
|
class _missing: |
|
pass |
|
|
|
|
|
@dataclasses.dataclass |
|
class GraphArg: |
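    # One input ("placeholder") of the FX graph Dynamo is building. `source` records how
    # to re-fetch the value at runtime (used for guards and for reconstruction in generated
    # bytecode); the example/fake values below are what tracing propagates through the graph.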
|
source: Source |
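    # When the example is a real torch.Tensor, __post_init__ swaps it for a TensorWeakRef so
    # a GraphArg does not keep user tensors alive; read it back via the `example` property.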
|
|
|
|
|
|
|
_example: Union[TensorWeakRef, torch.SymInt] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pass_arg_as_tensor: bool |
|
fake_tensor: Optional[torch._subclasses.fake_tensor.FakeTensor] |
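    # is_tensor=False marks non-tensor graph inputs (SymInts, stolen tensor lists, script
    # objects); example_strong_ref optionally keeps a strong reference to the example for
    # cases where only this GraphArg holds it (e.g. tensors created during wrapping).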
|
|
|
|
|
|
|
|
|
|
|
is_tensor: bool = True |
|
|
|
|
|
|
|
example_strong_ref: Optional[torch.Tensor] = None |
|
|
|
@property |
|
def example(self): |
|
if isinstance(self._example, TensorWeakRef): |
|
r = self._example() |
|
assert r is not None |
|
return r |
|
else: |
|
return self._example |
|
|
|
def __post_init__(self): |
|
if isinstance(self._example, torch.Tensor): |
|
self._example = TensorWeakRef(self._example) |
|
assert is_fake(self.fake_tensor) |
|
|
|
def reconstruct(self, codegen): |
|
self.source.reconstruct(codegen) |
|
|
|
def erase(self): |
|
self._example = None |
|
self.example_strong_ref = None |
|
|
|
def __eq__(self, other): |
|
return self.source.name() == other.source.name() |
|
|
|
|
|
class BackwardStateGraphArg(GraphArg): |
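    # Synthetic graph input wrapping a BackwardState (state threaded into the backward pass,
    # e.g. for hooks). It has no user source; reconstruct() emits code that creates a fresh
    # BackwardState and stores it in the output's backward_state_var.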
|
def __init__(self): |
|
super().__init__( |
|
source=None, |
|
_example=BackwardState(), |
|
pass_arg_as_tensor=False, |
|
fake_tensor=None, |
|
is_tensor=False, |
|
) |
|
|
|
def reconstruct(self, codegen): |
|
assert codegen.tx.output.backward_state_var |
|
codegen.load_import_from(BackwardState.__module__, "BackwardState") |
|
codegen.call_function(0, True) |
|
codegen.dup_top() |
|
codegen.store(codegen.tx.output.backward_state_var) |
|
|
|
|
|
@dataclasses.dataclass |
|
class FrameStateSizeEntry: |
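    # Automatic-dynamic bookkeeping: the scalar value / tensor sizes this input had in
    # previous frames. A field (or an individual dim) set to None means the value has
    # varied across frames and should be traced dynamically.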
|
scalar: Optional[int] |
|
size: Optional[List[int]] |
|
|
|
|
|
class VariableBuilder: |
|
"""Wrap a python value in a VariableTracker() instance""" |
|
|
|
def __init__( |
|
self, |
|
tx, |
|
source: Source, |
|
): |
|
assert ( |
|
source is not None |
|
), "Consider SourcelessBuilder for ephemeral objects, usually objects created locally." |
|
assert TracingContext.try_get() is not None, "Expected active TracingContext" |
|
super().__init__() |
|
self.tx = tx |
|
self.source = source |
|
self.name = source.name() |
|
|
|
def __call__(self, value): |
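        # Wrapping order: reuse an existing VariableTracker if this object was already
        # wrapped (aliasing -> duplicate-input guard), then consult the output graph's
        # variable tracker cache, and only then build a fresh tracker via _wrap().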
|
if value in self.tx.output.side_effects: |
|
side_effect_result = self.tx.output.side_effects[value] |
|
dup_guard = make_dupe_guard(self.source, side_effect_result.source) |
|
if dup_guard: |
|
self.install_guards(dup_guard) |
|
return side_effect_result |
|
|
|
cached_vt = self.tx.output.variable_tracker_cache.lookup(value, self.source) |
|
if cached_vt: |
|
return cached_vt |
|
|
|
vt = self._wrap(value) |
|
vt.source = self.source |
|
if self._can_lift_attrs_to_inputs(vt): |
|
vt = self.tx.output.side_effects.track_object_existing(value, vt) |
|
|
|
self.tx.output.variable_tracker_cache.add(value, self.source, vt) |
|
return vt |
|
|
|
def _can_lift_attrs_to_inputs(self, vt): |
|
if type(vt) in [ |
|
TensorVariable, |
|
TensorWithTFOverrideVariable, |
|
UserDefinedObjectVariable, |
|
NumpyNdarrayVariable, |
|
]: |
|
return True |
|
return False |
|
|
|
@staticmethod |
|
@functools.lru_cache(None) |
|
def _common_constants(): |
|
return { |
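            # 0 and 1 stay specialized even when specialize_int is off: they show up
            # constantly in user code and mirror the 0/1 specialization applied to sizes.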
|
|
|
|
|
0, |
|
1, |
|
|
|
|
|
|
|
|
|
} |
|
|
|
def get_source(self): |
|
return self.source |
|
|
|
def install_guards(self, *guards): |
|
source = self.get_source() |
|
if ( |
|
isinstance(source, ConstantSource) |
|
or source.guard_source() == GuardSource.CONSTANT |
|
): |
|
return None |
|
install_guard(*[source.make_guard(guard) for guard in guards], skip=1) |
|
return {} |
|
|
|
def set_source_and_track_mutable(self, value, var): |
|
assert isinstance(var, VariableTracker) |
|
var.source = self.source |
|
return self.tx.output.side_effects.track_mutable(value, var) |
|
|
|
@classmethod |
|
@functools.lru_cache(None) |
|
def _type_dispatch(cls): |
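        # Exact-type dispatch table: maps a concrete Python type (looked up via
        # type(value), not isinstance) to the wrap_* method that handles it.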
|
|
|
entries = [ |
|
( |
|
( |
|
torch.Tensor, |
|
torch.nn.Parameter, |
|
torch._subclasses.FakeTensor, |
|
torch._subclasses.functional_tensor.FunctionalTensor, |
|
), |
|
cls.wrap_tensor, |
|
), |
|
( |
|
(tuple, list, odict_values, collections.deque, torch.Size), |
|
cls.wrap_listlike, |
|
), |
|
(tuple_iterator, cls.wrap_tuple_iterator), |
|
((slice, range), cls.wrap_slice_range), |
|
(tuple(common_constant_types), cls.wrap_literal), |
|
(re.Pattern, cls.wrap_regex_pattern), |
|
] |
|
|
|
if config.trace_numpy and np: |
|
entries.append((np.ndarray, cls.wrap_numpy_ndarray)) |
|
|
|
result = {} |
|
for ts, fn in entries: |
|
for t in ts if isinstance(ts, tuple) else (ts,): |
|
assert t not in result |
|
result[t] = fn |
|
|
|
return result |
|
|
|
def wrap_regex_pattern(self, value: re.Pattern): |
|
|
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return RegexPatternVariable(value) |
|
|
|
@classmethod |
|
@functools.lru_cache(None) |
|
def _id_dispatch(cls): |
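        # Identity-based dispatch for well-known singleton objects
        # (inspect.signature, comptime, dataclasses.fields, torch.__version__).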
|
from ..comptime import comptime |
|
|
|
entries = [ |
|
( |
|
inspect.signature, |
|
lambda self, value: LambdaVariable( |
|
InspectSignatureVariable.create, |
|
source=self.source, |
|
**self.install_guards(GuardBuilder.CLOSURE_MATCH), |
|
), |
|
), |
|
(comptime, lambda self, value: ComptimeVariable()), |
|
( |
|
dataclasses.fields, |
|
lambda self, value: LambdaVariable( |
|
_dataclasses_fields_lambda, |
|
source=self.source, |
|
**self.install_guards(GuardBuilder.FUNCTION_MATCH), |
|
), |
|
), |
|
(torch.__version__, lambda self, value: TorchVersionVariable()), |
|
] |
|
|
|
result = {} |
|
for ts, fn in entries: |
|
for t in ts if isinstance(ts, (tuple, list)) else (ts,): |
|
assert t not in result |
|
result[id(t)] = fn |
|
|
|
return result |
|
|
|
def _wrap(self, value): |
|
|
|
from torch.utils._triton import has_triton |
|
|
|
if has_triton(): |
|
from triton.runtime.autotuner import Autotuner |
|
from triton.runtime.jit import JITFunction |
|
else: |
|
|
|
class JITFunction: |
|
pass |
|
|
|
class Autotuner: |
|
pass |
|
|
|
|
|
type_dispatch = self._type_dispatch().get(type(value)) |
|
if type_dispatch is not None: |
|
return type_dispatch(self, value) |
|
|
|
|
|
id_dispatch = self._id_dispatch().get(id(value)) |
|
if id_dispatch is not None: |
|
return id_dispatch(self, value) |
|
|
|
|
|
|
|
value = inspect.getattr_static(value, "_torchdynamo_inline", value) |
|
|
|
|
|
if is_traceable_wrapper_subclass(value) or istype( |
|
value, config.traceable_tensor_subclasses |
|
): |
|
return self.wrap_tensor(value) |
|
elif is_namedtuple(value): |
|
return self.wrap_listlike(value) |
|
|
|
elif value is torch.utils._pytree.SUPPORTED_NODES: |
|
|
|
|
|
self.install_guards(GuardBuilder.DICT_VERSION) |
|
|
|
|
|
|
|
self.tx.output.guard_on_key_order.add(self.source.name()) |
|
result = { |
|
ConstantVariable.create(k): UserDefinedObjectVariable( |
|
v, |
|
source=GetItemSource( |
|
self.get_source(), ConstDictKeySource(self.get_source(), i) |
|
), |
|
) |
|
for i, (k, v) in enumerate(value.items()) |
|
} |
|
return ConstDictVariable(result, type(value)) |
|
elif value is sys.modules: |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return PythonSysModulesVariable(source=self.source) |
|
elif istype(value, (dict, collections.defaultdict, collections.OrderedDict)): |
|
if not value and self.get_source().is_nn_module(): |
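                # Empty dict hanging off an nn.Module: guard only that it is still falsy
                # (BOOL_FALSE) instead of taking the more expensive SEQUENCE_LENGTH /
                # per-key guards used below.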
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.install_guards(GuardBuilder.BOOL_FALSE) |
|
else: |
|
self.install_guards(GuardBuilder.SEQUENCE_LENGTH) |
|
|
|
|
|
all_const = all(ConstantVariable.is_literal(k) for k in value.keys()) |
|
if all_const: |
|
|
|
|
|
self.install_guards(GuardBuilder.DICT_CONST_KEYS) |
|
else: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.tx.output.guard_on_key_order.add(self.source.name()) |
|
|
|
|
|
|
|
def build_key_value(i, k, v): |
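                # Literal keys become ConstantVariables directly; non-literal keys are
                # wrapped lazily through ConstDictKeySource, and values are wrapped lazily
                # from the corresponding GetItemSource.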
|
if all_const: |
|
key = ConstantVariable.create(k) |
|
source_key = k |
|
else: |
|
source_key = ConstDictKeySource(self.get_source(), i) |
|
key = LazyVariableTracker.create(k, source_key) |
|
|
|
source_value = GetItemSource(self.get_source(), source_key) |
|
value = LazyVariableTracker.create(v, source_value) |
|
|
|
return key, value |
|
|
|
result = dict( |
|
build_key_value(i, k, v) for i, (k, v) in enumerate(value.items()) |
|
) |
|
|
|
if istype(value, collections.defaultdict): |
|
factory_source = AttrSource(self.source, "default_factory") |
|
result = DefaultDictVariable( |
|
result, |
|
type(value), |
|
default_factory=VariableBuilder(self.tx, factory_source)( |
|
value.default_factory |
|
), |
|
source=self.source, |
|
) |
|
else: |
|
result = ConstDictVariable(result, type(value), source=self.source) |
|
|
|
return self.set_source_and_track_mutable(value, result) |
|
elif isinstance(value, torch.nn.Module): |
|
return self.wrap_module(value) |
|
elif ConstantVariable.is_literal(value): |
|
return self.wrap_literal(value) |
|
        elif istype(value, frozenset) and all(
            ConstantVariable.is_literal(x) for x in value
        ):
|
|
|
|
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return ConstantVariable.create(value=value, source=self.source) |
|
elif isinstance(value, enum.Enum): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return EnumVariable(value=value, source=self.source) |
|
elif DebuggingVariable.is_reorderable_logging_function(value): |
|
|
|
|
|
self.install_guards(GuardBuilder.BUILTIN_MATCH) |
|
return DebuggingVariable(value, source=self.source) |
|
elif isinstance(value, logging.Logger): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return LoggingLoggerVariable(value, source=self.source) |
|
elif is_utils_checkpoint(value): |
|
return build_checkpoint_variable(source=self.source) |
|
elif isinstance(value, functools.partial): |
|
func_src = AttrSource(self.get_source(), "func") |
|
func_obj = VariableBuilder(self.tx, func_src)(value.func) |
|
|
|
args = [] |
|
args_source = AttrSource(self.get_source(), "args") |
|
for i, arg in enumerate(value.args): |
|
args.append( |
|
VariableBuilder(self.tx, GetItemSource(args_source, i))(arg) |
|
) |
|
|
|
keywords = {} |
|
keywords_source = AttrSource(self.get_source(), "keywords") |
|
for k, v in value.keywords.items(): |
|
if not ConstantVariable.is_literal(k): |
|
unimplemented("functools.partial with non-literal keyword") |
|
keywords[k] = VariableBuilder( |
|
self.tx, GetItemSource(keywords_source, k) |
|
)(v) |
|
|
|
install_guard( |
|
self.get_source().make_guard(GuardBuilder.TYPE_MATCH), |
|
keywords_source.make_guard(GuardBuilder.DICT_KEYS), |
|
args_source.make_guard(GuardBuilder.SEQUENCE_LENGTH), |
|
) |
|
return FunctoolsPartialVariable(func_obj, args, keywords) |
|
elif is_typing(value): |
|
|
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return TypingVariable( |
|
value, |
|
source=self.source, |
|
) |
|
elif np is not None and isinstance(value, np.generic): |
|
|
|
return self.wrap_numpy_ndarray(np.asarray(value)) |
|
elif is_numpy(value): |
|
assert np |
|
self.install_guards( |
|
GuardBuilder.FUNCTION_MATCH |
|
if callable(value) |
|
else GuardBuilder.TYPE_MATCH |
|
) |
|
return NumpyVariable(value, source=self.source) |
|
elif is_numpy_dtype(value): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return NumpyDTypeVariable(value, source=self.source) |
|
elif is_numpy_type_info(value): |
|
if isinstance(value, np.iinfo): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
dt_source = AttrSource(self.source, "dtype") |
|
install_guard(dt_source.make_guard(GuardBuilder.ID_MATCH)) |
|
else: |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return NumpyTypeInfoVariable(value, source=self.source) |
|
|
|
elif CollectiveFunctionRewriteVariable.can_rewrite(value): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return CollectiveFunctionRewriteVariable.create( |
|
self.tx, |
|
value, |
|
source=self.source, |
|
) |
|
elif istype(value, torch.autograd.function.FunctionMeta): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return AutogradFunctionVariable( |
|
value, |
|
source=self.source, |
|
) |
|
elif isinstance(value, torch.autograd.function.FunctionCtx): |
|
actual_saved_tensors = None |
|
try: |
|
actual_saved_tensors = value.saved_tensors |
|
except RuntimeError: |
|
pass |
|
|
|
saved_tensors = [] |
|
guards = [self.source.make_guard(GuardBuilder.TYPE_MATCH)] |
|
if isinstance(actual_saved_tensors, tuple): |
|
saved_tensors_source = AttrSource(self.source, "saved_tensors") |
|
guards.append( |
|
saved_tensors_source.make_guard(GuardBuilder.SEQUENCE_LENGTH) |
|
) |
|
for i, v in enumerate(actual_saved_tensors): |
|
saved_tensors.append( |
|
VariableBuilder( |
|
self.tx, GetItemSource(saved_tensors_source, i) |
|
)(v) |
|
) |
|
install_guard(*guards) |
|
|
|
return self.tx.output.side_effects.track_object_existing( |
|
value, |
|
AutogradFunctionContextVariable( |
|
value, |
|
source=self.source, |
|
saved_tensors=SavedTensorBox(saved_tensors), |
|
), |
|
) |
|
elif ( |
|
isinstance(value, types.MethodType) |
|
and istype( |
|
getattr(value, "__self__", None), torch.autograd.function.FunctionMeta |
|
) |
|
and getattr(value, "__name__", "") == "apply" |
|
and value == getattr(value.__self__, "apply", None) |
|
): |
|
|
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return GetAttrVariable( |
|
AutogradFunctionVariable( |
|
value.__self__, source=AttrSource(self.source, member="__self__") |
|
), |
|
"apply", |
|
) |
|
elif callable(value) and trace_rules.lookup_callable(value) is not None: |
|
if is_callable_allowed(value): |
|
self.tx.output.has_user_defined_allowed_in_graph = True |
|
return trace_rules.lookup_callable(value).create_with_source( |
|
value, source=self.source |
|
) |
|
elif np and isinstance(value, np.number): |
|
return self.wrap_unspecialized_primitive(value) |
|
elif DataClassVariable.is_matching_object(value): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
return DataClassVariable.wrap(self, value) |
|
elif HFPretrainedConfigVariable.is_matching_object(value): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
return HFPretrainedConfigVariable(value) |
|
elif isinstance(value, HigherOrderOperator): |
|
self.install_guards(GuardBuilder.TYPE_MATCH, GuardBuilder.NAME_MATCH) |
|
return TorchHigherOrderOperatorVariable.make(value, source=self.source) |
|
elif isinstance(value, torch.cuda.StreamContext): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
stream_source = AttrSource(self.source, "stream") |
|
stream_var = VariableBuilder(self.tx, stream_source)(value.stream) |
|
return StreamContextVariable.create(self.tx, stream_var) |
|
elif isinstance(value, _StreamBase): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
stream_proxy = self.tx.output.create_proxy( |
|
"call_function", |
|
torch.cuda.Stream, |
|
(), |
|
{ |
|
"stream_id": value.stream_id, |
|
"device_index": value.device_index, |
|
"device_type": value.device_type, |
|
}, |
|
) |
|
set_example_value(stream_proxy.node, value) |
|
return StreamVariable( |
|
stream_proxy, |
|
value, |
|
value.device, |
|
source=self.source, |
|
) |
|
        elif isinstance(value, torch._C._SDPAParams):
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
return SDPAParamsVariable.create(self.tx, value, self.source) |
|
elif isinstance(value, _EventBase): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return EventVariable( |
|
None, |
|
value, |
|
source=self.source, |
|
) |
|
elif ( |
|
isinstance(value, torch._C._TensorMeta) |
|
and value in config.traceable_tensor_subclasses |
|
): |
|
return TensorSubclassVariable(value, source=self.source) |
|
elif ( |
|
istype(value, contextlib.nullcontext) |
|
and inspect.getattr_static(value, "enter_result", None) is None |
|
): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
return NullContextVariable(source=self.source) |
|
elif KeyedJaggedTensorVariable.is_matching_object(value): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
result = KeyedJaggedTensorVariable(value, source=self.source) |
|
|
|
return self.tx.output.side_effects.track_object_existing(value, result) |
|
elif isinstance(value, torch.optim.Optimizer): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
self.source = OptimizerSource(self.source) |
|
return OptimizerVariable(value, source=self.source) |
|
elif WorldMetaClassVariable.is_group_member_type(value): |
|
return WorldMetaClassVariable(value, source=self.source) |
|
elif ProcessGroupVariable.is_process_group(value): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return ProcessGroupVariable(value, source=self.source) |
|
elif DeviceMeshVariable.is_device_mesh(value): |
|
|
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return DeviceMeshVariable(value, source=self.source) |
|
elif PlacementClassVariable.is_placement_type(value): |
|
|
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return PlacementClassVariable(value, source=self.source) |
|
elif PlacementVariable.is_placement(value): |
|
|
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return PlacementVariable( |
|
value, |
|
source=self.source, |
|
) |
|
elif istype(value, type) and value in itertools.__dict__.values(): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return ItertoolsVariable(value, source=self.source) |
|
elif isinstance(value, torch.SymBool): |
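            # Dynamo has no SymBool graph inputs: model the SymBool as a fresh unspecified
            # SymInt input (ConvertIntSource records the int() conversion for guards) and
            # return `new_symint == 1` as the boolean value.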
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
value_hint = value.node.require_hint() |
|
new_source = ConvertIntSource(self.source) |
|
|
|
new_symint = self.tx.output.shape_env.create_unspecified_symint_and_symbol( |
|
int(value_hint), |
|
new_source, |
|
dynamic_dim=DimDynamic.DYNAMIC, |
|
) |
|
|
|
sym_node_proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), |
|
type(new_symint), |
|
source=new_source, |
|
) |
|
|
|
sym_node_proxy.node.meta["grapharg"] = GraphArg( |
|
new_source, |
|
new_symint, |
|
False, |
|
None, |
|
is_tensor=False, |
|
example_strong_ref=new_symint, |
|
) |
|
self.tx.output.bound_symbols.add(new_symint.node.expr) |
|
self.tx.output.tracked_fakes.append( |
|
TrackedFake(new_symint, new_source, None) |
|
) |
|
return SymNodeVariable( |
|
sym_node_proxy, |
|
new_symint == 1, |
|
) |
|
elif isinstance(value, (JITFunction, Autotuner)): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return TritonKernelVariable( |
|
value, |
|
None, |
|
None, |
|
source=self.source, |
|
) |
|
elif isinstance(value, torch.amp.autocast_mode.autocast): |
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return AutocastModeVariable( |
|
target_values=[ |
|
value.device, |
|
value.fast_dtype, |
|
value._enabled, |
|
value._cache_enabled, |
|
], |
|
source=self.source, |
|
) |
|
elif TorchCtxManagerClassVariable.is_matching_cls(value): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return TorchCtxManagerClassVariable(value, source=self.source) |
|
elif is_function_or_wrapper(value): |
|
value, attr_name = unwrap_with_attr_name_if_wrapper(value) |
|
|
|
|
|
if attr_name is not None: |
|
self.source = AttrSource(self.source, attr_name) |
|
return trace_rules.lookup(value).create_with_source( |
|
value, source=self.source |
|
) |
|
|
|
|
|
|
|
elif isinstance(value, (types.ModuleType, replay_record.DummyModule)): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return PythonModuleVariable( |
|
value, |
|
source=self.source, |
|
) |
|
elif isinstance(value, types.MethodType) and isinstance( |
|
value.__self__, (torch.nn.Module, torch.utils._pytree.TreeSpec) |
|
): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self_obj = VariableBuilder( |
|
self.tx, source=AttrSource(self.source, "__self__") |
|
)(value.__self__) |
|
assert self_obj and isinstance( |
|
self_obj, VariableTracker |
|
), "Failed to produce a valid self obj" |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return UserMethodVariable( |
|
value.__func__, |
|
self_obj, |
|
source=self.source, |
|
) |
|
elif isinstance(value, types.GetSetDescriptorType): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return GetSetDescriptorVariable(value) |
|
elif isinstance(value, types.MethodWrapperType): |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return MethodWrapperVariable(value) |
|
elif issubclass(type(value), type): |
|
if value in (torch.utils.hooks.BackwardHook, torch.nn.Parameter): |
|
|
|
return trace_rules.lookup(value).create_with_source( |
|
value, source=self.source |
|
) |
|
if value is torch.autograd._unsafe_preserve_version_counter: |
|
self.install_guards(GuardBuilder.FUNCTION_MATCH) |
|
return PreserveVersionContextVariable.constructor(self.tx) |
|
|
|
|
|
self.install_guards(GuardBuilder.ID_MATCH) |
|
return UserDefinedClassVariable( |
|
value, |
|
source=self.source, |
|
) |
|
elif RestrictedListSubclassVariable.is_matching_cls(type(value)): |
|
self.install_guards(GuardBuilder.SEQUENCE_LENGTH) |
|
return self.set_source_and_track_mutable( |
|
value, |
|
RestrictedListSubclassVariable( |
|
[ |
|
LazyVariableTracker.create( |
|
value=value[i], source=GetItemSource(self.source, i) |
|
) |
|
for i in range(len(value)) |
|
], |
|
user_cls=type(value), |
|
user_cls_source=AttrSource(self.source, "__class__"), |
|
), |
|
) |
|
elif TorchScriptObjectVariable.is_matching_cls(type(value)): |
|
from ..source import ( |
|
FlattenScriptObjectSource, |
|
ScriptObjectQualifiedNameSource, |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
if not hasattr(value, "__obj_flatten__"): |
|
return self.wrap_user_defined(value) |
|
|
|
|
|
LazyVariableTracker.realize_all( |
|
VariableBuilder(self.tx, ScriptObjectQualifiedNameSource(self.source))( |
|
value._type().qualified_name() |
|
) |
|
) |
|
|
|
|
|
LazyVariableTracker.realize_all( |
|
VariableBuilder(self.tx, FlattenScriptObjectSource(self.source))( |
|
value.__obj_flatten__() |
|
) |
|
) |
|
|
|
fake_script_obj = torch._library.fake_class_registry.to_fake_obj( |
|
self.tx.output.fake_mode, value |
|
) |
|
|
|
proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), |
|
type(value), |
|
source=self.source, |
|
) |
|
|
|
|
|
|
|
|
|
proxy.node.meta["grapharg"] = GraphArg( |
|
self.source, value, False, None, False, fake_script_obj |
|
) |
|
return TorchScriptObjectVariable.create( |
|
proxy, |
|
fake_script_obj, |
|
source=self.source, |
|
) |
|
else: |
|
return self.wrap_user_defined(value) |
|
|
|
def wrap_user_defined(self, value: Any): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
result = UserDefinedObjectVariable(value, source=self.source) |
|
if not SideEffects.cls_supports_mutation_side_effects(type(value)): |
|
|
|
return result |
|
return self.tx.output.side_effects.track_object_existing(value, result) |
|
|
|
def wrap_listlike(self, value: Union[tuple, list, odict_values, NamedTuple]): |
|
if config.specialize_int and type(value) is torch.Size: |
|
self.install_guards(GuardBuilder.CONSTANT_MATCH) |
|
return ConstantVariable.create(value=value) |
|
|
|
|
|
self.install_guards(GuardBuilder.SEQUENCE_LENGTH) |
|
|
|
for item in value: |
|
if item is value: |
|
unimplemented("list elements are pointing to the list itself") |
|
|
|
output = [ |
|
LazyVariableTracker.create(item, source=GetItemSource(self.get_source(), i)) |
|
for i, item in enumerate(value) |
|
] |
|
|
|
maybe_gm = self.tx.output.local_scope.get("self") |
|
if isinstance( |
|
self.source, LocalSource |
|
) and self.source.local_name in get_locals_to_steal(maybe_gm): |
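            # This local is flagged by get_locals_to_steal: pass the whole list as a single
            # graph input and tag the proxy with "steal_arg" so later stages know they may
            # take ownership of (and free) the original tensors.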
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
source = self.source |
|
assert isinstance(value, list) |
|
tensor_list_proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(value), source=source |
|
) |
|
tensor_list_proxy.node.meta["steal_arg"] = True |
|
|
|
list_variable = wrap_fx_proxy_cls( |
|
target_cls=TensorVariable, |
|
tx=self.tx, |
|
proxy=tensor_list_proxy, |
|
example_value=value, |
|
subclass_type=None, |
|
source=source, |
|
) |
|
|
|
guards = [] |
|
for i, tensor_variable in enumerate(list_variable.items): |
|
source_i = GetItemSource(base=source, index=i, index_is_slice=False) |
|
|
|
self.tx.output.input_source_to_var[source_i] = tensor_variable |
|
|
|
guard = functools.partial( |
|
GuardBuilder.TENSOR_MATCH, value=TensorWeakRef(value[i]) |
|
) |
|
guards.append(source_i.make_guard(guard)) |
|
|
|
install_guard(*guards, skip=1) |
|
|
|
grapharg = GraphArg( |
|
source, |
|
value, |
|
pass_arg_as_tensor=False, |
|
fake_tensor=None, |
|
is_tensor=False, |
|
) |
|
tensor_list_proxy.node.meta["grapharg"] = grapharg |
|
|
|
result = BaseListVariable.cls_for_instance(value)( |
|
output, mutable_local=MutableLocal() |
|
) |
|
if istype(value, list): |
|
return self.set_source_and_track_mutable(value, result) |
|
return result |
|
|
|
def wrap_tuple_iterator(self, value: tuple_iterator): |
|
self.install_guards(GuardBuilder.TUPLE_ITERATOR_LEN) |
|
output = [ |
|
VariableBuilder(self.tx, TupleIteratorGetItemSource(self.get_source(), i))( |
|
tuple_iterator_getitem(value, i) |
|
) |
|
for i in range(tuple_iterator_len(value)) |
|
] |
|
result = TupleIteratorVariable( |
|
output, mutable_local=MutableLocal(), source=self.source |
|
) |
|
|
|
return self.set_source_and_track_mutable(value, result) |
|
|
|
def wrap_slice_range(self, value: Union[slice, range]): |
|
items = [ |
|
VariableBuilder(self.tx, AttrSource(self.get_source(), k))( |
|
getattr(value, k) |
|
) |
|
for k in ("start", "stop", "step") |
|
] |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
if isinstance(value, slice): |
|
return SliceVariable(items, source=self.source) |
|
else: |
|
return RangeVariable(items, source=self.source) |
|
|
|
def wrap_module(self, value: torch.nn.Module): |
|
from ..eval_frame import OptimizedModule |
|
|
|
if len(value.__dict__) == 0: |
|
unimplemented(f"uninitialized nn.Module: {typestr(value)}") |
|
if istype(value, OptimizedModule): |
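            # Already-compiled module: unless its forward was explicitly disabled, trace
            # through the original module (_orig_mod) instead of the wrapper.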
|
|
|
if inspect.getattr_static(value.forward, "_torchdynamo_disable", False): |
|
|
|
|
|
|
|
|
|
return DelayGraphBreakVariable(source=self.source) |
|
|
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
self.source = AttrSource(self.source, "_orig_mod") |
|
return self.wrap_module(value._orig_mod) |
|
|
|
if ( |
|
isinstance(value, (torch.nn.RNN, torch.nn.GRU, torch.nn.LSTM)) |
|
and not config.allow_rnn |
|
): |
|
unimplemented("TorchDynamo purposely graph breaks on RNN, GRU, LSTMs") |
|
if mutation_guard.is_dynamic_nn_module(value, self.tx.export): |
|
|
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
result = UnspecializedNNModuleVariable(value, source=self.source) |
|
if not SideEffects.cls_supports_mutation_side_effects(type(value)): |
|
|
|
return result |
|
return self.tx.output.side_effects.track_object_existing(value, result) |
|
elif issubclass( |
|
value.__class__, torch.nn.parallel.distributed.DistributedDataParallel |
|
): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
return UnspecializedNNModuleVariable(value) |
|
elif getattr(value, "_is_fsdp_managed_module", False): |
|
|
|
|
|
|
|
|
|
|
|
assert getattr( |
|
value, "_fsdp_use_orig_params", False |
|
), "Dynamo only supports FSDP with use_orig_params=True" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.install_guards(GuardBuilder.TYPE_MATCH, GuardBuilder.ID_MATCH) |
|
return FSDPManagedNNModuleVariable(value, source=self.get_source()) |
|
else: |
|
return self.tx.output.register_attr_or_module( |
|
value, |
|
self.name, |
|
source=self.get_source(), |
|
|
|
) |
|
|
|
def wrap_literal(self, value): |
|
if not config.specialize_int and type(value) is int: |
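            # Unspecialized int: most ints become SymInt graph inputs, but a few cases
            # (common constants like 0/1, non-local sources, function defaults, closure
            # cells) are still specialized as constants.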
|
|
|
|
|
if not TracingContext.get().force_unspec_int_unbacked_size_like and ( |
|
value in self._common_constants() |
|
|
|
or not self.source.guard_source().is_local() |
|
or is_from_defaults(self.source) |
|
or is_cell_contents(self.source) |
|
): |
|
self.install_guards(GuardBuilder.CONSTANT_MATCH) |
|
return ConstantVariable.create(value=value, source=self.source) |
|
else: |
|
return self.wrap_symint(value) |
|
elif not config.specialize_float and type(value) is float: |
|
return self.wrap_symfloat(value) |
|
else: |
|
self.install_guards(GuardBuilder.CONSTANT_MATCH) |
|
return ConstantVariable.create(value=value) |
|
|
|
def assert_not_wrapped_by_this_graph(self, value: torch.Tensor): |
|
if is_fake(value) and maybe_get_fake_mode(value) is self.tx.fake_mode: |
|
            raise InternalTorchDynamoError(
                "Cannot wrap a Tensor that has already been "
                "wrapped by this instance of Dynamo"
            )
|
|
|
def wrap_tensor(self, value: torch.Tensor): |
|
source = self.get_source() |
|
|
|
|
|
|
|
assert value not in self.tx.output.side_effects |
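        # Tensors owned by an nn.Module (or with a static address) are registered as
        # attributes on the output graph rather than lifted to graph inputs, except when
        # they come from an FSDP-managed module.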
|
|
|
if ( |
|
source.guard_source().is_nn_module() |
|
or get_static_address_type(value) is not None |
|
) and not source.guard_source().is_fsdp_module(): |
|
self.assert_not_wrapped_by_this_graph(value) |
|
return self.tx.output.register_attr_or_module( |
|
value, self.name, source=source |
|
) |
|
|
|
if is_constant_source(source): |
|
self.assert_not_wrapped_by_this_graph(value) |
|
return self.tx.output.register_attr_or_module( |
|
value, |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), |
|
source=source, |
|
|
|
) |
|
|
|
if type(value) in config.traceable_tensor_subclasses: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
subclass_type = type(value) |
|
else: |
|
assert type(value) in ( |
|
torch.Tensor, |
|
torch.nn.Parameter, |
|
torch._subclasses.fake_tensor.FakeTensor, |
|
torch._subclasses.functional_tensor.FunctionalTensor, |
|
) or is_traceable_wrapper_subclass(value), type(value) |
|
subclass_type = None |
|
|
|
|
|
|
|
|
|
|
|
|
|
is_duplicate_tensor = source in self.tx.output.input_source_to_var |
|
if is_duplicate_tensor: |
|
return self.tx.output.input_source_to_var[source] |
|
|
|
|
|
self.assert_not_wrapped_by_this_graph(value) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensor_proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(value), source=source |
|
) |
|
options = {} |
|
if type(value) in config.traceable_tensor_subclasses: |
|
options["torch_function_fn"] = build_torch_function_fn( |
|
self.tx, value, self.source |
|
) |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
|
|
if ( |
|
isinstance(value, torch.Tensor) |
|
and value.is_nested |
|
and not isinstance(value, torch.nested._internal.nested_tensor.NestedTensor) |
|
): |
|
unimplemented("torch.compile does not support strided NestedTensor") |
|
|
|
|
|
|
|
if is_sparse_any(value) and not value.is_sparse: |
|
unimplemented( |
|
f"torch.compile does not support sparse Tensor with {value.layout} layout" |
|
) |
|
|
|
tensor_variable = wrap_fx_proxy( |
|
tx=self.tx, |
|
proxy=tensor_proxy, |
|
example_value=value, |
|
subclass_type=subclass_type, |
|
source=source, |
|
**options, |
|
) |
|
|
|
guard_type = GuardBuilder.TENSOR_MATCH |
|
|
|
if isinstance(source, GradSource) and is_from_optimizer_source(source): |
|
guard_type = GuardBuilder.NOT_NONE_MATCH |
|
|
|
self.install_guards( |
|
functools.partial( |
|
guard_type, |
|
value=value |
|
if isinstance(source, NumpyTensorSource) |
|
else TensorWeakRef(value), |
|
) |
|
) |
|
|
|
|
|
|
|
if is_traceable_wrapper_subclass(value): |
|
self.install_guards(GuardBuilder.TYPE_MATCH) |
|
attrs, _ = value.__tensor_flatten__() |
|
for attr in attrs: |
|
inner_value = getattr(value, attr) |
|
inner_source = AttrSource(self.source, attr) |
|
LazyVariableTracker.realize_all( |
|
VariableBuilder(self.tx, inner_source)(inner_value) |
|
) |
|
|
|
self.tx.output.input_source_to_var[source] = tensor_variable |
|
assert "tensor_dict" not in tensor_proxy.node.meta |
|
tensor_proxy.node.meta["tensor_dict"] = value.__dict__.copy() |
|
|
|
|
|
fake_tensor_value = tensor_variable.proxy.node.meta["example_value"] |
|
if maybe_get_fake_mode(fake_tensor_value) is not self.tx.fake_mode: |
|
raise InternalTorchDynamoError("Wrapped Tensor must be this graph's fake") |
|
|
|
grapharg = GraphArg(source, value, False, fake_tensor_value) |
|
tensor_proxy.node.meta["grapharg"] = grapharg |
|
self.tx.output.add_symbol_bindings(grapharg) |
|
return tensor_variable |
|
|
|
def wrap_numpy_ndarray(self, value): |
|
assert np is not None |
|
assert isinstance(value, np.ndarray) |
|
|
|
source = NumpyTensorSource(self.get_source()) |
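        # ndarrays are traced as tensors: convert to a torch.Tensor (cloning if the array
        # was read-only), lift that tensor as a graph input, and wrap the result as a
        # NumpyNdarrayVariable.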
|
|
|
from torch._numpy import _util |
|
|
|
readonly = not value.flags.writeable |
|
if readonly: |
|
try: |
|
value.flags.writeable = True |
|
except ValueError: |
|
|
|
|
|
assert isinstance(value.base, np.nditer) |
|
pass |
|
|
|
try: |
|
tensor_value = _util._try_convert_to_tensor(value) |
|
if readonly: |
|
from torch._prims_common import clone_preserve_strides |
|
|
|
tensor_value = clone_preserve_strides(tensor_value) |
|
except NotImplementedError as e: |
|
|
|
unimplemented(str(e)) |
|
|
|
|
|
|
|
|
|
|
|
LazyVariableTracker.realize_all(VariableBuilder(self.tx, source)(tensor_value)) |
|
proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(tensor_value), source=source |
|
) |
|
options = {"source": source} |
|
numpy_ndarray_variable = wrap_fx_proxy_cls( |
|
target_cls=NumpyNdarrayVariable, |
|
tx=self.tx, |
|
proxy=proxy, |
|
example_value=tensor_value, |
|
**options, |
|
) |
|
|
|
self.tx.output.input_source_to_var[source] = numpy_ndarray_variable |
|
example_value = numpy_ndarray_variable.proxy.node.meta["example_value"] |
|
|
|
|
|
|
|
grapharg = GraphArg( |
|
source, |
|
tensor_value, |
|
pass_arg_as_tensor=True, |
|
fake_tensor=example_value, |
|
is_tensor=True, |
|
example_strong_ref=tensor_value, |
|
) |
|
proxy.node.meta["grapharg"] = grapharg |
|
|
|
return numpy_ndarray_variable |
|
|
|
def wrap_symint(self, value): |
|
assert type(value) is int |
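        # Turn an unspecialized Python int into a SymInt graph input; per-frame state
        # (FrameStateSizeEntry) decides between specializing it as a constant and
        # allocating a dynamic symbol (automatic dynamic).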
|
|
|
if self.name in self.tx.output.unspec_variable_map: |
|
return self.tx.output.unspec_variable_map[self.name] |
|
|
|
shape_env = self.tx.output.shape_env |
|
if TracingContext.get().force_unspec_int_unbacked_size_like: |
|
wrapped_value = shape_env.create_unbacked_symint() |
|
_constrain_range_for_size(wrapped_value) |
|
self.tx.output.bound_symbols.add(wrapped_value.node.expr) |
|
self.tx.output.tracked_fakes.append( |
|
TrackedFake(wrapped_value, self.source, None) |
|
) |
|
|
|
|
|
|
|
|
|
|
|
elif not is_constant_source(self.get_source()): |
|
if torch._dynamo.config.specialize_int: |
|
|
|
|
|
|
|
self.install_guards(GuardBuilder.CONSTANT_MATCH) |
|
return ConstantVariable.create(value=value, source=self.source) |
|
|
|
name = self.source.name() |
|
if name not in self.tx.output.frame_state: |
|
|
|
|
|
|
|
|
|
frame_state_entry = FrameStateSizeEntry(scalar=value, size=None) |
|
else: |
|
frame_state_entry = self.tx.output.frame_state[name] |
|
if frame_state_entry.scalar != value: |
|
log.debug( |
|
"automatic dynamic int %s val %s != %s", |
|
name, |
|
value, |
|
frame_state_entry.scalar, |
|
) |
|
frame_state_entry.scalar = None |
|
self.tx.output.frame_state[name] = frame_state_entry |
|
|
|
|
|
|
|
|
|
|
|
if ( |
|
config.automatic_dynamic_shapes and frame_state_entry.scalar is None |
|
) or not config.assume_static_by_default: |
|
dynamic_dim = DimDynamic.DYNAMIC |
|
else: |
|
|
|
|
|
self.install_guards(GuardBuilder.CONSTANT_MATCH) |
|
return ConstantVariable.create(value=value) |
|
|
|
wrapped_value = shape_env.create_unspecified_symint_and_symbol( |
|
value, |
|
source=self.source, |
|
dynamic_dim=dynamic_dim, |
|
) |
|
self.tx.output.bound_symbols.add(wrapped_value.node.expr) |
|
|
|
self.tx.output.tracked_fakes.append( |
|
TrackedFake(wrapped_value, self.source, None) |
|
) |
|
else: |
|
assert is_constant_source(self.get_source()) |
|
|
|
self.install_guards(GuardBuilder.CONSTANT_MATCH) |
|
return ConstantVariable.create(value=value, source=self.source) |
|
|
|
assert not isinstance(self.get_source(), RandomValueSource) |
|
install_guard(self.get_source().make_guard(GuardBuilder.TYPE_MATCH)) |
|
|
|
options = {"source": self.get_source()} |
|
|
|
proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), |
|
type(wrapped_value), |
|
source=self.get_source(), |
|
) |
|
|
|
set_example_value(proxy.node, wrapped_value) |
|
unspec_var = SymNodeVariable(proxy, wrapped_value, **options) |
|
self.tx.output.unspec_variable_map[self.name] = unspec_var |
|
|
|
if not is_constant_source(self.get_source()): |
|
if self.tx.export and not isinstance(self.get_source(), LocalSource): |
|
raise AssertionError( |
|
f"Dynamo attempts to add additional input during export: value={wrapped_value}, source={self.get_source()}" |
|
) |
|
|
|
example_value = unspec_var.proxy.node.meta["example_value"] |
|
|
|
proxy.node.meta["grapharg"] = GraphArg( |
|
self.get_source(), |
|
wrapped_value, |
|
pass_arg_as_tensor=False, |
|
fake_tensor=None, |
|
is_tensor=False, |
|
example_strong_ref=wrapped_value, |
|
) |
|
|
|
return unspec_var |
|
|
|
def wrap_symfloat(self, value): |
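        # Unspecialized float: trace it by passing a float64 0-dim tensor into the graph
        # and immediately calling .item() on it, so downstream code sees a SymFloat backed
        # by that tensor input. NaNs and constant sources are still specialized.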
|
|
|
|
|
|
|
|
|
|
|
|
|
if self.name in self.tx.output.unspec_variable_map: |
|
return self.tx.output.unspec_variable_map[self.name] |
|
|
|
|
|
|
|
if ( |
|
torch._dynamo.config.specialize_float |
|
or is_constant_source(self.get_source()) |
|
or math.isnan(value) |
|
): |
|
self.install_guards(GuardBuilder.CONSTANT_MATCH) |
|
return ConstantVariable.create(value=value, source=self.source) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
wrapped_value = torch.tensor(value, dtype=torch.float64) |
|
|
|
|
|
assert not isinstance(self.get_source(), RandomValueSource) |
|
install_guard(self.get_source().make_guard(GuardBuilder.TYPE_MATCH)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
options = {"source": FloatTensorSource(self.get_source()), "raw_value": value} |
|
|
|
|
|
|
|
proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), |
|
type(wrapped_value), |
|
source=self.get_source(), |
|
) |
|
|
|
unspec_var = wrap_fx_proxy_cls( |
|
UnspecializedPythonVariable, |
|
tx=self.tx, |
|
proxy=proxy, |
|
example_value=wrapped_value, |
|
**options, |
|
) |
|
assert isinstance(unspec_var, UnspecializedPythonVariable) |
|
self.tx.output.unspec_variable_map[self.name] = unspec_var |
|
|
|
if self.tx.export and not isinstance(self.get_source(), LocalSource): |
|
raise AssertionError( |
|
f"Dynamo attempts to add additional input during export: value={wrapped_value}, source={self.get_source()}" |
|
) |
|
fake_tensor_value = None |
|
example_value = unspec_var.proxy.node.meta["example_value"] |
|
assert is_fake(example_value) |
|
|
|
fake_tensor_value = example_value |
|
        assert fake_tensor_value.fake_mode is self.tx.fake_mode, (
            f"fake mode ({fake_tensor_value.fake_mode}) from fake tensor metadata doesn't match mode "
            f"({self.tx.fake_mode}) from InstructionTranslator"
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
proxy.node.meta["grapharg"] = GraphArg( |
|
self.get_source(), |
|
wrapped_value, |
|
pass_arg_as_tensor=True, |
|
fake_tensor=fake_tensor_value, |
|
is_tensor=False, |
|
example_strong_ref=wrapped_value, |
|
) |
|
|
|
|
|
r = wrap_fx_proxy( |
|
self.tx, |
|
self.tx.output.create_proxy( |
|
"call_method", |
|
"item", |
|
*proxy_args_kwargs([unspec_var], {}), |
|
), |
|
) |
|
self.tx.output.tracked_fakes.append(TrackedFake(r.sym_num, self.source, None)) |
|
|
|
return r |
|
|
|
def wrap_unspecialized_primitive(self, value): |
|
if self.name in self.tx.output.unspec_variable_map: |
|
return self.tx.output.unspec_variable_map[self.name] |
|
|
|
wrapped_value = torch.tensor(value) |
|
if not isinstance(self.get_source(), RandomValueSource): |
|
install_guard(self.get_source().make_guard(GuardBuilder.TYPE_MATCH)) |
|
|
|
options = {"source": self.get_source()} |
|
options.update({"raw_value": value}) |
|
|
|
proxy = self.tx.output.root_tracer.create_graph_input( |
|
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), |
|
type(wrapped_value), |
|
source=self.get_source(), |
|
) |
|
|
|
unspec_var = wrap_fx_proxy_cls( |
|
UnspecializedPythonVariable, |
|
tx=self.tx, |
|
proxy=proxy, |
|
example_value=wrapped_value, |
|
**options, |
|
) |
|
self.tx.output.unspec_variable_map[self.name] = unspec_var |
|
if not is_constant_source(self.get_source()): |
|
if self.tx.export and not isinstance(self.get_source(), LocalSource): |
|
raise AssertionError( |
|
f"Dynamo attempts to add additional input during export: value={wrapped_value}, source={self.get_source()}" |
|
) |
|
fake_tensor_value = None |
|
if isinstance(unspec_var, ConstantVariable): |
|
|
|
example_value = unspec_var.value |
|
else: |
|
example_value = unspec_var.proxy.node.meta["example_value"] |
|
assert is_fake(example_value) |
|
|
|
fake_tensor_value = example_value |
|
                assert fake_tensor_value.fake_mode is self.tx.fake_mode, (
                    f"fake mode ({fake_tensor_value.fake_mode}) from fake tensor metadata doesn't match mode "
                    f"({self.tx.fake_mode}) from InstructionTranslator"
                )
|
|
|
proxy.node.meta["grapharg"] = GraphArg( |
|
self.get_source(), |
|
wrapped_value, |
|
pass_arg_as_tensor=True, |
|
fake_tensor=fake_tensor_value, |
|
is_tensor=False, |
|
example_strong_ref=wrapped_value, |
|
) |
|
return unspec_var |
|
|
|
|
|
def _dataclasses_fields_lambda(obj): |
|
if isinstance(obj, UserDefinedObjectVariable): |
|
value = obj.value |
|
elif isinstance(obj, DataClassVariable): |
|
value = obj.user_cls |
|
else: |
|
unimplemented(f"Dataclass fields handling fails for type {obj}") |
|
items = [] |
|
for field in dataclasses.fields(value): |
|
source = None |
|
if obj.source: |
|
source = GetItemSource( |
|
AttrSource(obj.source, "__dataclass_fields__"), field.name |
|
) |
|
items.append(UserDefinedObjectVariable(field, source=source)) |
|
return TupleVariable(items) |
|
|
|
|
|
def wrap_fx_proxy( |
|
tx, proxy, example_value=None, subclass_type=None, **options |
|
) -> VariableTracker: |
|
kwargs = { |
|
"tx": tx, |
|
"proxy": proxy, |
|
"example_value": example_value, |
|
"subclass_type": subclass_type, |
|
**options, |
|
} |
|
if subclass_type is None: |
|
return wrap_fx_proxy_cls(target_cls=TensorVariable, **kwargs) |
|
else: |
|
result = wrap_fx_proxy_cls(target_cls=TensorWithTFOverrideVariable, **kwargs) |
|
result.install_global(tx) |
|
return result |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def wrap_fx_proxy_cls( |
|
target_cls, tx, proxy, example_value=None, subclass_type=None, **options |
|
): |
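    # Build a VariableTracker for the value produced by `proxy`: compute (or validate) a
    # fake `example_value`, store it in proxy.node.meta["example_value"], and pick the
    # tracker class from the runtime type of that example value (tensor, list/tuple/Size,
    # SymInt/SymFloat, stream/event, plain constants, ...).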
|
from ..symbolic_convert import InstructionTranslatorBase |
|
|
|
assert isinstance(tx, InstructionTranslatorBase) |
|
if "guards" in options and options["guards"] is not None: |
|
tx.output.guards.update(options["guards"]) |
|
|
|
assert "example_value" not in proxy.node.meta, f"{proxy.node.meta['example_value']}" |
|
|
|
initial_example_value = example_value |
|
|
|
def _clone_input(value): |
|
if isinstance(value, torch.Tensor): |
|
|
|
if not ( |
|
isinstance(value, FakeTensor) |
|
or ( |
|
|
|
torch._is_functional_tensor(value) |
|
and maybe_get_fake_mode(value) is tx.fake_mode |
|
) |
|
or value.is_nested |
|
): |
|
|
|
value = clone_input(value) |
|
|
|
return value |
|
|
|
|
|
if example_value is None: |
|
|
|
|
|
example_value = get_fake_value(proxy.node, tx, allow_non_graph_fake=True) |
|
|
|
|
|
elif maybe_get_fake_mode(example_value) is tx.fake_mode: |
|
pass |
|
|
|
elif isinstance(example_value, torch.Tensor): |
|
if tx.export: |
|
|
|
|
|
|
|
with torch._C.DisableTorchFunctionSubclass(): |
|
proxy.tracer.real_value_cache[proxy.node] = _clone_input(example_value) |
|
|
|
|
|
|
|
|
|
kwargs = { |
|
"is_tensor": target_cls in (TensorVariable, TensorWithTFOverrideVariable), |
|
} |
|
assert "source" in options and options["source"] is not None |
|
kwargs["source"] = options["source"] |
|
example_value = wrap_to_fake_tensor_and_record(example_value, tx=tx, **kwargs) |
|
if isinstance(example_value, torch.Tensor) and ( |
|
maybe_get_fake_mode(example_value) is not tx.fake_mode |
|
): |
|
        raise InternalTorchDynamoError(
            "`example_value` needs to be a `FakeTensor` "
            f"wrapped by this instance of Dynamo. Found: {example_value}"
        )
|
|
|
if isinstance(example_value, torch.Tensor): |
|
is_parameter = isinstance(example_value, torch.nn.Parameter) |
|
|
|
|
|
|
|
|
|
example_value = _clone_input(example_value) |
|
set_example_value(proxy.node, example_value) |
|
specialized_props = target_cls.specialize(example_value) |
|
|
|
if ( |
|
isinstance(example_value, torch._subclasses.fake_tensor.FakeTensor) |
|
and example_value.fake_mode is tx.fake_mode |
|
): |
|
tensor_type = subclass_type if subclass_type else torch.Tensor |
|
specialized_props["class_type"] = ( |
|
torch.nn.Parameter if is_parameter else tensor_type |
|
) |
|
|
|
options.update(specialized_props) |
|
return target_cls(proxy, **options) |
|
elif ( |
|
hasattr(proxy.node.target, "__name__") |
|
and proxy.node.target.__name__ == "set_state" |
|
and isinstance(proxy.node.target.__self__, torch._C.Generator) |
|
or proxy.node.target == torch.random.set_rng_state |
|
): |
|
return TorchInGraphFunctionVariable(proxy.node.target) |
|
elif ( |
|
proxy.node.target == torch._C._DisableFuncTorch |
|
or proxy.node.target == torch.cuda._is_in_bad_fork |
|
): |
|
return UserDefinedObjectVariable(example_value) |
|
elif istype(example_value, torch.Size) and all( |
|
isinstance(x, int) for x in example_value |
|
): |
|
sizes = [ConstantVariable.create(x) for x in example_value] |
|
return SizeVariable(sizes, **options) |
|
elif isinstance(example_value, (tuple, list)): |
|
set_example_value(proxy.node, example_value) |
|
unpacked = [] |
|
for i, val in enumerate(example_value): |
|
if val is None: |
|
|
|
unpacked.append( |
|
ConstantVariable.create(None, **options), |
|
) |
|
else: |
|
proxy_i = proxy.tracer.create_proxy( |
|
kind="call_function", |
|
target=operator.getitem, |
|
args=(proxy, i), |
|
kwargs={}, |
|
) |
|
|
|
if "source" in options: |
|
source = options["source"] |
|
options_i = options.copy() |
|
options_i["source"] = GetItemSource( |
|
base=source, index=i, index_is_slice=False |
|
) |
|
else: |
|
|
|
options_i = options |
|
|
|
|
|
unpacked.append( |
|
wrap_fx_proxy_cls( |
|
target_cls=target_cls, |
|
tx=tx, |
|
proxy=proxy_i, |
|
example_value=val, |
|
**options_i, |
|
) |
|
) |
|
if isinstance(example_value, torch.Size): |
|
|
|
|
|
return SizeVariable(unpacked, proxy, **options) |
|
elif istype(example_value, tuple): |
|
return TupleVariable(unpacked, **options) |
|
elif istype(example_value, (list, immutable_list)): |
|
return ListVariable(unpacked, mutable_local=MutableLocal(), **options) |
|
else: |
|
assert example_value.__class__.__module__ == "torch.return_types" or hasattr( |
|
example_value, "_fields" |
|
), f"expected {example_value.__class__.__module__} == torch.return_types or named tuple but got {type(example_value)}" |
|
return NamedTupleVariable(unpacked, example_value.__class__, **options) |
|
elif example_value is None or proxy.node.target is torch.manual_seed: |
|
return ConstantVariable.create(None, **options) |
|
elif isinstance(example_value, (torch.SymInt, torch.SymFloat, torch.SymBool)): |
|
set_example_value(proxy.node, example_value) |
|
return SymNodeVariable(proxy, example_value, **options) |
|
elif ( |
|
inspect.isclass(proxy.node.target) |
|
and issubclass(proxy.node.target, _StreamBase) |
|
) or proxy.node.target in [ |
|
device_interface.current_stream |
|
for _, device_interface in get_registered_device_interfaces() |
|
]: |
|
set_example_value(proxy.node, example_value) |
|
return StreamVariable(proxy, example_value, example_value.device, **options) |
|
elif ( |
|
inspect.isclass(proxy.node.target) and issubclass(proxy.node.target, _EventBase) |
|
) or proxy.node.target in [ |
|
device_interface.Event |
|
for _, device_interface in get_registered_device_interfaces() |
|
]: |
|
set_example_value(proxy.node, example_value) |
|
return EventVariable(proxy, example_value, **options) |
|
elif proxy.node.target == "query" and proxy.node.op == "call_method": |
|
set_example_value(proxy.node, example_value) |
|
return ConstantVariable(example_value, **options) |
|
elif ( |
|
example_value is not None |
|
and isinstance(example_value, _EventBase) |
|
and proxy.node.target == "record_event" |
|
and proxy.node.op == "call_method" |
|
): |
|
set_example_value(proxy.node, example_value) |
|
return EventVariable(proxy, example_value, **options) |
|
elif isinstance(example_value, int) and proxy.node.target in [ |
|
torch.sym_int, |
|
getattr, |
|
operator.getitem, |
|
torch._utils._element_size, |
|
torch.seed, |
|
operator.mod, |
|
torch._functorch.vmap._validate_and_get_batch_size, |
|
|
|
getattr(torch.distributed, "get_rank", _missing), |
|
getattr(torch.distributed, "get_world_size", _missing), |
|
|
|
|
|
torch._constrain_as_size, |
|
]: |
|
set_example_value(proxy.node, example_value) |
|
return ConstantVariable.create(example_value, **options) |
|
elif isinstance(example_value, torch.backends.cuda.SDPAParams): |
|
from .sdpa import SDPAParamsVariable |
|
|
|
set_example_value(proxy.node, example_value) |
|
return SDPAParamsVariable(proxy, **options) |
|
elif isinstance(example_value, bool) and proxy.node.target in [ |
|
torch.backends.cuda.can_use_flash_attention, |
|
torch.backends.cuda.can_use_efficient_attention, |
|
]: |
|
set_example_value(proxy.node, example_value) |
|
return ConstantVariable.create(example_value, **options) |
|
elif ( |
|
isinstance(example_value, (int, float, bool)) |
|
and proxy.node.target is call_torchbind |
|
): |
|
set_example_value(proxy.node, example_value) |
|
return ConstantVariable.create(example_value, **options) |
|
else: |
|
unimplemented( |
|
"torch.* op returned non-Tensor " |
|
+ f"{typestr(example_value)} {proxy.node.op} {proxy.node.target}" |
|
) |
|
|
|
|
|
|
|
|
|
@dataclasses.dataclass |
|
class TrackedFake: |
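    # Pairs a fake tensor or SymInt with the Source it was created from (plus an optional
    # symbolic context) so that guard production can later find every symbolic input
    # Dynamo traced.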
|
fake: Union[FakeTensor, SymInt] |
|
source: Source |
|
|
|
symbolic_context: Optional[SymbolicContext] |
|
|
|
def __hash__(self) -> int: |
|
return hash((self.fake, self.source.name())) |
|
|
|
def __eq__(self, other: object) -> bool: |
|
if isinstance(other, TrackedFake): |
|
return self.fake is other.fake and self.source.name() == other.source.name() |
|
return False |
|
|
|
|
|
|
|
|
|
def _automatic_dynamic( |
|
e, tx, source, static_shapes, outer_only=False |
|
) -> SymbolicContext: |
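    # Decide per-dimension dynamism for a tensor input: honor mark_dynamic / mark_static
    # annotations and export constraints, and otherwise use sizes recorded from previous
    # frames (automatic dynamic shapes) to pick between static and dynamic dims.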
|
|
|
if e.is_nested and not isinstance( |
|
e, torch.nested._internal.nested_tensor.NestedTensor |
|
): |
|
unimplemented("torch.compile does not support strided NestedTensor") |
|
|
|
name = source.name() |
|
prior_policy = tx.output.tracing_context.tensor_to_context.get(e, None) |
|
shape_env_to_source_to_symbol_cache = ( |
|
prior_policy.shape_env_to_source_to_symbol_cache if prior_policy else None |
|
) |
|
|
|
|
|
view_base_context: Optional[SymbolicContext] = None |
|
if e._is_view(): |
|
base_source = AttrSource(source, "_base") |
|
view_base_context = _automatic_dynamic(e._base, tx, base_source, static_shapes) |
|
|
|
if is_traceable_wrapper_subclass(e) and not outer_only: |
|
|
|
outer_context = _automatic_dynamic( |
|
e, tx, source, static_shapes, outer_only=True |
|
) |
|
|
|
|
|
attrs, _ = type(e).__tensor_flatten__(e) |
|
inner_contexts = {} |
|
for attr in attrs: |
|
inner_tensor = getattr(e, attr) |
|
inner_source = AttrSource(source, attr) |
|
inner_context = _automatic_dynamic( |
|
inner_tensor, tx, inner_source, static_shapes |
|
) |
|
inner_contexts[attr] = inner_context |
|
|
|
return SubclassSymbolicContext( |
|
dynamic_sizes=outer_context.dynamic_sizes, |
|
constraint_sizes=outer_context.constraint_sizes, |
|
view_base_context=view_base_context, |
|
tensor_source=outer_context.tensor_source, |
|
shape_env_to_source_to_symbol_cache=outer_context.shape_env_to_source_to_symbol_cache, |
|
inner_contexts=inner_contexts, |
|
) |
|
|
|
if static_shapes: |
|
return StatefulSymbolicContext( |
|
dynamic_sizes=[DimDynamic.STATIC] * e.dim(), |
|
constraint_sizes=[None] * e.dim(), |
|
view_base_context=view_base_context, |
|
tensor_source=source, |
|
shape_env_to_source_to_symbol_cache=shape_env_to_source_to_symbol_cache, |
|
) |
|
|
|
|
|
|
|
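    # Preserve dynamism already present in the input: any non-nested SymInt size
    # keeps its dimension dynamic.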
from torch.fx.experimental.symbolic_shapes import is_nested_int |
|
|
|
if any(isinstance(s, SymInt) and not is_nested_int(s) for s in e.size()): |
|
return StatefulSymbolicContext( |
|
dynamic_sizes=[ |
|
DimDynamic.DYNAMIC if isinstance(s, SymInt) else DimDynamic.STATIC |
|
for s in e.size() |
|
], |
|
constraint_sizes=[None] * e.dim(), |
|
view_base_context=view_base_context, |
|
tensor_source=source, |
|
shape_env_to_source_to_symbol_cache=shape_env_to_source_to_symbol_cache, |
|
) |
|
|
|
|
|
frame_state_entry = None |
|
if name not in tx.output.frame_state: |
|
|
|
|
|
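        # First time we see this source: record its current sizes as static,
        # e.g. {} -> {"x": [2, 4]}.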
frame_state_entry = FrameStateSizeEntry(None, None) |
|
frame_state_entry.size = list(e.size()) |
|
else: |
|
frame_state_entry = tx.output.frame_state[name] |
|
if frame_state_entry.size is not None: |
|
if e.ndim != len(frame_state_entry.size): |
|
|
|
|
|
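                # Rank changed since the last frame: mark the whole entry dynamic,
                # e.g. {"x": [2, 3, 4]} -> {"x": None}.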
log.debug( |
|
"automatic dynamic %s dim %s != %s", |
|
name, |
|
e.ndim, |
|
frame_state_entry.size, |
|
) |
|
frame_state_entry.size = None |
|
else: |
|
|
|
|
|
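                # Same rank: any dim whose size changed since the last frame is
                # set to None, e.g. {"x": [2, 3]} -> {"x": [2, None]}.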
for i, dim in enumerate(frame_state_entry.size): |
|
if dim is not None and e.size()[i] != dim: |
|
log.debug( |
|
"automatic dynamic %s size(%s) %s != %s", |
|
name, |
|
i, |
|
e.size(i), |
|
dim, |
|
) |
|
frame_state_entry.size[i] = None |
|
|
|
|
|
|
|
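    # Collect export-time constraints that target this tensor, keyed by dim.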
t_id = id(e) |
|
dim2constraint = {} |
|
|
|
def update_dim2constraint(dim, constraint_range, debug_name): |
|
if dim in dim2constraint: |
|
from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint |
|
|
|
old_constraint_range, old_debug_name = dim2constraint[dim] |
|
new_constraint_range = StrictMinMaxConstraint( |
|
vr=constraint_range.vr & old_constraint_range.vr, |
|
warn_only=False, |
|
) |
|
|
|
|
|
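            # Prefer the previously recorded debug name, if any.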
new_debug_name = old_debug_name or debug_name |
|
dim2constraint[dim] = new_constraint_range, new_debug_name |
|
else: |
|
dim2constraint[dim] = constraint_range, debug_name |
|
|
|
if tx.output.export_constraints: |
|
for constraint in tx.output.export_constraints: |
|
if constraint.t_id == t_id: |
|
update_dim2constraint( |
|
constraint.dim, constraint.constraint_range, constraint.debug_name |
|
) |
|
if constraint.shared is not None and constraint.shared.t_id == t_id: |
|
|
|
|
|
|
|
|
|
|
|
|
|
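                # Shared constraints are folded into this tensor's dim2constraint as
                # well, so that by the end of this loop every constraint range that
                # applies to this t_id has been recorded, no matter where it was
                # specified.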
update_dim2constraint( |
|
constraint.shared.dim, |
|
constraint.constraint_range, |
|
constraint.debug_name, |
|
) |
|
|
|
dynamic_dims = [] |
|
constraint_dims = [] |
|
for i in range(e.dim()): |
|
|
|
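        # Per-dim user annotations (mark_dynamic / maybe_mark_dynamic / mark_static).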
marked_dynamic = i in getattr(e, "_dynamo_dynamic_indices", set()) |
|
marked_weak_dynamic = i in getattr(e, "_dynamo_weak_dynamic_indices", set()) |
|
marked_static = i in getattr(e, "_dynamo_static_indices", set()) |
|
|
|
|
|
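        # Automatic dynamic applies when the frame state shows this size varying
        # across frames (entry is None, or this particular dim is None).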
automatic_dynamic = config.automatic_dynamic_shapes and ( |
|
frame_state_entry.size is None or frame_state_entry.size[i] is None |
|
) |
|
|
|
|
|
|
|
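        # Reflect an explicit mark_dynamic directive back into the frame state.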
if frame_state_entry.size and marked_dynamic: |
|
log.debug("automatic dynamic %s marked dynamic", name) |
|
frame_state_entry.size[i] = None |
|
|
|
|
|
|
|
|
|
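        # Process constraints first, since a constraint implies the dim is dynamic.
        # Export constraints take precedence over mark_dynamic-style annotations.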
constraint = dim2constraint.get(i) |
|
if constraint is None: |
|
if marked_dynamic and not config.allow_ignore_mark_dynamic: |
|
if hasattr(e, "_dynamo_dynamic_range"): |
|
dim_range = [ |
|
dr for dr in e._dynamo_dynamic_range if dr.dim == i |
|
].pop() |
|
if dim_range.min is None and dim_range.max is None: |
|
constraint_dim = RelaxedUnspecConstraint(warn_only=False) |
|
else: |
|
from torch.fx.experimental.symbolic_shapes import ( |
|
StrictMinMaxConstraint, |
|
) |
|
|
|
constraint_dim = StrictMinMaxConstraint( |
|
vr=ValueRanges(lower=dim_range.min, upper=dim_range.max), |
|
warn_only=False, |
|
) |
|
else: |
|
constraint_dim = RelaxedUnspecConstraint(warn_only=False) |
|
|
|
elif not marked_static and automatic_dynamic: |
|
constraint_dim = RelaxedUnspecConstraint(warn_only=True) |
|
else: |
|
constraint_dim = None |
|
else: |
|
constraint_dim, debug_name = constraint |
|
if debug_name is not None: |
|
dim_name = f"{name}.size()[{i}]" |
|
tx.output.shape_env.source_name_to_debug_name[dim_name] = debug_name |
|
constraint_dims.append(constraint_dim) |
|
|
|
|
|
if ( |
|
constraint_dim is not None |
|
or marked_dynamic |
|
or marked_weak_dynamic |
|
or is_nested_int(e.shape[i]) |
|
): |
|
|
|
|
|
|
|
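            # Constraints, explicit dynamic marks, and nested ints all force this
            # dim to be dynamic, even when static_shapes would otherwise apply.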
dynamic = DimDynamic.DYNAMIC |
|
elif static_shapes or config.assume_static_by_default or marked_static: |
|
dynamic = DimDynamic.STATIC |
|
else: |
|
dynamic = DimDynamic.DUCK |
|
|
|
dynamic_dims.append(dynamic) |
|
|
|
tx.output.frame_state[name] = frame_state_entry |
|
|
|
return StatefulSymbolicContext( |
|
dynamic_sizes=dynamic_dims, |
|
constraint_sizes=constraint_dims, |
|
view_base_context=view_base_context, |
|
tensor_source=source, |
|
shape_env_to_source_to_symbol_cache=shape_env_to_source_to_symbol_cache, |
|
) |
|
|
|
|
|
|
|
def wrap_to_fake_tensor_and_record( |
|
e, tx, *, source: Optional[Source], is_tensor: bool, parent_context=None |
|
): |
|
if ( |
|
type(e) in (torch.Tensor, torch.nn.Parameter, FakeTensor) |
|
or isinstance(e, torch.Tensor) |
|
or is_traceable_wrapper_subclass(e) |
|
): |
|
assert source is not None |
|
static_shapes, reason = tensor_always_has_static_shape( |
|
e, is_tensor, guard_source=source.guard_source() |
|
) |
|
|
|
if not parent_context: |
|
symbolic_context = _automatic_dynamic(e, tx, source, static_shapes) |
|
else: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
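            # A parent_context is provided when recursing into the inner tensors of
            # a traceable wrapper subclass; _automatic_dynamic already computed the
            # inner contexts on the outer tensor, so look this one up by the
            # attribute name recorded in the AttrSource.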
assert isinstance(source, AttrSource) |
|
inner_context_name = source.member |
|
symbolic_context = parent_context.inner_contexts[inner_context_name] |
|
|
|
log.debug( |
|
"wrap_to_fake %s %s %s %s", |
|
source.name(), |
|
tuple(e.shape), |
|
symbolic_context, |
|
type(e), |
|
) |
|
fake_e = wrap_fake_exception( |
|
lambda: tx.fake_mode.from_tensor( |
|
e, |
|
source=source, |
|
symbolic_context=symbolic_context, |
|
) |
|
) |
|
if ( |
|
source is not None |
|
and isinstance(fake_e, FakeTensor) |
|
and (sym_val := fake_e.item_memo) is not None |
|
): |
|
tx.output.tracked_fakes.append( |
|
TrackedFake(sym_val, CallMethodItemSource(source), symbolic_context) |
|
) |
|
|
|
if is_traceable_wrapper_subclass(fake_e): |
|
attrs, _ = fake_e.__tensor_flatten__() |
|
for attr in attrs: |
|
fake_inner = getattr(fake_e, attr) |
|
inner = getattr(e, attr) |
|
inner_source = AttrSource(source, attr) |
|
wrap_to_fake_tensor_and_record( |
|
inner, |
|
tx, |
|
source=inner_source, |
|
is_tensor=isinstance(fake_inner, torch.Tensor), |
|
parent_context=symbolic_context, |
|
) |
|
|
|
tx.output.tracing_context.tensor_to_context[e] = symbolic_context |
|
if is_sparse_any(fake_e): |
|
|
|
|
|
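            # For sparse tensors, record the size/stride of the values constituent
            # rather than dense strides.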
values = fake_e._values() if fake_e.is_sparse else fake_e.values() |
|
tx.output.input_source_to_sizes_strides[source] = { |
|
"size": fake_e.size(), |
|
|
|
|
|
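                # Sparse layouts have no meaningful dense strides; use a placeholder.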
"stride": (1,) * fake_e.ndim, |
|
"values_size": values.size(), |
|
"values_stride": values.stride(), |
|
} |
|
else: |
|
tx.output.input_source_to_sizes_strides[source] = { |
|
"size": fake_e.size(), |
|
"stride": fake_e.stride(), |
|
} |
|
|
|
if ( |
|
is_tensor |
|
and not (static_shapes and source.is_nn_module()) |
|
and not is_constant_source(source) |
|
): |
|
tx.output.tracked_fakes.append( |
|
TrackedFake(fake_e, source, symbolic_context) |
|
) |
|
tx.output.tracked_fakes_id_to_source[id(e)].append(source) |
|
|
|
return fake_e |
|
else: |
|
return e |
|
|
|
|
|
class SourcelessBuilder: |
|
""" |
|
    Like VariableBuilder, but stateless and without a Source. Useful for building
    simple type -> VariableTracker objects, or for objects that are created and then
    discarded during inlining (for example, a locally constructed list of tensors
    that we iterate over). Such a list should not show up as an input artifact, in
    reconstruction, or in the graph, yet it may still need to be represented
    internally as a ListVariable.

    NOTE - Objects produced here are born UNGUARDED because they have no Source!

    NOTE - This class is relatively new and may have rough edges; it exists to stem
    the growth of the giant if/else type -> VariableTracker trees that were cropping
    up all over dynamo.
|
""" |
|
|
|
def __init__(self): |
|
raise AssertionError("Use SourcelessBuilder.create()") |
|
|
|
@staticmethod |
|
def create(tx, value) -> VariableTracker: |
|
value_type = type(value) |
|
fast_handler = SourcelessBuilder._type_handlers.get(value_type) |
|
if fast_handler: |
|
return fast_handler(tx, value) |
|
|
|
if isinstance(value, VariableTracker): |
|
|
|
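            # Already a VariableTracker; pass it through (handy for recursive calls).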
return value |
|
elif isinstance(value, dataclasses._HAS_DEFAULT_FACTORY_CLASS): |
|
return UserDefinedObjectVariable(value) |
|
elif ConstantVariable.is_literal(value): |
|
return ConstantVariable.create(value) |
|
elif callable(value) and trace_rules.lookup_callable(value) is not None: |
|
if is_callable_allowed(value): |
|
tx.output.has_user_defined_allowed_in_graph = True |
|
return trace_rules.lookup_callable(value)(value) |
|
elif is_function_or_wrapper(value): |
|
return trace_rules.lookup(value)(value) |
|
elif isinstance(value, enum.Enum): |
|
return EnumVariable(value) |
|
elif isinstance(value, (type, abc.ABCMeta)): |
|
return UserDefinedClassVariable(value) |
|
elif isinstance(value, types.MethodWrapperType): |
|
return MethodWrapperVariable(value) |
|
elif isinstance(value, torch.fx.graph_module.GraphModule): |
|
return SourcelessGraphModuleVariable(value) |
|
elif isinstance( |
|
value, (torch.utils._pytree.TreeSpec, torch.utils._pytree.LeafSpec) |
|
): |
|
return UserDefinedObjectVariable(value) |
|
elif PlacementVariable.is_placement(value): |
|
return PlacementVariable(value) |
|
elif DeviceMeshVariable.is_device_mesh(value): |
|
return DeviceMeshVariable(value) |
|
elif isinstance(value, re.Pattern): |
|
return RegexPatternVariable(value) |
|
unimplemented( |
|
f"Unexpected type in sourceless builder {value_type.__module__}.{value_type.__qualname__}" |
|
) |
|
|
|
@staticmethod |
|
def wrap_constant_literal(value): |
|
assert ConstantVariable.is_literal(value) |
|
return ConstantVariable.create(value=value) |
|
|
|
@staticmethod |
|
def make_type_handlers(): |
|
create = SourcelessBuilder.create |
|
handlers = {} |
|
for t in common_constant_types: |
|
handlers[t] = lambda tx, value: ConstantVariable(value) |
|
handlers[set] = lambda tx, value: SetVariable( |
|
[create(tx, x) for x in value], mutable_local=MutableLocal() |
|
) |
|
handlers[dict] = lambda tx, value: ConstDictVariable( |
|
{create(tx, k): create(tx, v) for k, v in value.items()}, |
|
type(value), |
|
mutable_local=MutableLocal(), |
|
) |
|
handlers[list] = lambda tx, value: ListVariable( |
|
[create(tx, x) for x in value], mutable_local=MutableLocal() |
|
) |
|
handlers[tuple] = lambda tx, value: TupleVariable( |
|
[create(tx, x) for x in value] |
|
) |
|
handlers[torch.Size] = lambda tx, value: SizeVariable( |
|
[create(tx, x) for x in value] |
|
) |
|
handlers[collections.OrderedDict] = handlers[dict] |
|
handlers[immutable_dict] = handlers[dict] |
|
handlers[immutable_list] = handlers[list] |
|
handlers[types.ModuleType] = lambda tx, value: PythonModuleVariable(value) |
|
|
|
def passthrough(tx, value): |
|
return value |
|
|
|
for cls in VariableTrackerMeta.all_subclasses: |
|
handlers[cls] = passthrough |
|
return handlers |
|
|
|
|
|
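# Build the fast-path dispatch table once at import time.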
SourcelessBuilder._type_handlers = SourcelessBuilder.make_type_handlers() |
|
|