omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/ogn.py

"""Tools that support interacting with the .ogn format, including parsing and creation.
General tools can be imported directly with the top level import:
.. code-block:: python
import omni.graph.tools.ogn as ogn
help(ogn)
"""
from ._impl.node_generator.attributes.AttributeManager import AttributeManager
from ._impl.node_generator.attributes.management import (
ALL_ATTRIBUTE_TYPES,
ATTRIBUTE_UNION_GROUPS,
expand_attribute_union_groups,
get_attribute_manager,
get_attribute_manager_type,
split_attribute_type_name,
supported_attribute_type_names,
)
from ._impl.node_generator.code_generation import code_generation
from ._impl.node_generator.generate_cpp import generate_cpp
from ._impl.node_generator.generate_documentation import generate_documentation
from ._impl.node_generator.generate_python import generate_python
from ._impl.node_generator.generate_template import generate_template
from ._impl.node_generator.generate_test_imports import generate_test_imports
from ._impl.node_generator.generate_tests import generate_tests
from ._impl.node_generator.generate_usd import generate_usd
from ._impl.node_generator.keys import (
AttributeKeys,
CategoryTypeValues,
CudaPointerValues,
ExclusionTypeValues,
GraphSetupKeys,
IconKeys,
LanguageTypeValues,
MemoryTypeValues,
MetadataKeys,
NodeTypeKeys,
TestKeys,
)
from ._impl.node_generator.nodes import NodeGenerationError
from ._impl.node_generator.parse_scheduling import SchedulingHints
from ._impl.node_generator.utils import (
CarbLogError,
DebugError,
ParseError,
UnimplementedError,
to_cpp_comment,
to_python_comment,
to_usd_comment,
to_usd_docs,
)
from ._impl.ogn_types import ogn_to_sdf, sdf_to_ogn
__all__ = [
"ALL_ATTRIBUTE_TYPES",
"ATTRIBUTE_UNION_GROUPS",
"AttributeKeys",
"AttributeManager",
"CarbLogError",
"CategoryTypeValues",
"code_generation",
"CudaPointerValues",
"DebugError",
"ExclusionTypeValues",
"expand_attribute_union_groups",
"generate_cpp",
"generate_documentation",
"generate_python",
"generate_template",
"generate_test_imports",
"generate_tests",
"generate_usd",
"get_attribute_manager_type",
"get_attribute_manager",
"GraphSetupKeys",
"IconKeys",
"LanguageTypeValues",
"MemoryTypeValues",
"MetadataKeys",
"NodeGenerationError",
"NodeTypeKeys",
"ogn_to_sdf",
"ParseError",
"SchedulingHints",
"sdf_to_ogn",
"split_attribute_type_name",
"supported_attribute_type_names",
"TestKeys",
"to_cpp_comment",
"to_python_comment",
"to_usd_comment",
"to_usd_docs",
"UnimplementedError",
]
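
# Example usage of the public API (an illustrative sketch, not part of the original module; the exact
# call shapes live in the modules imported above):
#   import omni.graph.tools.ogn as ogn
#   names = ogn.supported_attribute_type_names()  # all attribute type names legal in a .ogn file
#   manager = ogn.get_attribute_manager("float[3]")  # hypothetical call shape; see the management module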
# ==============================================================================================================
# These are symbols that should technically be prefaced with an underscore because they are used internally
# and are not part of the public API. Renaming them would require a lot of refactoring work, so for now they
# are simply added to the module contents but not to the module exports.
# _ _ _____ _____ _____ ______ _ _
# | | | |_ _| __ \| __ \| ____| \ | |
# | |__| | | | | | | | | | | |__ | \| |
# | __ | | | | | | | | | | __| | . ` |
# | | | |_| |_| |__| | |__| | |____| |\ |
# |_| |_|_____|_____/|_____/|______|_| \_|
#
from ._impl.node_generator.attributes.management import validate_attribute_type_name # noqa: F401
from ._impl.node_generator.attributes.naming import ATTR_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.attributes.naming import ATTR_UI_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.attributes.naming import INPUT_GROUP # noqa: F401
from ._impl.node_generator.attributes.naming import INPUT_NS # noqa: F401
from ._impl.node_generator.attributes.naming import OUTPUT_GROUP # noqa: F401
from ._impl.node_generator.attributes.naming import OUTPUT_NS # noqa: F401
from ._impl.node_generator.attributes.naming import STATE_GROUP # noqa: F401
from ._impl.node_generator.attributes.naming import STATE_NS # noqa: F401
from ._impl.node_generator.attributes.naming import assemble_attribute_type_name # noqa: F401
from ._impl.node_generator.attributes.naming import attribute_name_as_python_property # noqa: F401
from ._impl.node_generator.attributes.naming import attribute_name_in_namespace # noqa: F401
from ._impl.node_generator.attributes.naming import attribute_name_without_port # noqa: F401
from ._impl.node_generator.attributes.naming import check_attribute_name # noqa: F401
from ._impl.node_generator.attributes.naming import check_attribute_ui_name # noqa: F401
from ._impl.node_generator.attributes.naming import is_input_name # noqa: F401
from ._impl.node_generator.attributes.naming import is_output_name # noqa: F401
from ._impl.node_generator.attributes.naming import is_state_name # noqa: F401
from ._impl.node_generator.attributes.naming import namespace_of_group # noqa: F401
from ._impl.node_generator.attributes.NumericAttributeManager import NumericAttributeManager # noqa: F401
from ._impl.node_generator.attributes.parsing import attributes_as_usd # noqa: F401
from ._impl.node_generator.attributes.parsing import separate_ogn_role_and_type # noqa: F401
from ._impl.node_generator.attributes.parsing import usd_type_name # noqa: F401
from ._impl.node_generator.generate_test_imports import import_file_contents # noqa: F401
from ._impl.node_generator.nodes import NODE_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.nodes import NODE_UI_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.nodes import NodeInterface # noqa: F401
from ._impl.node_generator.nodes import NodeInterfaceWrapper # noqa: F401
from ._impl.node_generator.nodes import check_node_language # noqa: F401
from ._impl.node_generator.nodes import check_node_name # noqa: F401
from ._impl.node_generator.nodes import check_node_ui_name # noqa: F401
from ._impl.node_generator.OmniGraphExtension import OmniGraphExtension # noqa: F401
from ._impl.node_generator.utils import _EXTENDED_TYPE_ANY as EXTENDED_TYPE_ANY # noqa: F401
from ._impl.node_generator.utils import _EXTENDED_TYPE_REGULAR as EXTENDED_TYPE_REGULAR # noqa: F401
from ._impl.node_generator.utils import _EXTENDED_TYPE_UNION as EXTENDED_TYPE_UNION # noqa: F401
from ._impl.node_generator.utils import OGN_PARSE_DEBUG # noqa: F401
from ._impl.node_generator.utils import GeneratorConfiguration # noqa: F401
from ._impl.node_generator.utils import check_memory_type # noqa: F401
# By placing these names in an internal list and exporting the list, the backward compatibility code can make
# use of it to allow access to the now-internal objects in a way that looks like they are still published.
_HIDDEN = [
"assemble_attribute_type_name",
"ATTR_NAME_REQUIREMENT",
"ATTR_UI_NAME_REQUIREMENT",
"attribute_name_as_python_property",
"attribute_name_in_namespace",
"attribute_name_without_port",
"attributes_as_usd",
"check_attribute_name",
"check_attribute_ui_name",
"check_memory_type",
"check_node_language",
"check_node_name",
"check_node_ui_name",
"EXTENDED_TYPE_ANY",
"EXTENDED_TYPE_REGULAR",
"EXTENDED_TYPE_UNION",
"GeneratorConfiguration",
"import_file_contents",
"INPUT_GROUP",
"INPUT_NS",
"is_input_name",
"is_output_name",
"is_state_name",
"namespace_of_group",
"NODE_NAME_REQUIREMENT",
"NODE_UI_NAME_REQUIREMENT",
"NodeInterface",
"NodeInterfaceWrapper",
"NumericAttributeManager",
"OGN_PARSE_DEBUG",
"OmniGraphExtension",
"OUTPUT_GROUP",
"OUTPUT_NS",
"separate_ogn_role_and_type",
"STATE_GROUP",
"STATE_NS",
"usd_type_name",
"validate_attribute_type_name",
]

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/make_docs_toc.py

"""
Create a table of contents file in index.rst that references all of the OmniGraph node generated
documentation files that live in that directory.
This processing is highly tied to the formatting of the OGN generated documentation files, so if they
change this has to change as well.
The table of contents will be in two sections.
A table consisting of columns with [node name, node version, link to node doc file, link to node appendix entry]
An appendix with headers consisting of the node name and body consisting of the node's description
"""
from ._impl.node_generator import main_docs
main_docs.main_docs()

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/parse_scheduling.py
from omni.graph.tools._impl.node_generator.parse_scheduling import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_python.py
from omni.graph.tools._impl.node_generator.generate_python import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/main_docs.py
from omni.graph.tools._impl.node_generator.main_docs import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_node_info.py
from omni.graph.tools._impl.node_generator.generate_node_info import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/main.py
from omni.graph.tools._impl.node_generator.main import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_template.py
from omni.graph.tools._impl.node_generator.generate_template import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/type_definitions.py
from omni.graph.tools._impl.node_generator.type_definitions import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_test_imports.py
from omni.graph.tools._impl.node_generator.generate_test_imports import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/__init__.py
import traceback
from carb import log_warn

_trace = "".join(traceback.format_stack())
log_warn(f"The OmniGraph Node Generator has moved. Use 'import omni.graph.tools.ogn as ogn' to access it.\n{_trace}")

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_cpp.py
from omni.graph.tools._impl.node_generator.generate_cpp import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/register_ogn_nodes.py
from omni.graph.tools._impl.node_generator.register_ogn_nodes import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_tests.py
from omni.graph.tools._impl.node_generator.generate_tests import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_usd.py
from omni.graph.tools._impl.node_generator.generate_usd import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/utils.py
from omni.graph.tools._impl.node_generator.utils import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/ThreadsafeOpen.py
from omni.graph.tools._impl.node_generator.ThreadsafeOpen import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/keys.py
from omni.graph.tools._impl.node_generator.keys import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_documentation.py
from omni.graph.tools._impl.node_generator.generate_documentation import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/nodes.py
from omni.graph.tools._impl.node_generator.nodes import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/OmniGraphExtension.py
from omni.graph.tools._impl.node_generator.OmniGraphExtension import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/code_generation.py
from omni.graph.tools._impl.node_generator.code_generation import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_icon.py
from omni.graph.tools._impl.node_generator.generate_icon import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/category_definitions.py
from omni.graph.tools._impl.node_generator.category_definitions import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/FloatAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.FloatAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TokenAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.TokenAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/StringAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.StringAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TimeCodeAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.TimeCodeAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/parsing.py
from omni.graph.tools._impl.node_generator.attributes.parsing import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ObjectIdAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.ObjectIdAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/naming.py
from omni.graph.tools._impl.node_generator.attributes.naming import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/DoubleAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.DoubleAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/NumericAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.NumericAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/AnyAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.AnyAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/FrameAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.FrameAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/NormalAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.NormalAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/VectorAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.VectorAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/Int64AttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.Int64AttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UIntAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UIntAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ExecutionAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.ExecutionAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/BundleAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.BundleAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/RoleAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.RoleAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/management.py
from omni.graph.tools._impl.node_generator.attributes.management import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ColorAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.ColorAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/BoolAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.BoolAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/PathAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.PathAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UInt64AttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UInt64AttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UnionAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UnionAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UCharAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UCharAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/MatrixAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.MatrixAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/IntAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.IntAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/PointAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.PointAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/HalfAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.HalfAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/AttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.AttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/QuaternionAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.QuaternionAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TexCoordAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.TexCoordAttributeManager import * # noqa: F401,PLW0401,PLW0614

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/debugging.py

"""
Collection of tools to help with debugging the operation of scripts.
Mainly lives here so that all OGN-related files can access it, though the tools are pretty general.
"""
import os
import weakref
from contextlib import suppress
from functools import partial, wraps
from typing import Dict, List
from omni.ext import get_dangling_references
__all__ = []
# ======================================================================
# Environment variable gating display and execution of debugging information
# - The value "1" sets OGN_DEBUG for general debugging
# - Any string containing "eval" sets OGN_EVAL_DEBUG
# - Either "1" or a string containing "gc" sets OGN_GC_DEBUG
# - Either "1" or a string containing "ui" sets OGN_UI_DEBUG
# e.g. you could enable UI and GC by setting it to "gc, ui"
_ogn_debug_env_var = os.getenv("OGN_DEBUG")
has_debugging = _ogn_debug_env_var is not None
OGN_DEBUG = _ogn_debug_env_var == "1"
OGN_EVAL_DEBUG = has_debugging and (_ogn_debug_env_var.lower().find("eval") >= 0)
OGN_GC_DEBUG = has_debugging and (_ogn_debug_env_var == "1" or _ogn_debug_env_var.lower().find("gc") >= 0)
OGN_UI_DEBUG = has_debugging and (_ogn_debug_env_var == "1" or _ogn_debug_env_var.lower().find("ui") >= 0)
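
# Example decodings (illustrative, not part of the original module):
#   OGN_DEBUG="1"      -> OGN_DEBUG, OGN_GC_DEBUG, and OGN_UI_DEBUG are True; OGN_EVAL_DEBUG is False
#   OGN_DEBUG="gc, ui" -> only OGN_GC_DEBUG and OGN_UI_DEBUG are True
#   unset              -> all four flags are False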
# ======================================================================
def __dbg(gate_variable: bool, message: str, *args, **kwargs):
"""
    Print out a debugging message if the gate_variable is enabled; any additional args and kwargs are
    passed along to format the given message.
"""
if gate_variable:
if args or kwargs:
print("DBG: " + message.format(*args, **kwargs), flush=True)
else:
print(f"DBG: {message}", flush=True)
# Define a few helper functions that provide debugging for some standard environment variables.
# An even more efficient use pattern is "OGN_DEBUG and dbg(X)", which short-circuits so that even the
# side effects of evaluating the message arguments are avoided when debugging is off.
dbg = partial(__dbg, OGN_DEBUG)
dbg_eval = partial(__dbg, OGN_EVAL_DEBUG)
dbg_gc = partial(__dbg, OGN_GC_DEBUG)
dbg_ui = partial(__dbg, OGN_UI_DEBUG)
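
# Example (illustrative): the short-circuit pattern skips evaluating expensive message arguments entirely
# when the corresponding flag is off:
#   OGN_GC_DEBUG and dbg_gc("References remaining: {}", expensive_reference_count())  # hypothetical helper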
# ======================================================================
# String used for indenting debugging information, so that nested function calls are visually distinct
INDENT = ""
# ======================================================================
def function_trace(env_var=None):
"""
Debugging decorator that adds function call tracing, potentially gated by an environment variable.
Use as a normal function decorator:
.. code-block:: python
@function_trace()
def my_function(value: str) -> str:
return value + value
Calling my_function("X") with debugging enabled will print this:
Calling my_function('X')
'my_function' returned 'XX'
The extra parameter lets you selectively disable it based on environment variables:
.. code-block:: python
@function_trace("OGN_DEBUG")
def my_function(value: str) -> str:
return value + value
This version only enables debugging if the environment variable "OGN_DEBUG" is set
"""
def inner_decorator(func):
"""Having an inner decorator allows parameters to be passed to the outer one"""
@wraps(func)
def wrapper_debug(*args, **kwargs):
"""Wrapper function to add debugging information before and after forwarding calls"""
if env_var is None or os.getenv(env_var) is not None:
global INDENT
args_repr = [repr(a) for a in args]
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
signature = ", ".join(args_repr + kwargs_repr)
print(f"{INDENT}Calling {func.__name__}({signature})")
INDENT += " "
value = func(*args, **kwargs)
print(f"{INDENT}{func.__name__!r} returned {value!r}")
INDENT = INDENT[:-2]
return value
return func(*args, **kwargs)
return wrapper_debug
return inner_decorator
# ======================================================================
def __validate_property_destruction(weak_property, name: str):
"""Check that the weak reference to a property value references a destroyed value"""
# Check to see if the property value is still being referenced
if OGN_GC_DEBUG:
with suppress(AttributeError, TypeError):
if weak_property() is not None:
print(f"Property {name} destroy failed: {get_dangling_references(weak_property())}", flush=True)
# ----------------------------------------------------------------------
def __destroy_property_member(obj_property, name: str):
"""Try to call destroy for the obj_property - returns a weak reference to it for later use"""
dbg(f"Destroying member {name} on {obj_property}")
try:
# Use a weak reference to perform a simple test for "real" destruction
weak_property = weakref.ref(obj_property)
obj_property.destroy()
except AttributeError:
dbg_gc(f"...obj_property {name} has no destroy method")
weak_property = None
except TypeError:
dbg_gc(f"...obj_property {name} cannot be weak referenced")
weak_property = None
return weak_property
# ----------------------------------------------------------------------
def __destroy_property_list(property_list: List, base_name: str):
"""Walk a list of properties, recursively destroying them"""
dbg_gc(f"Destroying list {property_list} as {base_name}")
index = 0
# The non-standard loop is to make sure this execution frame does not retain references to the objects
while property_list:
property_member = property_list.pop(0)
debug_name = f"{base_name}[{index}]"
index += 1
dbg_gc(f"...destroying member {debug_name}")
if isinstance(property_member, list):
dbg_gc("...(as list)")
__destroy_property_list(property_member, debug_name)
elif isinstance(property_member, dict):
dbg_gc("...(as dictionary)")
__destroy_property_dict(property_member, debug_name)
else:
dbg_gc("...(as object)")
weak_property = __destroy_property_member(property_member, debug_name)
property_member = None
__validate_property_destruction(weak_property, debug_name)
# ----------------------------------------------------------------------
def __destroy_property_dict(property_dict: Dict, base_name: str):
"""Walk a dictionary of properties, recursively destroying them"""
dbg_gc(f"Destroying dictionary {property_dict} as {base_name}")
# The non-standard loop is to make sure this execution frame does not retain references to the objects
while property_dict:
property_key, property_member = property_dict.popitem()
debug_name = f"{base_name}[{property_key}]"
dbg_gc(f"...destroying member {debug_name}")
if isinstance(property_member, list):
dbg_gc("...(as list)")
__destroy_property_list(property_member, debug_name)
elif isinstance(property_member, dict):
dbg_gc("...(as dictionary)")
__destroy_property_dict(property_member, debug_name)
else:
dbg_gc("...(as object)")
weak_property = __destroy_property_member(property_member, debug_name)
property_member = None
__validate_property_destruction(weak_property, debug_name)
# ----------------------------------------------------------------------
def destroy_property(self, property_name: str):
"""Call the destroy method on a property and set it to None - helps with garbage collection
In a class's destroy() or __del__ method you can call this to generically handle member destruction
when such things do not happen automatically (e.g. when you cross into the C++-bindings, or the
objects have circular references)
def destroy(self):
destroy_property(self, "_widget")
If the property is a list then the list members are individually destroyed.
If the property is a dictionary then the values of the dictionary are individually destroyed.
    NOTE: Only call this if you are the owner of the property, otherwise just set it to None.
Args:
self: The object owning the property to be destroyed (can be anything with a destroy() method)
property_name: Name of the property to be destroyed
"""
debug_name = f"{type(self).__name__}.{property_name}"
# If the property name uses the double-underscore convention for "internal" data then the name must
# be embellished with the class name to allow access, since this function is not part of the class.
property_to_access = property_name if property_name[0:2] != "__" else f"_{type(self).__name__}{property_name}"
obj_property = getattr(self, property_to_access, None)
if obj_property is None:
dbg_gc(f"Destroyed None member {debug_name} {self} {property_to_access}")
return
dbg_gc(f"Destroy property {debug_name}")
if isinstance(obj_property, list):
dbg_gc("(as list)")
__destroy_property_list(obj_property, debug_name)
setattr(self, property_to_access, [])
elif isinstance(obj_property, dict):
dbg_gc("(as dictionary)")
__destroy_property_dict(obj_property, debug_name)
setattr(self, property_to_access, {})
else:
dbg_gc("(as object)")
weak_property = __destroy_property_member(obj_property, debug_name)
setattr(self, property_to_access, None)
obj_property = None
__validate_property_destruction(weak_property, debug_name)
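
# Example (illustrative): a class delegating cleanup of a hypothetical widget list to destroy_property().
#   class Panel:
#       def __init__(self):
#           self.__widgets = [make_widget(), make_widget()]  # hypothetical objects with destroy() methods
#       def destroy(self):
#           destroy_property(self, "__widgets")  # destroys each element, then resets the list to []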

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/deprecate.py

"""Manage deprecation for Python features for common use
All deprecation functions can be accessed from the top module level.
The :py:class:`omni.graph.tools.DeprecateMessage` class provides a simple way of logging a message that will only
show up once per session.
The :py:class:`omni.graph.tools.DeprecatedClass` decorator provides a method to emit a deprecation message when the
deprecated class is accessed.
The :py:class:`omni.graph.tools.RenamedClass` decorator is a slightly more sophisticated method of deprecating a
class when the deprecation is simply a name change.
The :py:func:`omni.graph.tools.deprecated_function` decorator provides a method to emit a deprecation message
when the old function is called.
The :py:func:`omni.graph.tools.DeprecatedImport` decorator provides a method to emit a deprecation message
when an entire deprecated file is imported for use. This should not be used for imports that will be included
in the API for backward compatibility, nor should these files be moved as they must continue to exist at the
same import location in order to remain compatible.
"""
import functools
import inspect
import re
import traceback
from typing import Optional, Set
from carb import log_warn, settings
__all__ = []
# ==============================================================================================================
class DeprecationError(Exception):
"""Exception to raise when a hard-deprecated import, class, or function is attempted to be used.
Exists to provide a last bit of information to users who have been ignoring previous deprecation errors.
"""
# ==============================================================================================================
# begin-deprecate-message
class DeprecateMessage:
"""Manager for deprecation messages, to make it efficient to prevent multiple logging of the same
deprecation messages.
The default settings for output is usually enough to help you find where deprecated code is referenced.
If more information is desired these per-class variables can be set to reduce the filtering being done. The
    message should contain an action item for the user to upgrade from the deprecated functionality:
.. code-block:: python
DeprecateMessage.deprecated("Install the latest version instead")
    # Although it's not usually necessary, the class can be tuned using these class variables
SILENCE_LOG = False # When set the output does not go to the console log; useful to disable for testing
SHOW_STACK = True # Report stack trace in the deprecation message - can be turned off if it is too verbose
MAX_STACK_LEVELS = 3 # Maximum number of stack levels to report, after filtering
RE_IGNORE = re.compile("deprecate.py|bindings-python|importlib") # Ignore stack levels matching these patterns
You can use some Python features to handle simple deprecation cases directly such as:
.. code-block:: python
        # Rename constant from A to B
        A = (DeprecateMessage.deprecated("A has been renamed to B") and False) or B

        # Constant A will be removed
        A = (DeprecateMessage.deprecated("A will be removed, use B instead") and False) or B
"""
# end-deprecate-message
MESSAGES_LOGGED = set()
SILENCE_LOG = False
SHOW_STACK = True
MAX_STACK_LEVELS = 3
RE_IGNORE = re.compile("deprecate.py|bindings-python|importlib")
class NoLogging:
"""Context manager class to let you import a bunch of known deprecated functions without logging warnings.
Typical use would be in providing backward compatibility in a module where submodules have moved.
with DeprecateMessage.NoLogging():
            from .v1_0 import my_old_function
"""
def __init__(self, *args, **kwargs):
self.__original_logging = None
def __enter__(self):
"""Disable logging for the duration of the context"""
self.__original_logging = DeprecateMessage.SILENCE_LOG
DeprecateMessage.SILENCE_LOG = True
def __exit__(self, exit_type, value, exit_traceback):
"""Restore the original logging state"""
DeprecateMessage.SILENCE_LOG = self.__original_logging
# --------------------------------------------------------------------------------------------------------------
@classmethod
def messages_logged(cls) -> Set[str]:
"""Returns the set of messages that have been logged so far"""
return cls.MESSAGES_LOGGED
# --------------------------------------------------------------------------------------------------------------
@classmethod
def clear_messages(cls):
"""Clear the logged messages so that they can be logged again"""
cls.MESSAGES_LOGGED = set()
# --------------------------------------------------------------------------------------------------------------
@classmethod
def deprecations_are_errors(cls) -> bool:
"""Returns True if deprecations are currently being treated as errors"""
return settings.get_settings().get("/persistent/omnigraph/deprecationsAreErrors")
@classmethod
def set_deprecations_are_errors(cls, make_errors: bool):
"""Enable or disable treating deprecations as errors instead of warnings"""
settings.get_settings().set("/persistent/omnigraph/deprecationsAreErrors", make_errors)
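
    # Example (illustrative): treat deprecations as hard errors for the duration of a test.
    #   DeprecateMessage.set_deprecations_are_errors(True)
    #   try:
    #       call_some_deprecated_api()  # hypothetical; would now raise DeprecationError
    #   finally:
    #       DeprecateMessage.set_deprecations_are_errors(False)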
# --------------------------------------------------------------------------------------------------------------
@classmethod
def deprecated(cls, message: str):
"""Log the deprecation message if it has not yet been logged, otherwise do nothing
Args:
message: Message to display; only displays once even if this is called many times
Adds stack trace information if the class member SHOW_STACK is True.
Skips the Carbonite logging if the class member SILENCE_LOG is True (mostly useful for testing when a
warning is the expected result).
"""
if message in cls.MESSAGES_LOGGED:
return
stack = ""
try:
try:
full_stack = traceback.format_stack() if cls.SHOW_STACK else []
except SyntaxError as error:
full_stack = [f"Error encountered when retrieving call stack - {error}"]
if full_stack:
filtered_stack = filter(lambda stack: not cls.RE_IGNORE.search(stack), full_stack)
stack = "\n" + "".join(list(filtered_stack)[-cls.MAX_STACK_LEVELS :])
except SyntaxError as error:
stack = f"Stack trace not accessible - {error}"
if cls.deprecations_are_errors():
raise DeprecationError(f"{message}{stack}")
_ = cls.SILENCE_LOG or log_warn(f"{message}{stack}")
cls.MESSAGES_LOGGED.add(message)
# ==============================================================================================================
# begin-deprecated-class
class DeprecatedClass:
"""Decorator to deprecate a class. Takes one argument that is a string to describe the action the user is to
take to avoid the deprecated class. A deprecation message will be shown once, the first time the deprecated
class is instantiated.
.. code-block:: python
@DeprecatedClass("After version 1.5.0 use og.NewerClass instead")
class OlderClass:
pass
"""
# end-deprecated-class
def __init__(self, deprecation_message: str):
"""Remember the message and only report it on initialization
Args:
deprecation_message: A description of the action the user is to take to avoid the deprecated class.
"""
self.__deprecation_message = deprecation_message
def message(self, deprecated_cls, deprecated_member: Optional[str] = None):
"""Emit a deprecation message with useful information attached"""
try:
old_name = deprecated_cls.__old_name__
except AttributeError:
old_name = deprecated_cls.__name__
what_is_deprecated = old_name if deprecated_member is None else f"{old_name}.{deprecated_member}"
DeprecateMessage.deprecated(f"{what_is_deprecated} is deprecated: {self.__deprecation_message}")
def __call__(self, deprecated_cls):
"""Report the deprecation message if it hasn't already been reported"""
def wrapper(*args, **kwargs):
"""Redirect function calls to the real class"""
self.message(deprecated_cls)
result = deprecated_cls(*args, **kwargs)
return result
# Do some magic here by copying any static methods on the class to the wrapper function object.
# This handles the case where a deprecated class has static or class methods.
for member_name in dir(deprecated_cls):
if isinstance(inspect.getattr_static(deprecated_cls, member_name), staticmethod):
def static_function(cls, method, *sf_args, **sf_kwargs):
"""Wrapper that will give deprecation messages for calling static methods too"""
self.message(cls, method)
return getattr(cls, method)(*sf_args, **sf_kwargs)
setattr(wrapper, member_name, functools.partial(static_function, deprecated_cls, member_name))
elif isinstance(inspect.getattr_static(deprecated_cls, member_name), classmethod):
def class_function(cls, method, *cl_args, **cl_kwargs):
"""Wrapper that will give deprecation messages for calling class methods too"""
self.message(cls, method)
return getattr(cls, method)(*cl_args, **cl_kwargs)
setattr(wrapper, member_name, functools.partial(class_function, deprecated_cls, member_name))
return wrapper
# ==============================================================================================================
# begin-renamed-class
def RenamedClass(cls, old_class_name: str, rename_message: Optional[str] = None) -> object: # noqa: N802
"""Syntactic sugar to provide a class deprecation that is a simple renaming, where all of the functions in
the old class are still present in backwards compatible form in the new class.
Args:
old_class_name: The name of the class that was renamed
rename_message: If not None, what to use instead of the old class. If None then assume the new class is used.
Usage:
.. code-block:: python
MyDeprecatedClass = RenamedClass(MyNewClass, "MyDeprecatedClass")
"""
# end-renamed-class
@DeprecatedClass(f"Use {cls.__name__ if rename_message is None else rename_message} instead")
class _RenamedClass(cls):
__old_name__ = old_class_name
return _RenamedClass
# ==============================================================================================================
# begin-deprecated-function
def deprecated_function(deprecation_message: str, is_property: bool = False):
"""Decorator to deprecate a function.
Args:
deprecation_message: A description of the action the user is to take to avoid the deprecated function.
is_property: Set this True if the function is a property getter or setter.
A deprecation message will only be shown once, the first time the deprecated function is called.
.. code-block:: python
@deprecated_function("After version 1.5.0 use og.newer_function() instead")
def older_function():
pass
For property getters/setters use this decorator *after* the property decorator.
.. code-block:: python
@property
@deprecated_function("use 'your_prop' instead.", is_property=True)
def my_prop(self):
return self.your_prop
@my_prop.setter
@deprecated_function("use 'your_prop' instead.", is_property=True)
def my_prop(self, value):
self.your_prop = value
"""
# end-deprecated-function
def decorator_deprecated(func):
"""Remember the message"""
        # The functools.wraps decorator lets the help functions drill down into the actual function when asked,
        # rather than stopping at the wrapper function defined here.
@functools.wraps(func)
def wrapper_deprecated(*args, **kwargs):
func_str = f"'{func.__name__}'" if is_property else f"{func.__name__}()"
DeprecateMessage.deprecated(f"{func_str} is deprecated: {deprecation_message}")
return func(*args, **kwargs)
return wrapper_deprecated
return decorator_deprecated
# ==============================================================================================================
# begin-deprecated-import
def DeprecatedImport(deprecation_message: str): # noqa: N802
"""Decorator to deprecate a specific file or module import. Usually the functionality has been deprecated and
moved to a different file.
Args:
deprecation_message: String with the action the user is to perform to avoid the deprecated import
Usage:
.. code-block:: python
'''This is the top line of the imported file'''
import omni.graph.tools as og
og.DeprecatedImport("Import 'omni.graph.tools as og' and use og.new_function() instead")
# The rest of the file can be left as-is for best backward compatibility, or import non-deprecated versions
# of objects from their new location to avoid duplication.
"""
# end-deprecated-import
this_module = inspect.currentframe().f_back.f_locals["__name__"]
DeprecateMessage.deprecated(f"{this_module} is deprecated: {deprecation_message}")

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/extension.py

"""Extension management support"""
import omni.ext
__all__ = []
class _PublicExtension(omni.ext.IExt):
"""Dummy extension class that just serves to register and deregister the extension"""
def on_startup(self):
"""Callback when the extension is starting up"""
def on_shutdown(self):
"""Callback when the extension is shutting down"""

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/ogn_types.py

"""Helper which contains utilities and data for converting between type representations"""
from typing import Optional
from pxr import Sdf
__all__ = []
# Mapping of pxr.Sdf.ValueTypeNames to corresponding OGN types (not including the Array/[] suffixes)
_SDF_BASE_NAME_TO_OGN = {
"Bool": "bool",
"Color3d": "colord[3]",
"Color3f": "colorf[3]",
"Color3h": "colorh[3]",
"Color4d": "colord[4]",
"Color4f": "colorf[4]",
"Color4h": "colorh[4]",
"Double": "double",
"Double2": "double[2]",
"Double3": "double[3]",
"Double4": "double[4]",
"Float": "float",
"Float2": "float[2]",
"Float3": "float[3]",
"Float4": "float[4]",
"Frame4d": "framed[4]",
"Half": "half",
"Half2": "half[2]",
"Half3": "half[3]",
"Half4": "half[4]",
"Int": "int",
"Int2": "int[2]",
"Int3": "int[3]",
"Int4": "int[4]",
"Int64": "int64",
"Matrix2d": "matrixd[2]",
"Matrix3d": "matrixd[3]",
"Matrix4d": "matrixd[4]",
"Normal3d": "normald[3]",
"Normal3f": "normalf[3]",
"Normal3h": "normalh[3]",
"Point3d": "pointd[3]",
"Point3f": "pointf[3]",
"Point3h": "pointh[3]",
"Quatd": "quatd[4]",
"Quatf": "quatf[4]",
"Quath": "quath[4]",
"String": "string",
"TexCoord2d": "texcoordd[2]",
"TexCoord2f": "texcoordf[2]",
"TexCoord2h": "texcoordh[2]",
"TexCoord3d": "texcoordd[3]",
"TexCoord3f": "texcoordf[3]",
"TexCoord3h": "texcoordh[3]",
"TimeCode": "timecode",
"Token": "token",
"UChar": "uchar",
"UInt": "uint",
"UInt64": "uint64",
"Vector3d": "vectord[3]",
"Vector3f": "vectorf[3]",
"Vector3h": "vectorh[3]",
}
# Mapping of OGN types to SDF - not all OGN types can be translated directly
_OGN_TO_SDF_BASE_NAME = {value: key for key, value in _SDF_BASE_NAME_TO_OGN.items()}
# As the Sdf.ValueTypeNames are static Boost objects, create a mapping of them back to OGN to avoid a linear lookup
_SDF_TO_OGN = {getattr(Sdf.ValueTypeNames, key): value for key, value in _SDF_BASE_NAME_TO_OGN.items()}
_SDF_TO_OGN.update(
{getattr(Sdf.ValueTypeNames, f"{key}Array"): f"{value}[]" for key, value in _SDF_BASE_NAME_TO_OGN.items()}
)
# ================================================================================
def ogn_to_sdf(ogn_type: str) -> Optional[Sdf.ValueTypeNames]:
"""Convert an OGN type string to the equivalent SDF value type name
Args:
ogn_type: String representation of the OGN type as described in its documentation
Return:
Equivalent pxr.Sdf.ValueTypeNames value, or None if there is no equivalent
"""
is_array = False
if ogn_type[-2:] == "[]":
is_array = True
ogn_type = ogn_type[:-2]
try:
sdf_type_name = _OGN_TO_SDF_BASE_NAME[ogn_type]
if is_array:
sdf_type_name += "Array"
sdf_type = getattr(Sdf.ValueTypeNames, sdf_type_name, None)
except KeyError:
sdf_type = None
return sdf_type
# ================================================================================
def sdf_to_ogn(sdf_type: Sdf.ValueTypeName) -> Optional[str]:
"""Convert an SDF type to the equivalent OGN type name
Args:
        sdf_type: The Sdf.ValueTypeName to be converted, as described in its documentation
Return:
Equivalent OGN string name value, or None if there is no equivalent
"""
is_array = False
if str(sdf_type)[-5:] == "Array":
is_array = True
try:
ogn_type_name = _SDF_TO_OGN[sdf_type]
if is_array:
ogn_type_name += "[]"
except KeyError:
ogn_type_name = None
return ogn_type_name
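
# Example round trip (illustrative, not part of the original module; assumes pxr is importable):
#   ogn_to_sdf("float[3][]")                   -> Sdf.ValueTypeNames.Float3Array
#   sdf_to_ogn(Sdf.ValueTypeNames.Float3Array) -> "float[3][]"
#   ogn_to_sdf("not-a-type")                   -> None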

omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/parse_scheduling.py

"""Support for the parsing and interpretation of scheduling hints in the .ogn file"""
from __future__ import annotations # For the forward class reference type in compare()
import re
from enum import Enum
from typing import List, Optional, Union
from .utils import IndentedOutput, ParseError
# ======================================================================
class _AccessType(Enum):
"""Access type for a given scheduling flag
ALL = The data will be both read and written to
READ = The data will only be read
WRITE = The data will only be written
"""
ALL = "ReadWrite"
READ = "ReadOnly"
WRITE = "WriteOnly"
@classmethod
def flag_access_type(cls, flag_name: str):
"""Returns the type of access the flag name implies"""
if flag_name.endswith("-read"):
return cls.READ
if flag_name.endswith("-write"):
return cls.WRITE
return cls.ALL
@classmethod
def as_cpp_enum(cls, access_type: _AccessType) -> str:
"""Returns the C++ enum value corresponding to the access type string taken from the class data values"""
if access_type == cls.READ:
return "eAccessType::eRead"
if access_type == cls.WRITE:
return "eAccessType::eWrite"
return "eAccessType::eReadWrite"
@classmethod
def as_python_enum(cls, access_type: _AccessType) -> str:
"""Returns the Python enum value corresponding to the access type string taken from the class data values"""
if access_type == cls.READ:
return "og.eAccessType.E_READ"
if access_type == cls.WRITE:
return "og.eAccessType.E_WRITE"
return "og.eAccessType.E_READ_WRITE"
# ======================================================================
class _ComputeRule(Enum):
"""Compute Rule for the scheduling flag
DEFAULT = Evaluator default rule
ON_REQUEST = Compute skipped until INode::onRequest
"""
DEFAULT = "compute-default"
ON_REQUEST = "compute-on-request"
@classmethod
def flag_compute_rule(cls, flag_name: str):
"""Returns the type of compute-rule the flag name implies"""
if flag_name == cls.ON_REQUEST.value:
return cls.ON_REQUEST
return cls.DEFAULT
@classmethod
def as_cpp_enum(cls, compute_rule: _ComputeRule) -> str:
"""Returns the C++ enum value corresponding to the string taken from the class data values"""
if compute_rule == cls.ON_REQUEST:
return "eComputeRule::eOnRequest"
return "eComputeRule::eDefault"
@classmethod
def as_python_enum(cls, compute_rule: _ComputeRule) -> str:
"""Returns the Python enum value corresponding to the access type string taken from the class data values"""
if compute_rule == cls.ON_REQUEST:
return "og.eComputeRule.E_ON_REQUEST"
return "og.eComputeRule.E_DEFAULT"
# ======================================================================
# begin-scheduling-hints
class SchedulingHints:
"""Class managing the scheduling hints.
The keywords are case-independent during parsing, specified in lower case here for easy checking.
When there is a -read and -write variant only one of them should be specified at a time:
no suffix: The item in question is accessed for both read and write
-read suffix: The item in question is accessed only for reading
-write suffix: The item in question is accessed only for writing
These class static values list the possible values for the "scheduling" lists in the .ogn file.
# Set when the node accesses other global data, i.e. data stored outside of the node, including the data
# on other nodes.
GLOBAL_DATA = "global"
GLOBAL_DATA_READ = "global-read"
GLOBAL_DATA_WRITE = "global-write"
# Set when a node accesses static data, i.e. data shared among all nodes of the same type
STATIC_DATA = "static"
STATIC_DATA_READ = "static-read"
STATIC_DATA_WRITE = "static-write"
# Set when the node is a threadsafe function, i.e. it can be scheduled in parallel with any other nodes, including
# nodes of the same type. This flag is not allowed to coexist with any of the other types since they all denote
# unsafe threaded data access.
THREADSAFE = "threadsafe"
# Set when the node accesses the graph topology, e.g. connections, attributes, or nodes
TOPOLOGY = "topology"
TOPOLOGY_READ = "topology-read"
TOPOLOGY_WRITE = "topology-write"
# Set when the node accesses the USD stage data (for read-only, write-only, or both read and write)
USD = "usd"
USD_READ = "usd-read"
USD_WRITE = "usd-write"
# Set when the scheduling of the node compute may be modified from the evaluator default.
COMPUTERULE_DEFAULT = "compute-default"
COMPUTERULE_ON_REQUEST = "compute-on-request"
"""
# end-scheduling-hints
GLOBAL_DATA = "global"
GLOBAL_DATA_READ = "global-read"
GLOBAL_DATA_WRITE = "global-write"
STATIC_DATA = "static"
STATIC_DATA_READ = "static-read"
STATIC_DATA_WRITE = "static-write"
THREADSAFE = "threadsafe"
TOPOLOGY = "topology"
TOPOLOGY_READ = "topology-read"
TOPOLOGY_WRITE = "topology-write"
USD = "usd"
USD_READ = "usd-read"
USD_WRITE = "usd-write"
COMPUTERULE_DEFAULT = "compute-default"
COMPUTERULE_ON_REQUEST = "compute-on-request"
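
    # Example (illustrative): SchedulingHints("threadsafe, usd-read") marks a node safe for parallel
    # scheduling while declaring read-only USD access. Note that order matters in this sketch:
    # "threadsafe" must be set before the read-only flags, since set_flag() rejects it once any
    # access flag is already set.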
def __init__(self, scheduling_hints: Union[List[str], str]):
"""Initialize the scheduling hints from the .ogn description"""
self.global_data = None
self.static_data = None
self.threadsafe = None
self.topology = None
self.usd = None
self.compute_rule = None
self._allowed_tokens = [
getattr(self, token_name) for token_name in dir(SchedulingHints) if token_name.isupper()
]
if not isinstance(scheduling_hints, list) and not isinstance(scheduling_hints, str):
raise ParseError("Scheduling hints must be a comma-separated string or a list of strings")
if isinstance(scheduling_hints, str):
# This trick allows lists to be delimited by arbitrary combinations of commas and spaces, so that the
# user doesn't have to remember which one to use
scheduling_hints = [element for element in re.split(" |, |,", scheduling_hints) if element]
for hints in scheduling_hints:
self.set_flag(hints)
# --------------------------------------------------------------------------------------------------------------
def __str__(self) -> str:
"""Returns a string with the set of flags currently set"""
result = []
result.append(f"GLOBAL={None if self.global_data is None else self.global_data.value}")
result.append(f"STATIC={None if self.static_data is None else self.static_data.value}")
result.append(f"THREADSAFE={False if self.threadsafe is None else self.threadsafe}")
result.append(f"TOPOLOGY={None if self.topology is None else self.topology.value}")
result.append(f"USD={None if self.usd is None else self.usd.value}")
result.append(f'COMPUTE_RULE="{None if self.compute_rule is None else self.compute_rule.value}"')
return ", ".join(result)
# --------------------------------------------------------------------------------------------------------------
def parse_error(self, message: str):
"""Raises a parse error with common information attached to the given message"""
raise ParseError(f"{message} - [{self}]")
# --------------------------------------------------------------------------------------------------------------
def set_flag(self, flag_to_set: str):
"""Tries to enable the named flag.
Raises ParseError if the flag is not legal or not compatible with current flags"""
flag_to_set = flag_to_set.lower()
if flag_to_set not in self._allowed_tokens:
self.parse_error(f"Scheduling flag '{flag_to_set}' not in allowed list {self._allowed_tokens}")
if flag_to_set == self.THREADSAFE:
if [self.usd, self.global_data, self.static_data, self.topology] != [None, None, None, None]:
self.parse_error(f"'{flag_to_set}' scheduling type not compatible with any data modification flags")
self.threadsafe = True
elif flag_to_set in [self.USD, self.USD_READ, self.USD_WRITE]:
if self.usd is not None:
self.parse_error(f"{flag_to_set} must be the only USD flag set")
if self.threadsafe and flag_to_set != self.USD_READ:
self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
self.usd = _AccessType.flag_access_type(flag_to_set)
elif flag_to_set in [self.STATIC_DATA, self.STATIC_DATA_READ, self.STATIC_DATA_WRITE]:
if self.static_data is not None:
self.parse_error(f"{flag_to_set} must be the only static_data flag set")
if self.threadsafe and flag_to_set != self.STATIC_DATA_READ:
self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
self.static_data = _AccessType.flag_access_type(flag_to_set)
elif flag_to_set in [self.GLOBAL_DATA, self.GLOBAL_DATA_READ, self.GLOBAL_DATA_WRITE]:
if self.global_data is not None:
self.parse_error(f"{flag_to_set} must be the only global data flag set")
if self.threadsafe and flag_to_set != self.GLOBAL_DATA_READ:
self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
self.global_data = _AccessType.flag_access_type(flag_to_set)
elif flag_to_set in [self.TOPOLOGY, self.TOPOLOGY_READ, self.TOPOLOGY_WRITE]:
if self.topology is not None:
self.parse_error(f"{flag_to_set} must be the only topology flag set")
if self.threadsafe and flag_to_set != self.TOPOLOGY_READ:
self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
self.topology = _AccessType.flag_access_type(flag_to_set)
elif flag_to_set in [self.COMPUTERULE_DEFAULT, self.COMPUTERULE_ON_REQUEST]:
if self.compute_rule is not None:
self.parse_error(f"{flag_to_set} must be the only compute-rule flag set")
self.compute_rule = _ComputeRule.flag_compute_rule(flag_to_set)
# --------------------------------------------------------------------------------------------------------------
def compare(self, other: SchedulingHints) -> List[str]:
"""Compare this object against another of the same type to see if their flag configurations match.
If they don't match then a list of differences is returned, otherwise an empty list
"""
errors = []
if self.usd != other.usd:
errors.append(f"usd flag mismatch '{self.usd}' != '{other.usd}'")
if self.global_data != other.global_data:
errors.append(f"global_data flag mismatch '{self.global_data}' != '{other.global_data}'")
if self.topology != other.topology:
errors.append(f"topology flag mismatch '{self.topology}' != '{other.topology}'")
if self.static_data != other.static_data:
errors.append(f"static_data flag mismatch '{self.static_data}' != '{other.static_data}'")
if self.threadsafe != other.threadsafe:
errors.append(f"threadsafe flag mismatch '{self.threadsafe}' != '{other.threadsafe}'")
if self.compute_rule != other.compute_rule:
errors.append(f"compute-rule flag mismatch '{self.compute_rule}' != '{other.compute_rule}'")
return errors
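    # Hedged sketch: compare() returns human-readable differences, so a test can simply
    # assert that the list is empty:
    #
    #     mismatches = parsed_hints.compare(expected_hints)
    #     assert not mismatches, "\n".join(mismatches)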
# --------------------------------------------------------------------------------------------------------------
def has_values_set(self) -> bool:
"""Returns True if any of the scheduling hints values have been set"""
return [self.threadsafe, self.global_data, self.static_data, self.topology, self.usd, self.compute_rule] != [
None
] * 6
# --------------------------------------------------------------------------------------------------------------
def cpp_includes_required(self) -> List[str]:
"""Returns a list of files required to be included for the generated C++ code to work"""
return ["#include <omni/graph/core/ISchedulingHints.h>"] if self.has_values_set() else []
# --------------------------------------------------------------------------------------------------------------
def emit_cpp(self, out: IndentedOutput) -> bool:
"""Write the C++ initialization code to the given output stream, writing nothing if no flags were set.
Assumes there is a local variable called nodeTypeObj that contains the NodeTypeObj definition.
Returns True if anything was written.
"""
if not self.has_values_set():
return False
out.write("auto __schedulingInfo = nodeTypeObj.iNodeType->getSchedulingHints(nodeTypeObj);")
out.write('CARB_ASSERT(__schedulingInfo, "Could not acquire the scheduling hints");')
out.write("if (__schedulingInfo)")
if out.indent("{"):
if self.threadsafe:
out.write("__schedulingInfo->setThreadSafety(eThreadSafety::eSafe);")
elif self.threadsafe is not None:
out.write("__schedulingInfo->setThreadSafety(eThreadSafety::eUnsafe);")
if self.global_data is not None:
out.write(
"__schedulingInfo->setDataAccess(eAccessLocation::eGlobal,"
f" {_AccessType.as_cpp_enum(self.global_data)});"
)
if self.static_data is not None:
out.write(
"__schedulingInfo->setDataAccess(eAccessLocation::eStatic,"
f" {_AccessType.as_cpp_enum(self.static_data)});"
)
if self.topology is not None:
out.write(
"__schedulingInfo->setDataAccess(eAccessLocation::eTopology,"
f" {_AccessType.as_cpp_enum(self.topology)});"
)
if self.usd is not None:
out.write(
f"__schedulingInfo->setDataAccess(eAccessLocation::eUsd, {_AccessType.as_cpp_enum(self.usd)});"
)
if self.compute_rule is not None:
out.write(f"__schedulingInfo->setComputeRule({_ComputeRule.as_cpp_enum(self.compute_rule)});")
out.exdent("}")
return True
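    # For illustration only (hedged): with threadsafe=True and usd=_AccessType.READ the
    # stream receives C++ along these lines; the exact enum text in the placeholder comes
    # from _AccessType.as_cpp_enum(), defined elsewhere in this file:
    #
    #     auto __schedulingInfo = nodeTypeObj.iNodeType->getSchedulingHints(nodeTypeObj);
    #     CARB_ASSERT(__schedulingInfo, "Could not acquire the scheduling hints");
    #     if (__schedulingInfo)
    #     {
    #         __schedulingInfo->setThreadSafety(eThreadSafety::eSafe);
    #         __schedulingInfo->setDataAccess(eAccessLocation::eUsd, <as_cpp_enum(READ)>);
    #     }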
# --------------------------------------------------------------------------------------------------------------
def emit_python(self, out: IndentedOutput) -> bool:
"""Write the Python initialization code to the given output stream, writing nothing if no flags were set.
Assumes there is a local variable called node_type that contains the Py_NodeType definition.
Returns True if anything was written.
"""
if not self.has_values_set():
return False
out.write("__hints = node_type.get_scheduling_hints()")
if out.indent("if __hints is not None:"):
if self.threadsafe:
out.write("__hints.thread_safety = og.eThreadSafety.E_SAFE")
elif self.threadsafe is not None:
out.write("__hints.thread_safety = og.eThreadSafety.E_UNSAFE")
if self.global_data is not None:
out.write(
"__hints.set_data_access(og.eAccessLocation.E_GLOBAL,"
f" {_AccessType.as_python_enum(self.global_data)})"
)
if self.static_data is not None:
out.write(
"__hints.set_data_access(og.eAccessLocation.E_STATIC,"
f" {_AccessType.as_python_enum(self.static_data)})"
)
if self.topology is not None:
out.write(
"__hints.set_data_access(og.eAccessLocation.E_TOPOLOGY,"
f" {_AccessType.as_python_enum(self.topology)})"
)
if self.usd is not None:
out.write(f"__hints.set_data_access(og.eAccessLocation.E_USD, {_AccessType.as_python_enum(self.usd)})")
if self.compute_rule is not None:
out.write(f"__hints.compute_rule = {_ComputeRule.as_python_enum(self.compute_rule)}")
out.exdent()
return True
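    # For illustration only (hedged): with threadsafe=True and a global_data access type
    # set, the generated Python looks like this; the placeholder is filled in by
    # _AccessType.as_python_enum():
    #
    #     __hints = node_type.get_scheduling_hints()
    #     if __hints is not None:
    #         __hints.thread_safety = og.eThreadSafety.E_SAFE
    #         __hints.set_data_access(og.eAccessLocation.E_GLOBAL, <as_python_enum(...)>)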
# --------------------------------------------------------------------------------------------------------------
@classmethod
def illegal_configurations(cls) -> List[str]:
"""Returns a list of illegal parsing configurations for testing purposes. Keeps the data local"""
return [
'{"not": "a list or string"}',
'["foo"]', # List with bad values
'"usd, bar"', # String with bad values
'["usd", "usd-read"]', # Lists with incompatible values
'["global-write", "global-read"]',
'["topology", "topology-write"]',
'["static", "static-read"]',
'"threadsafe, static"', # String with incompatible values
'["compute-default", "compute-on-request"]',
]
# --------------------------------------------------------------------------------------------------------------
@classmethod
def legal_configurations(cls) -> List[str]:
"""Returns a list of legal parsing configurations and expected results for testing purposes.
The data is a list of pairs where the first element is the flags to be set on the scheduling hints
in the .ogn file (possibly with extra information as needed) and the second element is a SchedulingHints
        object configured with the expected results. The object provides a compare operation that the test
        can use to confirm the results.
"""
def from_flags(
global_data: Optional[_AccessType] = None,
threadsafe: Optional[bool] = None,
static_data: Optional[_AccessType] = None,
topology: Optional[_AccessType] = None,
usd: Optional[_AccessType] = None,
compute_rule: Optional[_ComputeRule] = None,
) -> SchedulingHints:
"""Returns a SchedulingHints object whose flags are set to the ones passed in"""
scheduling = SchedulingHints([])
scheduling.global_data = global_data
scheduling.threadsafe = threadsafe
scheduling.static_data = static_data
scheduling.topology = topology
scheduling.usd = usd
scheduling.compute_rule = compute_rule
return scheduling
return [
('"global"', from_flags(global_data=_AccessType.ALL)),
('"threadsafe"', from_flags(threadsafe=True)),
('"static-read"', from_flags(static_data=_AccessType.READ)),
('"topology-write"', from_flags(topology=_AccessType.WRITE)),
(
'"usd,global-write,topology-read"',
from_flags(usd=_AccessType.ALL, global_data=_AccessType.WRITE, topology=_AccessType.READ),
),
(
'["usd", "global-read", "topology-write"]',
from_flags(usd=_AccessType.ALL, global_data=_AccessType.READ, topology=_AccessType.WRITE),
),
('"compute-on-request"', from_flags(compute_rule=_ComputeRule.ON_REQUEST)),
('"compute-default"', from_flags(compute_rule=_ComputeRule.DEFAULT)),
]
| 19,726 | Python | 48.3175 | 119 | 0.57812 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_python.py | """Support for generating a pythonic interface class for OmniGraph Nodes.
Exports:
generate_python: Create a NODE.ogn.py file containing a pythonic interface for the node data
"""
import json
import re
from contextlib import suppress
from typing import List, Optional
from .attributes.AttributeManager import AttributeManager
from .attributes.naming import INPUT_NS, OUTPUT_NS, PORT_NAMES, STATE_NS
from .keys import CudaPointerValues, LanguageTypeValues, MemoryTypeValues, MetadataKeyOutput, MetadataKeys
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, logger, shorten_string_lines_to
__all__ = ["generate_python"]
class NodePythonGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate a Python interface for a node"""
def __init__(self, configuration: GeneratorConfiguration): # noqa: PLW0246
"""Set up the generator and output the Python interface code for the node
Just passes the initialization on to the parent class. See the argument and exception descriptions there.
"""
super().__init__(configuration)
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the Python file"""
return self.base_name + "Database.py"
# ----------------------------------------------------------------------
def database_class_name(self) -> str:
"""Return the name of the generated database class, which is what will be passed to the compute method"""
return f"{self.base_name}Database"
# ----------------------------------------------------------------------
def _value_class_name(self, namespace: str) -> str:
"""Return the name of the internal class that holds attributes in the given namespace"""
return f"ValuesFor{namespace.capitalize()}"
# ----------------------------------------------------------------------
def _pre_class_spacing(self):
"""Writes out spacing before class names - follows Flake8 in verbose mode, nothing otherwise"""
if self.verbose:
self.out.write()
self.out.write()
# ----------------------------------------------------------------------
def _pre_function_spacing(self):
"""Writes out spacing before function definitions - follows Flake8 in verbose mode, nothing otherwise"""
if self.verbose:
self.out.write()
# ----------------------------------------------------------------------
def _filter_out_batched_attributes(self, attribute_list: List[AttributeManager], namespace: str):
"""
Args:
attribute_list: List of attributes belonging to the generated class
namespace: Namespace of attributes in the list. Assumption is that all attributes have the same answer.
Returns:
Two lists of attributes: batched attributes and the filtered list without batched attributes"""
if namespace == STATE_NS:
return [], attribute_list
batched_attribute_list = []
filtered_attribute_list = []
for attribute in attribute_list:
# batching of attributes is not supported for runtime types
# batching of array attributes wouldn't be the most efficient. best is to acquire the right size
# numpy.array once and work with it directly currently only limited to CPU memory
if (
attribute.ogn_base_type() not in ["bundle", "any", "union"]
and attribute.array_depth == 0
and attribute.memory_storage() == MemoryTypeValues.CPU
):
batched_attribute_list.append(attribute)
else:
filtered_attribute_list.append(attribute)
return batched_attribute_list, filtered_attribute_list
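    # Hedged example of the batching rule above: a plain CPU "float" attribute qualifies
    # for the batched path, while arrays, bundles/any/union types, and GPU-memory
    # attributes fall through to the per-attribute path:
    #
    #     batched, rest = self._filter_out_batched_attributes(attrs, INPUT_NS)
    #     # "float" -> batched; "float[]", "bundle", or CUDA-memory attrs -> rest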
# ----------------------------------------------------------------------
def _generate_attribute_class(self, attribute_list: List[AttributeManager], namespace: str) -> Optional[str]:
"""Output a nested class that provides database access for the node's input or output attributes.
Args:
attribute_list: List of attributes belonging to the generated class
namespace: Namespace of attributes in the list. Assumption is that all attributes have the same answer.
Passed explicitly to allow for the possibility of an empty list.
Returns:
The name of the class that was generated (None if not generated)
The attribute classes have two members per attribute:
attr_PROPERTY: Holds a reference to the node's Attribute member for this attribute
PROPERTY: A property through which the attribute values are accessed
"""
# This method is called with all attributes in the same namespace so it's safe to use the first one
# to extract the common definition.
attribute_class = self._value_class_name(namespace)
is_read_only = namespace == INPUT_NS
# For correct syntax the namespace name must be singular
namespace_for_comment = namespace[:-1] if namespace.endswith("s") else namespace
self._pre_function_spacing()
if self.out.indent(f"class {attribute_class}(og.DynamicAttributeAccess):"):
batched_attribute_list, filtered_attribute_list = self._filter_out_batched_attributes(
attribute_list, namespace
)
has_batched_attributes = len(batched_attribute_list) > 0
if has_batched_attributes:
local_property_list = [attribute.python_property_name() for attribute in batched_attribute_list]
if namespace == INPUT_NS:
local_property_list += ["_setting_locked", "_batchedReadAttributes", "_batchedReadValues"]
elif namespace == OUTPUT_NS:
local_property_list += ["_batchedWriteValues"]
batched_str = "{" + ", ".join(f'"{attribute}"' for attribute in local_property_list) + "}"
self.out.write(f"LOCAL_PROPERTY_NAMES = {batched_str}")
elif namespace != STATE_NS:
self.out.write("LOCAL_PROPERTY_NAMES = { }")
self.out.write(
f'"""Helper class that creates natural hierarchical access to {namespace_for_comment} attributes"""'
)
if self.out.indent(
"def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):"
):
self.out.write('"""Initialize simplified access for the attribute data"""')
self.out.write("context = node.get_graph().get_default_graph_context()")
self.out.write("super().__init__(context, node, attributes, dynamic_attributes)")
has_bundles = False
gpu_bundles = []
gpu_ptr_kinds = {}
for attribute in attribute_list:
if attribute.ogn_base_type() == "bundle":
has_bundles = True
if attribute.memory_type != MemoryTypeValues.CPU:
gpu_bundles.append(attribute.usd_name())
with suppress(KeyError):
gpu_ptr_kinds[attribute.usd_name()] = CudaPointerValues.PYTHON[
self.node_interface.cuda_pointer_type
]
if has_bundles:
gpu_ptr_str = "{" + ",".join(f'"{key}": {value}' for key, value in gpu_ptr_kinds.items()) + "}"
self.out.write(
f"self.__bundles = og.BundleContainer(context, node, attributes, {gpu_bundles},"
f" read_only={is_read_only}, gpu_ptr_kinds={gpu_ptr_str})"
)
# Output arrays will need a size since that has to be set when the user gets their values.
# This puts the onus on the caller to set the size before calling get(). For safety, the sizes
# are initialized to None so that failure to set values can generate a sensible error message.
if not is_read_only:
for attribute in attribute_list:
if attribute.fabric_needs_counter():
default_size = "None" if attribute.default is None else len(attribute.default)
self.out.write(f"self.{attribute.python_property_name()}_size = {default_size}")
# Initialize storage for batched values
if namespace == INPUT_NS:
batched_str = (
"["
+ ", ".join(f"self.{attribute.python_attribute_name()}" for attribute in batched_attribute_list)
+ "]"
)
self.out.write(f"self._batchedReadAttributes = {batched_str}")
batched_str = (
"["
+ ", ".join(f"{attribute.python_default_value()}" for attribute in batched_attribute_list)
+ "]"
)
self.out.write(f"self._batchedReadValues = {batched_str}")
elif namespace == OUTPUT_NS:
self.out.write("self._batchedWriteValues = { }")
self.out.exdent()
for attribute in filtered_attribute_list:
# Emit the getters and setters for the attributes.
attribute.generate_python_property_code(self.out)
for index, attribute in enumerate(batched_attribute_list):
# Emit the getters and setters for batched read or write
attribute.generate_python_batched_property_code(index, self.out)
if has_batched_attributes:
# Override any dynamic getters and setters for batched attributes to remove the overhead
self.out.write()
if self.out.indent("def __getattr__(self, item: str):"):
if self.out.indent("if item in self.LOCAL_PROPERTY_NAMES:"):
self.out.write("return object.__getattribute__(self, item)")
self.out.exdent()
if self.out.indent("else:"):
self.out.write("return super().__getattr__(item)")
self.out.exdent()
self.out.exdent()
self.out.write()
if self.out.indent("def __setattr__(self, item: str, new_value):"):
if self.out.indent("if item in self.LOCAL_PROPERTY_NAMES:"):
self.out.write("object.__setattr__(self, item, new_value)")
self.out.exdent()
if self.out.indent("else:"):
self.out.write("super().__setattr__(item, new_value)")
self.out.exdent()
self.out.exdent()
if namespace == INPUT_NS:
self.out.write()
if self.out.indent("def _prefetch(self):"):
self.out.write("readAttributes = self._batchedReadAttributes")
self.out.write("newValues = _og._prefetch_input_attributes_data(readAttributes)")
if self.out.indent("if len(readAttributes) == len(newValues):"):
self.out.write("self._batchedReadValues = newValues")
self.out.exdent()
self.out.exdent()
elif namespace == OUTPUT_NS:
self.out.write()
if self.out.indent("def _commit(self):"):
self.out.write("_og._commit_output_attributes_data(self._batchedWriteValues)")
self.out.write("self._batchedWriteValues = { }")
self.out.exdent()
self.out.exdent()
return attribute_class
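    # For a hypothetical node with one CPU float input named "value", the method above
    # emits a class shaped roughly like this (hedged sketch, bodies elided):
    #
    #     class ValuesForInputs(og.DynamicAttributeAccess):
    #         LOCAL_PROPERTY_NAMES = {"value", "_setting_locked",
    #                                 "_batchedReadAttributes", "_batchedReadValues"}
    #         """Helper class that creates natural hierarchical access to input attributes"""
    #         def __init__(self, node, attributes, dynamic_attributes):
    #             ...
    #         def _prefetch(self):
    #             ...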
# ----------------------------------------------------------------------
def _generate_shared_node_type_initialize(self):
"""
Output the code to set up any shared node type information, like adding attributes and setting metadata.
Assumes this is part of a method where the variable "node_type" contains the node type object to initialize
"""
# Set the metadata for this node type
self.out.write(f"node_type.set_metadata(ogn.MetadataKeys.EXTENSION, {json.dumps(self.extension)})")
for key, value in self.node_interface.metadata.items():
python_key = MetadataKeyOutput.python_name_from_key(key)
if python_key is None:
python_key = json.dumps(key)
# Handle lists as a comma-separated string
if isinstance(value, list):
value = '"' + ",".join([x.replace('"', '\\"') for x in value]) + '"'
else:
value = json.dumps(value)
self.out.write(f"node_type.set_metadata({python_key}, {value})")
if self.node_interface.memory_type != MemoryTypeValues.CPU:
self.out.write(f'node_type.set_metadata(ogn.MetadataKeys.MEMORY_TYPE, "{self.node_interface.memory_type}")')
# The icon path is relative to the extension path, which is only known at runtime, so build it up then.
# To the user it will appear as an absolute path, which they can modify if they wish to.
if self.node_interface.icon_path is not None:
icon_path = json.dumps(self.node_interface.icon_path)
self.out.write(f'icon_path = carb.tokens.get_tokens_interface().resolve("${{{self.extension}}}")')
# Using os.path.join here causes problems due to the backslash path separator on Windows. The components
# both have forward slashes by design so just insert the missing one.
self.out.write(f"icon_path = icon_path + '/' + {icon_path}")
self.out.write("node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)")
# If any of the scheduling hints flags have been defined then set them here
if self.node_interface.scheduling_hints is not None:
self.node_interface.scheduling_hints.emit_python(self.out)
# Generate the initialization of attributes, including setting defaults and adding them to the node type
if self.node_interface.has_attributes():
self.out.write(f"{self.database_class_name()}.INTERFACE.add_to_node_type(node_type)")
if self.node_interface.all_state_attributes() or self.node_interface.has_state:
self.out.write("node_type.set_has_state(True)")
# ----------------------------------------------------------------------
def _generate_node_registration(self):
"""
Output the definition of the node type's registration support method
By having the node type class object be a static class member a circular import can be avoided.
The node implementation will call OgnTheNodeDatabase.register(OgnTheNode) to handle registration and the
automatic override of any ABI methods that OgnTheNode might implement.
"""
db_class_name = self.database_class_name()
self._pre_function_spacing()
self.out.write("NODE_TYPE_CLASS = None")
# Find the version of this extension in use so that it can be imprinted into the generated file
self.out.write(f"GENERATOR_VERSION = {self.generator_version}")
self.out.write(f"TARGET_VERSION = {self.target_version}")
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def register(node_type_class):"):
self.out.write(f"{db_class_name}.NODE_TYPE_CLASS = node_type_class")
self.out.write(f"og.register_node_type({db_class_name}.abi, {self.node_interface.version})")
self.out.exdent()
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def deregister():"):
self.out.write(f'og.deregister_node_type("{self.node_interface.name}")')
self.out.exdent()
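    # Hedged sketch of the emitted registration support for a hypothetical
    # OgnMyNodeDatabase with node version 1 (the version tuples come from the
    # generator configuration):
    #
    #     NODE_TYPE_CLASS = None
    #     GENERATOR_VERSION = (1, 17, 2)
    #     TARGET_VERSION = (2, 65, 4)
    #
    #     @staticmethod
    #     def register(node_type_class):
    #         OgnMyNodeDatabase.NODE_TYPE_CLASS = node_type_class
    #         og.register_node_type(OgnMyNodeDatabase.abi, 1)
    #
    #     @staticmethod
    #     def deregister():
    #         og.deregister_node_type("omni.examples.MyNode")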
# ----------------------------------------------------------------------
def _generate_abi_get_node_type(self):
"""Output the abi implementation of the get_node_type method"""
db_class_name = self.database_class_name()
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def get_node_type():"):
self.out.write(f"get_node_type_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'get_node_type', None)")
if self.out.indent("if callable(get_node_type_function):"):
self.out.write("return get_node_type_function()")
self.out.exdent(f"return '{self.node_interface.name}'")
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_compute(self):
"""Output the abi implementation of the compute method"""
db_class_name = self.database_class_name()
def __generate_attribute_validate(attribute_list: List[AttributeManager]):
"""Write out any code that verifies the validity of attributes before trying to compute"""
for attribute in attribute_list:
attribute.generate_python_validation(self.out)
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def compute(context, node):"):
# Construct the database that accesses the Fabric data in a Pythonic way
if self.out.indent("try:"):
self.out.write(f"per_node_data = {db_class_name}.PER_NODE_DATA[node.node_id()]")
self.out.write("db = per_node_data.get('_db')")
if self.out.indent("if db is None:"):
self.out.write(f"db = {db_class_name}(node)")
self.out.write("per_node_data['_db'] = db")
self.out.exdent()
self.out.exdent()
# Currently with hot reload we are not getting PER_NODE_DATA initialized. Just generate the db on the fly.
if self.out.indent("except:"):
self.out.write(f"db = {db_class_name}(node)")
self.out.exdent()
self.out.write()
if self.out.indent("try:"):
__generate_attribute_validate(self.node_interface.all_input_attributes())
__generate_attribute_validate(self.node_interface.all_output_attributes())
__generate_attribute_validate(self.node_interface.all_state_attributes())
# The ABI compute method has the same name as the generated compute method to be called, so use
# the fact that the ABI method has more parameters to figure out which one the node has defined.
self.out.write(f"compute_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'compute', None)")
if self.out.indent("if callable(compute_function) and compute_function.__code__.co_argcount > 1:"):
self.out.write("return compute_function(context, node)")
self.out.exdent()
self.out.write()
# Fetch input attributes registered for batch read
self.out.write("db.inputs._prefetch()")
# Special flag that prevents inputs from being modified inside a compute method, which avoids
# synchronization problems. In C++ this is enforced by returning const values; this is equivalent.
# Suppress the error that occurs if no inputs were generated.
self.out.write("db.inputs._setting_locked = True")
# If the node attempted to write a const value the compute will throw AttributeError saying why
if self.out.indent("with og.in_compute():"):
self.out.write(f"return {db_class_name}.NODE_TYPE_CLASS.compute(db)")
self.out.exdent()
self.out.exdent()
# For this error only the name of the attribute is returned, to minimize duplication of strings
if self.out.indent("except Exception as error:"):
self.out.write('stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))')
self.out.write(
"db.log_error(f'Assertion raised in compute - {error}\\n{stack_trace}', add_context=False)"
)
self.out.exdent()
if self.out.indent("finally:"):
self.out.write("db.inputs._setting_locked = False")
# Commit output attributes registered for batch write
self.out.write("db.outputs._commit()")
self.out.exdent()
self.out.write("return False")
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_initialize(self):
"""Output the abi implementation of the initialize method"""
db_class_name = self.database_class_name()
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def initialize(context, node):"):
# Give the database a chance to cache away any node-specific data that will not change each evaluation
self.out.write(f"{db_class_name}._initialize_per_node_data(node)")
self.out.write(f"initialize_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'initialize', None)")
if self.out.indent("if callable(initialize_function):"):
self.out.write("initialize_function(context, node)")
self.out.exdent()
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_release(self):
"""Output the abi implementation of the release method"""
db_class_name = self.database_class_name()
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def release(node):"):
self.out.write(f"release_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'release', None)")
if self.out.indent("if callable(release_function):"):
self.out.write("release_function(node)")
self.out.exdent()
# Release any node-specific data that was cached during the initialize function
self.out.write(f"{db_class_name}._release_per_node_data(node)")
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_update_node_version(self):
"""Output the abi implementation of the update_node_version method"""
db_class_name = self.database_class_name()
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def update_node_version(context, node, old_version, new_version):"):
self.out.write(
f"update_node_version_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'update_node_version', None)"
)
if self.out.indent("if callable(update_node_version_function):"):
self.out.write("return update_node_version_function(context, node, old_version, new_version)")
self.out.exdent("return False")
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_initialize_type(self):
"""Output the abi implementation of the intialize_type method"""
db_class_name = self.database_class_name()
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def initialize_type(node_type):"):
self.out.write(
f"initialize_type_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'initialize_type', None)"
)
self.out.write("needs_initializing = True")
if self.out.indent("if callable(initialize_type_function):"):
self.out.write("needs_initializing = initialize_type_function(node_type)")
self.out.exdent()
# By returning a bool the initialize_type override can request attribute additions from the parent
# rather than a full override.
if self.out.indent("if needs_initializing:"):
self._generate_shared_node_type_initialize()
self.out.exdent()
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_on_connection_type_resolve(self):
"""Output the abi implementation of the on_connection_type_resolve method"""
db_class_name = self.database_class_name()
self._pre_function_spacing()
self.out.write("@staticmethod")
if self.out.indent("def on_connection_type_resolve(node):"):
self.out.write(
"on_connection_type_resolve_function = "
f"getattr({db_class_name}.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)"
)
if self.out.indent("if callable(on_connection_type_resolve_function):"):
self.out.write("on_connection_type_resolve_function(node)")
self.out.exdent()
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_database_abi(self):
"""Output a registration method and subclass that handles ABI access for the Python node"""
self._pre_function_spacing()
if self.out.indent("class abi:"):
self.out.write('"""Class defining the ABI interface for the node type"""')
self._generate_abi_get_node_type()
self._generate_abi_compute()
self._generate_abi_initialize()
self._generate_abi_release()
self._generate_abi_update_node_version()
self._generate_abi_initialize_type()
self._generate_abi_on_connection_type_resolve()
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_token_help(self):
"""Generate the help information showing how to access any hardcoded tokens in the file"""
if not self.node_interface.tokens:
return
self.out.write()
if self.out.indent("Predefined Tokens:"):
for token_name, _ in self.node_interface.tokens.items():
self.out.write(f"tokens.{token_name}")
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_tokens(self):
"""Generate the code required to define and initialize any hardcoded tokens in the file"""
if not self.node_interface.tokens:
return
self._pre_function_spacing()
if self.out.indent("class tokens:"):
for token_name, token_value in self.node_interface.tokens.items():
value = json.dumps(token_value)
self.out.write(f"{token_name} = {value}")
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_attribute_definitions(self):
"""Output the database class member that describes unchanging attribute data"""
self._pre_function_spacing()
self.out.write("# This is an internal object that provides per-class storage of a per-node data dictionary")
self.out.write("PER_NODE_DATA = {}")
all_attributes = self.node_interface.all_attributes()
self._pre_function_spacing()
self.out.write("# This is an internal object that describes unchanging attributes in a generic way")
self.out.write("# The values in this list are in no particular order, as a per-attribute tuple")
self.out.write("# Name, Type, ExtendedTypeIndex, UiName, Description, Metadata,")
self.out.write("# Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg")
self.out.write("# You should not need to access any of this data directly, use the defined database interfaces")
if self.out.indent("INTERFACE = og.Database._get_interface(["):
empty_list = [None, None, None, None, None, None, None, None, None, None]
for attribute in all_attributes:
attribute_data = empty_list[:]
attribute_data[0] = attribute.name
(extended_type, type_info) = attribute.python_extended_type()
attribute_data[1] = attribute.create_type_name() if type_info is None else type_info
attribute_data[2] = extended_type
with suppress(KeyError):
attribute_data[3] = attribute.metadata[MetadataKeys.UI_NAME]
with suppress(KeyError):
attribute_data[4] = attribute.metadata[MetadataKeys.DESCRIPTION]
metadata = {}
for key, value in attribute.metadata.items():
if key not in [MetadataKeys.UI_NAME, MetadataKeys.DESCRIPTION]:
python_key = MetadataKeyOutput.python_name_from_key(key)
if python_key is None:
python_key = key
metadata[python_key] = value
attribute_data[5] = metadata
attribute_data[6] = attribute.is_required
attribute_data[7] = attribute.default
attribute_data[8] = attribute.is_deprecated
attribute_data[9] = attribute.deprecation_msg if attribute.is_deprecated else ""
raw_output = f"{tuple(attribute_data)},"
# ogn.MetadataKeys is an object name so make sure it is not quoted
raw_output = re.sub(r'"(ogn.MetadataKeys[^"]*)"', r"\1", raw_output)
raw_output = re.sub(r"'(ogn.MetadataKeys[^']*)'", r"\1", raw_output)
self.out.write(raw_output)
self.out.exdent("])")
# ----------------------------------------------------------------------
def _generate_role_definition_method(self):
"""Output the method responsible for initialize the role-based data, if any attributes have roles to set"""
# Find attributes with non-default roles for output.
        # Dictionary is {NAMESPACED_ATTRIBUTE: ROLE_NAME}
roles_to_output = {}
for attribute in self.node_interface.all_attributes():
role = attribute.python_role_name()
if role:
roles_to_output[f"{attribute.namespace}.{attribute.python_property_name()}"] = role
# Rely on the base class method if no roles were found
if not roles_to_output:
return
self._pre_function_spacing()
self.out.write("@classmethod")
if self.out.indent("def _populate_role_data(cls):"):
self.out.write('"""Populate a role structure with the non-default roles on this node type"""')
self.out.write("role_data = super()._populate_role_data()")
for attribute_name, role in roles_to_output.items():
self.out.write(f"role_data.{attribute_name} = {role}")
self.out.write("return role_data")
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_attribute_access_help(self):
"""Output the help information describing attribute properties available on this node type"""
if not self.node_interface.has_attributes():
return
def __generate_attribute_access_help(attribute_list: List[AttributeManager]):
"""Output the documentation for a single section of attributes (input/output/state)"""
if not attribute_list:
return
# All attributes are in the same namespace so use the first one to extract its name
if self.out.indent(f"{attribute_list[0].namespace.capitalize()}:"):
for attribute in attribute_list:
self.out.write(f"{attribute.namespace}.{attribute.python_property_name()}")
self.out.exdent()
self.out.write()
if self.out.indent("Attribute Value Properties:"):
__generate_attribute_access_help(self.node_interface.all_input_attributes())
__generate_attribute_access_help(self.node_interface.all_output_attributes())
__generate_attribute_access_help(self.node_interface.all_state_attributes())
self.out.exdent()
# ----------------------------------------------------------------------
def _generate_database_class(self):
"""Output a class that provides database access for the node's compute method.
The class has nested class members called "inputs", "outputs", and "state" that make access to attribute values
more natural:
inputValue = Node.inputs.InputAttribute
Node.outputs.OutputAttribute = inputValue * 2
"""
db_class_name = self.database_class_name()
self._pre_class_spacing()
if self.out.indent(f"class {db_class_name}(og.Database):"):
self.out.write(
f'"""Helper class providing simplified access to data on nodes of type {self.node_interface.name}'
)
self.out.write()
if self.out.indent("Class Members:"):
self.out.write("node: Node being evaluated")
self.out.exdent()
self._generate_attribute_access_help()
self._generate_token_help()
self.out.write('"""')
self._generate_attribute_definitions()
self._generate_tokens()
self._generate_role_definition_method()
input_class_name = self._generate_attribute_class(
self.node_interface.all_input_attributes(), namespace=INPUT_NS
)
output_class_name = self._generate_attribute_class(
self.node_interface.all_output_attributes(), namespace=OUTPUT_NS
)
state_class_name = self._generate_attribute_class(
self.node_interface.all_state_attributes(), namespace=STATE_NS
)
self._pre_function_spacing()
if self.out.indent("def __init__(self, node):"):
self.out.write("super().__init__(node)")
for (value_class_name, namespace) in [
(input_class_name, INPUT_NS),
(output_class_name, OUTPUT_NS),
(state_class_name, STATE_NS),
]:
if value_class_name is not None:
self.out.write(
f"dynamic_attributes = self.dynamic_attribute_data(node, {PORT_NAMES[namespace]})"
)
self.out.write(
f"self.{namespace} = {db_class_name}.{value_class_name}"
f"(node, self.attributes.{namespace}, dynamic_attributes)"
)
self.out.exdent()
# When the node is written in Python there are some helper methods to add
if self.node_interface.language == LanguageTypeValues.PYTHON:
self._generate_database_abi()
# By having the node type class object be a static class member a circular import can be avoided.
# The node implementation will call OgnTheNodeDatabase.register(OgnTheNode) to handle registration and the
# automatic override of any ABI methods that OgnTheNode might implement.
self._generate_node_registration()
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Output a Python script containing interface and database support for an OmniGraph node
Raises:
NodeGenerationError: When there is a failure in the generation of the Python class
"""
self.out.write(f'"""Support for simplified access to data on nodes of type {self.node_interface.name}')
self.out.write()
for line in shorten_string_lines_to(self.node_interface.description, 120):
self.out.write(line)
self.out.write('"""')
self.out.write()
self.out.write("import omni.graph.core as og")
self.out.write("import omni.graph.core._omni_graph_core as _og")
self.out.write("import omni.graph.tools.ogn as ogn")
imports = []
# Icon path resolution requires more imports
if self.node_interface.icon_path is not None:
imports.append("import carb")
# Python-implemented nodes need access to stack information for compute error reporting
if self.node_interface.language == LanguageTypeValues.PYTHON:
imports.append("import sys")
imports.append("import traceback")
# Imports required by the attributes
for attribute in self.node_interface.all_attributes():
imports += attribute.python_imports()
for import_statement in set(imports):
self.out.write(import_statement)
# Both Python and C++ nodes benefit from the use of the Pythonic database class
self._generate_database_class()
# ======================================================================
def generate_python(configuration: GeneratorConfiguration) -> Optional[str]:
"""Create support files for the pythonic interface to a node
Args:
configuration: Information defining how and where the documentation will be generated
Returns:
String containing the generated Python database definition or None if its generation was not enabled
Raises:
NodeGenerationError: When there is a failure in the generation of the Python database
"""
if not configuration.node_interface.can_generate("python"):
return None
logger.info("Generating Python Database")
generator = NodePythonGenerator(configuration)
generator.generate_interface()
return str(generator.out)
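# Hedged usage sketch; GeneratorConfiguration is defined in .utils and its exact
# constructor arguments are not shown here, so "configuration" is assumed to exist:
#
#     from pathlib import Path
#     python_db = generate_python(configuration)
#     if python_db is not None:
#         Path("OgnMyNodeDatabase.py").write_text(python_db)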
| 38,485 | Python | 52.011019 | 120 | 0.571417 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/main_docs.py | """
Create a table of contents file in index.rst that references all of the generated OmniGraph node
documentation files that live in that directory.
This processing is tightly tied to the formatting of the generated OGN documentation files, so if they
change this has to change as well.
The table of contents will be in two sections.
A table consisting of columns with [node name, node version, link to node doc file, link to node appendix entry]
An appendix with headers consisting of the node name and body consisting of the node's description
"""
import argparse
import logging
import os
from pathlib import Path
from typing import List, Optional
from .generate_documentation import (
RE_OGN_BODY_MARKER,
RE_OGN_DESCRIPTION_TITLE,
RE_OGN_DOC_FILENAME,
RE_OGN_INPUTS_TITLE,
RE_OGN_NAME_INFO,
)
from .utils import WritableDir, logger, rst_table, rst_title
# If True then perform more aggressive directory checks, not safe in a multi-threaded environment
SAFE_DIRECTORY_CREATION = False
# Name of the generated index file
INDEX_FILENAME = "index.rst"
# Selectively turn on logging if the OGN debugging environment variable is set
logger.setLevel(logging.DEBUG if os.getenv("OGN_DEBUG") else logging.WARN)
# ======================================================================
def construct_parser() -> argparse.ArgumentParser:
"""Construct and return the parser for the script arguments"""
# If no output directory is specified generated files will end up in the current directory
default_output_dir = Path.cwd()
# This helps format the usage information in a nicer way
os.putenv("COLUMNS", "120")
# Construct the parsing information. Run the script with "--help" to see the usage.
parser = argparse.ArgumentParser(
description="Read a directory of OGN documentation files and create an index for them",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"-od",
"--ognDirectory",
action=WritableDir,
const=default_output_dir,
type=Path,
metavar="DIR",
help="directory containing the OGN documentation files, where the index will be generated",
)
parser.add_argument("-v", "--verbose", action="store_true", help="output the steps the script is performing")
return parser
# ======================================================================
class OgnIndexCreator:
"""Handler to analyze OGN documentation files and generate a Table of Contents for them
Attributes:
documentation_files: List of documentation files found in the directory
index_file_path: Full path to the generated index file
ogn_directory: Path where the table of contents will be generated
"""
def __init__(self, ogn_directory: Path):
"""Read the contents of the docs directory and prepare it for generation"""
logger.info("Scanning documentation directory %s", ogn_directory)
self.ogn_directory = ogn_directory
self.index_file_path = self.ogn_directory / INDEX_FILENAME
self.documentation_files = []
for path_object in self.ogn_directory.glob("**/*"):
if path_object.is_file() and RE_OGN_DOC_FILENAME.search(str(path_object)):
self.documentation_files.append(path_object)
# ----------------------------------------------------------------------
def extract_node_information(self, ogn_doc_path: str):
"""Read the OGN documentation file and extract the information to use for the index
Patterns assumed, in order:
One line matching RE_OGN_NAME_INFO with the node name
One line matching RE_OGN_DESCRIPTION_TITLE, followed immediately by...
...one line with the title RST (probably dashes)
An undefined number of lines containing the node description
A blank line (to be omitted)
One line matching RE_OGN_INPUTS_TITLE, marking the end of the description
The rest will be ignored as there is no more relevant information
Args:
ogn_doc_path: Path to the node's documentation file
Returns:
(Marker, Name, Description) tuple with the node's information
"""
in_body = False
name = None
description = []
marker = None
found_description = False # True after the description title was found
in_description = False # True after the description body is entered
try:
with open(ogn_doc_path, "r", encoding="utf-8") as doc_fd:
for line in doc_fd:
if not in_body:
body_marker_match = RE_OGN_BODY_MARKER.match(line)
if body_marker_match:
in_body = True
marker = body_marker_match.group(1)
elif name is None:
name_match = RE_OGN_NAME_INFO.match(line)
if name_match:
name = name_match.group(1)
elif found_description:
found_description = False
in_description = True
elif in_description:
if RE_OGN_INPUTS_TITLE.search(line):
in_description = False
break
description.append(line[:-1])
elif RE_OGN_DESCRIPTION_TITLE.search(line):
found_description = True
# If attributes were not found then an extra blank line is needed to separate sections
if in_description:
description.append("\n")
if marker is None:
logger.error("Marker not found in %s", ogn_doc_path)
if name is None:
logger.error("Name not found in %s", ogn_doc_path)
if not description:
logger.error("Description not found in %s", ogn_doc_path)
except Exception as error: # noqa: PLW0703
# Report the failure but continue processing
logger.error("Error processing %s: %s", ogn_doc_path, error)
        return (marker, name.rstrip() if name is not None else name, description)
# ----------------------------------------------------------------------
def index_is_out_of_date(self) -> bool:
"""Returns True if the index file is older than any of the other files in the directory"""
if not self.index_file_path.is_file():
return True
index_modified_time = self.index_file_path.lstat().st_mtime
return any(
index_modified_time < documentation_file.lstat().st_mtime for documentation_file in self.documentation_files
)
# ----------------------------------------------------------------------
def construct_index(self):
"""Construct the table of contents in an index file"""
if not self.index_is_out_of_date():
logger.info("Documentation is up to date. Index generation skipped")
return
# Dictionary containing the information needed to generate the index file
# Key = Node File, Value = [Name, Version, Description]
node_information = {}
for ogn_doc_file in self.documentation_files:
logger.info("Processing %s", ogn_doc_file)
node_information[ogn_doc_file] = self.extract_node_information(ogn_doc_file)
sorted_keys = sorted(node_information.keys(), key=lambda key: node_information[key][1])
rows = [["Node", "Detailed Documentation"]]
if not node_information:
# Avoid a table with no contents, as that will generate a syntax error
rows.append(["", ""])
else:
for ogn_doc_file in sorted_keys:
(marker, node_name, _) = node_information[ogn_doc_file]
rows.append([f"`{node_name}`_", f":ref:`{marker}`"])
try:
with open(self.index_file_path, "w", newline="\n", encoding="utf-8") as index_file:
index_file.write(rst_title("OGN Node List", 0))
index_file.write("\n\n.. tabularcolumns:: |l|l|\n\n")
index_file.write(rst_table(rows))
index_file.write("\n")
index_file.write(rst_title("Node Descriptions", 1))
index_file.write("\n")
for ogn_doc_file in sorted_keys:
(_, node_name, node_documentation) = node_information[ogn_doc_file]
index_file.write(f"{rst_title(node_name, 1)}\n")
index_file.write("\n".join(node_documentation))
except Exception as error: # noqa: PLW0703
logger.error("Cannot write to index file %s : %s", self.index_file_path, error)
# ======================================================================
def main_docs(args_to_parse: Optional[List] = None):
"""Parse the contents of sys.args and perform the requested function."""
parser = construct_parser()
args = parser.parse_args(args_to_parse)
# If the script steps are to be echoed enable the logger and dump the script arguments as a first step
logger.setLevel(logging.DEBUG if args.verbose else logging.WARN)
logger.info("ognDirectory = %s", args.ognDirectory)
index_handler = OgnIndexCreator(args.ognDirectory)
index_handler.construct_index()
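# Hedged usage sketch: build docs/ogn/index.rst from previously generated node docs
# (the directory path is illustrative):
#
#     main_docs(["--ognDirectory", "docs/ogn", "--verbose"])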
# ======================================================================
if __name__ == "__main__":
main_docs()
| 9,594 | Python | 42.416289 | 120 | 0.58818 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_node_info.py | """
Support for updating the node information file for OmniGraph Nodes.
Exported Methods:
generate_node_info
"""
import json
from json.decoder import JSONDecodeError
from typing import Optional
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, ParseError, ensure_writable_directory, logger
__all__ = [
"generate_node_info",
]
# ======================================================================
class NodeInfoGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate a C++ interface for a node"""
def __init__(self, configuration: GeneratorConfiguration):
"""Set up the generator and output the meta-information for the node
Just passes the initialization on to the parent class. See the argument and exception descriptions there.
"""
logger.info("Creating NodeInfoGenerator")
try:
ensure_writable_directory(configuration.destination_directory)
except Exception as error:
raise ParseError("Unable to create node information directory") from error
super().__init__(configuration)
try:
with open(self.output_path, "r", encoding="utf-8") as output_fd:
self.node_information = json.load(output_fd)
except (FileNotFoundError, KeyError, JSONDecodeError):
self.node_information = {"nodes": {}}
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the node information file, relative to the configured directory"""
return "nodes.json"
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Generate the node information for the node"""
logger.info("Generating node information for node %s", self.node_interface.name)
this_nodes_information = {
self.node_interface.name: {
"description": self.node_interface.description,
"version": self.node_interface.version,
"extension": self.extension,
"language": self.node_interface.language,
}
}
self.node_information["nodes"].update(this_nodes_information)
node_info_as_json = json.dumps(self.node_information, indent=4)
self.out.write(node_info_as_json)
# ======================================================================
def generate_node_info(configuration: GeneratorConfiguration) -> Optional[str]:
"""Create or modify the extension's node information file
Args:
configuration: Information defining how and where the node information file will be generated
Returns:
String containing the generated/updated node information
Raises:
NodeGenerationError: When there is a failure in the generation of the node information file
"""
logger.info("Generating node information")
generator = NodeInfoGenerator(configuration)
generator.generate_interface()
return str(generator.out)
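# Hedged sketch of the nodes.json content this produces for one hypothetical node;
# the structure follows the update performed in generate_node_interface() above:
#
#     {
#         "nodes": {
#             "omni.examples.MyNode": {
#                 "description": "...",
#                 "version": 1,
#                 "extension": "omni.examples",
#                 "language": "python"
#             }
#         }
#     }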
| 3,126 | Python | 37.604938 | 113 | 0.615803 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/main.py | # noqa: E501,PLW1203
"""Generate code and documentation for an OmniGraph Node description file.
Takes a JSON file containing information describing the configuration of an OmniGraph node and generates
a header file implementing a simplified interface to the graph ABI.
Run this script with the arg "--help" to see available functions in this form, followed by the current list
of supported attribute types:
usage: generate_node.py [-h] [-cd DIR] [-c [DIR]] [-d [DIR]]
[-e EXTENSION_NAME] [-i [DIR]]
[-in [INTERMEDIATE_DIRECTORY]]
[-m [PYTHON_IMPORT_MODULE]] [-n [FILE.ogn]] [-p [DIR]]
[-s SETTING_NAME] [-t [DIR]] [-td FILE.json]
[-tp [DIR]] [-u] [-usd [DIR]] [-uw [DIR]] [-v]
Parse a node interface description file and generate code or documentation
optional arguments:
-h, --help show this help message and exit
-cd DIR, --configDirectory DIR
the directory containing the code generator configuration files (default is current)
-c [DIR], --cpp [DIR]
generate the C++ interface class into the specified directory (default is current)
-d [DIR], --docs [DIR]
generate the node documentation into the specified directory (default is current)
-e EXTENSION_NAME, --extension EXTENSION_NAME
name of the extension requesting the generation
-i [DIR], --icons [DIR]
directory into which to install the icon, if one is found
-in [INTERMEDIATE_DIRECTORY], --intermediate [INTERMEDIATE_DIRECTORY]
directory into which temporary build information is stored
-m [PYTHON_IMPORT_MODULE], --module [PYTHON_IMPORT_MODULE]
Python module where the Python node files live
-n [FILE.ogn], --nodeFile [FILE.ogn]
file containing the node description (use stdin if file name is omitted)
-p [DIR], --python [DIR]
generate the Python interface class into the specified directory (default is current)
-s SETTING_NAME, --settings SETTING_NAME
define one or more build-specific settings that can be used to change the generated code at runtime
-t [DIR], --tests [DIR]
generate a file containing basic operational tests for this node
-td FILE.json, --typeDefinitions FILE.json
file name containing the mapping to use from OGN type names to generated code types
-tp [DIR], --template [DIR]
generate an annotated template for the C++ node class into the specified directory (default is current)
-u, --unitTests run the unit tests on this file
-usd [DIR], --usdPath [DIR]
generate a file containing a USD template for nodes of this type
-uw [DIR], --unwritable [DIR]
mark the generated directory as unwritable at runtime
-v, --verbose output the steps the script is performing as it performs them
"""
import argparse
import logging
import os
import sys
from pathlib import Path
from typing import List, Optional
from .attributes.management import formatted_supported_attribute_type_names
from .category_definitions import get_category_definitions
from .generate_cpp import generate_cpp
from .generate_documentation import generate_documentation
from .generate_icon import generate_icon
from .generate_node_info import generate_node_info
from .generate_python import generate_python
from .generate_template import generate_template
from .generate_tests import generate_tests
from .generate_usd import generate_usd
from .keys import LanguageTypeValues
from .nodes import NodeInterfaceWrapper
from .type_definitions import apply_type_definitions
from .utils import (
UNWRITABLE_TAG_FILE,
GeneratorConfiguration,
ParseError,
Settings,
UnimplementedError,
ensure_writable_directory,
logger,
)
__all__ = ["main"]
# ======================================================================
def construct_parser() -> argparse.ArgumentParser:
"""Construct and return the parser for the script arguments"""
class ReadableDir(argparse.Action):
"""Helper class for the parser to check for a readable directory"""
def __call__(self, parser, namespace, values, option_string=None):
"""Function called by the arg parser to verify that a directory exists and is readable
Args:
parser: argparser required argument, ignored
namespace: argparser required argument, ignored
values: The path to the directory being checked for writability
option_string: argparser required argument, ignored
Raises:
argparse.ArgumentTypeError if the requested directory cannot be found or created in readable mode
"""
prospective_dir = values
try:
                # If the directory can't be read then listdir will raise an exception.
                # Note that an empty listing still indicates a readable directory, so
                # only the absence of an exception is tested, not the listing itself.
                os.listdir(prospective_dir)
                setattr(namespace, self.dest, prospective_dir)
except Exception as error:
raise argparse.ArgumentTypeError(str(error))
class WritableDir(argparse.Action):
"""Helper class for the parser to check for a writable directory"""
def __call__(self, parser, namespace, values, option_string=None):
"""Function called by the arg parser to verify that a directory exists and is writable
Args:
parser: argparser required argument, ignored
namespace: argparser required argument, ignored
values: The path to the directory being checked for writability
option_string: argparser required argument, ignored
Raises:
argparse.ArgumentTypeError if the requested directory cannot be found or created in writable mode
"""
prospective_dir = values
try:
ensure_writable_directory(prospective_dir)
setattr(namespace, self.dest, prospective_dir)
except Exception as error:
raise argparse.ArgumentTypeError(str(error))
# If no output directory is specified generated files will end up in the current directory
default_output_dir = os.path.realpath(os.getcwd())
# This helps format the usage information in a nicer way
os.putenv("COLUMNS", "120")
# Generate a message enumerating the set of attribute types currently supported
available_attribute_types = formatted_supported_attribute_type_names()
formatted_types = "\n\t".join(available_attribute_types)
epilog = "Available attribute types:\n\t" + formatted_types
available_settings = Settings().all()
if available_settings:
epilog += "\nAvailable settings:\n\t" + "\n\t".join(
[f"{name}: {description}" for name, (_, description) in available_settings.items()]
)
# Construct the parsing information. Run the script with "--help" to see the usage.
parser = argparse.ArgumentParser(
description="Parse a node interface description file and generate code or documentation",
formatter_class=argparse.RawTextHelpFormatter,
epilog=epilog,
)
parser.add_argument(
"-cd",
"--configDirectory",
action=ReadableDir,
const=default_output_dir,
metavar="DIR",
help="the directory containing the code generator configuration files (default is current)",
)
parser.add_argument(
"-c",
"--cpp",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="DIR",
help="generate the C++ interface class into the specified directory (default is current)",
)
parser.add_argument(
"-d",
"--docs",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="DIR",
help="generate the node documentation into the specified directory (default is current)",
)
parser.add_argument(
"-e",
"--extension",
action="store",
metavar="EXTENSION_NAME",
default=None,
help="name of the extension requesting the generation",
)
# Notice how, unlike other directory names, this one is not a "WritableDir" as the directory should only
# be created if the node happens to have an icon, which isn't discovered until parse time.
parser.add_argument(
"-i",
"--icons",
action="store",
nargs="?",
const=default_output_dir,
metavar="DIR",
help="directory into which to install the icon, if one is found",
)
parser.add_argument(
"-in",
"--intermediate",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="INTERMEDIATE_DIRECTORY",
help="directory into which temporary build information is stored",
)
parser.add_argument(
"-m",
"--module",
nargs="?",
action="store",
metavar="PYTHON_IMPORT_MODULE",
help="Python module where the Python node files live",
)
parser.add_argument(
"-n",
"--nodeFile",
nargs="?",
type=argparse.FileType("r"),
const=sys.stdin,
help="file containing the node description (use stdin if file name is omitted)",
metavar="FILE.ogn",
)
parser.add_argument(
"-p",
"--python",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="DIR",
help="generate the Python interface class into the specified directory (default is current)",
)
parser.add_argument(
"-s",
"--settings",
type=str,
action="append",
metavar="SETTING_NAME",
help="define one or more build-specific settings that can be used to change the generated code at runtime",
)
parser.add_argument(
"-t",
"--tests",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="DIR",
help="generate a file containing basic operational tests for this node",
)
parser.add_argument(
"-td",
"--typeDefinitions",
action="store",
default=None,
help="file name containing the mapping to use from OGN type names to generated code types",
metavar="FILE.json",
)
parser.add_argument(
"-tp",
"--template",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="DIR",
help="generate an annotated template for the C++ node class into the specified directory (default is current)",
)
parser.add_argument("-u", "--unitTests", action="store_true", help="run the unit tests on this file")
parser.add_argument(
"-usd",
"--usdPath",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="DIR",
help="generate a file containing a USD template for nodes of this type",
)
parser.add_argument(
"-uw",
"--unwritable",
action=WritableDir,
nargs="?",
const=default_output_dir,
metavar="DIR",
help="mark the generated directory as unwritable at runtime",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="output the steps the script is performing as it performs them"
)
return parser
# ======================================================================
def main(args_to_parse: Optional[List] = None):
"""Parse the contents of the argument list and perform the requested function. Uses sys.argv if None."""
parser = construct_parser()
args = parser.parse_args(args_to_parse)
# If the script steps are to be echoed enable the logger and dump the script arguments as a first step
logger.setLevel(logging.DEBUG if args.verbose else logging.WARN)
logger.info("cpp == %s", args.cpp)
logger.info("configDirectory == %s", args.configDirectory)
logger.info("template == %s", args.template)
logger.info("docs == %s", args.docs)
logger.info("extension == %s", args.extension)
logger.info("icons == %s", args.icons)
logger.info("intermediate == %s", args.intermediate)
logger.info("module == %s", args.module)
logger.info("nodeFile == %s", args.nodeFile)
logger.info("python == %s", args.python)
logger.info("settings == %s", args.settings)
logger.info("tests == %s", args.tests)
logger.info("typeDefinitions == %s", args.typeDefinitions)
logger.info("unitTests == %s", args.unitTests)
logger.info("unwritable == %s", args.unwritable)
logger.info("usdPath == %s", args.usdPath)
logger.info("verbose == %s", args.verbose)
if args.unitTests:
logger.info("Running unit tests")
from ..tests.test_node_generator import run_tests as run_tests_general # noqa: PLE0402
from ..tests.test_node_generator_attributes import run_tests as run_tests_attributes # noqa: PLE0402
from ..tests.test_node_generator_data_types import run_tests as run_tests_data_types # noqa: PLE0402
from ..tests.test_node_generator_illegal import run_tests as run_tests_illegal # noqa: PLE0402
run_tests_general()
run_tests_data_types()
run_tests_illegal()
run_tests_attributes()
# Create the settings object from the list of settings specified on the command line.
# Every setting keyword is assumed to be a boolean, set to true when it is passed in.
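# For example, passing "--settings mySetting" (a hypothetical setting name) has the same effect as
# calling setattr(settings, "mySetting", True) below.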
settings = Settings()
if args.settings is not None:
for setting in args.settings:
try:
setattr(settings, setting, True)
except AttributeError as error:
raise ParseError(f"{setting} is not in the known settings list [{settings}]") from error
# If there is a node to parse then do so
node_interface_wrapper = None
if not args.nodeFile and (args.docs or args.cpp or args.template or args.python or args.tests):
logger.error("Cannot generate code unless you specify a nodeFile")
if not args.nodeFile:
return
try:
# Read in the standard set of category definitions if it can be found
categories_allowed = {}
if args.configDirectory is not None:
config_dir_type_path = Path(args.configDirectory, "CategoryConfiguration.json")
if config_dir_type_path.is_file():
categories_allowed = get_category_definitions(config_dir_type_path)
base_name, node_ext = os.path.splitext(os.path.basename(args.nodeFile.name))
if node_ext != ".ogn":
logger.error("Node files must have the .ogn extension")
return
if (args.python or args.docs or args.tests) and not args.module:
logger.error("When generating Python code or documentation you must include the 'module' argument")
return
node_interface_wrapper = NodeInterfaceWrapper(
args.nodeFile,
extension=args.extension,
config_directory=args.configDirectory,
categories_allowed=categories_allowed,
)
logger.info("Parsed interface for %s", node_interface_wrapper.node_interface.name)
try:
all_supported = True
node_interface_wrapper.check_support()
except UnimplementedError as error:
all_supported = False
logger.warning("Some attributes are not supported. Only documentation will be generated.\n\t%s", error)
# Applying the type definitions make them take immediate effect, which means adding/modifying members of
# the AttributeManager class hierarchy.
if args.typeDefinitions is not None:
type_definition_path = Path(args.typeDefinitions)
if type_definition_path.is_file():
apply_type_definitions(args.typeDefinitions)
elif not type_definition_path.is_absolute():
config_dir_type_path = Path(args.configDirectory, args.typeDefinitions)
if config_dir_type_path.is_file():
apply_type_definitions(config_dir_type_path)
else:
raise ParseError(
f"Type definitions '{args.typeDefinitions}' not found in"
f" config directory '{args.configDirectory}'"
)
else:
raise ParseError(f"Absolute type definition path '{args.typeDefinitions}' not found")
# Sanity check to see if there is a Python file of the same name as the .ogn file but the language was
# not specified as Python.
if node_interface_wrapper.node_interface.language != LanguageTypeValues.PYTHON:
python_file_name = args.nodeFile.name.replace(".ogn", ".py")
if os.path.isfile(python_file_name):
raise ParseError(f"Python node file {python_file_name} exists but language was not set to Python")
# If there is no generation happening then emit a message indicating the success of the parse.
# (Failure of the parse would have already been indicated by a ParseError exception)
if not args.docs and not args.cpp and not args.python:
print(f"Node file {args.nodeFile.name} successfully validated")
configuration = GeneratorConfiguration(
args.nodeFile.name,
node_interface_wrapper.node_interface,
args.extension,
args.module,
base_name,
None,
args.verbose,
settings,
)
# The node interface may have an override on the path - get rid of it if the icon isn't being generated
configuration.destination_directory = args.icons
node_interface_wrapper.node_interface.icon_path = generate_icon(configuration) if args.icons else None
configuration.destination_directory = args.docs
_ = generate_documentation(configuration) if args.docs else None
configuration.destination_directory = str(Path(args.icons).parent) if args.icons else None
_ = generate_node_info(configuration) if args.docs and args.icons else None
configuration.destination_directory = args.cpp
_ = generate_cpp(configuration, all_supported) if args.cpp else None
configuration.destination_directory = args.template
_ = generate_template(configuration) if args.template else None
configuration.destination_directory = args.python
_ = generate_python(configuration) if args.python and all_supported else None
configuration.destination_directory = args.tests
_ = generate_tests(configuration) if args.tests and all_supported else None
configuration.destination_directory = args.usdPath
_ = generate_usd(configuration) if args.usdPath and all_supported else None
# The intermediate directory contains a tag file per-node that can be used to determine if the code generator
# has been run since the last time the .ogn file was modified. The cost is that deletion of generated files
# will not trigger their rebuild, but as the information of which files are generated is only known after
# processing that is an acceptable tradeoff. (The alternative would be a much more verbose system that creates
# a separate tag per generated file with all of the extra build dependencies required to make that work.)
if args.intermediate:
logger.info("Tagging the file as being built")
intermediate_tag_path = os.path.join(args.intermediate, f"{os.path.basename(args.nodeFile.name)}.built")
with open(intermediate_tag_path, "w", newline="\n", encoding="utf-8") as tag_fd:
tag_fd.write("The presence of this file tags the last time its .ogn file was processed")
if args.unwritable:
logger.info("Tagging the generated directory as unwritable")
unwritable_tag_path = os.path.join(args.unwritable, UNWRITABLE_TAG_FILE)
with open(unwritable_tag_path, "w", newline="\n", encoding="utf-8") as tag_fd:
tag_fd.write("The presence of this file ensures the directory will not regenerate at runtime")
except Exception as error:
raise ParseError(f"{os.path.basename(args.nodeFile.name)} failed") from error
if __name__ == "__main__":
main()
| 20,790 | Python | 43.61588 | 119 | 0.631746 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_template.py | """Support for generating an annotated C++ template class for OmniGraph Nodes.
Exports:
generate_template: Create a NODE.cpp or NODE.py template file containing sample uses of the generated interface
"""
from typing import List, Optional
from .attributes.AttributeManager import AttributeManager
from .attributes.naming import INPUT_GROUP, OUTPUT_GROUP, STATE_GROUP, namespace_of_group
from .keys import LanguageTypeValues
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, UnimplementedError, logger, to_comment, to_cpp_comment
__all__ = ["generate_template"]
class NodeTemplateGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate an annotated template class for a node"""
def __init__(self, configuration: GeneratorConfiguration):
"""Set up the generator and output the annotated template class for the node
Checks the language support.
"""
self.template_extension = None
if configuration.node_interface.language == LanguageTypeValues.CPP:
self.template_extension = "cpp"
elif configuration.node_interface.language == LanguageTypeValues.PYTHON:
self.template_extension = "py"
else:
language_name = "|".join(LanguageTypeValues.ALL[configuration.node_interface.language])
raise UnimplementedError(f"Template generation not supported for '{language_name}' files")
# This needs the extension set to properly define the interface file name so do it after that
super().__init__(configuration)
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the template file"""
return self.base_name + "." + self.template_extension
# ----------------------------------------------------------------------
def generate_cpp_attribute_info(self, attribute_list: List[AttributeManager], attribute_group: str):
"""Generate the comments explaining how to access the values of attributes in the list
Args:
attribute_list: List of attributes for which explanations are to be emitted
attribute_group: Enum with the attribute's group (input, output, or state)
"""
namespace = namespace_of_group(attribute_group)
for attribute in attribute_list:
self.out.write()
if attribute_group != INPUT_GROUP:
if attribute.fabric_needs_counter():
self.out.write("// Before setting array outputs you must first set their size to allocate space")
self.out.write(f"// db.{namespace}.{attribute.base_name}.size() = newOutputSize;")
self.out.write(f"// auto& output{attribute.base_name} = db.{namespace}.{attribute.base_name}();")
else:
self.out.write(f"// const auto& input_value = db.{namespace}.{attribute.base_name}();")
role = attribute.cpp_role_name()
if role:
self.out.write("// Roles for role-based attributes can be found by name using this member")
self.out.write(f"// auto roleName = db.{namespace}.{attribute.base_name}.role();")
# ----------------------------------------------------------------------
def generate_cpp_template(self):
"""Write out a template for a C++ node describing use of the current OGN configuration.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ template
"""
# Rely on the formatter to insert the copyright here
node_description = to_cpp_comment(self.node_interface.description)
self.out.write(f"{node_description}")
self.out.write(f"#include <{self.base_name}Database.h>")
self.out.write(f"class {self.base_name}:")
self.out.write("{")
if self.out.indent("public:"):
self.out.write(f"static bool compute({self.base_name}Database& db)")
if self.out.indent("{"):
input_attributes = self.node_interface.all_input_attributes()
if input_attributes:
self.out.write("// ======================================================================")
self.out.write("// Use these methods to access the input values")
self.out.write("// ======================================================================")
self.generate_cpp_attribute_info(self.node_interface.all_input_attributes(), INPUT_GROUP)
self.out.write()
output_attributes = self.node_interface.all_output_attributes()
if output_attributes:
self.out.write("// ======================================================================")
self.out.write("// Use these methods to set the output values")
self.out.write("// ======================================================================")
self.generate_cpp_attribute_info(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
self.out.write()
state_attributes = self.node_interface.all_state_attributes()
if state_attributes:
self.out.write("// ======================================================================")
self.out.write("// Use these methods to set the state values")
self.out.write("// ======================================================================")
self.generate_cpp_attribute_info(state_attributes, STATE_GROUP)
self.out.write()
self.out.write("// ======================================================================")
self.out.write("// If you have predefined any tokens you can access them by name like this")
self.out.write("// ======================================================================")
self.out.write("auto myColorToken = db.tokens.color;")
self.out.write()
self.out.write("return true;")
self.out.exdent("}")
self.out.exdent("}")
# ----------------------------------------------------------------------
def generate_python_attribute_info(self, attribute_list: List[AttributeManager], attribute_group: str):
"""Generate the comments explaining how to access the values of attributes in the list
Args:
attribute_list: List of attributes for which explanations are to be emitted
attribute_group: Enum with the attribute's group (input, output, or state)
"""
namespace = namespace_of_group(attribute_group)
for attribute in attribute_list:
self.out.write()
if attribute_group != INPUT_GROUP:
if attribute.fabric_needs_counter():
self.out.write("# Before setting array outputs you must first set their size to allocate space")
self.out.write(f"# db.{namespace}.{attribute.base_name}_size = new_output_size")
self.out.write(f"# db.{namespace}.{attribute.base_name} = new_output_value")
else:
self.out.write(f"# input_value = db.{namespace}.{attribute.base_name}")
role = attribute.python_role_name()
if role:
self.out.write("# Roles for role-based attributes can be found by name using this member")
self.out.write(f"# role_name = db.role.{namespace}.{attribute.base_name}")
# ----------------------------------------------------------------------
def generate_python_template(self):
"""Write out the code associated with the node.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
self.out.write('"""')
self.out.write(f"This is the implementation of the OGN node defined in {self.base_name}.ogn")
self.out.write('"""')
self.out.write()
self.out.write("# Array or tuple values are accessed as numpy arrays so you probably need this import")
self.out.write("import numpy")
self.out.write()
self.out.write()
if self.out.indent(f"class {self.base_name}:"):
node_description = to_comment("", self.node_interface.description, 1)
self.out.write('"""')
self.out.write(node_description)
self.out.write('"""')
self.out.write("@staticmethod")
if self.out.indent("def compute(db) -> bool:"):
self.out.write('"""Compute the outputs from the current input"""\n')
if self.out.indent("try:"):
self.out.write("# With the compute in a try block you can fail the compute by raising an exception")
input_attributes = self.node_interface.all_input_attributes()
if input_attributes:
self.out.write("# ======================================================================")
self.out.write("# Use these methods to access the input values")
self.out.write("# ======================================================================")
self.generate_python_attribute_info(self.node_interface.all_input_attributes(), INPUT_GROUP)
self.out.write()
output_attributes = self.node_interface.all_output_attributes()
if output_attributes:
self.out.write("# ======================================================================")
self.out.write("# Use these methods to set the output values")
self.out.write("# ======================================================================")
self.generate_python_attribute_info(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
self.out.write()
state_attributes = self.node_interface.all_state_attributes()
if state_attributes:
self.out.write("# ======================================================================")
self.out.write("# Use these methods to set the state values")
self.out.write("# ======================================================================")
self.generate_python_attribute_info(state_attributes, STATE_GROUP)
self.out.write()
self.out.write("pass")
self.out.exdent()
if self.out.indent("except Exception as error:"):
self.out.write("# If anything causes your compute to fail report the error and return False")
self.out.write("db.log_error(str(error))")
self.out.write("return False")
self.out.exdent()
self.out.write()
self.out.write("# Even if inputs were edge cases like empty arrays, correct outputs mean success")
self.out.write("return True")
self.out.exdent()
self.out.exdent()
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Write out a template implementation of the node in the requested language.
Raises:
NodeGenerationError: When there is a failure in the generation of the template
"""
if self.node_interface.language == LanguageTypeValues.CPP:
self.generate_cpp_template()
elif self.node_interface.language == LanguageTypeValues.PYTHON:
self.generate_python_template()
# ======================================================================
def generate_template(configuration: GeneratorConfiguration) -> Optional[str]:
"""Create support files for the C++ interface to a node
For now only a header file is generated for the C++ interface, though there will probably be multiple files
generated in the future. For that reason this single point of contact was created for outside callers.
Args:
configuration: Information defining how and where the template will be generated
Returns:
String containing the generated template class definition or None if its generation was not enabled
Raises:
NodeGenerationError: When there is a failure in the generation of the header
UnimplementedError: When the language of the node does not support template generation
"""
if not configuration.node_interface.can_generate("template"):
return None
logger.info("Generating Template Node Implementation Class")
generator = NodeTemplateGenerator(configuration)
generator.generate_interface()
return str(generator.out)
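# Minimal usage sketch, mirroring the call sequence in the node generator's main() (all names here are
# hypothetical):
#   configuration = GeneratorConfiguration(
#       "OgnMyNode.ogn", node_interface, "omni.my.extension", "omni.my.extension", "OgnMyNode", None, False, settings
#   )
#   configuration.destination_directory = "/path/to/output"
#   template_source = generate_template(configuration)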
| 13,115 | Python | 54.812766 | 120 | 0.535951 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/type_definitions.py | """Handle the mapping of OGN types onto the various generated code types"""
import json
from contextlib import suppress
from io import TextIOWrapper
from pathlib import Path
from typing import IO, Dict, List, Tuple, Union
from .attributes.AttributeManager import CppConfiguration
from .attributes.management import get_attribute_manager_type
from .keys import NodeTypeKeys
from .utils import ParseError, is_comment, logger
class __TypeDefinitions:
"""Use the function apply_type_definitions instead of directly instantiating this class"""
def __init__(self, type_definitions: Union[str, IO, Dict, Path, None]):
"""Initialize the type definition maps based on a JSON definition
Internal:
__definitions: Dictionary of type information read from the definition description
"""
try:
self.__definitions = {}
if type_definitions is None:
pass
elif isinstance(type_definitions, str):
self.__definitions = json.loads(type_definitions)[NodeTypeKeys.TYPE_DEFINITIONS]
elif isinstance(type_definitions, dict):
logger.info("Using type definitions straight from a dictionary")
self.__definitions = type_definitions[NodeTypeKeys.TYPE_DEFINITIONS]
elif isinstance(type_definitions, TextIOWrapper):
self.__definitions = json.load(type_definitions)[NodeTypeKeys.TYPE_DEFINITIONS]
elif isinstance(type_definitions, Path):
self.__definitions = json.load(type_definitions.open("r"))[NodeTypeKeys.TYPE_DEFINITIONS]
else:
raise ParseError(f"Type definition type not handled - {type_definitions}")
except OSError as error:
raise ParseError(f"File error when parsing type definitions {type_definitions} - {error}") from None
except json.decoder.JSONDecodeError as error:
raise ParseError(f"Invalid JSON formatting in file {type_definitions} - {error}") from None
# --------------------------------------------------------------------------------------------------------------
def __apply_cpp_definitions(self, configuration_information: Dict[str, Tuple[str, List[str]]]):
"""Apply type definitions from the definition to the C++ types on the attribute managers
Args:
configuration_information: Dictionary whose keys are the names of attribute types and whose values are
a tuple of the C++ data type name for that attribute type and a list of files to be included to use it
"""
for attribute_type_name, attribute_type_configuration in configuration_information.items():
# Empty configuration means leave it as-is
if not attribute_type_configuration:
continue
if is_comment(attribute_type_name):
continue
# Take a single string to mean the type definition, with no extra includes required
if isinstance(attribute_type_configuration, str):
if attribute_type_configuration:
attribute_type_configuration = [attribute_type_configuration]
else:
attribute_type_configuration = []
attribute_manager = get_attribute_manager_type(attribute_type_name)
if attribute_manager is None:
raise ParseError(f"Could not find attribute manager type for configuration of {attribute_type_name}")
# If there is a change it will have a type and include file list, else skip this one
with suppress(AttributeError, KeyError):
cast_type = attribute_type_configuration[0]
include_files = [] if len(attribute_type_configuration) < 2 else attribute_type_configuration[1]
if not isinstance(cast_type, str):
raise ParseError(
f"Cast type for attribute type {attribute_type_name} must be a string, not {cast_type}"
)
if not isinstance(include_files, list):
raise ParseError(
f"Include files for attribute type {attribute_type_name} must be a list, not {include_files}"
)
attribute_manager.override_cpp_configuration(cast_type, include_files, cast_required=False)
attribute_manager.CPP_CONFIGURATION[attribute_manager.tuple_count] = CppConfiguration(
base_type_name=cast_type, include_files=include_files
)
# --------------------------------------------------------------------------------------------------------------
def apply_definitions(self):
"""Apply any type definitions to the attribute manager to which they apply"""
for language, configuration_information in self.__definitions.items():
if language == "c++":
self.__apply_cpp_definitions(configuration_information)
elif not is_comment(language):
raise ParseError(f"Configuration for language '{language}' is not supported")
# ==============================================================================================================
def apply_type_definitions(type_definitions: Union[str, IO, Dict, Path, None]):
"""Parse the given type definitions and apply them to the attribute managers they describe.
Args:
type_definitions: Type definitions as a JSON string, an open file, a dictionary, or a Path to a JSON file
Raises:
ParseError: If the type definitions could not be read, parsed, or applied
"""
definitions = __TypeDefinitions(type_definitions)
definitions.apply_definitions()
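# A sketch of the accepted dictionary shape (the "c++" language section maps attribute type names to a
# C++ type and its required include files; the specific names below are hypothetical):
#   apply_type_definitions({
#       NodeTypeKeys.TYPE_DEFINITIONS: {
#           "c++": {
#               "myType": ["MyCppType", ["myExtension/MyCppType.h"]],
#           }
#       }
#   })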
| 5,428 | Python | 52.752475 | 117 | 0.607222 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_test_imports.py | """Contains the function that will generate the __init__.py file for the ogn/tests directory."""
import hashlib
import sys
from importlib import import_module
from pathlib import Path
from typing import List, Optional
from .nodes import NodeGenerationError
from .ThreadsafeOpen import ThreadsafeOpen
from .utils import logger
# ==============================================================================================================
def import_tests_in_directory(module_file: str, module_name: str):
"""Find all of the .ogn-generated tests in a module's directory and import them into that module.
This will only be called from the generated test directory __init__.py file, generated below by the
import_file_contents() function
Args:
module_file: Full path of the __init__.py file for the generated test directory (e.g. its __file__)
module_name: Module name at which the generated test directory is imported (e.g. its __name__)
"""
this_dir = Path(module_file).parent
this_module = sys.modules[module_name]
test_module_files = this_dir.glob("Test*.py")
for test_module_file in test_module_files:
test_module_name = test_module_file.stem
try:
test_module = import_module(f".{test_module_name}", module_name)
setattr(this_module, test_module_name, test_module.TestOgn)
except Exception as error: # noqa: PLW0703
logger.warning(
"Failed to import test %s in module %s (%s) - skipping.", test_module_name, module_name, error
)
# ==============================================================================================================
def import_file_contents(file_to_write: Optional[Path] = None) -> List[str]:
"""Returns the contents of the tests/__init__.py file that imports the given list of test classes.
If a file_to_write is passed in then the contents are written there before returning.
Not part of the interface, but not hidden with a dunder so that a test can be written against it."""
# To enable easier upgrades the test directory initialization mostly happens in a utility function
file_contents = [
'"""====== GENERATED BY omni.graph.tools - DO NOT EDIT ======"""',
"import omni.graph.tools as ogt",
"ogt.import_tests_in_directory(__file__, __name__)",
]
# Use the md5 to avoid overwriting the file multiple times, which could trigger extension reload
expected_md5 = hashlib.md5(bytes("".join([f"{line}\n" for line in file_contents]), "utf-8")).hexdigest()
if file_to_write is not None:
import_directory = file_to_write.parent
# First ensure that the directory in which the file lives exists
if not import_directory.exists():
try:
import_directory.mkdir(mode=0o777, parents=True, exist_ok=True)
except Exception as error:
raise NodeGenerationError(f"Cannot create test directory {import_directory}") from error
# If the path is not a directory then there is a serious problem that cannot be fixed safely here.
if not import_directory.is_dir():
raise NodeGenerationError(f"Cannot write __init__.py file to non-directory {import_directory}")
if file_to_write.exists():
with open(file_to_write, "rb") as f:
if hashlib.md5(f.read()).hexdigest() == expected_md5:
return []
# ThreadsafeOpen will report a warning if there's a problem
with ThreadsafeOpen(file_to_write, "w", newline="\n") as test_init_fd:
test_init_fd.writelines([f"{line}\n" for line in file_contents])
return file_contents
# ==============================================================================================================
def ensure_test_is_imported(test_class_name: str, test_directory: Path):
"""Reads the tests __init__.py file and verifies that a test file is imported in it
Args:
test_class_name: Name of the test class, which is also the name of the file it lives in
test_directory: Directory in which the test file lives and where the import will be added if necessary
Raises:
NodeGenerationError if there was a problem adding the test import
"""
try:
test_init_file = test_directory / "__init__.py"
needs_regeneration = True
if test_init_file.exists():
# For backward compatibility - check to see if the file in place is the latest version.
# If it is then just leave it, otherwise flag it for regeneration.
with open(test_init_file, "r", encoding="utf-8") as init_fd:
needs_regeneration = init_fd.readline().find("GENERATED") < 0
# If anything is out of date the file has to be rewritten
if needs_regeneration:
import_file_contents(test_init_file)
except Exception as error: # noqa: PLW0703
logger.error("Failed to create test import for %s = %s", test_class_name, error)
# ==============================================================================================================
def generate_test_imports(test_directory: Path, write_file: bool = False) -> List[str]:
"""Generates a set of imports for the ogn test modules contained in a directory.
Args:
test_directory: Directory containing all of the ogn test scripts. No subdirectories are checked.
write_file: If True then write the __init__.py file into the named directory, otherwise just return the contents
Returns:
List of lines of Python code that comprise the test import code required in the __init__.py file
"""
# Get the statements to perform safe import of the test files
test_init_file = test_directory / "__init__.py" if write_file else None
file_contents = import_file_contents(test_init_file)
return file_contents
| 5,936 | Python | 48.066115 | 120 | 0.616914 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_cpp.py | # noqa: PLC0302
"""Support for generating C++ interface code for OmniGraph Nodes."""
import json
from itertools import zip_longest
from typing import List, Optional
from .attributes.AttributeManager import AttributeManager
from .attributes.management import list_without_runtime_attributes, split_attribute_list
from .attributes.naming import INPUT_GROUP, INPUT_NS, OUTPUT_GROUP, OUTPUT_NS, STATE_GROUP, STATE_NS, namespace_of_group
from .keys import CudaPointerValues, MemoryTypeValues
from .nodes import NodeInterfaceGenerator
from .utils import (
OMNI_GRAPH_CORE_EXTENSION,
GeneratorConfiguration,
MetadataKeyOutput,
NameManager,
ParseError,
logger,
to_cpp_comment,
to_cpp_str,
)
__all__ = ["generate_cpp"]
# ======================================================================
def grouper(iterable, max_size: int):
"""Returns the iterable decomposed into iterables of size "max_size", filling any excess with None"""
args = [iter(iterable)] * max_size
return zip_longest(*args, fillvalue=None)
# ======================================================================
class NodeCppGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate a C++ interface for a node
Attributes:
all_supported: True if all attributes in the node are of a supported type
declarations: List of declarations that need to appear after include files but before code
__name_manager: Manager for unique name shortening for unimportant internal generated names
preamble_code: List of include lines and related declarations that need to appear at the top of the header file
"""
def __init__(self, configuration: GeneratorConfiguration, all_supported: bool):
"""Set up the generator and output the C++ interface code for the node
Args:
configuration: Information used to configure the output
all_supported: True if the node's attributes are all of supported types
"""
super().__init__(configuration)
self.all_supported = all_supported
self.declarations = []
self.preamble_code = []
self.__name_manager = NameManager()
self.__needs_initialize = None
self.__has_deprecated_attributes = None
# ----------------------------------------------------------------------
def __calculate_per_attribute_flags(self):
"""Calculate the flags used to determine whether any attributes meet a certain criteria.
Only call this after all parsing has completed.
"""
if self.__needs_initialize is None:
all_attributes = (
self.node_interface.all_input_attributes()
+ self.node_interface.all_output_attributes()
+ self.node_interface.all_state_attributes()
)
self.__needs_initialize = False
self.__has_deprecated_attributes = False
for attribute in all_attributes:
if attribute.is_deprecated:
self.__has_deprecated_attributes = True
if attribute.metadata or not attribute.is_required or attribute.is_deprecated:
self.__needs_initialize = True
@property
def needs_initialize(self) -> bool:
self.__calculate_per_attribute_flags()
return self.__needs_initialize
@property
def has_deprecated_attributes(self) -> bool:
self.__calculate_per_attribute_flags()
return self.__has_deprecated_attributes
# ----------------------------------------------------------------------
def nm(self, variable_name: str) -> str:
"""Returns unique, possibly shortened version of the unique variableName"""
return self.__name_manager.name(variable_name)
# ----------------------------------------------------------------------
def database_class_name(self):
"""Returns the name of the generated database class"""
return f"{self.base_name}Database"
# ----------------------------------------------------------------------
def state_manager_name(self):
"""Returns the name of the static object that will be the state manager for this node type"""
return f"sm_stateManager{self.base_name}"
# ----------------------------------------------------------------------
def generator_version_name(self):
"""Returns the name of the static object that will hold the code generator version used for this node type"""
return f"sm_generatorVersion{self.base_name}"
# ----------------------------------------------------------------------
def target_version_name(self):
"""Returns the name of the static object that will hold the code target version used for this node type.
The code target version is the version of omni.graph.core for which the code was generated.
"""
return f"sm_targetVersion{self.base_name}"
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the header file"""
return f"{self.database_class_name()}.h"
# ----------------------------------------------------------------------
def pre_interface_generation(self):
"""Create the header information independent of the node itself"""
self.preamble_code.append("#include <carb/InterfaceUtils.h>")
self.preamble_code.append("#include <omni/graph/core/NodeTypeRegistrar.h>")
self.preamble_code.append("#include <omni/graph/core/iComputeGraph.h>")
self.preamble_code.append("#include <omni/graph/core/CppWrappers.h>")
self.preamble_code.append("#include <carb/flatcache/Enums.h>")
self.preamble_code.append("using carb::flatcache::PtrToPtrKind;")
if self.extension == OMNI_GRAPH_CORE_EXTENSION:
self.preamble_code.append('#include "Token.h"')
self.preamble_code.append("#include <map>")
self.preamble_code.append("#include <vector>")
self.preamble_code.append("#include <tuple>")
self.preamble_code.append("#include <omni/graph/core/OgnHelpers.h>")
if self.node_interface.icon_path:
self.preamble_code.append("#include <carb/tokens/TokensUtils.h>")
self.add_attribute_type_setup()
# ----------------------------------------------------------------------
def get_file_inclusions(self) -> str:
"""Return code with the discovered include files in an order that makes sense"""
pxr_includes = []
regular_includes = []
if self.node_interface.scheduling_hints is not None:
regular_includes += self.node_interface.scheduling_hints.cpp_includes_required()
if self.has_deprecated_attributes:
regular_includes.append("#include <omni/graph/core/IInternal.h>")
# Partition the files into those included from USD and those not. By doing this the warnings
# that arise from including USD files can be harmlessly silenced.
for include_file in self.preamble_code:
if include_file.find("/ogn/UsdTypes.h") < 0:
regular_includes.append(include_file)
else:
pxr_includes.append(include_file)
# Poor include practices make it necessary to include the pxr information used by Graph.h just
# to include the definition of the direct IToken interface, needed since accessing it indirectly
# causes complaints from carb::Framework::tryAcquireInterface().
if self.extension == OMNI_GRAPH_CORE_EXTENSION:
pxr_includes.append("#include <omni/graph/core/PreUsdInclude.h>")
pxr_includes.append("#include <pxr/usd/sdf/path.h>")
pxr_includes.append("#include <pxr/usd/usd/stage.h>")
pxr_includes.append("#include <pxr/usd/usd/prim.h>")
pxr_includes.append("#include <omni/graph/core/PostUsdInclude.h>")
if pxr_includes:
# The namespace makes USD access easier, and it's used everywhere already
pxr_includes.append("using namespace pxr;\n")
return "\n".join(pxr_includes + regular_includes)
# ----------------------------------------------------------------------
def post_interface_generation(self):
"""Insert the file header information, now that it is known"""
header = "#pragma once\n\n"
# Protect the CPU and CUDA code so that the include file only gets one set of definitions.
# Doing it this way rather than generating two files keeps the include rules simple.
if self.node_interface.has_cuda_attributes:
header += "#ifndef __CUDACC__\n"
header += self.get_file_inclusions()
header += "\n"
header += "\n".join(self.declarations)
header += "\n"
self.out.prepend(header)
if self.node_interface.has_cuda_attributes:
self.out.write("#else")
self.generate_cuda_code()
self.out.write("#endif")
# ----------------------------------------------------------------------
def generate_registration_macro(self):
"""Generate the macro that will be called after the node definition to create the registration manager.
This has to be done in two steps like this since the macro will reference code that performs a template-based
introspection on the node class, which can only happen after it has been defined.
This relies on instantiation of the OgnHelpers.h macros "DECLARE_OGN_NODES()" and "INITIALIZE_OGN_NODES()"
in the proper spots.
"""
template_args = ", ".join([self.base_name, self.database_class_name()])
constructor_args = ", ".join(
[f'"{self.node_interface.name}"', f"{self.node_interface.version}", f'"{self.extension}"']
)
self.out.write("#define REGISTER_OGN_NODE() \\")
if self.out.indent("namespace { \\"):
self.out.write(f"ogn::NodeTypeBootstrapImpl<{template_args}> s_registration({constructor_args}); \\")
self.out.exdent()
self.out.write("}")
# ----------------------------------------------------------------------
def add_attribute_type_setup(self):
"""Write out the code to generate the include files used by all attributes.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
# Required include files and declarations have to be first
includes = []
declarations = []
for input_attribute in self.node_interface.all_input_attributes():
includes += input_attribute.cpp_includes()
declarations += input_attribute.cpp_declarations()
for output_attribute in self.node_interface.all_output_attributes():
includes += output_attribute.cpp_includes()
declarations += output_attribute.cpp_declarations()
for state_attribute in self.node_interface.all_state_attributes():
includes += state_attribute.cpp_includes()
declarations += state_attribute.cpp_declarations()
self.preamble_code += [f"#include <{include_file}>" for include_file in sorted(set(includes))]
if declarations:
self.preamble_code += sorted(set(declarations))
# ----------------------------------------------------------------------
def generate_registration(self):
"""Write out the code to register the node.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
self.out.write(
f'REGISTER_NODE_TYPE({self.base_name}, "{self.node_interface.name}", {self.node_interface.version})'
)
# ----------------------------------------------------------------------
def generate_attribute_static_data(self):
"""Generate the code required to create the static data structures for unchanging parts of the attributes."""
# Namespace it to create file-local objects with easy access
self.out.write(f"namespace {self.base_name}Attributes")
self.out.write("{")
def generate_static_attributes(attribute_list: List[AttributeManager], attribute_group: str):
"""Helper function that generates the static attribute support classes for all attributes in the list
Args:
attribute_list: List of attributes to generate
attribute_group: Enum with the attribute's group (input, output, or state)
"""
namespace = namespace_of_group(attribute_group)
self.out.write(f"namespace {namespace}")
self.out.write("{")
for attribute in attribute_list:
self.out.write(attribute.cpp_typedef_definitions())
(_initializer_name, initializer_declaration) = attribute.cpp_initializer()
self.out.write(initializer_declaration)
self.out.write("}")
generate_static_attributes(self.node_interface.all_input_attributes(), attribute_group=INPUT_GROUP)
generate_static_attributes(self.node_interface.all_output_attributes(), attribute_group=OUTPUT_GROUP)
generate_static_attributes(self.node_interface.all_state_attributes(), attribute_group=STATE_GROUP)
self.out.write("}")
self.out.write(f"using namespace {self.base_name}Attributes;")
# ----------------------------------------------------------------------
def generate_attribute_accessors(self, attribute_list: List[AttributeManager], attribute_group: str):
"""Write out the code to create the declarations of the attribute accessor pointers.
Args:
attribute_list: List of attributes on the node of that type
attribute_group: Enum with the attribute's group (input, output, or state)
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
# If no attributes of this type then the type will not be referenced and does not need generating
if not attribute_list:
return
namespace = namespace_of_group(attribute_group)
self.out.write()
if self.out.indent("struct {"):
# Write out the declaration for the internal pointers that point into Fabric.
has_local_declarations = False
for attribute in attribute_list:
declaration = attribute.datamodel_local_variables()
if declaration:
self.out.write(declaration)
has_local_declarations = True
if has_local_declarations:
self.out.write()
# The accessible structures are the wrappers through which the data will be accessed. They will
# all have an operator() to access the data type directly, and may have other convenience methods
# for managing things like iterating, resizing, copying, etc.
for attribute in attribute_list:
self.out.write(attribute.datamodel_accessor_declaration())
self.out.exdent(f"}} {namespace};")
# ----------------------------------------------------------------------
def write_n_per_line(self, list_to_write: List[str], stride: int):
"""Write out a comma-separated list with "stride" elements per line
There is no filling of extra entries, or quoting of the provided strings, so writing out a list of the
string names for [1, 10] by fours would be:
one, two, three, four,
five, six, seven, eight,
nine, ten
Args:
list_to_write: List of strings to split out into smaller sublists per line
stride: Number of elements per line
"""
list_size = len(list_to_write)
suffix = ","
for i in range(0, list_size, stride):
if i + stride >= list_size: # Avoid the trailing comma on the last line
suffix = ""
self.out.write(f"{', '.join(list_to_write[i:i + stride])}{suffix}")
# ----------------------------------------------------------------------
def get_attributes_by_memory_type(self, attribute_group: str):
"""Return a trio of attribute lists, partitioned by the type of memory their data occupies.
Args:
attribute_group: Enum with the attribute's group (input, output, or state)
Returns:
(cpu_attributes, : List of attributes exclusively living on the CPU
cuda_attributes, : List of attributes exclusively living on the GPU in CUDA format
any_attributes) : List of attributes that may live in either location
"""
if attribute_group == INPUT_GROUP:
attributes = self.node_interface.all_input_attributes()
elif attribute_group == OUTPUT_GROUP:
attributes = self.node_interface.all_output_attributes()
else:
attributes = self.node_interface.all_state_attributes()
cpu_attributes = [attribute for attribute in attributes if attribute.memory_storage() == MemoryTypeValues.CPU]
cuda_attributes = [attribute for attribute in attributes if attribute.memory_storage() == MemoryTypeValues.CUDA]
any_attributes = [attribute for attribute in attributes if attribute.memory_storage() == MemoryTypeValues.ANY]
return (cpu_attributes, cuda_attributes, any_attributes)
# ----------------------------------------------------------------------
# Enum for the types of handles for which to generate extraction code
HANDLE_INPUT = 0
HANDLE_INPUT_BUNDLE = 1
HANDLE_OUTPUT = 2
HANDLE_OUTPUT_BUNDLE = 3
HANDLE_STATE = 4
HANDLE_STATE_BUNDLE = 5
# List of (get_method, handle_type, handle_object, namespace) for each type of method.
HANDLE_TABLE = {
HANDLE_INPUT: ("getAttributesR", "ConstAttributeDataHandle", "inputDataHandles", INPUT_NS),
HANDLE_INPUT_BUNDLE: ("getAttributesR", "ConstAttributeDataHandle", "inputDataBundleHandles", INPUT_NS),
HANDLE_OUTPUT: ("getAttributesW", "AttributeDataHandle", "outputDataHandles", OUTPUT_NS),
HANDLE_OUTPUT_BUNDLE: ("getAttributesW", "AttributeDataHandle", "outputBundleDataHandles", OUTPUT_NS),
HANDLE_STATE: ("getAttributesW", "AttributeDataHandle", "stateDataHandles", STATE_NS),
HANDLE_STATE_BUNDLE: ("getAttributesW", "AttributeDataHandle", "stateBundleDataHandles", STATE_NS),
}
# Runtime attribute type correspondences
# (accessor_method, handle_type, wrapper_type, namespace, access type, is_const_cast required to reset)
RUNTIME_TABLE = {
HANDLE_INPUT: ("getConstAttributeDataHandle", "ConstAttributeDataHandle", INPUT_NS, True),
HANDLE_OUTPUT: ("getAttributeDataHandle", "AttributeDataHandle", OUTPUT_NS, False),
HANDLE_STATE: ("getAttributeDataHandle", "AttributeDataHandle", STATE_NS, False),
}
# ----------------------------------------------------------------------
def write_attrib_metadata(self, attribute: AttributeManager):
"""Write attribute metadata"""
for key_raw, value_raw in attribute.metadata.items():
cpp_key = MetadataKeyOutput.cpp_name_from_key(key_raw)
if cpp_key is None:
# Run it through json to handle escaping the quotes
cpp_key = json.dumps(key_raw)
# Handle lists of strings or just strings
if isinstance(value_raw, list):
value = '"' + ",".join([x.replace('"', '\\"') for x in value_raw]) + '"'
else:
value = json.dumps(value_raw)
self.out.write(f"iAttribute->setMetadata(attributeObj, {cpp_key}, {value});")
# ----------------------------------------------------------------------
def __generate_data_handles(
self, attributes: list, group_index, handle_type: int, check_if_handle_required: bool = False
):
"""Shared code to initialize data handles for all types of attributes
Args:
attributes: List of attributes for which to generate the handles
group_index: Index of this handle group among all others with the same types
handle_type: Enumerated value used to switch the methods and data members generated
check_if_handle_required: If True do a more expensive check to see if the handle needs to be generated
"""
if not attributes:
return
handle_data = self.HANDLE_TABLE[handle_type]
# The grouping operation might have left Nones in the list - prune them out to make later code more clear
actual_attributes = [attribute for attribute in attributes if attribute is not None]
if not actual_attributes:
return
# If the handle won't be checked in later code then don't get it now - it will only generate unused code
if check_if_handle_required and not any(
attribute.cpp_set_handle_at_runtime() for attribute in actual_attributes
):
return
# Example of line generated here for two input attributes:
# auto inputDataHandles = getAttributesR<
# ConstAttributeDataHandle, ConstAttributeDataHandle
# >(contextObj, nodeHandle, std::make_tuple(
# inputs::attribute1.m_token, inputs::attribute2.m_token
# )
# );
self.out.indent(f"auto {handle_data[2]}{group_index} = {handle_data[0]}<")
# The template arguments require one handle type per attribute in the tuple (written 4 per line for clarity)
self.write_n_per_line([handle_data[1]] * len(actual_attributes), 4)
self.out.indent(">(contextObj, nodeHandle, std::make_tuple(")
# The token arguments are the names of the attribute token declared earlier
tokens = [f"{handle_data[3]}::{attribute.cpp_variable_name()}.m_token" for attribute in actual_attributes]
self.write_n_per_line(tokens, 4)
self.out.exdent(")")
self.out.exdent(");")
# ----------------------------------------------------------------------
def generate_input_array_extraction(self, input_attributes: list, group_index, handle_type: int):
"""Generate the code to extract the information of the input array attributes' data in Fabric.
This presumes all handles were stored locally in the tuple named in the handle table entry.
Args:
input_attributes: List of input attributes
group_index: Which of the subgroupings of attributes is this?
handle_type: HANDLE_* index indicating which type of attribute is being generated
"""
if not input_attributes:
return
actual_attributes = list_without_runtime_attributes(input_attributes)
if not actual_attributes:
return
(_, _, handle_name, namespace) = self.HANDLE_TABLE[handle_type]
# Attributes have been grouped by memory type so the value can be extracted from any attribute in the list
memory_type = actual_attributes[0].memory_storage()
# "Any" and regular types are not extracted here, they are extracted on demand
# This is just added for bundles, for which we need the bundle handle
if memory_type != MemoryTypeValues.ANY and handle_type == self.HANDLE_INPUT_BUNDLE:
# Example line generated here for two attributes:
# std::tie(
# outputs.m_attribute1.m_ptrToData, outputs.m_attribute2.m_ptrToData
# ) = getDataR<
# const float*, const double*
# >(contextObj, {handle_name}{group_index});
extra_args = ""
get_function = "getDataR"
if memory_type != MemoryTypeValues.CPU:
get_function = "getDataRGpuAt"
if self.node_interface.cuda_pointer_type is not None:
cuda_location = CudaPointerValues.CPP[self.node_interface.cuda_pointer_type]
else:
cuda_location = CudaPointerValues.CPP[CudaPointerValues.CUDA]
extra_args = f", {cuda_location}"
self.out.indent("std::tie(")
pointer_names = [f"{attribute.fabric_pointer()}" for attribute in actual_attributes]
self.write_n_per_line(pointer_names, 4)
self.out.exdent(f") = {get_function}<")
self.out.indent()
data_types = []
for attribute in actual_attributes:
data_types.append(f"{attribute.fabric_raw_type()}*")
self.write_n_per_line(data_types, 4)
self.out.exdent(f">(contextObj, {handle_name}{group_index}{extra_args});")
# All of the attributes that are array types need to manage a counter pointer since that's how Fabric
# knows how big the data is.
for (index, attribute) in enumerate(actual_attributes):
accessor_name = f"{namespace}.{attribute.cpp_variable_name()}"
if attribute.fabric_needs_counter() and memory_type == MemoryTypeValues.CPU:
self.out.write(f"{accessor_name}.resetArrayWrapper();")
if attribute.cpp_set_handle_at_runtime():
extract_handle = f"std::get<{index}>({handle_name}{group_index})"
self.out.write(f"{accessor_name}.setHandle({extract_handle});")
self.out.write(f"{accessor_name}.setContext(contextObj);")
# ----------------------------------------------------------------------
def generate_writable_array_extraction(self, attributes: list, group_index: int, handle_type: int):
"""Generate the code to extract the information of the writable array attributes in Fabric.
Args:
attributes: List of writable attributes for which to extract pointers
group_index: Which of the subgroupings of attributes is this?
handle_type: HANDLE_* index indicating which type of attribute is being generated
"""
if not attributes:
return
# Writable attributes need a definite type to be extracted here. Others will be extracted on demand.
actual_attributes = list_without_runtime_attributes(attributes)
if not actual_attributes:
return
(_, _, handle_name, namespace) = self.HANDLE_TABLE[handle_type]
# All of the attributes that are array types need to manage a counter pointer since that's how Fabric
# knows how big the data is. They also have to keep track of the context and handle to allow for resizing,
# which would modify the Fabric memory locations.
for (index, attribute) in enumerate(actual_attributes):
accessor_name = f"{namespace}.{attribute.cpp_variable_name()}"
extract_handle = f"std::get<{index}>({handle_name}{group_index})"
if attribute.cpp_set_handle_at_runtime():
self.out.write(f"{accessor_name}.setHandle({extract_handle});")
self.out.write(f"{accessor_name}.setContext(contextObj);")
# ----------------------------------------------------------------------
def generate_writable_bundle_extraction(self, attributes: list, group_index: int, handle_type: int):
"""Generate the code to extract the pointers of bundle-type writable attributes' data in Fabric.
Args:
attributes: List of output attributes for which to extract pointers
group_index: Which of the subgroupings of attributes is this?
handle_type: HANDLE_* index indicating which type of attribute is being generated
"""
if not attributes:
return
# The grouping operation might have left Nones in the list - prune them out to make later code more clear
actual_attributes = [attribute for attribute in attributes if attribute is not None]
if not actual_attributes:
return
(_, _, handle_name, namespace) = self.HANDLE_TABLE[handle_type]
# Attributes have been grouped by memory type so the value can be extracted from any attribute in the list
memory_type = actual_attributes[0].memory_storage()
# "Any" types are not extracted here, they are extracted on demand
if memory_type != MemoryTypeValues.ANY:
# Example lines generated here for two attributes:
# {namespace}.m_d1_ = contextObj.iContext->getOutputPrim(contextObj, nodeHandle, {namespace}::d1.m_token);
# {namespace}.m_d2_ = contextObj.iContext->getOutputPrim(contextObj, nodeHandle, {namespace}::d2.m_token);
# Unlike other attribute types the bundle types have to be extracted one at a time; there is no bulk method
arg_prefix = f"contextObj.iContext->getOutputPrim(contextObj, nodeHandle, {namespace}::"
for attribute in actual_attributes:
bundle_prim_handle = f"{namespace}.{attribute.fabric_data_variable_name()}"
self.out.write(f"*{bundle_prim_handle} = {arg_prefix}{attribute.cpp_variable_name()}.m_token);")
# All of the attributes that are array types need to manage a counter pointer since that's how Fabric
# knows how big the data is. They also have to keep track of the context and handle to allow for resizing,
# which would modify the Fabric memory locations.
for (index, attribute) in enumerate(actual_attributes):
accessor_name = f"{namespace}.{attribute.cpp_variable_name()}"
extract_handle = f"std::get<{index}>({handle_name}{group_index})"
if attribute.cpp_set_handle_at_runtime():
self.out.write(f"{accessor_name}.setHandle({extract_handle});")
self.out.write(f"{accessor_name}.setContext(contextObj);")
# ----------------------------------------------------------------------
def generate_runtime_attribute_initialization(self, attributes: list, handle_type: int):
"""Generate the code to generate the initialization of runtime attributes' data from Fabric.
Args:
attributes: List of output attributes for which to extract pointers
group_index: Which of the subgroupings of attributes is this?
handle_type: HANDLE_* index indicating which type of attribute is being generated
"""
(handle_accessor, handle_type_name, namespace, requires_const_cast) = self.RUNTIME_TABLE[handle_type]
# Example lines generated here for two attributes, sharing temporary attributes per attribute type in order
# to avoid naming conflicts and support the different methods and classes different attribute types require.
# {
# AttributeObj __a;
# ConstAttributeDataHandle __h;
# __a = nodeObj.iNode->getAttributeByToken(nodeObj, inputs::floatOrToken.m_token);
# __h = __a.iAttribute->getConstAttributeDataHandle(__a);
# const_cast<ogn::RuntimeAttribute<ogn::kInput, ogn::kCpu>&>(inputs.m_floatOrToken).reset(contextObj, __h);
#
# __a = nodeObj.iNode->getAttributeByToken(nodeObj, inputs::boolOrFloat.m_token);
# __h = __a.iAttribute->getConstAttributeDataHandle(__a);
# const_cast<ogn::RuntimeAttribute<ogn::kInput, ogn::kCpu>&>(inputs.m_boolOrFloat).reset(contextObj, __h);
# }
declaration = [
f"{handle_type} __h;",
"AttributeObj __a;",
]
for attribute in attributes:
if attribute is not None:
# Only write the declaration if there is at least one attribute to process
if declaration is not None:
self.out.indent("{")
self.out.write(declaration)
declaration = None
member = f"{namespace}::{attribute.cpp_variable_name()}"
self.out.write(f"__a = nodeObj.iNode->getAttributeByToken(nodeObj, {member}.m_token);")
self.out.write(f"__h = __a.iAttribute->{handle_accessor}(__a);")
attr_object = f"{namespace}.{attribute.fabric_data_variable_name()}"
if attribute.array_depth == 0:
if requires_const_cast:
accessor_type = (
f"ogn::RuntimeAttribute<{attribute.attribute_group},"
f" {MemoryTypeValues.CPP[attribute.memory_type]}>"
)
attr_object = f"const_cast<typename std::remove_const_t<{accessor_type}&>>({attr_object})"
self.out.write(f"{attr_object}.reset(contextObj, __h, __a);")
else:
raise ParseError("Arrays not yet supported on runtime attributes")
self.out.write()
if declaration is None:
self.out.exdent("}")
# ----------------------------------------------------------------------
def generate_token_declarations(self):
"""Emit the code required to declare the tokens subclass in the database."""
if not self.node_interface.tokens:
return
self.out.write("struct TokenManager")
if self.out.indent("{"):
for token_name, _ in self.node_interface.tokens.items():
self.out.write(f"NameToken {token_name};")
self.out.exdent("};")
self.out.write("static TokenManager tokens;")
# ----------------------------------------------------------------------
def generate_token_initialization(self):
"""Emit the code required to initialize the tokens subclass in the database."""
if not self.node_interface.tokens:
return
for token_name, token_value in self.node_interface.tokens.items():
value = json.dumps(token_value)
self.out.write(f"{self.database_class_name()}::tokens.{token_name} = iToken.getHandle({value});")
# ----------------------------------------------------------------------
def generate_database_constructor(self):
"""Write out the code that defines the database class constructor"""
# Declare the constructor, taking the same parameters as the ABI compute() method
self.out.write(
f"{self.database_class_name()}(const GraphContextObj& contextObjParam, const NodeObj& nodeObjParam)"
)
self.out.indent(": OmniGraphDatabase(contextObjParam, nodeObjParam) {")
# Extract some common parts of the ABI that will be used later
self.out.write("GraphContextObj const& contextObj = m_graphContext;")
self.out.write("NodeObj const& nodeObj = m_nodeObj;")
self.out.write("NodeContextHandle nodeHandle = nodeObj.nodeContextHandle;")
# TODO: This could use some cleanup - it's a bit redundant and confused at the moment
# Split the input and output attributes into groups by memory type - CPU, CUDA, and ANY.
# These are grouped together to minimize the calls across the ABI. More consolidation could happen
# by getting all of the handles in one shot but that would complicate the management so unless it
# becomes a performance problem it won't be done
cpu_inputs, cuda_inputs, any_inputs = self.get_attributes_by_memory_type(INPUT_GROUP)
cpu_outputs, cuda_outputs, any_outputs = self.get_attributes_by_memory_type(OUTPUT_GROUP)
cpu_state, cuda_state, any_state = self.get_attributes_by_memory_type(STATE_GROUP)
for input_attributes, output_attributes, state_attributes in [
[cpu_inputs, cpu_outputs, cpu_state],
[cuda_inputs, cuda_outputs, cuda_state],
[any_inputs, any_outputs, any_state],
]:
if not input_attributes and not output_attributes and not state_attributes:
continue
# Avoid the necessity to provide unique names within this block
(input_attrs, input_bundles, input_runtime) = split_attribute_list(input_attributes)
(output_attrs, output_bundles, output_runtime) = split_attribute_list(output_attributes)
(state_attrs, state_bundles, state_runtime) = split_attribute_list(state_attributes)
needs_nesting = (
input_attrs or input_bundles or output_attrs or output_bundles or state_attrs or state_bundles
)
if needs_nesting and self.out.indent("{"):
# Arbitrarily split up the list of attributes into groups of 60, in the unlikely event there are more
# than that. Limiting the size of the processed groups avoids potential template recursion depth limits.
# Making it a larger number minimizes the calls across the ABI into the DataModel.
# They are cast to lists so that they can be used more than once (generators cannot be reused)
# groups of 60 seems to be safe for both VC compiler and gcc.
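# (For reference: "grouper" is assumed here to chunk a list into fixed-size tuples, padding the
# last tuple with None in the style of itertools.zip_longest - e.g. grouper([a, b, c], 2) yields
# (a, b), (c, None) - which is why None entries are pruned in the extraction helpers above.)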
input_single_groups = list(grouper(input_attrs, 60))
input_bundle_groups = list(grouper(input_bundles, 60))
output_bundle_groups = list(grouper(output_bundles, 60))
output_single_groups = list(grouper(output_attrs, 60))
state_bundle_groups = list(grouper(state_bundles, 60))
state_single_groups = list(grouper(state_attrs, 60))
# Keep the generation of the attribute handles from the tokens independent from the rest as in future
# this could be cached on the node and reused since it never changes from one evaluation to the next.
for i, attribute_sublist in enumerate(input_single_groups):
self.__generate_data_handles(attribute_sublist, i, self.HANDLE_INPUT)
for i, attribute_sublist in enumerate(input_bundle_groups):
self.__generate_data_handles(attribute_sublist, i, self.HANDLE_INPUT_BUNDLE)
for i, attribute_sublist in enumerate(output_single_groups):
self.__generate_data_handles(attribute_sublist, i, self.HANDLE_OUTPUT)
for i, attribute_sublist in enumerate(output_bundle_groups):
self.__generate_data_handles(attribute_sublist, i, self.HANDLE_OUTPUT_BUNDLE)
for i, attribute_sublist in enumerate(state_single_groups):
self.__generate_data_handles(attribute_sublist, i, self.HANDLE_STATE)
for i, attribute_sublist in enumerate(state_bundle_groups):
self.__generate_data_handles(attribute_sublist, i, self.HANDLE_STATE_BUNDLE)
# Generate the code that extracts the Fabric pointers for each attribute.
for i, attribute_sublist in enumerate(input_single_groups):
self.generate_input_array_extraction(attribute_sublist, i, self.HANDLE_INPUT)
for i, attribute_sublist in enumerate(input_bundle_groups):
self.generate_input_array_extraction(attribute_sublist, i, self.HANDLE_INPUT_BUNDLE)
for i, attribute_sublist in enumerate(output_single_groups):
self.generate_writable_array_extraction(attribute_sublist, i, self.HANDLE_OUTPUT)
for i, attribute_sublist in enumerate(output_bundle_groups):
self.generate_writable_bundle_extraction(attribute_sublist, i, self.HANDLE_OUTPUT_BUNDLE)
for i, attribute_sublist in enumerate(state_single_groups):
self.generate_writable_array_extraction(attribute_sublist, i, self.HANDLE_STATE)
for i, attribute_sublist in enumerate(state_bundle_groups):
self.generate_writable_bundle_extraction(attribute_sublist, i, self.HANDLE_STATE_BUNDLE)
self.out.exdent("}")
# Runtime attributes are set up in one step since their handles are only required once, to set up
# their data wrappers, which will access the data at runtime.
self.generate_runtime_attribute_initialization(input_runtime, self.HANDLE_INPUT)
self.generate_runtime_attribute_initialization(output_runtime, self.HANDLE_OUTPUT)
self.generate_runtime_attribute_initialization(state_runtime, self.HANDLE_STATE)
self.out.exdent("}")
# ----------------------------------------------------------------------
def generate_release(self):
"""Write out the code that defines the database internal state accessor, if any.
Since the existence of the internal state can only be determined at compile time, not code generation time,
the method is always emitted, relying on the state manager to efficiently handle nodes with and without state.
"""
self.out.write("static void release(const NodeObj& nodeObj)")
if self.out.indent("{"):
self.out.write(f"{self.state_manager_name()}.removeState(nodeObj.nodeHandle);")
self.out.exdent("}")
# ----------------------------------------------------------------------
def generate_initialize(self):
"""Write out the code for the function that initializes a node of this type after creation.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
all_attributes = self.node_interface.all_input_attributes()
all_attributes += self.node_interface.all_output_attributes()
all_attributes += self.node_interface.all_state_attributes()
if not all_attributes:
return
self.out.write("static void initialize(const GraphContextObj&, const NodeObj& nodeObj)")
if self.out.indent("{"):
self.out.write("const INode* iNode = nodeObj.iNode;")
if self.has_deprecated_attributes:
self.out.write("const IInternal* iInternal = carb::getCachedInterface<omni::graph::core::IInternal>();")
self.out.write("const IAttribute* iAttribute = carb::getCachedInterface<omni::graph::core::IAttribute>();")
self.out.write("AttributeObj attributeObj;")
for attribute in all_attributes:
if not attribute.metadata and attribute.is_required and not attribute.is_deprecated:
continue
member = f"{attribute.namespace}::{attribute.cpp_variable_name()}"
self.out.write(f"attributeObj = iNode->getAttributeByToken(nodeObj, {member}.token());")
self.write_attrib_metadata(attribute)
# Set the optional flag if it isn't the default
if not attribute.is_required:
self.out.write("iAttribute->setIsOptionalForCompute(attributeObj, true);")
# Deprecate the attribute, if necessary
if attribute.is_deprecated:
self.out.write(
f"iInternal->deprecateAttribute(attributeObj, {to_cpp_str(attribute.deprecation_msg)});"
)
self.out.exdent("}")
# ----------------------------------------------------------------------
def generate_initialize_type(self):
"""Write out the code for the function that initializes the node type.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
input_attributes = self.node_interface.all_input_attributes()
output_attributes = self.node_interface.all_output_attributes()
state_attributes = self.node_interface.all_state_attributes()
self.out.write("static void initializeType(const NodeTypeObj& nodeTypeObj)")
self.out.indent("{")
# Metadata always needs this, and attributes will as well if there are any
self.out.write("const INodeType* iNodeType = nodeTypeObj.iNodeType;")
# Avoid generation of an unused variable
if input_attributes or output_attributes or state_attributes or self.node_interface.tokens:
# Cannot use the token interface stored in the database class since it doesn't exist here.
self.out.write("auto iTokenPtr = carb::getCachedInterface<carb::flatcache::IToken>();")
if self.out.indent("if( ! iTokenPtr ) {"):
self.out.write(f'CARB_LOG_ERROR("IToken not found when initializing {self.node_interface.name}");')
self.out.write("return;")
self.out.exdent("}")
# The extra step of making a reference is taken so that future accesses to the interface are the
# same no matter which extension you are in.
self.out.write("auto& iToken{ *iTokenPtr };")
self.generate_token_initialization()
# Generate the initialization of attributes, including setting defaults and adding them to the node type
def generate_attribute_initialize(attribute_list: List[AttributeManager], namespace: str):
"""Helper to initialize attributes of a given type. Prevents triplication of the loop"""
self.out.write()
for attribute in attribute_list:
attribute.cpp_pre_initialization(self.out)
for attribute in attribute_list:
self.out.write(
f"{namespace}::{attribute.cpp_variable_name()}.initialize(iToken, *iNodeType, nodeTypeObj);"
)
for attribute in attribute_list:
attribute.cpp_post_initialization(self.out)
generate_attribute_initialize(input_attributes, INPUT_NS)
generate_attribute_initialize(output_attributes, OUTPUT_NS)
generate_attribute_initialize(state_attributes, STATE_NS)
# Generate the initialization of the node metadata, including the hardcoded one holding the extension name
self.out.write(
f"iNodeType->setMetadata(nodeTypeObj, {MetadataKeyOutput.EXTENSION}, {json.dumps(self.extension)});"
)
for key, value in self.node_interface.metadata.items():
cpp_key = MetadataKeyOutput.cpp_name_from_key(key)
if cpp_key is None:
# Run it through json to handle escaping the quotes
cpp_key = json.dumps(key)
self.out.write(f"iNodeType->setMetadata(nodeTypeObj, {cpp_key}, {json.dumps(value)});")
# If any of the scheduling hints flags have been defined then set them here
if self.node_interface.scheduling_hints is not None:
self.node_interface.scheduling_hints.emit_cpp(self.out)
# The icon path is relative to the extension path, which is only known at runtime, so build it up then.
# To the user it will appear as an absolute path, which they can modify if they wish to.
if self.node_interface.icon_path is not None:
icon_path = json.dumps(self.node_interface.icon_path)
self.out.write("auto iTokens = carb::getCachedInterface<carb::tokens::ITokens>();")
if self.out.indent("if( ! iTokens ) {"):
self.out.write(
'CARB_LOG_ERROR("Extension path not available - ITokens not found when initializing'
f' {self.node_interface.name}");'
)
self.out.exdent("}")
self.out.write("else")
if self.out.indent("{"):
self.out.write(f'std::string iconPath = carb::tokens::resolveString(iTokens, "${{{self.extension}}}");')
self.out.write('iconPath.append("/");')
self.out.write(f"iconPath.append({icon_path});")
self.out.write(f"iNodeType->setMetadata(nodeTypeObj, {MetadataKeyOutput.ICON_PATH}, iconPath.c_str());")
self.out.exdent("}")
# Set up the state variable, directly if there are state attributes
if self.node_interface.has_state:
self.out.write("iNodeType->setHasState(nodeTypeObj, true);")
# Close off the method definition
self.out.exdent("}")
# ----------------------------------------------------------------------
def generate_validate(self):
"""Write out the code for the function that validates the attributes before compute.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
self.out.indent("bool validate() {")
attributes_to_walk = [
[INPUT_NS, self.node_interface.all_input_attributes()],
[OUTPUT_NS, self.node_interface.all_output_attributes()],
[STATE_NS, self.node_interface.all_state_attributes()],
]
conditions = []
for attribute_prefix, attribute_list in attributes_to_walk:
for attribute in attribute_list:
if attribute.is_required:
if attribute.has_fixed_type():
conditions.append(f"{attribute_prefix}.{attribute.cpp_variable_name()}.isValid()")
elif attribute.do_validation:
conditions.append(f"{attribute_prefix}.{attribute.cpp_variable_name()}().resolved()")
if conditions:
self.out.indent(f"return {conditions[0]}")
for condition in conditions[1:]:
self.out.write(f"&& {condition}")
self.out.exdent(";")
else:
self.out.write("return true;")
self.out.exdent("}")
# ----------------------------------------------------------------------
def generate_database(self):
"""Write out the code to initialize the required attributes.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
self.generate_attribute_static_data()
# The namespace encapsulates the attribute access to make it easier to isolate it
self.out.write(f"namespace I{self.base_name}")
self.out.write("{")
# Optional description information. Can be omitted if the generated code is never examined.
node_description = to_cpp_comment(self.node_interface.description)
self.out.write(f"{node_description}")
# Generate the class wrapping all of the access to the DataModel, including the core ABI objects
self.out.write(f"class {self.database_class_name()} : public omni::graph::core::ogn::OmniGraphDatabase")
self.out.write("{")
if self.out.indent("public:"):
self.generate_token_declarations()
self.out.write("template <typename StateInformation>")
if self.out.indent("static StateInformation& sInternalState(const NodeObj& nodeObj) {"):
self.out.write(f"return {self.state_manager_name()}.getState<StateInformation>(nodeObj.nodeHandle);")
self.out.exdent("}")
self.out.write("template <typename StateInformation>")
if self.out.indent("StateInformation& internalState() {"):
self.out.write("return sInternalState<StateInformation>(abi_node());")
self.out.exdent("}")
self.out.write(f"static ogn::StateManager {self.state_manager_name()};")
self.out.write(f"static std::tuple<int, int, int>{self.generator_version_name()};")
self.out.write(f"static std::tuple<int, int, int>{self.target_version_name()};")
self.generate_attribute_accessors(self.node_interface.all_input_attributes(), INPUT_GROUP)
self.generate_attribute_accessors(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
self.generate_attribute_accessors(self.node_interface.all_state_attributes(), STATE_GROUP)
self.generate_database_constructor()
# The ABI initializeType method is implemented as a member of this class to simplify registration
self.generate_initialize_type()
# The ABI initialize method is implemented as a member of this class to facilitate attribute
# metadata and internal state data.
if self.needs_initialize:
self.generate_initialize()
# The ABI release method is implemented as a member of this class to facilitate internal state data.
self.generate_release()
# Compute will require validation that all required attributes exist before running
self.generate_validate()
# Terminate the class definition
self.out.exdent("};")
# Initialize the static objects here, to avoid potential namespace clashes later
class_name = self.database_class_name()
self.out.write(f"ogn::StateManager {class_name}::{self.state_manager_name()};")
# Remember the generator and code target version, in case it is needed later for backwards compatibility
generator_version = ",".join([str(version) for version in self.generator_version])
self.out.write(
f"std::tuple<int, int, int> {class_name}::{self.generator_version_name()}"
f"{{std::make_tuple({generator_version})}};"
)
target_version = ",".join([str(version) for version in self.target_version])
self.out.write(
f"std::tuple<int, int, int> {class_name}::{self.target_version_name()}"
f"{{std::make_tuple({target_version})}};"
)
# If there are tokens declare the class member that implements them
if self.node_interface.tokens:
self.out.write(f"{class_name}::TokenManager {class_name}::tokens;")
# Hide the namespace enclosure from the node code
self.out.write("}")
self.out.write(f"using namespace I{self.base_name};")
# ----------------------------------------------------------------------
def generate_cuda_code(self):
"""Write out the code that will be used by the CUDA code.
Here is some sample output for a node multiplying a vector by a constant
namespace OgnFooCudaTypes
{
namespace inputs
{
using multiplier_t = const float&;
using input_t = const float3&;
}
namespace outputs
{
using result_t = float;
}
}
using namespace OgnFooCudaTypes;
With this set of definitions and the ones in the CPU types you can easily define functions that cross the
CPU/GPU boundary. This would be the signature in the .cpp file:
extern "C" void scaleVector(inputs::input_t, inputs::multiplier_t, outputs::result_t);
with this definition in the .cu file:
extern "C" void scaleVector(inputs::input_t, inputs::multiplier_t, outputs::result_t);
The pointer is necessary as the CPU cannot directly access GPU data so it has to pass through
the extracted Fabric data as (undereferencable) pointers. It is made explicit rather than burying
it in the type definition in order to make it obvious to the user that they are dealing with a pointer.
Raises:
NodeGenerationError: When there is a failure in the generation of the CUDA interface
"""
def generate_cuda_includes(attribute_list: List[AttributeManager]) -> List[str]:
"""Helper function that returns a list of all include statements needed by CUDA definitions"""
includes = []
for attribute in attribute_list:
includes += attribute.cuda_includes()
return includes
def generate_cuda_typedefs(attribute_list: List[AttributeManager], attribute_group: str):
"""Helper function that generates the CUDA typedefs all attributes in the list
The types here are similar but not identical to those generated in the static attribute
information definition class created in generate_attribute_static_data().
Args:
attribute_list: List of attributes to generate
attribute_group: Enum with the attribute's group (input, output, or state)
"""
namespace = namespace_of_group(attribute_group)
self.out.write(f"namespace {namespace}")
self.out.write("{")
for attribute in attribute_list:
attribute_data_type = attribute.cuda_type_name()
# No type means the attribute type cannot be passed to CUDA code
if attribute_data_type is not None:
modifier = "const " if attribute.is_read_only() else ""
initializer_name = attribute.cpp_variable_name()
self.out.write(f"using {initializer_name}_t = {modifier}{attribute_data_type};")
self.out.write("}")
input_includes = generate_cuda_includes(self.node_interface.all_input_attributes())
output_includes = generate_cuda_includes(self.node_interface.all_output_attributes())
state_includes = generate_cuda_includes(self.node_interface.all_state_attributes())
for include_file in sorted(set(input_includes + output_includes + state_includes)):
self.out.write(f"#include <{include_file}>")
# Namespace it to create file-local objects with easy access
self.out.write(f"namespace {self.base_name}CudaTypes")
self.out.write("{")
generate_cuda_typedefs(self.node_interface.all_input_attributes(), INPUT_GROUP)
generate_cuda_typedefs(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
generate_cuda_typedefs(self.node_interface.all_state_attributes(), STATE_GROUP)
self.out.write("}")
self.out.write(f"using namespace {self.base_name}CudaTypes;")
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Write out the code associated with the node.
Raises:
NodeGenerationError: When there is a failure in the generation of the C++ interface
"""
if self.all_supported:
self.generate_database()
self.generate_registration_macro()
else:
# self.generate_attribute_tokens()
self.generate_registration()
# ======================================================================
def generate_cpp(configuration: GeneratorConfiguration, all_supported: bool) -> Optional[str]:
"""Create support files for the C++ interface to a node
For now only a header file is generated for the C++ interface, though there will probably be multiple files
generated in the future. For that reason this single point of contact was created for outside callers.
Args:
configuration: Information defining how and where the header will be generated
all_supported: True if all attributes in the file are of supported types.
If False then no initialization code will be emitted, only registration.
Returns:
String containing the generated header code or None if its generation was not enabled
Raises:
NodeGenerationError: When there is a failure in the generation of the header
"""
if not configuration.node_interface.can_generate("c++"):
return None
logger.info("Generating C++ Database Definition")
generator = NodeCppGenerator(configuration, all_supported)
generator.generate_interface()
return str(generator.out)
| 59,658 | Python | 52.50583 | 120 | 0.60986 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/register_ogn_nodes.py | from omni.graph.tools import DeprecationError
raise DeprecationError("register_ogn_nodes has moved from omni.graph.tools to omni.graph.core")
| 143 | Python | 34.999991 | 95 | 0.825175 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_tests.py | """Support for generating simple regression test code for OmniGraph Nodes.
The tests do three things:
1. Do a test "import" of the Python interface to the node
2. Do a test load of the USDA template interface for the node
3. Run all of the "tests" cases specified in the node's .ogn file
Exports:
generate_tests: Create a TestNODE.py file containing standard unit tests of the node operation
"""
from pathlib import Path
from typing import List, Optional
from .attributes.management import list_without_runtime_attributes
from .generate_test_imports import ensure_test_is_imported
from .keys import MemoryTypeValues, TestKeys
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, UnimplementedError, logger
__all__ = ["generate_tests"]
class NodeTestGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate basic test scripts for a node"""
def __init__(self, configuration: GeneratorConfiguration): # noqa: PLW0246
"""Set up the generator and output the test scripts for the node
Just passes the initialization on to the parent class. See the argument and exception descriptions there.
"""
super().__init__(configuration)
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the Python test file"""
return self.test_class_name() + ".py"
# ----------------------------------------------------------------------
def test_class_name(self) -> str:
"""Returns the name to use for the test class and base name for the test file"""
return f"Test{self.base_name}"
# ----------------------------------------------------------------------
def generate_user_test_data(self, all_tests: List) -> bool:
"""Generate the section of the user test that creates the test data to be iterated over.
The variable "test_data" is set up to be a list of test data, whose elements consist of four sub-lists
- values for input attributes, set before the test starts
- values for output attributes, checked after the test finishes
- initial values for state attributes, set before the test starts
- final values for state attributes, checked after the test finishes
Each of the data elements consist of a 3-tuple of the name of the attribute, the value for it, and whether the
attribute's data should be manipulated on the GPU.
Simple example of one test where two inputs are set, one output is checked, one state attribute has an
initial value set and a final value checked, and a node of type "MyNodeType" is created before the test runs:
test_data = [
{
"inputs": [
["inputs:attr", INPUT_VALUE, False],
["inputs:attr2", INPUT_VALUE2, False],
],
"outputs": [
["outputs:attr", OUTPUT_VALUE, False],
],
"state_set": [
["state:attr", INITIAL_VALUE, False],
],
"state_get": [
["state:attr", FINAL_VALUE, False],
],
"setup": {
"create_nodes": [["MyNode", "MyNodeType"]]
}
}
]
Args:
all_tests: List of test dictionaries with the test information to be consolidated
Returns:
True if the node tests use the old V1 setup data, meant for the deprecated OmniGraphHelper
"""
test_data = []
uses_v1_setup = False
for node_test_data in all_tests:
# The outputs who decide on being on the GPU at runtime have to be marked as such in the test data.
# That way the test knows where to look for the results.
gpu_outputs = node_test_data.gpu_outputs
test_run = {}
# Do all inputs first, for clarity
if node_test_data.input_values:
input_data = []
for attribute, attribute_value in node_test_data.input_values.items():
if attribute.memory_type == MemoryTypeValues.ANY:
UnimplementedError(f"Input '{attribute.name}' has unsupported memory CPU/GPU")
on_gpu = attribute.memory_type != MemoryTypeValues.CPU
input_data.append([attribute.name, attribute_value, on_gpu])
test_run[TestKeys.INPUTS] = input_data
# ...then all outputs
if node_test_data.expected_outputs:
output_data = []
for attribute, attribute_value in node_test_data.expected_outputs.items():
if attribute.memory_type == MemoryTypeValues.ANY:
on_gpu = attribute.name in gpu_outputs
else:
on_gpu = attribute.memory_type != MemoryTypeValues.CPU
output_data.append([attribute.name, attribute_value, on_gpu])
test_run[TestKeys.OUTPUTS] = output_data
# ...then all state initial values
if node_test_data.state_initial_values:
state_data = []
for attribute, attribute_value in node_test_data.state_initial_values.items():
if attribute.memory_type == MemoryTypeValues.ANY:
on_gpu = attribute.name in gpu_outputs
else:
on_gpu = attribute.memory_type != MemoryTypeValues.CPU
state_data.append([attribute.name, attribute_value, on_gpu])
test_run[TestKeys.STATE_SET] = state_data
# ... all state final values
if node_test_data.state_final_values:
state_data = []
for attribute, attribute_value in node_test_data.state_final_values.items():
if attribute.memory_type == MemoryTypeValues.ANY:
on_gpu = attribute.name in gpu_outputs
else:
on_gpu = attribute.memory_type != MemoryTypeValues.CPU
state_data.append([attribute.name, attribute_value, on_gpu])
test_run[TestKeys.STATE_GET] = state_data
# ... and graph setup (where None has a different meaning than the empty list)
if node_test_data.graph_setup is not None:
test_run[TestKeys.SETUP] = node_test_data.graph_setup
uses_v1_setup |= node_test_data.uses_v1_setup
test_data.append(test_run)
self.out.write(f"test_data = {test_data}")
return uses_v1_setup
# ----------------------------------------------------------------------
def generate_user_test_run(self, uses_v1_setup: bool = False):
"""Generate the section of the user test that iterates test runs over the test data"""
node_type_name = self.node_interface.name
# Emit the test loop, creating a new node each iteration to ensure that all unset values are their default
self.out.write("test_node = None")
self.out.write("test_graph = None")
if self.out.indent("for i, test_run in enumerate(test_data):"):
self.out.write(f"inputs = test_run.get('{TestKeys.INPUTS}', [])")
self.out.write(f"outputs = test_run.get('{TestKeys.OUTPUTS}', [])")
self.out.write(f"state_set = test_run.get('{TestKeys.STATE_SET}', [])")
self.out.write(f"state_get = test_run.get('{TestKeys.STATE_GET}', [])")
self.out.write(f"setup = test_run.get('{TestKeys.SETUP}', None)")
# Clean out the stage for the next test, unless the setup is to be carried on
if self.out.indent("if setup is None or setup:"):
self.out.write("await omni.usd.get_context().new_stage_async()")
self.out.write("test_graph = None")
self.out.exdent()
if self.out.indent("elif not setup:"):
# An empty setup means "use the previous setup", which should have created a graph and a node
self.out.write(
"self.assertTrue(test_graph is not None and test_graph.is_valid(), "
+ '"Test is misconfigured - empty setup cannot be in the first test")'
)
self.out.exdent()
# Emit the construction of the test node
if self.out.indent("if setup:"):
if uses_v1_setup:
self.out.write("await ogts.setup_test_environment()")
self.out.write('test_nodes = og.OmniGraphHelper().edit_graph("/", setup)')
self.out.write("test_graph = og.get_current_graph()")
else:
self.out.write('(test_graph, test_nodes, _, _) = og.Controller.edit("/TestGraph", setup)')
self.out.write("self.assertTrue(test_nodes)")
self.out.write("test_node = test_nodes[0]")
self.out.exdent()
if self.out.indent("elif setup is None:"):
# A previous setup will have created a test_graph, otherwise create a default graph for testing.
if self.out.indent("if test_graph is None:"):
self.out.write('test_graph = og.Controller.create_graph("/TestGraph")')
self.out.write("self.assertTrue(test_graph is not None and test_graph.is_valid())")
self.out.exdent()
if self.out.indent("test_node = og.Controller.create_node("):
self.out.write(f'("TestNode_{self.safe_name()}", test_graph), "{node_type_name}"')
self.out.exdent(")")
self.out.exdent()
self.out.write('self.assertTrue(test_graph is not None and test_graph.is_valid(), "Test graph invalid")')
self.out.write('self.assertTrue(test_node is not None and test_node.is_valid(), "Test node invalid")')
# First evaluation sets up the node and Fabric
self.out.write("await og.Controller.evaluate(test_graph)")
# Emit the code that sets the inputs of the node first
self.out.write("values_to_set = inputs + state_set")
if self.out.indent("if values_to_set:"):
if self.out.indent("for attribute_name, attribute_value, _ in inputs + state_set:"):
self.out.write("og.Controller((attribute_name, test_node)).set(attribute_value)")
self.out.exdent()
self.out.exdent()
# Emit code to evaluate the node
self.out.write("await og.Controller.evaluate(test_graph)")
# Emit code to read the outputs and state and compare against the expected values
if self.out.indent("for attribute_name, expected_value, _ in outputs + state_get:"):
self.out.write("attribute = og.Controller.attribute(attribute_name, test_node)")
self.out.write("actual_output = og.Controller.get(attribute)")
self.out.write("expected_type = None")
if self.out.indent("if isinstance(expected_value, dict):"):
self.out.write('expected_type = expected_value["type"]')
self.out.write('expected_value = expected_value["value"]')
self.out.exdent()
error_message = f"{node_type_name} User test case #{{i+1}}: {{attribute_name}} attribute value error"
self.out.write(f'ogts.verify_values(expected_value, actual_output, f"{error_message}")')
if self.out.indent("if expected_type:"):
self.out.write("tp = og.AttributeType.type_from_ogn_type_name(expected_type)")
self.out.write("actual_type = attribute.get_resolved_type()")
if self.out.indent("if tp != actual_type:"):
self.out.write(
f'raise ValueError(f"{node_type_name} User tests - {{attribute_name}}: '
f'Expected {{expected_type}}, saw {{actual_type.get_ogn_type_name()}}")'
)
self.out.exdent()
self.out.exdent()
self.out.exdent()
self.out.exdent()
self.out.exdent()
# ----------------------------------------------------------------------
def generate_user_tests(self):
"""Generate the test method that exercises the tests specified in the .ogn file"""
all_tests = self.node_interface.all_tests()
# If the user did not specify any tests then do not generate this test function
if not all_tests:
return
self.out.write("")
if self.out.indent("async def test_generated(self):"):
uses_v1_setup = self.generate_user_test_data(all_tests)
self.generate_user_test_run(uses_v1_setup)
# ----------------------------------------------------------------------
def generate_data_access_test(self):
"""Generate the test method for loading the generated USD file"""
check_usd = "usd" not in self.node_interface.excluded_generators
check_python = "python" not in self.node_interface.excluded_generators
# There is not enough information to test data access if there's no Python database and no USD file
if not check_usd and not check_python:
return
node_type_name = self.node_interface.name
node_name = f"Template_{self.safe_name()}"
# The node may have generated a .usda file with default values on it. If so then load it in and confirm
# that the node exists after load and the inputs have the defaults.
self.out.write("")
self.out.write("async def test_data_access(self):")
if self.out.indent():
if check_python:
db_name = f"{self.base_name}Database"
self.out.write(f"from {self.module}.ogn.{db_name} import {db_name}")
# If USD testing is turned on then use that as a source of the test node
if check_usd:
self.out.write(f'test_file_name = "{self.base_name}Template.usda"')
# The file is found in the usd/ subdirectory of this script's test directory.
self.out.write('usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)')
if self.out.indent("if not os.path.exists(usd_path):"):
self.out.write('self.assertTrue(False, f"{usd_path} not found for loading test")')
self.out.exdent()
self.out.write("(result, error) = await ogts.load_test_file(usd_path)")
self.out.write("self.assertTrue(result, f'{error} on {usd_path}')")
# This node name is hardcoded into the USD generation
self.out.write(f'test_node = og.Controller.node("/TestGraph/{node_name}")')
# otherwise create the node directly
else:
self.out.write('(_, (test_node,), _, _) = og.Controller.edit("/TestGraph", {')
self.out.write(f' og.Controller.Keys.CREATE_NODES: ("{node_name}", "{node_type_name}")')
self.out.write("})")
if check_python:
self.out.write(f"database = {db_name}(test_node)")
self.out.write("self.assertTrue(test_node.is_valid())")
# Checking the version instead of the name allows for node type name aliases
self.out.write("node_type_name = test_node.get_type_name()")
version = self.node_interface.version
self.out.write(f"self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), {version})")
self.out.write("")
if self.out.indent("def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:"):
self.out.write('test_type = "USD Load" if usd_test else "Database Access"')
self.out.write('return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"')
self.out.exdent()
self.out.write("")
# Emit code to read the inputs and compare against the default values
for attribute in list_without_runtime_attributes(self.node_interface.all_attributes()):
# Optional attributes are not written to the template file
if not attribute.is_required:
continue
name_to_check = attribute.usd_name()
self.out.write("")
self.out.write(f'self.assertTrue(test_node.get_attribute_exists("{name_to_check}"))')
# Always get the value, so that the code is exercised
self.out.write(f'attribute = test_node.get_attribute("{name_to_check}")')
# Values aren't available for testing if the memory type is purely CUDA
if check_python and attribute.memory_type != MemoryTypeValues.CUDA:
attribute_accessor = f"{attribute.namespace}.{attribute.python_property_name()}"
# If memory type is determined at runtime the property is an accessor, not a value
if attribute.memory_type == MemoryTypeValues.ANY and not attribute.cpp_accessor_on_cpu():
attribute_accessor += ".cpu"
self.out.write(f"db_value = database.{attribute_accessor}")
# Only check input numbers since they should be well-defined as the defaults for all nodes
if not attribute.is_read_only():
continue
expected_value = attribute.python_default_value()
if expected_value is not None:
self.out.write(f"expected_value = {expected_value}")
if check_usd:
self.out.write("actual_value = og.Controller.get(attribute)")
self.out.write("ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))")
if check_python and attribute.memory_type != MemoryTypeValues.CUDA:
self.out.write("ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))")
self.out.exdent()
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Generate the test method for the named node"""
self.generate_user_tests()
self.generate_data_access_test()
if self.interface_directory is not None:
ensure_test_is_imported(self.test_class_name(), Path(self.interface_directory))
# ----------------------------------------------------------------------
def pre_interface_generation(self):
"""Create the imports and common test framework used by tests for all nodes"""
self.out.write("import omni.kit.test")
self.out.write("import omni.graph.core as og")
self.out.write("import omni.graph.core.tests as ogts")
if "usd" not in self.node_interface.excluded_generators:
self.out.write("import os")
self.out.write("")
self.out.write("")
self.out.write("class TestOgn(ogts.OmniGraphTestCase):")
self.out.indent()
# ======================================================================
def generate_tests(configuration: GeneratorConfiguration) -> Optional[str]:
"""Create support files for the tests on the node
Args:
configuration: Information defining how and where the test files will be generated
Returns:
String containing the generated test script code or None if its generation was not enabled
Raises:
NodeGenerationError: When there is a failure in the generation of the tests
"""
if not configuration.node_interface.can_generate("tests"):
return None
logger.info("Generating Node Type Tests")
generator = NodeTestGenerator(configuration)
generator.generate_interface()
return str(generator.out)
| 20,359 | Python | 53.878706 | 120 | 0.56766 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_usd.py | """Support for generating USD template files for OmniGraph Nodes.
Exports:
generate_usd: Create a NODETemplate.usda file containing a template for instantiation of the described node type
"""
from typing import List, Optional
from .attributes.AttributeManager import AttributeManager
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, logger, to_usd_docs
def pluralize(count: int):
"""Return a string with the pluralization suffix for the given count ("s" for non-1, "" for 1)"""
return "" if count == 1 else "s"
class NodeUsdGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate a USD template file representing a node type"""
def __init__(self, configuration: GeneratorConfiguration): # noqa: PLW0246
"""Set up the generator and output the USD template for the node
Just passes the initialization on to the parent class. See the argument and exception descriptions there.
"""
super().__init__(configuration)
# ----------------------------------------------------------------------
def interface_file_name(self):
"""Return the path to the name of the USD file"""
return self.base_name + "Template.usda"
# ----------------------------------------------------------------------
def __prim_name(self) -> str:
"""Returns a string comprising the name of the prim representing this node in the USD file"""
return f"Template_{self.safe_name()}"
# ----------------------------------------------------------------------
def generate_attributes_usd(self, attributes: List[AttributeManager]):
"""Write out USD code corresponding to the node
Args:
attributes: List of attributes whose USD is to be generated
Raises:
NodeGenerationError: When there is a failure in the generation of the USD file
"""
for attribute in attributes:
attribute.emit_usd_declaration(self.out)
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Generate the USD code corresponding to the node_interface
Raises:
NodeGenerationError: When there is a failure in the generation of the USD file
"""
node_name = self.node_interface.name
node_version = self.node_interface.version
self.out.write("")
if self.out.indent(f'def OmniGraphNode "{self.__prim_name()}" ('):
self.out.write(to_usd_docs(self.node_interface.description))
self.out.exdent(")")
self.out.write("{")
self.out.indent()
self.out.write(f'token node:type = "{node_name}"')
self.out.write(f"int node:typeVersion = {node_version}")
for attributes in [
self.node_interface.all_input_attributes(),
self.node_interface.all_output_attributes(),
self.node_interface.all_state_attributes(),
]:
if attributes:
attribute_count = len(attributes)
self.out.write("")
self.out.write(f"# {attribute_count} attribute{pluralize(attribute_count)}")
self.generate_attributes_usd(attributes)
self.out.exdent("}")
# ----------------------------------------------------------------------
def pre_interface_generation(self):
"""Create the USD header information and the graph enclosing the node"""
self.out.write("#usda 1.0")
self.out.write("(")
self.out.write(f' doc ="""Generated from node description file {self.base_name}.ogn')
self.out.write('Contains templates for node types found in that file."""')
self.out.write(")")
self.out.write("")
self.out.write('def OmniGraph "TestGraph"')
if self.out.indent("{"):
self.out.write('token evaluator:type = "push"')
self.out.write("int2 fileFormatVersion = (1, 3)")
self.out.write('token flatCacheBacking = "Shared"')
self.out.write('token pipelineStage = "pipelineStageSimulation"')
# ----------------------------------------------------------------------
def post_interface_generation(self):
"""Close the graph definition"""
self.out.exdent()
self.out.write("}")
# ======================================================================
def generate_usd(configuration: GeneratorConfiguration) -> Optional[str]:
"""Create support files for the USD template definition for a node
Args:
configuration: Information defining how and where the template will be generated
Returns:
String containing the generated USD or None if its generation was not enabled
Raises:
NodeGenerationError: When there is a failure in the generation of the USD file
"""
if not configuration.node_interface.can_generate("usd"):
return None
logger.info("Generating USD Template File")
generator = NodeUsdGenerator(configuration)
generator.generate_interface()
return str(generator.out)
| 5,136 | Python | 40.427419 | 116 | 0.584891 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/utils.py | # noqa: PLC0302
"""Common constants, methods, and classes used by the various sections of the node generator.
These were split out into a separate file to avoid circular inclusions.
"""
import argparse
import csv
import io
import locale
import logging
import os
import re
import subprocess
import sys
from functools import partial
from pathlib import Path
from typing import IO, Any, Dict, List, Optional, Tuple, Union
# Support for deprecated location of these types
from .keys import IconKeys # noqa
from .keys import MemoryTypeValues # noqa
from .keys import MetadataKeyOutput # noqa
from .keys import MetadataKeys # noqa
# ======================================================================
# Environment variable gating display and execution of parse debugging information
# The parsing debugging is turned on in any one of these situations:
# OGN_DEBUG=1
# OGN_DEBUG.contains("parse")
# OGN_PARSE_DEBUG=1
env_var = os.getenv("OGN_DEBUG")
has_debugging = env_var is not None
OGN_PARSE_DEBUG = (
(has_debugging and (env_var == "1" or env_var.lower().find("parse") >= 0))
or (os.getenv("OGN_PARSE_DEBUG") is not None)
)
OGN_REG_DEBUG = (
(has_debugging and (env_var == "1" or env_var.lower().find("reg") >= 0))
or (os.getenv("OGN_REG_DEBUG") is not None)
)
# ======================================================================
def __dbg(gate: bool, message: str, *args, **kwargs):
"""
Print out a debugging message if the gate_variable is enabled, additional args will be passed
to format the given message.
"""
if gate:
if args or kwargs:
print("DBG: " + message.format(*args, **kwargs), flush=True)
else:
print(f"DBG: {message}", flush=True)
dbg_parse = partial(__dbg, OGN_PARSE_DEBUG)
dbg_reg = partial(__dbg, OGN_REG_DEBUG)
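# Illustrative use (hypothetical values): with OGN_DEBUG=parse set in the environment,
#     dbg_parse("Parsed attribute {}", "inputs:foo")
# prints "DBG: Parsed attribute inputs:foo", and prints nothing when parse debugging is off.
# OGN_DEBUG=1 enables all categories; OGN_PARSE_DEBUG=1 or OGN_REG_DEBUG=1 enable one each.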
# Color type that can be either a hex string or an RGBA tuple
ColorType = Union[str, Tuple[int, int, int, int]]
# Constant defining the name of the OmniGraph core extension, since some code needs to generate differently for it
OMNI_GRAPH_CORE_EXTENSION = "omni.graph.core"
# Special file inserted into a a generated ogn/ directory to tag it as not requiring runtime regeneration
UNWRITABLE_TAG_FILE = "__ogn_files_prebuilt"
# Legacy file name that causes part of the packaging process to breakdown due to the leading dot
__OLD_UNWRITABLE_TAG_FILE = ".ogn_files_prebuilt"
# Deprecated - use keys.MemoryTypeValues instead
MEMORY_TYPE_CPU = MemoryTypeValues.CPU
MEMORY_TYPE_CUDA = MemoryTypeValues.CUDA
MEMORY_TYPE_ANY = MemoryTypeValues.ANY
ALL_MEMORY_TYPES = MemoryTypeValues.ALL
CPP_MEMORY_TYPES = MemoryTypeValues.CPP
# Pattern for legal token names
# - starts with a letter or underscore
# - then an arbitrary number of alphanumerics, underscores, or commas
# - other special characters cause problems in the generated code and so are disallowed
RE_TOKEN_NAME = re.compile(r"^[A-Za-z_][A-Za-z0-9_,]*$")
TOKEN_NAME_REQUIREMENT = (
"Token name '{}' should be CamelCase with letters, numbers, underscores."
" Tokens with special characters should use a dictionary rather than a list, where the key is the name."
)
# Enum values corresponding to extended attribute types (to avoid relying on omni.graph.core)
_EXTENDED_TYPE_REGULAR = 0
_EXTENDED_TYPE_UNION = 1
_EXTENDED_TYPE_ANY = 2
# Global logger avoids multiple loggers clashing with each other or duplicating output
logger = None
# ======================================================================
def global_logger():
"""Global status logger for the node generator.
Delay initialization so that it can be set up from the main function as well as scripts that import this one.
Returns:
A logging.Logger instance that will be shared by all scripts used for node generation
"""
global logger
if logger is None:
logger = logging.getLogger("generate_node")
logging_handler = logging.StreamHandler(sys.stdout)
logging_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(logging_handler)
logger.setLevel(logging.INFO if OGN_PARSE_DEBUG else logging.ERROR)
return logger
# Bootstrap initialization of the global variable
global_logger()
# ================================================================================
class CarbLogError(Exception):
"""Exception to raise when there is an error that requires logging"""
# ================================================================================
class DebugError(Exception):
"""Exception to raise when there is an error that requires a debug message but no specific action"""
# ======================================================================
class ParseError(Exception):
"""Exception to raise when there is an error in the parsing of the node interface description"""
# ======================================================================
class UnimplementedError(Exception):
"""Custom exception to raise when attempting to access unimplemented functionality"""
# ======================================================================
class Settings:
"""Manage the build settings that can be used for tuning the code generation. The settings are all available
as properties on the class.
Add any new settings with their default and description in the __init__ method. New settings should also be
reflected in the omni.graph.core.Settings class.
The only name not allowed is "all" as that is used to return the list of all available settings.
Defining slots allows interception of attempts to get/set unknown settings with an AttributeError.
"""
__slots__ = ["__settings"]
def __init__(self):
"""Initialize the list of available settings and their defaults."""
self.__settings = {
"pyOptimize": (False, "When generating Python nodes use a more optimized approach"),
}
for setting_name in self.__settings:
self.__slots__.append(setting_name)
for setting_name, (default_value, description) in self.__settings.items():
def _get(self, _name=setting_name) -> bool:
return self.__settings[_name][0]
def _set(self, setting_value, _name=setting_name, _description=description):
self.__settings[_name] = (setting_value, _description)
setattr(Settings, setting_name, property(_get, _set))
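# Illustrative use of the generated properties (based on the default "pyOptimize"
# setting declared above):
#     settings = Settings()
#     value = settings.pyOptimize   # reads the current value (False by default)
#     settings.pyOptimize = True    # records the new value via the generated setter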
def __str__(self) -> str:
"""Returns a string containing the list of allowed settings"""
return ", ".join(list(self.__settings.keys()))
def all(self) -> Dict[str, Tuple[Any, str]]: # noqa: A003
"""Return a dictionary of all known settings mapped onto (DEFAULT, DESCRIPTION)"""
return self.__settings
# ======================================================================
class GeneratorConfiguration:
"""Storage class containing common information used by the generators.
Mostly created to avoid passing long argument lists around.
Properties:
base_name: Name of the .ogn file with directory and extension stripped away
destination_directory: Directory for the generated code
extension: Name of the extension running the generation
generator_version: Version identification for this extension to embed in generated code
module: Python module in which the generated Python files will live
needs_directory: Destination will be ensured to exist before running the generator
node_file_path: Location of the .ogn file used to generate the code
node_interface: Node interface class to be processed
target_version: Identification for the version of the omni.graph.core extension for which code was generated
verbose: True if extra debugging information is to be output
settings: List of settings that were enabled as part of the build
generator_version_override: Generator version to use instead of the one extracted from omni.graph.tools
target_version_override: Target version to use instead of the one extracted from omni.graph.core
"""
def __init__(
self,
node_file_path: str,
node_interface,
extension: str,
module: str,
base_name: str,
destination_directory: Optional[str],
verbose: bool = False,
settings: Optional[Settings] = None,
generator_version_override: Optional[Tuple[int, int, int]] = None,
target_version_override: Optional[Tuple[int, int, int]] = None,
):
"""Collect the data members into the structure"""
self.node_file_path = node_file_path.replace("\\", "/") if node_file_path else None # Standardize the separator
self.node_interface = node_interface
self.extension = extension
self.module = module
self.base_name = base_name
self.destination_directory = destination_directory
self.needs_directory = True
self.verbose = verbose
self.settings = settings or Settings()
# It would have been nicer to use the toml package here but it's not available to the build script, and
# finding the version is trivial anyway.
tools_extension_root = Path(__file__).parent.parent.parent.parent.parent.parent
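# Illustrative matching line in extension.toml (hypothetical version): version = "1.17.2"
# The regex below captures "1.17.2", and the padding/splitting yields the tuple (1, 17, 2).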
# The version-matching pattern is shared by both the generator and target version lookups below
re_version = re.compile('version = "(.*)"')
if generator_version_override is None:
self.generator_version = (0, 0, 0)
toml_path = tools_extension_root / "config" / "extension.toml"
if not toml_path.is_file():
raise ParseError(f"Could not find generator file containing the version information '{toml_path}'")
with open(toml_path, "r", encoding="utf-8") as toml_fd:
for line in toml_fd:
match = re_version.match(line)
if match:
self.generator_version = tuple(
int(version) for version in f"{match.group(1)}.0.0.0".split(".")[0:3]
)
break
else:
self.generator_version = generator_version_override
# There is no dependency from tools to the OmniGraph core but for now they always appear in the same build
# tree so rely on that fact to find the configuration information.
if target_version_override is None:
self.target_version = (0, 0, 0)
# The path may have extra information in it such as version, SHA1, or platform, so use a pattern
core_toml_path = None
for core_dir in tools_extension_root.parent.rglob("omni.graph.core*"):
core_toml_path = core_dir / "config" / "extension.toml"
# There should only be one, but break on the first one found anyway
if core_toml_path.is_file():
break
# Do not fail if a file wasn't found, but issue a warning and use the default values
if core_toml_path is not None and core_toml_path.is_file():
with open(core_toml_path, "r", encoding="utf-8") as toml_fd:
for line in toml_fd:
match = re_version.match(line)
if match:
self.target_version = tuple(
int(version) for version in f"{match.group(1)}.0.0.0".split(".")[0:3]
)
break
else:
logger.warning("omni.graph.core extension.toml not found - using the default target version")
else:
self.target_version = target_version_override
# --------------------------------------------------------------------------------------------------------------
def __str__(self):
"""Convert the configuration to a string for debugging - one property per line"""
return f"""generator_version = {self.generator_version}
node_file_path = {self.node_file_path}
node_interface = {self.node_interface}
extension = {self.extension}
module = {self.module}
base_name = {self.base_name}
directory = {self.destination_directory}
target_version = {self.target_version}
verbose = {self.verbose}"""
# ======================================================================
class IndentedOutput:
"""Helper class that provides output capabilities to messages with preserved indentation levels
Properties:
output: File type that receives the output
indent_level: Number of indentation levels for the current output
indent_string: String representing the current indentation level
"""
def __init__(self, output: IO):
"""Initialize the indentation level and prepare for output
Args:
output: IO object to which this object will be sending its output
Both io.TextIOWrapper (output from "open()") and io.StringIO can be used
"""
self.output = output
self.indent_level = 0
self.indent_string = ""
# ----------------------------------------------------------------------
def indent(self, message: str = None) -> bool:
"""Increase the indentation level for emitted code
If a message is specified then emit that message immediately before indenting,
allowing you to easily open sections like: out.indent("{")
Returns True so that indented sections can be indented in the code:
if output.indent("begin {"):
output.exdent("})
"""
if message is not None:
self.write(message)
self.indent_level += 1
self.indent_string = " " * self.indent_level
return True
# ----------------------------------------------------------------------
def exdent(self, message: str = None):
"""Decrease the indentation level for emitted code
If a message is specified then emit that message immediately after exdenting,
allowing you to easily close sections like: out.exdent("}")
"""
assert self.indent_level > 0
self.indent_level -= 1
self.indent_string = " " * self.indent_level
if message is not None:
self.write(message)
# ----------------------------------------------------------------------
def __str__(self):
"""Return the accumulated string saved when there is no file to write, or the file path if there was"""
if isinstance(self.output, io.StringIO):
return self.output.getvalue()
return self.output.name
# ----------------------------------------------------------------------
def prepend(self, message: str):
"""Write the message line at the beginning of the output.
This rewrites the entire output so it is best to minimize its use, and to prefer the io.StringIO form over a file.
The message is written as-is with no newlines or indenting
"""
if isinstance(self.output, io.StringIO):
current_output = self.output.getvalue()
self.output = io.StringIO()
self.output.write(message)
self.output.write(current_output)
else:
filename = self.output.name
self.output = open(filename, "r+", newline="\n", encoding="utf-8") # noqa: SIM115,PLR1732
content = self.output.read()
self.output.seek(0, 0)
self.output.write(message + content)
# ----------------------------------------------------------------------
def write(self, message: Union[List, str] = ""):
"""Output a single message line to the file.
This assumes indentation will be used and a newline will be appended.
Passing in a list will write each list member on its own line.
Args:
message: Line of text being emitted
"""
if not message:
self.output.write("\n")
elif isinstance(message, list):
for line in message:
self.write(line)
else:
self.output.write(f"{self.indent_string}{message}\n")
# ----------------------------------------------------------------------
def write_as_is(self, message: Union[List, str]):
"""Output a string to the output file without indentation or added newline
Passing in a list will write each list member on its own line.
Args:
message: Line of text being emitted
"""
if isinstance(message, list):
for line in message:
self.write_as_is(line)
elif message:
self.output.write(f"{message}")
# ======================================================================
def is_comment(keyword: str) -> bool:
"""Returns True if the keyword matches the specially reserved pattern for comments, the leading '$'"""
return keyword[0] == "$" if keyword else False
# ======================================================================
def is_unwritable(generated_directory: str) -> bool:
"""Returns True if the OGN generated directory is tagged as unwritable (i.e. part of a build) or if it is
physically unwritable.
"""
unwritable_tag = os.path.join(generated_directory, UNWRITABLE_TAG_FILE)
old_unwritable_tag = os.path.join(generated_directory, __OLD_UNWRITABLE_TAG_FILE)
if os.path.isfile(unwritable_tag) or os.path.isfile(old_unwritable_tag):
return True
# The directory is not tagged as unwritable, now check to see if it is physically unwritable.
try:
# Attempting to write a temp file is the only reliable way to detect unwritable directories on Windows
test_path = Path(generated_directory) / "__test_file__"
with open(test_path, "w", encoding="utf-8"):
pass
test_path.unlink()
except OSError:
# error.errno == errno.EACCES or error.errno == errno.EEXIST
return True
return False
# ======================================================================
def ensure_quoted(value: str) -> str:
"""Returns the value in quotes if it wasn't already quoted, or just itself if it was"""
if len(value) > 1 and ((value[0] == "'" and value[-1] == "'") or (value[0] == '"' and value[-1] == '"')):
return value
value_escaped = value.replace('"', '\\"')
return f'"{value_escaped}"'
# ======================================================================
def shorten_string_lines_to(full_string: str, suggested_limit: int) -> List[str]:
"""Convert a single long line into a list of shorter lines
Args:
full_string: Single line to be trimmed
suggested_limit: Minimum length of line; line will extend to the next space past this limit
"""
shortened_strings = []
while len(full_string) > suggested_limit:
next_space = full_string.find(" ", suggested_limit)
if next_space > 0:
shortened_strings.append(full_string[0:next_space])
full_string = full_string[next_space + 1 :]
else:
break
shortened_strings.append(full_string)
return shortened_strings
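# For example (a hedged sketch), each pass breaks at the first space at or past the limit:
#     shorten_string_lines_to("the quick brown fox jumps over the lazy dog", 10)
#     -> ["the quick brown", "fox jumps over", "the lazy dog"]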
# ======================================================================
def attrib_description_to_string(description):
"""Convert convert to string if the input has a List type"""
description_list = description if isinstance(description, List) else [description]
return "\n".join(description_list)
# ======================================================================
def to_cpp_str(raw: Union[str, List[str]], separator: str = " "):
"""Convert a string or list of string into a string literal safe for writing to a .cpp file.
Args:
raw: The string or list of strings to be converted.
separator: If a list of strings is supplied they will be concatenated together with this arg separating them.
"""
if isinstance(raw, list):
raw = separator.join(raw)
t = "".maketrans({"\\": "\\\\", "\n": "\\n", "\r": "\\r", '"': '\\"'})
return '"' + raw.translate(t) + '"'
# ======================================================================
def to_comment(comment_separator: str, multiline_string: str, indent_level: int = 0):
"""Convert a multiline string into a comment where each line begins with the comment_separator
Args:
comment_separator: Character that indicates a line of comments, usually language-specific
multiline_string: String with potential newlines in it
indent_level: Number of spaces the resulting comment should be indented
Returns:
String representing a comment with one line of the comment per one line of the input.
Each line of the string is indented the given number of spaces.
"""
# Convert the multiline string into a set of truncated strings that pack into comments nicely
comment_lines = []
for single_line in multiline_string.splitlines():
shortened_lines = shorten_string_lines_to(single_line, 80)
comment_lines += shortened_lines
# Empty lines should not have a trailing space so embed that in the non-empty lines before joining them
string_lines = [f"{comment_separator} {x}" if x else f"{comment_separator}" for x in comment_lines]
if indent_level > 0:
indent_string = " " * indent_level
string_lines = [f"{indent_string}{line}" for line in string_lines]
return "\n".join(string_lines)
# ======================================================================
def to_cpp_comment(multiline_string: str, indent_level: int = 0):
"""Convert a multiline string into a C++ comment
Args:
multiline_string: String with potential newlines in it
indent_level: Number of spaces the resulting comment should be indented
Returns:
String representing a C++ comment with one line of the comment per one line of the input.
Each line of the string is indented the given number of spaces.
"""
return to_comment("//", multiline_string, indent_level)
# ======================================================================
def to_python_comment(multiline_string: str, indent_level: int = 0):
"""Convert a multiline string into a Python comment
Args:
multiline_string: String with potential newlines in it
indent_level: Number of spaces the resulting comment should be indented
Returns:
String representing a Python comment with one line of the comment per one line of the input.
Each line of the string is indented the given number of spaces.
"""
return to_comment("#", multiline_string, indent_level)
# ======================================================================
def to_usd_comment(multiline_string: str, indent_level: int = 0):
"""Convert a multiline string into a USD comment
Args:
multiline_string: String with potential newlines in it
indent_level: Number of spaces the resulting comment should be indented
Returns:
String representing a USD comment with one line of the comment per one line of the input.
Each line of the string is indented the given number of spaces.
"""
return to_comment("#", multiline_string, indent_level)
# ======================================================================
def to_usd_docs(docs: Union[List, str]) -> Union[List[str], str]:
"""Returns the USD documentation as a string or list of strings with the docs= included"""
if not docs:
return 'docs="""No documentation provided"""'
if isinstance(docs, list):
text = [f'docs="""{docs[0]}']
if len(docs) > 1:
text += docs[1:]
text[-1] += '"""'
return text
return f'docs="""{docs}"""'
# ======================================================================
def value_as_usd(python_value: Union[None, Tuple, List, str, bool, int, float]) -> Union[None, Tuple, str, int, float]:
"""Convert a Python data type into a USD structure equivalent
Args:
python_value: Python value to convert. Dictionaries and sets have no equivalent.
Returns:
Structure representing the USD version of the value passed in, for converting to a string
"""
if python_value is None:
return None
if isinstance(python_value, str):
return ensure_quoted(python_value)
if isinstance(python_value, bool):
return "true" if python_value else "false"
if isinstance(python_value, (int, float)):
return python_value
# Lists and tuples both appear as parenthesized values so convert them to that. There is also no
# representation of an empty array so return None if the list or tuple is empty.
if isinstance(python_value, (list, tuple)):
usd_list = [value_as_usd(value) for value in python_value]
return tuple(usd_list) if usd_list else None
return None
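# A few illustrative conversions (the input values here are arbitrary):
#     value_as_usd("hello")   -> '"hello"'
#     value_as_usd(True)      -> "true"
#     value_as_usd([1, 2, 3]) -> (1, 2, 3)
#     value_as_usd([])        -> None (empty arrays have no USD representation)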
# ======================================================================
def rst_title(title: str, title_level: int = 0) -> str:
"""Returns a string implementing a title_level header formatting for the string"""
title_char = ["=", "-", "~", "_", "+", "*", ":", "^"][title_level]
return f"\n{title}\n{title_char * len(title)}"
# ======================================================================
def rst_table(table_to_format: List[List[str]]) -> str:
"""
Utility to take a list of lists representing a text table and format it in
reStructuredText format. This means equalizing all of the column widths,
separating columns with " | ", separating the second and third rows with
"+==+..==+" and putting "+--+..--+" between other rows, and at the top and
bottom.
e.g. this input [["Name", "Value"], ["Fred", "Flintstone"], ["Bamm-Bamm", "Rubble"]]
yields this output:
+-----------+------------+
| Name | Value |
+===========+============+
| Fred | Flintstone |
+-----------+------------+
| Bamm-Bamm | Rubble |
+-----------+------------+
Note how the columns have been adjusted to have constant width, and the header has different padding characters.
Args:
table_to_format: List of columns to go in the table, where the first list is the header row and
subsequent lists must all be the same length.
Returns:
A string implementing the table of data, formatted as an RST aligned-table.
Raises:
ValueError: If the inner lists have different lengths
"""
# Verify the list sizes before any work begins
if not table_to_format:
return ""
if not table_to_format[0]:
return ""
list_size = len(table_to_format[0])
for i in range(1, len(table_to_format)):
if list_size != len(table_to_format[i]):
raise ValueError(f"Table row {i} does not have the expected size {list_size}")
# Compute the maximum column sizes, as they will have to all be padded to that amount + 1 space on each side
max_widths = [0] * len(table_to_format[0])
for row in table_to_format:
for index, column in enumerate(row):
if len(str(column)) > max_widths[index]:
max_widths[index] = len(str(column))
# Title and row separators are the same width as the columns, with different characters for padding
title_separator = "+"
row_separator = "+"
for column_width in max_widths:
title_separator += f"{'=' * (column_width + 2)}+"
row_separator += f"{'-' * (column_width + 2)}+"
title_separator += "\n"
row_separator += "\n"
table = row_separator
# Walk each of the inner lists, adding spaces to each column as required, and column separators
first_row = True
for row in table_to_format:
formatted_columns = []
for index, column in enumerate(row):
padding = max_widths[index] - len(str(column))
formatted_columns.append(f"{column}{' ' * padding}")
table += f"| {' | '.join(formatted_columns)} |\n"
if first_row:
first_row = False
table += title_separator
else:
table += row_separator
return table
# ======================================================================
def check_color(color: ColorType):
"""Check to see if the color has a legal specification.
Args:
color Value to check using one of these two formats
"#AABBGGRR" Hex digits of color components in 0-255
[R, G, B, A] Decimal values of color components in 0-255
Returns:
Hex string representing the RGBA values (to be used as metadata, using uppercase letters as #AABBGGRR)
Raises:
ParseError if the color specification was not legal
"""
if isinstance(color, List):
if len(color) != 4:
raise ParseError(f"Color list '{color}' must have 4 elements - R, G, B, A")
try:
(red, green, blue, alpha) = [int(component) for component in color]
except (TypeError, ValueError) as error:
raise ParseError(f"Color list '{color}' must have 4 integer elements in [0, 255] - R, G, B, A") from error
elif isinstance(color, str):
try:
red = int(f"0x{color[7:9]}", 16)
green = int(f"0x{color[5:7]}", 16)
blue = int(f"0x{color[3:5]}", 16)
alpha = int(f"0x{color[1:3]}", 16)
except (TypeError, ValueError) as error:
raise ParseError(f"Color string '{color}' must be in the hexadecimal format '#AABBGGRR'") from error
if red < 0 or red > 255:
raise ParseError(f"Red component '{red}' is out of the range [0, 255]")
if green < 0 or green > 255:
raise ParseError(f"Green component '{green}' is out of the range [0, 255]")
if blue < 0 or blue > 255:
raise ParseError(f"Blue component '{blue}' is out of the range [0, 255]")
if alpha < 0 or alpha > 255:
raise ParseError(f"Alpha component '{alpha}' is out of the range [0, 255]")
return f"#{format(alpha, '02X')}{format(blue, '02X')}{format(green, '02X')}{format(red, '02X')}".upper()
# ======================================================================
def check_icon_information(icon_info: Union[str, Dict[str, ColorType]]):
"""Raises ParseError if the icon_path is not legal, otherwise returns the path
Args:
icon_info: If a string then it is the icon path relative to the .ogn file
If a dictionary then it contains extended icon information using the keywords path, color, backgroundColor, and borderColor
Returns:
(path, color, background_color, border_color) extracted from the icon information
If any element was not specified then it will be None
Raises:
ParseError if any of the icon properties are illegal
"""
path = None
color = None
background_color = None
border_color = None
# Simple spec - just the path
if isinstance(icon_info, str):
if icon_info.startswith("/") or icon_info.startswith("\\") or icon_info.find(":") >= 0:
raise ParseError(f'Icon path "{icon_info}" must be a string path relative to the .ogn file location')
path = icon_info
# Extended spec - dictionary of properties
elif isinstance(icon_info, dict):
for key, value in icon_info.items():
if key == IconKeys.PATH:
if value.startswith("/") or value.startswith("\\") or value.find(":") >= 0:
raise ParseError(f'Icon path "{value}" must be a string path relative to the .ogn file location')
path = value
elif key == IconKeys.COLOR:
color = check_color(value)
elif key == IconKeys.BACKGROUND_COLOR:
background_color = check_color(value)
elif key == IconKeys.BORDER_COLOR:
border_color = check_color(value)
else:
raise ParseError(f"Icon keyword '{key}' not in legal list of path, color, backgroundColor, borderColor")
else:
raise ParseError(f"Icon information not a string path or a dictionary of properties - `{icon_info}`")
return (path, color, background_color, border_color)
# ======================================================================
def check_memory_type(memory_type: str):
"""Raises ParseError if the memory type is not legal, otherwise returns the memory type value"""
if memory_type not in MemoryTypeValues.ALL:
raise ParseError(f'Memory type "{memory_type}" not in allowed list of {MemoryTypeValues.ALL}')
return memory_type
# ======================================================================
def check_token_name(token_name: str):
"""Raises a ParseError if the given node name has an illegal pattern, else returns the node name"""
if not RE_TOKEN_NAME.match(token_name):
raise ParseError(TOKEN_NAME_REQUIREMENT.format(token_name))
return token_name
# ======================================================================
def get_metadata_dictionary(metadata):
"""Raises ParseError if the metadata is not legal, otherwise returns it as a dictionary with comments removed.
This function only does generic checks, applicable to all types of metadata. More specific metadata checks,
in particular for legal values and keywords, are done elsewhere
"""
def to_string(value: Any) -> str:
"""Turn metadata values into strings, notably lists and tuples are comma-separated strings"""
output = io.StringIO()
# Metadata specified as a list is stored as a comma-separated string
if isinstance(value, (tuple, list)):
csv_data = list(value)
# Metadata specified as a dictionary means the values might contain special characters that the generated code
# cannot handle and the keys are safe names. For storing the actual metadata only the values are of interest.
elif isinstance(value, dict):
csv_data = list(value.values())
# Simple elements are still run through CSV to quote any embedded commas
else:
csv_data = [value]
writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL)
writer.writerow(csv_data)
return output.getvalue().rstrip()
try:
pruned_metadata = {key: to_string(value) for key, value in metadata.items() if key[0] != "$"}
logger.info(" -> %s", pruned_metadata)
except AttributeError as error:
raise ParseError("Metadata must be a dictionary of strings") from error
return pruned_metadata
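# For example (hypothetical metadata), comment keys are dropped and list
# values collapse to comma-separated strings:
#     get_metadata_dictionary({"$comment": "ignored", "tags": ["a", "b"]})
#     -> {"tags": "a,b"}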
# If True then perform more aggressive directory checks, not safe in a multi-threaded environment
SAFE_DIRECTORY_CREATION = False
# ======================================================================
def ensure_writable_directory(prospective_dir: Union[Path, str]):
"""Ensure a directory exists and is writable
Args:
prospective_dir: Full path to the directory to check or create
Raises:
ValueError: If the path could not be made into a writable directory for any reason
"""
# There are race condition issues with the prospective directory checking so only do it
# if it was explicitly requested. (It has to be hardcoded since this test happens during
# argument parsing so you can't safely pass an argument to enable that.)
writable_dir = prospective_dir if isinstance(prospective_dir, Path) else Path(prospective_dir)
try:
if SAFE_DIRECTORY_CREATION:
if writable_dir.is_file():
logger.warning('Directory "%s" existed as a file - removing', writable_dir)
writable_dir.unlink()
if not writable_dir.is_dir():
writable_dir.mkdir(mode=0o777, parents=True, exist_ok=True)
if not writable_dir.is_dir():
raise Exception("directory could not be created")
if not os.access(str(writable_dir), os.W_OK):
raise Exception("directory is not writable")
else:
if not writable_dir.is_dir():
writable_dir.mkdir(mode=0o777, parents=True, exist_ok=True)
except Exception as error:
raise ValueError(f"writable_dir:{prospective_dir} could not be made into a writable directory") from error
# ==============================================================================================================
class WritableDir(argparse.Action):
"""Helper class for the argparser to check for a writable directory"""
def __call__(self, parser, namespace, values, option_string=None):
"""Function called by the arg parser to verify that a directory exists and is writable
Args:
parser: argparser required argument, ignored
namespace: argparser required argument, ignored
values: The Path of the directory being checked for writability
option_string: argparser required argument, ignored
Raises:
argparse.ArgumentTypeError if the requested directory cannot be found or created in writable mode
"""
try:
ensure_writable_directory(values)
setattr(namespace, self.dest, values)
except Exception as error:
raise argparse.ArgumentTypeError(str(error)) from error
# ======================================================================
#
# Collection of functions to make a symbolic link - ends at the next separator with "=====" in it or EOF
#
def _find_junction_location(junction_path: str) -> str:
"""Returns the location to which the junction path points
As with the os.symlink call the equivalent fsutil function can only be run with admin privileges,
resulting in the necessity of this roundabout path to the same information.
- use the /A:L functions to get the file type information in the parent directory
- find the entry that matches junction_path
- parse the link location from the remainder of the line
"""
# Normalizing the path ensures we don't end up at the target's parent instead of the link's parent
with subprocess.Popen(
("dir", "/A:L", os.path.normpath(os.path.join(junction_path, os.pardir))),
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
) as results:
out, _ = results.communicate()
out = out.decode(locale.getdefaultlocale()[1])
if results.returncode == 0:
lines = out.splitlines()
keys = ["<JUNCTION>", "<SYMLINKD>"]
for line in lines:
for key in keys:
start = line.find(key)
if start == -1:
continue
end = start + len(key)
terms = line[end:].split("[")
if len(terms) < 2:
continue
junction_name = os.path.normcase(terms[0].strip())
junction_target = terms[1].strip("]")
junction_name_to_find = os.path.normcase(os.path.basename(junction_path))
if junction_name == junction_name_to_find:
return junction_target
raise OSError(f"Failed to get link target for '{junction_path}'")
# ----------------------------------------------------------------------
def _find_linked_location(link_path: str) -> str:
"""Looks for the location to which the link_path points
Args:
link_path: Location of the link to check
Returns:
Location the link points to
Raises:
OSError: If the link doesn't exist, is the wrong type, or could not be read
"""
try:
# First the easy way...
return os.readlink(link_path)
except Exception as error:
# Then the hard way on Windows...
if os.name == "nt":
try:
return _find_junction_location(link_path)
except Exception as secondary_error:
raise OSError() from secondary_error
raise OSError() from error
# ----------------------------------------------------------------------
def _try_os_symlink(existing_path: str, link_to_create: str):
"""Implementation of symbolic link that uses the Python os.symlink method
Args:
existing_path: Current location to which the link will point
link_to_create: Location of the new link
Raises:
OSError: If the link could not be created
"""
try:
os.symlink(existing_path, link_to_create, target_is_directory=True)
except FileExistsError as error:
# Find the linked location
target = _find_linked_location(link_to_create)
# If the link is to a different location than the one requested that's bad
if os.path.normcase(target) != os.path.normcase(existing_path):
raise OSError("Link already exists, pointing to a different location") from error
# ----------------------------------------------------------------------
def _try_junction_link(existing_path: str, link_to_create: str):
"""Implementation of symbolic link that uses the native Windows linking capabilities.
This is only necessary because Windows requires admin privileges to create symlinks and we may not have them
Args:
existing_path: Current location to which the link will point
link_to_create: Location of the new link
Raises:
OSError: If the link could not be created
"""
# Even though they do essentially the same thing, "mklink /j" creates a junction, which does not require admin privileges
with subprocess.Popen(
("mklink", "/j", link_to_create, existing_path),
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
) as results:
_, err = results.communicate()
err = err.decode(locale.getdefaultlocale()[1])
if results.returncode:
if "file already exists" in err:
target = _find_linked_location(link_to_create)
# If the link exists but is to the same place as was requested that's good
if os.path.normcase(target) == os.path.normcase(existing_path):
return
raise OSError(f"{err.strip()} ({link_to_create} ==> {existing_path})")
# ----------------------------------------------------------------------
def create_symbolic_link(existing_path: str, link_to_create: str):
"""Create a symbolic link, if possible
Args:
existing_path: Current location to which the link will point
link_to_create: Location of the new link
Raises:
OSError: If the link could not be created
"""
try:
_try_os_symlink(existing_path, link_to_create)
except OSError as error:
# On Windows there can be privilege errors that prevent the link from being made, but there is another way...
if os.name == "nt" and "privilege not held" in str(error):
_try_junction_link(existing_path, link_to_create)
else:
raise error
except Exception as error:
raise OSError(str(error)) from error
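# Typical use (the paths here are hypothetical); a pre-existing link to the same
# target is tolerated, while one pointing elsewhere raises OSError:
#     create_symbolic_link("/build/ogn/generated", "/ext/ogn/generated")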
# ======================================================================
class NameManager:
"""Class that manages naming of generated code where the name is not important to the user
Name uniqueness is only important within a single file generation, so different name managers should be used for
different languages (e.g. one for C++, a different one for Python).
Internal Properties:
__current: Current unique index for the next name
__shortened_names: Map of original name to shortened name.
"""
# Control the naming algorithm through environment variables.
SHORTEN_NAMES = os.getenv("DEBUG") or os.getenv("OGN_DEBUG")
def __init__(self):
"""Initialize with an empty name map"""
self.__shortened_names = {}
self.__current = 0
def name(self, original_name: str) -> str:
"""Returns a shortened unique name corresponding to original_name if shortening is enabled, otherwise the name.
This is similar to tokenization, with the goal of minimizing the amount of code the compiler has to
read when compiling/interpreting the generated code that's invisible to the user. For instance there might
be a unique local variable called "attribute_inputs_myInput" that can be shortened to "__2"
"""
if self.SHORTEN_NAMES:
try:
return self.__shortened_names[original_name]
except KeyError:
self.__shortened_names[original_name] = f"__{self.__current}"
self.__current += 1
return self.__shortened_names[original_name]
return original_name
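# A short usage sketch (shortening is only active when DEBUG or OGN_DEBUG is set):
#     names = NameManager()
#     names.name("attribute_inputs_myInput")  # -> "__0" when shortening, else unchanged
#     names.name("attribute_inputs_other")    # -> "__1"
#     names.name("attribute_inputs_myInput")  # -> "__0" again (the mapping is stable)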
| 44,876 | Python | 41.29689 | 120 | 0.593458 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/ThreadsafeOpen.py | r"""
Support for safe file writing from multiple threads.
The build system is threaded, which introduces the possibility of multiple threads trying to access the
same generated OGN files at the same time (e.g. the Python test initialization file ogn/tests/__init__.py)
For that reason it is necessary to be able to atomically write to a file.
You use this in the same way you would use open().
.. code-block:: python
with ThreadsafeOpen("myFile.txt", "w") as my_fd:
my_fd.write("Hello World\n)
"""
import os
from io import TextIOWrapper
from warnings import warn
# ======================================================================
# Implement the right definitions of shared functions based on the OS
try:
# Posix based file locking (Linux, Ubuntu, MacOS, etc.)
import fcntl
def lock_file(file_to_lock: TextIOWrapper):
"""Lock a file for exclusive access"""
fcntl.lockf(file_to_lock, fcntl.LOCK_EX)
def unlock_file(file_to_unlock: TextIOWrapper):
"""Unlock exclusive access to a file"""
fcntl.lockf(file_to_unlock, fcntl.LOCK_UN)
except ModuleNotFoundError:
# Windows file locking (triggered by failed import of fcntl)
# Only the first byte is locked, but that is enough for our purposes.
import msvcrt
def lock_file(file_to_lock: TextIOWrapper):
"""Lock a file for exclusive access"""
file_to_lock.seek(0)
msvcrt.locking(file_to_lock.fileno(), msvcrt.LK_LOCK, 1)
def unlock_file(file_to_unlock: TextIOWrapper):
"""Unlock exclusive access to a file"""
file_to_unlock.seek(0)
msvcrt.locking(file_to_unlock.fileno(), msvcrt.LK_UNLCK, 1)
# ======================================================================
class ThreadsafeOpen:
"""
Class for ensuring that all file operations are atomic, treat
initialization like a standard call to 'open' that happens to be atomic.
This file opener *must* be used in a "with" block.
"""
def __init__(self, path, *args, **kwargs):
"""
Open the file with the given arguments. Then acquire a lock on that file object
WARNING: Advisory locking
"""
self.file = open(path, *args, **kwargs) # noqa: SIM115,PLR1732,PLW1514
self.writing_to_file = "r" not in args
try:
lock_file(self.file)
except IOError as error:
warn(f"Could not lock {path}, may be out of sync - {error}")
def __enter__(self, *args, **kwargs):
"""Yield the locked file descriptor on entry"""
return self.file
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
"""Release the locked file descriptor and close the file on exit"""
# Flush to make sure all buffered contents are written to file before unlocking.
if self.writing_to_file:
try:
self.file.flush()
os.fsync(self.file.fileno())
except OSError as error:
warn(f"Error in sync of {self.file.name} - {error}")
try:
unlock_file(self.file)
self.file.close()
except PermissionError as error:
warn(f"Could not unlock {self.file.name} - {error}")
# By default any exceptions are raised to the user.
return exc_type is None
| 3,338 | Python | 34.903225 | 106 | 0.61444 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/keys.py | """Common location for all of the keyword definitions for the .ogn format"""
# ==============================================================================================================
class AttributeKeys:
"""Container for the text for all of the .ogn keywords used at the attribute definition level"""
ALLOWED_TOKENS = "allowedTokens"
DEFAULT = "default"
DEPRECATED = "deprecated"
DESCRIPTION = "description"
MAXIMUM = "maximum"
MEMORY_TYPE = "memoryType"
METADATA = "metadata"
MINIMUM = "minimum"
OPTIONAL = "optional"
TYPE = "type"
UI_NAME = "uiName"
UNVALIDATED = "unvalidated"
# Attribute keywords required to exist for all attributes
MANDATORY = [DESCRIPTION, TYPE]
# Attribute keys that are always directly processed, not relying on AttributeManager derived classes to do it
PROCESSED = MANDATORY + [DEFAULT, DEPRECATED, MEMORY_TYPE, METADATA, OPTIONAL, UI_NAME, UNVALIDATED]
# ==============================================================================================================
class CategoryTypeValues:
"""Container for the set of values allowed for defining node type categories"""
ANIMATION = "animation"
DEBUG = "debug"
FUNCTION = "function"
GENERIC = "generic"
GEOMETRY = "geometry"
INPUT = "input"
IO = "io"
MATERIAL = "material"
MATH = "math"
RENDERING = "rendering"
SCENE_GRAPH = "scene_graph"
TEXTURE = "texture"
TIME = "time"
UI = "ui"
# ==============================================================================================================
class CudaPointerValues:
"""Container for the set of values .ogn accepts for the cudaPointers node type"""
CPU = "cpu"
CUDA = "cuda"
NA = "na"
# These values are members of the enum class carb::flatcache::PtrToPtrKind, assuming a "using" declaration is in scope
CPP = {
CPU: "PtrToPtrKind::eCpuPtrToGpuPtr",
CUDA: "PtrToPtrKind::eGpuPtrToGpuPtr",
NA: "PtrToPtrKind::eNotApplicable",
}
PYTHON = {
CPU: "og.PtrToPtrKind.CPU",
CUDA: "og.PtrToPtrKind.CUDA",
NA: "og.PtrToPtrKind.NA",
}
# ==============================================================================================================
class ExclusionTypeValues:
"""Container for the .ogn keywords allowed for types of generated code that can be excluded"""
CPP = "c++"
DOCS = "docs"
PYTHON = "python"
TEMPLATE = "template"
TESTS = "tests"
USD = "usd"
# ==============================================================================================================
class GraphSetupKeys:
"""Container for keywords encapsulating the graph setup entries in the test dictionary, v2+."""
CONNECT = "connect"
CREATE_NODES = "create_nodes"
CREATE_PRIMS = "create_prims"
CREATE_VARIABLES = "create_variables"
DELETE_NODES = "delete_nodes"
DISCONNECT = "disconnect"
DISCONNECT_ALL = "disconnect_all"
EXPOSE_PRIMS = "expose_prims"
SET_VALUES = "set_values"
ALL = [
CONNECT,
CREATE_NODES,
CREATE_PRIMS,
CREATE_VARIABLES,
DELETE_NODES,
DISCONNECT,
DISCONNECT_ALL,
EXPOSE_PRIMS,
SET_VALUES,
]
# ==============================================================================================================
class PrimExposureValues:
"""Options for importing a prim into OmniGraph"""
READ_PRIM = "read"
"""Read the prim and create dynamic attributes to access each prim attribute"""
READ_PRIM_BUNDLE = "readBundle"
"""Read the prim and create a single bundle with every prim attribute in it"""
WRITE_PRIM = "write"
"""Create inputs for every attribute in the prim,
writing them to the prim if they are connected"""
# ======================================================================
class IconKeys:
"""Holder for the set of keywords that could appear in the icon dictionary"""
BACKGROUND_COLOR = "backgroundColor"
BORDER_COLOR = "borderColor"
COLOR = "color"
PATH = "path"
# ======================================================================
class LanguageTypeValues:
"""Holder for the set of values that define a language specification"""
CPP = "C++"
PYTHON = "Python"
ALL = {CPP: ["cpp", "c++", "C++"], PYTHON: ["py", "python", "Python"]}
@staticmethod
def key_from_text(language: str) -> str:
"""Gets the language name in a canonical form, or raises ValueError if it is not a recognized language"""
if language in LanguageTypeValues.ALL[LanguageTypeValues.CPP]:
return LanguageTypeValues.CPP
if language in LanguageTypeValues.ALL[LanguageTypeValues.PYTHON]:
return LanguageTypeValues.PYTHON
raise ValueError(f"Unrecognized language '{language}' - should be one of {list(LanguageTypeValues.ALL.keys())}")
# ======================================================================
class MemoryTypeValues:
"""Holder for the set of keywords identifying memory types"""
CPU = "cpu"
CUDA = "cuda"
ANY = "any"
ALL = [ANY, CPU, CUDA]
# These values are members of the type omni::graph::core::ogn::eMemoryType
CPP = {CUDA: "ogn::kCuda", CPU: "ogn::kCpu", ANY: "ogn::kAny"}
# These values are members of the type omni.graph.core.MemoryType
PYTHON = {CUDA: "og.MemoryType.CUDA", CPU: "og.MemoryType.CPU", ANY: "og.MemoryType.ANY"}
# ======================================================================
class MetadataKeys:
"""Holder for common metadata information
These should match the C++ constant values found in include/omni/graph/core/ogn/Database.h
as well as the members of MetadataKeyOutput below.
"""
ALLOW_MULTI_INPUTS = "allowMultiInputs"
ALLOWED_TOKENS = "allowedTokens"
ALLOWED_TOKENS_RAW = "__allowedTokens"
CATEGORIES = "__categories"
CATEGORY_DESCRIPTIONS = "__categoryDescriptions"
CUDA_POINTERS = "__cudaPointers"
DEFAULT = "__default"
DESCRIPTION = "__description"
EXCLUSIONS = "__exclusions"
EXTENSION = "__extension"
HIDDEN = "hidden"
ICON_BACKGROUND_COLOR = "__iconBackgroundColor"
ICON_BORDER_COLOR = "__iconBorderColor"
ICON_COLOR = "__iconColor"
ICON_PATH = "__icon"
INTERNAL = "internal"
LANGUAGE = "__language"
MEMORY_TYPE = "__memoryType"
OBJECT_ID = "__objectId"
OPTIONAL = "__optional"
OUTPUT_ONLY = "outputOnly"
LITERAL_ONLY = "literalOnly"
SINGLETON = "__singleton"
TAGS = "tags"
TOKENS = "__tokens"
UI_NAME = "uiName"
UI_TYPE = "uiType"
@classmethod
def key_names(cls):
key_names = []
for key, value in cls.__dict__.items():
if key == key.upper():
key_names.append(value)
return key_names
# ======================================================================
class MetadataKeyOutput:
"""Names of the C++ equivalent constants from MetadataKeys.
These should match the C++ constant names found in include/omni/graph/core/ogn/Database.h
as well as the members of MetadataKeys above
"""
ALLOW_MULTI_INPUTS = "kOgnMetadataAllowMultiInputs"
ALLOWED_TOKENS = "kOgnMetadataAllowedTokens"
ALLOWED_TOKENS_RAW = "kOgnMetadataAllowedTokensRaw"
CATEGORIES = "kOgnMetadataCategories"
CATEGORY_DESCRIPTIONS = "kOgnMetadataCategoryDescriptions"
CUDA_POINTERS = "kOgnMetadataCudaPointers"
DEFAULT = "kOgnMetadataDefault"
DESCRIPTION = "kOgnMetadataDescription"
EXCLUSIONS = "kOgnMetadataExclusions"
EXTENSION = "kOgnMetadataExtension"
HIDDEN = "kOgnMetadataHidden"
ICON_BACKGROUND_COLOR = "kOgnMetadataIconBackgroundColor"
ICON_BORDER_COLOR = "kOgnMetadataIconBorderColor"
ICON_COLOR = "kOgnMetadataIconColor"
ICON_PATH = "kOgnMetadataIconPath"
INTERNAL = "kOgnMetadataInternal"
LANGUAGE = "kOgnMetadataLanguage"
MEMORY_TYPE = "kOgnMetadataMemoryType"
OBJECT_ID = "kOgnMetadataObjectId"
OPTIONAL = "kOgnMetadataOptional"
OUTPUT_ONLY = "kOgnMetadataOutputOnly"
LITERAL_ONLY = "kOgnMetadataLiteralOnly"
SINGLETON = "kOgnSingletonName"
TAGS = "kOgnMetadataTags"
TOKENS = "kOgnMetadataTokens"
UI_NAME = "kOgnMetadataUiName"
UI_TYPE = "kOgnMetadataUiType"
@classmethod
def cpp_name_from_key(cls, metadata_key: str) -> str:
"""Returns the C++ constant name that defines the given metdata key string, the key itself if no match"""
# If the key is already one of the constants use it directly
if metadata_key in MetadataKeyOutput.__dict__.values():
return metadata_key
# Find the key corresponding to the value name, if it exists
for key, value in MetadataKeys.__dict__.items():
if value == metadata_key:
return getattr(cls, key)
# Use the string directly, but return None so that the caller knows to quote it
return None
@classmethod
def python_name_from_key(cls, metadata_key: str) -> str:
"""Returns the Python constant name that defines the given metadata key string, the key itself if no match"""
# If it's already a member variable use it directly
if metadata_key.startswith("ogn.MetadataKeys"):
return metadata_key
# Find the key corresponding to the value name, if it exists
for key, value in MetadataKeys.__dict__.items():
if value == metadata_key:
return f"ogn.MetadataKeys.{key}"
# Use the string directly, but return None so that the caller knows to quote it
return None
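# Illustrative round trips between metadata key strings and the constant names
# emitted into generated code:
#     MetadataKeyOutput.cpp_name_from_key("uiName")    -> "kOgnMetadataUiName"
#     MetadataKeyOutput.python_name_from_key("uiName") -> "ogn.MetadataKeys.UI_NAME"
#     MetadataKeyOutput.cpp_name_from_key("custom")    -> None (caller must quote it)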
# ==============================================================================================================
class NodeTypeKeys:
"""Container for the text for all of the .ogn keywords used at the node definition level"""
CATEGORIES = "categories"
CATEGORY_DEFINITIONS = "categoryDefinitions"
CUDA_POINTERS = "cudaPointers"
DESCRIPTION = "description"
EXCLUDE = "exclude"
ICON = "icon"
INPUTS = "inputs"
LANGUAGE = "language"
MEMORY_TYPE = "memoryType"
METADATA = "metadata"
OUTPUTS = "outputs"
SCHEDULING = "scheduling"
SINGLETON = "singleton"
STATE = "state"
TAGS = "tags"
TESTS = "tests"
TOKENS = "tokens"
TYPE_DEFINITIONS = "typeDefinitions"
UI_NAME = "uiName"
VERSION = "version"
# Node type keywords required to exist for all attributes
MANDATORY = [DESCRIPTION]
# ==============================================================================================================
class TestKeys:
"""Container for the text for all of the .ogn keywords used at the test definition level"""
DESCRIPTION = "description"
GPU_ATTRIBUTES = "gpu"
INPUTS = "inputs"
OUTPUTS = "outputs"
SETUP = "setup"
STATE = "state"
STATE_GET = "state_get"
STATE_SET = "state_set"
# ==============================================================================================================
# _____ ______ _____ _____ ______ _____ _______ ______ _____
# | __ \ | ____|| __ \ | __ \ | ____|/ ____| /\ |__ __|| ____|| __ \
# | | | || |__ | |__) || |__) || |__ | | / \ | | | |__ | | | |
# | | | || __| | ___/ | _ / | __| | | / /\ \ | | | __| | | | |
# | |__| || |____ | | | | \ \ | |____| |____ / ____ \ | | | |____ | |__| |
# |_____/ |______||_| |_| \_\|______|\_____|/_/ \_\|_| |______||_____/
#
class GraphSetupKeys_V1: # noqa: N801
"""Container for deprecated .ogn keywords for the graph setup section of tests - the subset from
OmniGraphHelper.edit_graph for creation. Syntax of the contents are left to the helper when the test runs."""
CONNECTIONS = "connections"
NODES = "nodes"
PRIMS = "prims"
VALUES = "values"
| 11,949 | Python | 35.882716 | 120 | 0.556699 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_documentation.py | """
Support for generating documentation files for OmniGraph Nodes.
The documentation is written in the reStructuredText format, consistent with API documentation.
Note that the script ../make_docs_toc.py relies on parsing the files so if the format changes it must be updated.
Exported Methods:
generate_documentation
Exported Constants:
RE_OGN_DOC_FILENAME
RE_OGN_NAME_INFO
RE_OGN_DESCRIPTION_TITLE
RE_OGN_INPUTS_TITLE
RE_OGN_BODY_MARKER
"""
import re
from contextlib import suppress
from typing import List, Optional
from .attributes.AttributeManager import AttributeManager
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, MetadataKeys, UnimplementedError, logger, rst_table, rst_title
__all__ = [
"generate_documentation",
"RE_OGN_DOC_FILENAME",
"RE_OGN_NAME_INFO",
"RE_OGN_DESCRIPTION_TITLE",
"RE_OGN_INPUTS_TITLE",
"RE_OGN_BODY_MARKER",
]
# Special reStructuredText markers so that the parser can quickly find the start of any section
RST_ID_DOC = "GENERATED - Documentation"
# Pattern to recognize the name of OGN documentation files
RE_OGN_DOC_FILENAME = re.compile("Ogn.*.rst$")
# Pattern to recognize the name of the node in the file (must coordinate with the output of generate_documentation.py)
RE_OGN_NAME_INFO = re.compile("OmniGraph Node ([^ ]+)$")
# Patterns for title lines in the file
RE_OGN_DESCRIPTION_TITLE = re.compile("Description$")
RE_OGN_INPUTS_TITLE = re.compile("Inputs$")
# Pattern marking the start of the node body
RE_OGN_BODY_MARKER = re.compile(f"^.. _({RST_ID_DOC} .*):")
# Pattern to recognize .ogn files that are part of the internal tutorials
RE_TUTORIAL_FILE = re.compile(".*(omni.graph.core/tutorials.*)")
# ======================================================================
class NodeDocumentationGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate a C++ interface for a node"""
def __init__(self, configuration: GeneratorConfiguration):
"""Set up the generator and output the documentation for the node
Just passes the initialization on to the parent class. See the argument and exception descriptions there.
"""
logger.info("Creating NodeDocumentationGenerator")
super().__init__(configuration)
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the documentation file"""
return self.base_name + ".rst"
# ----------------------------------------------------------------------
def generate_attributes_documentation(self, attributes: List[AttributeManager]):
"""Write out documentation code corresponding to the node
Args:
attributes: List of attributes whose documentation is to be generated
Raises:
NodeGenerationError: When there is a failure in the generation of the documentation file
"""
logger.info("Generating documentation for %s attributes", len(attributes))
# RST tables are very particular about sizing so first find out how big the columns need to be
attribute_table = [["Name", "Type", "Default", "Required?", "Descripton"]]
for attribute in attributes:
name = attribute.name
try:
type_name = attribute.ogn_type()
except AttributeError:
type_name = "[Unsupported]"
try:
default_value = attribute.default
except UnimplementedError:
default_value = "[Unsupported]"
required = "**Y**" if attribute.is_required else ""
description = attribute.description
if isinstance(description, list):
description = " ".join(description)
description = description.replace("\n", " ")
attribute_table.append([name, type_name, default_value, required, description])
# If there is any metadata add it in name/value pairs below the attribute definition
for key, value in attribute.metadata.items():
if key != MetadataKeys.DESCRIPTION:
attribute_table.append(["", key, value, "", ""])
self.out.write(rst_table(attribute_table))
# ----------------------------------------------------------------------
def generate_code(self, title: str, code_file_path: str, code_type: str, code_id: str):
"""Generate a code block with the given title containing the contents of the given file
If the file does not exist then a message to that effect is emitted and no code block is generated
"""
logger.info("Generating code titled '%s' of type '%s'", title, code_type)
self.out.write(rst_title(title, 0))
self.out.write()
try:
self.out.write(f".. _{code_id}:\n")
self.out.write(f".. code:: {code_type}")
self.out.write()
self.out.indent()
with open(code_file_path, "r", encoding="utf-8") as code_fd:
for code_line in code_fd:
self.out.write(code_line.rstrip())
self.out.exdent()
self.out.write()
except FileNotFoundError:
match = RE_TUTORIAL_FILE.match(code_file_path)
relative_path = match.group(1) if match else code_file_path
self.out.write(f"File not found: {relative_path}")
# ----------------------------------------------------------------------
def pre_interface_generation(self):
"""Generate the documentation setup, which is just the link to the top of the generated documentation"""
self.out.write(f".. _{RST_ID_DOC} _ogn{self.node_interface.name}:\n")
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Generate the documentation for the node"""
logger.info("Generating documentation for node %s", self.node_interface.name)
node_name = self.node_interface.name
self.out.write(rst_title(f"OmniGraph Node {node_name}", 0))
# Gather the node metadata for reporting
node_metadata_table = [["Name", "Value"]]
node_metadata_table.append(["Version", self.node_interface.version])
node_metadata_table.append(["Extension", self.extension])
if self.node_interface.icon_path is not None:
node_metadata_table.append(["Icon", self.node_interface.icon_path])
node_metadata_table.append(["Has State?", self.node_interface.has_state])
node_metadata_table.append(["Implementation Language", self.node_interface.language])
node_metadata_table.append(["Default Memory Type", self.node_interface.memory_type])
excluded = self.node_interface.excluded_generators
exclusions = ", ".join(excluded) if excluded else "None"
node_metadata_table.append(["Generated Code Exclusions", exclusions])
for key, value in self.node_interface.metadata.items():
# Some metadata appears in other locations already so skip it here
if key not in [MetadataKeys.EXTENSION, MetadataKeys.DESCRIPTION]:
node_metadata_table.append([key, value])
node_metadata_table.append(["Generated Class Name", f"{self.base_name}Database"])
node_metadata_table.append(["Python Module", f"{self.module}"])
self.out.write(rst_title(f"{node_name} Properties", 1))
self.out.write(rst_table(node_metadata_table))
# The "node_name" here, and in attribute titles, is kind of redundant. It is mainly here to prevent a whole
# bunch of duplicate tag errors in the documentation generator, which does not seem to have a way to turn
# off automatic link generation for a given heading.
self.out.write(rst_title(f"{node_name} Description", 1))
self.out.write(self.node_interface.description)
# Files in the tutorials/ directory have more detailed documentation available elsewhere so link to it
with suppress(TypeError):
if RE_TUTORIAL_FILE.match(self.node_file_path):
self.out.write()
self.out.write(f"See the accompanying explanation and annotated code at :ref:`ogn{node_name}`")
attributes = self.node_interface.all_input_attributes()
if attributes:
self.out.write(rst_title(f"{node_name} Inputs", 1))
self.generate_attributes_documentation(attributes)
attributes = self.node_interface.all_output_attributes()
if attributes:
self.out.write(rst_title(f"{node_name} Outputs", 1))
self.generate_attributes_documentation(attributes)
attributes = self.node_interface.all_state_attributes()
if attributes:
self.out.write(rst_title(f"{node_name} State", 1))
self.generate_attributes_documentation(attributes)
# ======================================================================
def generate_documentation(configuration: GeneratorConfiguration) -> Optional[str]:
"""Create support files for the documentation of a node
Args:
configuration: Information defining how and where the documentation will be generated
Returns:
String containing the generated documentation or None if its generation was not enabled
Raises:
NodeGenerationError: When there is a failure in the generation of the documentation file
"""
if not configuration.node_interface.can_generate("docs"):
return None
logger.info("Generating documentation")
generator = NodeDocumentationGenerator(configuration)
generator.generate_interface()
return str(generator.out)
| 9,749 | Python | 45.650717 | 118 | 0.630424 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/nodes.py | # noqa: PLC0302
"""
Support for parsing the node interface description in a .ogn file.
Provides classes that allow generic calls to check node information retrieved from the JSON node description
data. The main interface class NodeInterface is used to decipher, validate, and provide access to all
node data in the dictionary passed into it, including attributes, metadata, tokens, and test definitions.
"""
import io
import json
import os
import re
from contextlib import suppress
from dataclasses import dataclass
from pathlib import Path
from typing import IO, Any, Dict, List, Optional, Tuple, Union
from .attributes.AttributeManager import AttributeManager
from .attributes.management import get_attribute_manager
from .attributes.naming import (
INPUT_GROUP,
INPUT_NS,
OUTPUT_GROUP,
OUTPUT_NS,
STATE_GROUP,
STATE_NS,
attribute_name_in_namespace,
check_attribute_name,
is_input_name,
is_output_name,
is_state_name,
split_attribute_name,
)
from .category_definitions import merge_category_definitions
from .keys import (
CudaPointerValues,
ExclusionTypeValues,
GraphSetupKeys,
GraphSetupKeys_V1,
LanguageTypeValues,
MemoryTypeValues,
NodeTypeKeys,
TestKeys,
)
from .parse_scheduling import SchedulingHints
from .type_definitions import apply_type_definitions
from .utils import (
GeneratorConfiguration,
IndentedOutput,
MetadataKeys,
ParseError,
UnimplementedError,
check_icon_information,
check_memory_type,
check_token_name,
get_metadata_dictionary,
is_comment,
logger,
)
# ======================================================================
# Deprecated - Use the definitions in keys.NodeTypeKeys interface instead
KEY_NODE_DESCRIPTION = NodeTypeKeys.DESCRIPTION
KEY_NODE_EXCLUDE = NodeTypeKeys.EXCLUDE
KEY_NODE_ICON = NodeTypeKeys.ICON
KEY_NODE_INPUTS = NodeTypeKeys.INPUTS
KEY_NODE_LANGUAGE = NodeTypeKeys.LANGUAGE
KEY_NODE_MEMORY_TYPE = NodeTypeKeys.MEMORY_TYPE
KEY_NODE_METADATA = NodeTypeKeys.METADATA
KEY_NODE_OUTPUTS = NodeTypeKeys.OUTPUTS
KEY_NODE_SCHEDULING = NodeTypeKeys.SCHEDULING
KEY_NODE_SINGLETON_METADATA = NodeTypeKeys.SINGLETON
KEY_NODE_STATE = NodeTypeKeys.STATE
KEY_NODE_TAGS_METADATA = NodeTypeKeys.TAGS
KEY_NODE_TESTS = NodeTypeKeys.TESTS
KEY_NODE_TOKENS = NodeTypeKeys.TOKENS
KEY_NODE_UI_NAME_METADATA = NodeTypeKeys.UI_NAME
KEY_NODE_VERSION = NodeTypeKeys.VERSION
# Deprecated - Use the definitions in keys.TestKeys interface instead
KEY_TEST_DESCRIPTION = TestKeys.DESCRIPTION
KEY_TEST_GPU_ATTRIBUTES = TestKeys.GPU_ATTRIBUTES
KEY_TEST_INPUTS = TestKeys.INPUTS
KEY_TEST_OUTPUTS = TestKeys.OUTPUTS
KEY_TEST_SETUP = TestKeys.SETUP
KEY_TEST_STATE = TestKeys.STATE
KEY_TEST_STATE_GET = TestKeys.STATE_GET
KEY_TEST_STATE_SET = TestKeys.STATE_SET
# Deprecated - use the keys.LanguageTypeValues interface instead
LANGUAGE_CPP = LanguageTypeValues.CPP
LANGUAGE_PYTHON = LanguageTypeValues.PYTHON
ALL_LANGUAGES = LanguageTypeValues.ALL
EXCLUSION_TYPES = [value for key, value in vars(ExclusionTypeValues).items() if not key.startswith("__")]
V1_GRAPH_SETUP_KEYS = [value for key, value in vars(GraphSetupKeys_V1).items() if not key.startswith("__")]
GRAPH_SETUP_KEYS_ALLOWED = [value for key, value in vars(GraphSetupKeys).items() if not key.startswith("__")]
# Pattern for legal node names
# - starts with a letter or underscore
# - then an arbitrary number of alphanumerics or underscores
# - other special characters cause problems in USD and so are disallowed
RE_NODE_NAME = re.compile(r"^[A-Za-z_][A-Za-z0-9_\.]*$")
RE_NODE_NAME_NORMAL = re.compile(r"^[A-Z][A-Za-z0-9_]*$")
NODE_NAME_REQUIREMENT = (
"Node name '{}' should be CamelCase with letters, numbers, underscores,"
" with optional '.' to override the namespace'"
)
# UI names can contain pretty much anything - quotes are problematic though so those are disallowed
RE_NODE_UI_NAME = re.compile("^[^'\"]*$")
NODE_UI_NAME_REQUIREMENT = "User-friendly node name cannot contain quotes"
# Helper for namespace related messages
USE_NAMESPACE = f'must begin with "{INPUT_NS}", "{OUTPUT_NS}", or "{STATE_NS}"'
# ======================================================================
def check_node_language(node_language: str):
"""Raises a ParseError if the given language name is not legal, else returns the corresponding language key"""
try:
new_language = LanguageTypeValues.key_from_text(node_language)
except ValueError as error:
raise ParseError(str(error)) from error
return new_language
# ======================================================================
def check_node_name(node_name: str):
"""Raises a ParseError if the given node name has an illegal pattern, else returns the node name"""
name_info = NODE_NAME_REQUIREMENT.format(node_name)
if not RE_NODE_NAME.match(node_name):
raise ParseError(name_info)
# if not RE_NODE_NAME_NORMAL.match(node_name):
# print(f"INFO: {name_info.format(node_name)}", flush=True)
return node_name
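# e.g. check_node_name("OgnMyNode") returns the name unchanged, while a
# hypothetical name such as "2BadName" raises ParseError.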
# ======================================================================
def check_node_ui_name(node_ui_name: str):
"""Raises a ParseError if the given user-friendly node name has an illegal pattern, else returns the node name"""
if not RE_NODE_UI_NAME.match(node_ui_name):
raise ParseError(NODE_UI_NAME_REQUIREMENT)
return node_ui_name
# ======================================================================
def check_node_version(node_version):
"""Raises a ParseError if the given node version is not an integer"""
if not isinstance(node_version, int):
raise ParseError(f'Node version "{node_version}" is not an integer')
# ======================================================================
class NodeGenerationError(Exception):
"""Exception to raise when there is an error in the generation of the node interface, tests, or documentation"""
# ==============================================================================================================
@dataclass
class AllAttributes:
"""Container class holding the inputs, outputs, and state attributes in a common structure"""
inputs: List[AttributeManager]
outputs: List[AttributeManager]
state: List[AttributeManager]
# ======================================================================
class TestData:
"""Class that holds the information required to run a single test.
Attributes:
graph_setup: Dictionary in the format of og.Controller.edit to create an initial graph for the test
None means use the previous setup, without changing anything.
input_values: Dictionary of INPUT_ATTR:INPUT_VALUE to set as part of the test
state_initial_values: Dictionary of STATE_ATTR:STATE_VALUE to set as part of the test
state_final_values: Dictionary of STATE_ATTR:STATE_VALUE to check as part of the test
expected_outputs: Dictionary of OUTPUT_ATTR:EXPECTED_VALUE to check as part of the test
gpu_outputs: List of output attributes expected to be on the GPU at runtime
uses_v1_setup: True if the setup data is from the V1 setup, using OmniGraphHelper syntax
"""
def __init__(self):
"""Initialize an empty test configuration, to be populated later"""
self.input_values = {}
self.state_initial_values = {}
self.state_final_values = {}
self.expected_outputs = {}
self.graph_setup = None
self.uses_v1_setup = False
self.gpu_outputs = []
def add_input(self, input_name: str, input_value):
"""Add a new input value for the test configuration"""
self.input_values[input_name] = input_value
def add_output(self, output_name: str, output_value):
"""Add a new expected output value for the test configuration"""
self.expected_outputs[output_name] = output_value
def add_set_state(self, state_name: str, state_value):
"""Add a new state initialization value for the test configuration"""
self.state_initial_values[state_name] = state_value
def add_get_state(self, state_name: str, state_value):
"""Add a new state expected value for the test configuration"""
self.state_final_values[state_name] = state_value
def set_gpu_outputs(self, gpu_outputs: List[str]):
"""Set the list of output attributes that should be read from the GPU, where the decision is made at runtime"""
self.gpu_outputs = gpu_outputs
def set_graph_setup(self, setup: Dict, uses_v1_setup: bool = False):
"""Set the list of output attributes that should be read from the GPU, where the decision is made at runtime"""
self.graph_setup = setup
self.uses_v1_setup = uses_v1_setup
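# A small construction sketch (the attribute names here are hypothetical):
#     test = TestData()
#     test.add_input("inputs:value", 3)
#     test.add_output("outputs:result", 6)
#     test.set_gpu_outputs(["outputs:result"])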
# ======================================================================
class NodeInterface:
"""Class constructed from a node interface description to provide an easier method of extracting information
Attributes:
name: Name of the node (mandatory)
        __categories_allowed: Dictionary of name:description for the categories the node can legally accept
            (loaded from the main parser and the node)
        cuda_pointer_type: Memory space in which pointers to GPU array data are stored (see CudaPointerValues)
description: Description of what the node does (mandatory)
excluded_generators: List of generators the node does not want to run
has_cuda_attributes: True if there is at least one input or output attribute that will be accessed from CUDA
has_inputs: True if __inputs has size > 0 (cached for performance)
has_outputs: True if __outputs has size > 0 (cached for performance)
has_state: True if __state has size > 0 (cached for performance)
icon_path: Location of the node type's icon, relative to the extension's directory (None means no icon)
__inputs: Dictionary of input attributes as (attribute name, AttributeManager)
language: Language in which the node will be implemented
memory_type: Default location for attribute memory
__outputs: Dictionary of output attributes as (attribute name, AttributeManager)
__state: Dictionary of state attributes as (attribute name, AttributeManager)
tests: List of TestData containing information describing the set of tests in the file
version: Version of the described node, as an integer
config_directory: Path of directory in which to find any configuration files
"""
def __init__(
self,
node_name: str,
node_data: dict,
config_directory: str,
categories_allowed: Dict[str, str] = None,
node_directory: Optional[str] = None,
):
"""
Rearrange the node interface description to optimize access for code and documentation generation.
Args:
node_name: Name of the node being accessed
node_data: Dictionary containing the node interface data, as extracted from the JSON
config_directory: Path to the directory containing the system configuration files
categories_allowed: Dictionary of name:description of all categories found in the configuration files
node_directory: Directory in which the node definition lives, None if it does not live in the file system
Raises:
ParseError: If there are any errors parsing the node description - string contains the problem
"""
self.name = node_name
self.__categories_allowed = categories_allowed if categories_allowed is not None else {}
self.__node_directory = node_directory
self.categories = []
self.description = None
self.version = 1
self.has_cuda_attributes = False
self.cuda_pointer_type = None
self.has_inputs = False
self.has_outputs = False
self.has_state = False
self.icon_path = None
self.memory_type = MemoryTypeValues.CPU
self.metadata = {}
self.__inputs = {}
self.__outputs = {}
self.__state = {}
self.tests = []
self.tokens = {}
self.excluded_generators = []
self.language = LanguageTypeValues.CPP
self.config_directory = config_directory
self.scheduling_hints = None
logger.info("Extracting node interface for %s", node_name)
if not isinstance(node_data, dict):
raise ParseError(f"Value of node name key {node_name} must be a dictionary")
# Parse the mandatory description
try:
self.description = node_data[NodeTypeKeys.DESCRIPTION]
logger.info("Extracted description %s", self.description)
except KeyError:
raise ParseError(f'"description" value is mandatory for node "{node_name}"') from None
# Parse the node version number
with suppress(KeyError):
check_node_version(node_data[NodeTypeKeys.VERSION])
self.version = node_data[NodeTypeKeys.VERSION]
logger.info("Extracted node version -> %s", self.version)
# Parse the node metadata
with suppress(KeyError):
self.metadata = get_metadata_dictionary(node_data[NodeTypeKeys.METADATA])
logger.info("Extracted node metadata")
# Parse the node memory type
with suppress(KeyError):
self.memory_type = check_memory_type(node_data[NodeTypeKeys.MEMORY_TYPE])
self.metadata[MetadataKeys.MEMORY_TYPE] = self.memory_type
logger.info("Extracted node memory type -> %s", self.memory_type)
# Parse the node override icon path, if any
with suppress(KeyError):
(self.icon_path, color, background_color, border_color) = check_icon_information(
node_data[NodeTypeKeys.ICON]
)
if color is not None:
self.metadata[MetadataKeys.ICON_COLOR] = color
if background_color is not None:
self.metadata[MetadataKeys.ICON_BACKGROUND_COLOR] = background_color
if border_color is not None:
self.metadata[MetadataKeys.ICON_BORDER_COLOR] = border_color
logger.info("Extracted override icon path -> %s", self.icon_path)
# See if the node uses the shortcut for the singleton metadata
with suppress(KeyError):
singleton = node_data[NodeTypeKeys.SINGLETON]
logger.info("Extracted %s flag", NodeTypeKeys.SINGLETON)
if not isinstance(singleton, bool):
raise ParseError("Singleton value must be a boolean")
# Metadata can only be a string so change the boolean to a 0/1 value
if singleton:
self.metadata[MetadataKeys.SINGLETON] = "1"
# See if the node has a definition for cuda pointer locations
with suppress(KeyError):
self.cuda_pointer_type = node_data[NodeTypeKeys.CUDA_POINTERS]
if not hasattr(CudaPointerValues, self.cuda_pointer_type.upper()):
allowed = [value for value in dir(CudaPointerValues) if not value.startswith("_")]
raise ParseError(f"{NodeTypeKeys.CUDA_POINTERS} is {self.cuda_pointer_type}, must be one of {allowed}")
logger.info("Extracted %s flag", NodeTypeKeys.CUDA_POINTERS)
# See if the node uses the shortcut for the tags metadata
with suppress(KeyError):
tags = node_data[NodeTypeKeys.TAGS]
logger.info("Extracted node tags")
if not isinstance(tags, list) and not isinstance(tags, str):
raise ParseError("Tags must be a comma-separated string or a list of strings")
# Metadata can only be a string so flatten a list with commas
if isinstance(tags, list):
tags = ",".join(tags)
self.metadata[MetadataKeys.TAGS] = tags
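        # For illustration only (hedged - hypothetical values): both of these .ogn forms
        # produce the same stored metadata string "a,b":
        #     "tags": "a,b"
        #     "tags": ["a", "b"]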
# See if the node uses the shortcut for the uiName metadata
with suppress(KeyError):
ui_name = node_data[NodeTypeKeys.UI_NAME]
logger.info("Extracted node uiName")
if not isinstance(ui_name, str):
raise ParseError("UI Name must be a single string")
self.metadata[MetadataKeys.UI_NAME] = ui_name
# See if any token names are to be hardcoded for the node
with suppress(KeyError):
raw_tokens = node_data[NodeTypeKeys.TOKENS]
logger.info("Extracted tokens")
if isinstance(raw_tokens, str):
token_list = raw_tokens.split(",")
self.tokens = {check_token_name(token_name): token_name for token_name in token_list}
elif isinstance(raw_tokens, list):
self.tokens = {check_token_name(token): token for token in raw_tokens}
elif isinstance(raw_tokens, dict):
self.tokens = {check_token_name(token): value for token, value in raw_tokens.items()}
else:
raise ParseError(f"Unknown type of tokens to handle - '{raw_tokens}'")
# Store the raw tokens as metadata so that they can be retrieved to regenerate the file
self.metadata[MetadataKeys.TOKENS] = json.dumps(raw_tokens)
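        # A hedged sketch of the three accepted token forms (values are hypothetical):
        #     "tokens": "red,green"          -> {"red": "red", "green": "green"}
        #     "tokens": ["red", "green"]     -> {"red": "red", "green": "green"}
        #     "tokens": {"red": "color:red"} -> {"red": "color:red"}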
# See if the node is overriding any of the type definitions.
with suppress(KeyError):
type_definitions = node_data[NodeTypeKeys.TYPE_DEFINITIONS]
logger.info("Extracted type definitions")
# If the data is just a string then assume it is a file and try to load it, checking the configuration
# directory if it exists.
if isinstance(type_definitions, str):
type_definition_path = Path(type_definitions)
if type_definition_path.is_file():
apply_type_definitions(type_definition_path)
elif not type_definition_path.is_absolute() and self.config_directory is not None:
config_dir_type_path = Path(self.config_directory, type_definition_path)
if config_dir_type_path.is_file():
apply_type_definitions(config_dir_type_path)
else:
raise ParseError(
f"Type definitions file '{type_definitions}' not found in config directory"
f" '{self.config_directory}'"
)
else:
raise ParseError(f"Type definitions file '{type_definitions}' not found")
# If the data is a dictionary assume it contains the type definitions directly (should be rare)
elif isinstance(type_definitions, dict):
apply_type_definitions({NodeTypeKeys.TYPE_DEFINITIONS: type_definitions})
else:
raise ParseError(f"Type definitions only recognize a string or dictionary type - '{type_definitions}'")
# See if the node is using any extra category definitions.
with suppress(KeyError):
category_definitions = node_data[NodeTypeKeys.CATEGORY_DEFINITIONS]
logger.info("Extracted category definitions")
            def __add_categories(category_spec):
                # If the data is just a string then assume it is a file and try to load it, checking the configuration
                # directory if it exists.
                if isinstance(category_spec, str):
                    category_definition_path = Path(category_spec)
                    # If the absolute path exists, prefer that
                    if category_definition_path.is_file():
                        merge_category_definitions(self.__categories_allowed, category_definition_path)
                        return
                    if not category_definition_path.is_absolute():
                        # Check if the path exists relative to the .ogn file's directory
                        if self.__node_directory is not None:
                            config_dir_type_path = Path(self.__node_directory, category_definition_path)
                            if config_dir_type_path.is_file():
                                merge_category_definitions(self.__categories_allowed, config_dir_type_path)
                                return
                        # Check if the path exists relative to the specified config directory
                        if self.config_directory is not None:
                            config_dir_type_path = Path(self.config_directory, category_definition_path)
                            if config_dir_type_path.is_file():
                                merge_category_definitions(self.__categories_allowed, config_dir_type_path)
                                return
                        node_directory_error = (
                            ""
                            if self.__node_directory is None
                            else f" or node file directory '{self.__node_directory}'"
                        )
                        raise ParseError(
                            f"Category definitions file '{category_spec}' not found in config directory"
                            f" '{self.config_directory}'{node_directory_error}"
                        )
                    raise ParseError(f"Category definitions file '{category_spec}' not found")
                if isinstance(category_spec, dict):
                    merge_category_definitions(self.__categories_allowed, category_spec)
                    return
                raise ParseError(
                    f"Category definitions only recognize a string or dictionary type - '{category_spec}'"
                )
if isinstance(category_definitions, list):
_ = [__add_categories(category_spec) for category_spec in category_definitions]
else:
__add_categories(category_definitions)
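        # A hedged sketch of the accepted category definition forms (the file name is
        # hypothetical); each element of a list is processed by __add_categories in turn:
        #     "myCategories.json"                          # config file, absolute or relative path
        #     {"examples": "Nodes used as examples"}       # inline name:description pairs
        #     ["myCategories.json", {"examples": "..."}]   # list mixing both forms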
# Categories have to be parsed after category definitions
with suppress(KeyError):
categories = node_data[NodeTypeKeys.CATEGORIES]
def __verify_category(category_to_verify: str):
"""Raise an error if the category is not one of the allowed ones"""
if category_to_verify not in self.__categories_allowed:
raise ParseError(
f"Category {category_to_verify} not in the allowed list {self.__categories_allowed}"
)
category_metadata = None
new_categories = {}
if isinstance(categories, str):
category_metadata = categories
for category in categories.split(","):
__verify_category(category)
elif isinstance(categories, list):
category_list = []
for category_item in categories:
if isinstance(category_item, str):
category_list.append(category_item)
elif isinstance(category_item, dict):
category_list += list(category_item.keys())
new_categories.update(category_item)
merge_category_definitions(self.__categories_allowed, category_item)
else:
raise ParseError(
f"Category description must be a string, dictionary, or list of them - saw {categories}"
)
category_metadata = ",".join(category_list)
for category in category_list:
__verify_category(category)
elif isinstance(categories, dict):
new_categories.update(
{
name: description
for name, description in categories.items()
if name not in self.__categories_allowed
}
)
merge_category_definitions(self.__categories_allowed, categories)
category_metadata = ",".join(sorted(categories.keys()))
if category_metadata:
self.metadata[MetadataKeys.CATEGORIES] = category_metadata
if new_categories:
# Use a tab as separator and filter them out of the description
combined_metadata = []
for name, info in new_categories.items():
if name.find(",") >= 0 or name.find("\t") >= 0:
raise ParseError(f"Category name '{name}' cannot contain a comma or tab character")
safe_info = info.replace("\t", " ")
combined_metadata.append(f"{name},{safe_info}")
self.metadata[MetadataKeys.CATEGORY_DESCRIPTIONS] = "\t".join(combined_metadata)
logger.info("Added node type categories -> {category_metadata}")
# Parse the generated type exclusions, if any
with suppress(KeyError):
self.excluded_generators += node_data[NodeTypeKeys.EXCLUDE]
logger.info("Extracted generator inclusions -> %s", self.excluded_generators)
# Parse the input attributes, if any
with suppress(KeyError):
self.__inputs = self.construct_attributes(node_data[NodeTypeKeys.INPUTS], INPUT_NS)
self.has_inputs = bool(self.__inputs)
logger.info("Extracted input attributes")
# Parse the output attributes, if any
with suppress(KeyError):
self.__outputs = self.construct_attributes(node_data[NodeTypeKeys.OUTPUTS], OUTPUT_NS)
self.has_outputs = bool(self.__outputs)
logger.info("Extracted output attributes")
# Parse the state attributes, if any
try:
self.__state = self.construct_attributes(node_data[NodeTypeKeys.STATE], STATE_NS)
# Even if no state attributes were constructed, the existence of the state section flags to the
# node that state information will be used, so that scheduling can take that into account.
self.has_state = True
logger.info("Extracted state attributes")
except KeyError:
self.has_state = False
# Parse the attribute allowedToken metadata to include in hardcoded token names
for attrib in self.all_attributes():
self.tokens.update(attrib.get_allowed_tokens())
# Parse the language specification, if any (C++ is the default)
if NodeTypeKeys.LANGUAGE in node_data:
logger.info("Extracting the language information")
self.language = check_node_language(node_data[NodeTypeKeys.LANGUAGE])
logger.info(" --> Language set to %s", self.language)
# Parse the node scheduling hints
with suppress(KeyError):
self.scheduling_hints = SchedulingHints(node_data[NodeTypeKeys.SCHEDULING])
logger.info("Extracted scheduler hints")
# Read in the test configurations. Make sure this happens after all attributes are constructed
try:
test_list = node_data[NodeTypeKeys.TESTS]
self.construct_tests(test_list)
logger.info("Extracted %s node tests", len(test_list))
except (KeyError, UnimplementedError):
self.tests = [] # Remove any partially constructed tests
# For long strings the description will be a list to be concatenated (due to the
# limited ways JSON can represent long strings). If that's the case convert back to a single string.
if isinstance(self.description, list):
self.description = " ".join(self.description)
self.metadata[MetadataKeys.DESCRIPTION] = self.description
if self.excluded_generators:
self.metadata[MetadataKeys.EXCLUSIONS] = ",".join(self.excluded_generators)
if self.language != LanguageTypeValues.CPP:
self.metadata[MetadataKeys.LANGUAGE] = self.language
# Empty descriptions are anti-social
if not self.description:
warning = "Node description should not be empty"
if os.getenv("OGN_STRICT_DEBUG"):
raise ParseError(warning)
print(f"WARNING: {warning}", flush=True)
# ----------------------------------------------------------------------
def add_test(
self, test_data: TestData, attribute_name: str, attribute_namespace: str, attribute_value, in_set: bool
):
"""Extract Python-compatible information for an attribute based on its JSON name, type, and value.
Args:
test_data: Object containing the current test data - updated based on the information passed in
attribute_name: Raw attribute name, may or may not include the namespace
attribute_namespace: Expected namespace of the attribute
attribute_value: Value to be read or written to the attribute - must be compatible to the attribute type
            in_set: If True and the attribute resolves to the state group then put the value in the set of values
                to be set on state attributes before the test begins, otherwise put it in the set of values to
                check after the test ends
"""
# Rely on called methods to properly raise formatting exceptions, if any
name_in_namespace = attribute_name_in_namespace(attribute_name, attribute_namespace)
(attribute, attribute_group) = self.attribute_by_name(name_in_namespace)
attribute.validate_value_structure(attribute_value)
value_for_test = attribute.value_for_test(attribute_value)
if attribute_group == INPUT_GROUP:
test_data.add_input(attribute, value_for_test)
elif attribute_group == OUTPUT_GROUP:
test_data.add_output(attribute, value_for_test)
elif in_set:
test_data.add_set_state(attribute, value_for_test)
else:
test_data.add_get_state(attribute, value_for_test)
# --------------------------------------------------------------------------------------------------------------
def __add_value_to_test(
self, test_data: TestData, raw_attribute_name: str, attribute_value: Any, attributes: AllAttributes
):
"""Add a single value for getting or setting to the test.
Args:
test_data: Test to be amended
raw_attribute_name: Full specification of attribute to be in the test
attribute_value: Value to bet set or tested on the attribute
attributes: Attribute managers of all types participating in the test
"""
# Strip the suffix from the state namespace and set the flag to indicate if it is get or set
is_setting = False
attribute_name = raw_attribute_name
if raw_attribute_name.startswith(f"{STATE_NS}_get"):
attribute_name = raw_attribute_name.replace("_get", "")
elif raw_attribute_name.startswith(f"{STATE_NS}_set"):
attribute_name = raw_attribute_name.replace("_set", "")
is_setting = True
# This special key is used to tag output attributes whose value will be available on the GPU.
# This is only necessary when the attribute defines its memory location at runtime.
# The name isn't modified since it doesn't correspond to a real attribute, and the values must
# have the fully qualified attribute name (e.g. outputs.X)
if attribute_name == TestKeys.GPU_ATTRIBUTES:
test_data.set_gpu_outputs(attribute_value)
return
# Allow specification of the bare attribute name, so long as there are no conflicts with the same name
# in multiple namespaces
if (
not is_input_name(attribute_name)
and not is_output_name(attribute_name)
and not is_state_name(attribute_name)
):
if attribute_name in attributes.inputs and attribute_name in attributes.outputs:
raise ParseError(f'Test attribute "{attribute_name}" is both an input and an output {USE_NAMESPACE}')
if attribute_name in attributes.inputs and attribute_name in attributes.state:
raise ParseError(f'Test attribute "{attribute_name}" is both an input and a state {USE_NAMESPACE}')
if attribute_name in attributes.outputs and attribute_name in attributes.state:
raise ParseError(f'Test attribute "{attribute_name}" is both an output and a state {USE_NAMESPACE}')
if attribute_name in attributes.inputs:
self.add_test(test_data, attribute_name, INPUT_NS, attribute_value, False)
elif attribute_name in attributes.outputs:
self.add_test(test_data, attribute_name, OUTPUT_NS, attribute_value, False)
elif attribute_name in attributes.state:
                # With this shortcut the state value is assumed to be checked, not set
self.add_test(test_data, attribute_name, STATE_NS, attribute_value, is_setting)
else:
raise ParseError(f"Test attribute {attribute_name} not recognized")
else:
(namespace, base_name) = split_attribute_name(attribute_name)
# Assume the namespace is correct if specified
if namespace == INPUT_NS and base_name not in attributes.inputs:
raise ParseError(f'Namespaced attribute "{attribute_name}" not an input')
if namespace == OUTPUT_NS and base_name not in attributes.outputs:
raise ParseError(f'Namespaced attribute "{attribute_name}" not an output')
if namespace.startswith(STATE_NS) and base_name not in attributes.state:
raise ParseError(f'Namespaced attribute "{attribute_name}" not a state')
if namespace not in [INPUT_NS, OUTPUT_NS, STATE_NS]:
raise ParseError(f'Test attribute "{attribute_name}" has illegal namespace "{namespace}"')
self.add_test(test_data, attribute_name, namespace, attribute_value, is_setting)
# ----------------------------------------------------------------------
def create_test_from_raw_data(self, test_info: Dict, attributes: AllAttributes):
"""Return a normalized set of test data as parsed from the two allowed formats.
Test data can appear in either expanded or compressed formats. In expanded format it has inputs, outputs,
and state in separate dictionaries:
{
"description": "Optional, and to be ignored",
"inputs": {
"InputAttr": "ValueToSet"
},
"outputs": {
"OutputAttr": "ExpectedValue"
},
"state_set": {
"StateAttr": "InitialState"
},
"state_get": {
"StateAttr": "ExpectedState"
},
"setup": {
"nodes": ["TestNode", "omni.examples.myNode"]
}
}
or in simplified format where attributes use their full namespace in a single dictionary, with state attributes
splitting into "state" or "state_out" for expected values and "state_in" for initial values
{
"inputs:InputAttr": "ValueToSet"
"outputs:OutputAttr": "ExpectedValue",
"state_in:StateAttr": "InitialState",
"state:StateAttr": "ExpectedState",
}
A mix of both will be accepted, though there is no reason to use that approach.
It is also acceptable if the first format specifies names as "inputs:InputAttr", though self-defeating for
shortening the input data.
In addition, if the attribute names are unique the namespace can be omitted. i.e. "inputs:x1" and "outputs:x2"
can be shortened to "x1" and "x2", but "inputs:a1" and "outputs:a1" must be fully qualified.
The optional "description" field is removed and the format is modified if necessary to the simplified form.
Args:
test_info: Test data in one of the two allowed forms
attributes: List of all legal input, output, and state attribute managers
Note:
For the purposes of this test only the initial state values can be checked. If you wish to check for
state changes you must write a separate test script that evaluates multiple times.
Returns:
test_info normalized to remove ignored fields and put into the simplified format
Raises:
ParseError: If the formatting of the test clause was not correct
"""
test_data = TestData()
if TestKeys.INPUTS in test_info:
for input_name, input_value in test_info[TestKeys.INPUTS].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if input_name[0] == "$":
continue
self.add_test(test_data, input_name, INPUT_NS, input_value, False)
if TestKeys.OUTPUTS in test_info:
for output_name, output_value in test_info[TestKeys.OUTPUTS].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if output_name[0] == "$":
continue
self.add_test(test_data, output_name, OUTPUT_NS, output_value, False)
with suppress(KeyError):
setup = test_info[TestKeys.SETUP]
uses_v1_setup = False
for key in setup:
if key not in GRAPH_SETUP_KEYS_ALLOWED:
if key in V1_GRAPH_SETUP_KEYS:
logger.warning(
"'%s' graph setup is from the obsolete OmniGraphHelper. Update to use og.Controller.", key
)
uses_v1_setup = True
else:
raise ParseError(f"Graph setup key '{key}' not in the allowed set {GRAPH_SETUP_KEYS_ALLOWED}")
test_data.set_graph_setup(setup, uses_v1_setup)
if TestKeys.STATE in test_info:
for state_name, state_value in test_info[TestKeys.STATE].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if state_name[0] == "$":
continue
self.add_test(test_data, state_name, STATE_NS, state_value, False)
if TestKeys.STATE_GET in test_info:
for state_name, state_value in test_info[TestKeys.STATE_GET].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if state_name[0] == "$":
continue
self.add_test(test_data, state_name, STATE_NS, state_value, False)
if TestKeys.STATE_SET in test_info:
for state_name, state_value in test_info[TestKeys.STATE_SET].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if state_name[0] == "$":
continue
self.add_test(test_data, state_name, STATE_NS, state_value, True)
for raw_attribute_name, attribute_value in test_info.items():
# Check to make sure this isn't the attribute grouping rather than an actual name, to parse the simple form
if raw_attribute_name not in [
TestKeys.INPUTS,
TestKeys.OUTPUTS,
TestKeys.SETUP,
TestKeys.STATE,
TestKeys.STATE_GET,
TestKeys.STATE_SET,
TestKeys.DESCRIPTION,
]:
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if raw_attribute_name[0] == "$":
continue
self.__add_value_to_test(test_data, raw_attribute_name, attribute_value, attributes)
return test_data
# ----------------------------------------------------------------------
def construct_tests(self, test_list: List[Dict]):
"""Construct the internal list of test configurations from their description"""
        attributes = AllAttributes(
            [split_attribute_name(attribute)[1] for attribute in self.__inputs],
            [split_attribute_name(attribute)[1] for attribute in self.__outputs],
            [split_attribute_name(attribute)[1] for attribute in self.__state],
        )
# Convert the JSON-based test data to something that can be output as Python
for test_information in test_list:
self.tests.append(self.create_test_from_raw_data(test_information, attributes))
# ----------------------------------------------------------------------
def construct_attributes(self, attribute_interfaces: dict, namespace: str) -> Dict[str, AttributeManager]:
"""Create attribute interface classes for every attribute in the description dictionary
Args:
attribute_interfaces: Dictionary of (attribute name, dictionary) from which to extract interfaces
namespace: Prefix for attribute names in this interface; will be prepended to the name if missing
Returns:
A dictionary of (attribute name, AttributeManager) extracted from the attribute interface list
"""
extracted_interfaces = {}
logger.info("Construct attributes from interface %s", attribute_interfaces)
for raw_attribute_name, attribute_data in attribute_interfaces.items():
if raw_attribute_name[0] == "$": # Special IDs are not actual attributes
logger.info("Ignoring comment tagged %s", raw_attribute_name)
continue
# Allow namespace to be already on the name, and ensure it is present either way
attribute_name = attribute_name_in_namespace(raw_attribute_name, namespace)
(_, _) = check_attribute_name(attribute_name)
if attribute_name in extracted_interfaces:
raise ParseError(f'Attribute "{raw_attribute_name}" appears more than once in the node definition')
attribute_manager = get_attribute_manager(attribute_name, attribute_data)
extracted_interfaces[attribute_name] = attribute_manager
if attribute_manager.memory_type is None:
attribute_manager.memory_type = self.memory_type
if attribute_manager.memory_type != MemoryTypeValues.CPU:
                self.has_cuda_attributes = True
attribute_manager.cuda_pointer_type = self.cuda_pointer_type
# After creation, ensure that the attribute manager has a valid configuration
try:
attribute_manager.validate_configuration()
if attribute_manager.ogn_base_type().startswith("transform"):
logger.warning(
"'%s' is being deprecated by USD. Use 'framed[4]' or 'matrixd[4]' instead",
attribute_manager.ogn_type(),
)
except Exception as error:
raise ParseError(f"Attribute {attribute_name}") from error
return extracted_interfaces
# ----------------------------------------------------------------------
def attribute_by_name(self, attribute_name: str) -> Tuple[AttributeManager, str]:
"""Look up an attribute on the node by name.
Args:
attribute_name: Name of the attribute to find
Returns:
(Manager of the named attribute, type of the attribute)
Raises:
AttributeError: If the attribute does not exist on the node.
"""
if attribute_name in self.__inputs:
return (self.__inputs[attribute_name], INPUT_GROUP)
if attribute_name in self.__outputs:
return (self.__outputs[attribute_name], OUTPUT_GROUP)
if attribute_name in self.__state:
return (self.__state[attribute_name], STATE_GROUP)
# Handle the case of short-form names
match_found = None
attribute_as_input = attribute_name_in_namespace(attribute_name, INPUT_NS)
if attribute_as_input in self.__inputs:
match_found = (self.__inputs[attribute_as_input], INPUT_GROUP)
attribute_as_output = attribute_name_in_namespace(attribute_name, OUTPUT_NS)
if attribute_as_output in self.__outputs:
if match_found is not None:
raise AttributeError(f'"{attribute_name}" ambiguously matched multiple types')
match_found = (self.__outputs[attribute_as_output], OUTPUT_GROUP)
attribute_as_state = attribute_name_in_namespace(attribute_name, STATE_NS)
if attribute_as_state in self.__state:
if match_found is not None:
raise AttributeError(f'"{attribute_name}" ambiguously matched multiple types')
match_found = (self.__state[attribute_as_state], STATE_GROUP)
if match_found is not None:
return match_found
raise AttributeError(f'"{attribute_name}" was not found in the node"')
# ----------------------------------------------------------------------
def all_input_attributes(self) -> List[AttributeManager]:
"""Get the list of all input attributes extracted from the description
Returns:
The list of attribute interfaces for inputs on the node
"""
return NodeInterface.sorted_values(self.__inputs)
# ----------------------------------------------------------------------
def all_output_attributes(self) -> List[AttributeManager]:
"""Get the list of all output attributes extracted from the description
Returns:
The list of attribute interfaces for outputs on the node
"""
return NodeInterface.sorted_values(self.__outputs)
# ----------------------------------------------------------------------
def all_state_attributes(self) -> List[AttributeManager]:
"""Get the list of all state attributes extracted from the description
Returns:
The list of attribute interfaces for state on the node
"""
return NodeInterface.sorted_values(self.__state)
# ----------------------------------------------------------------------
def all_attributes(self) -> List[AttributeManager]:
"""Get the list of all attributes of all types extracted from the description
Returns:
The list of attribute interfaces for all attributes defined on the node
"""
return self.all_input_attributes() + self.all_output_attributes() + self.all_state_attributes()
# ----------------------------------------------------------------------
    def has_attributes(self) -> bool:
        """Returns True if this node type has any attributes.
        This provides a quick check so that code generators can skip attribute sections when none exist.
        For code sections involving only a single type of attribute use the specific check instead,
        e.g. "if node.all_state_attributes():"
        """
        return bool(self.__inputs or self.__outputs or self.__state)
# ----------------------------------------------------------------------
@staticmethod
def sorted_values(attributes: dict) -> List[AttributeManager]:
"""Get the list of dictionary values sorted by the dictionary keys
Args:
attributes: A dictionary with sortable keys
Returns:
The list of dictionary values sorted by the dictionary keys
"""
return [attributes[key] for key in sorted(attributes.keys())]
# ----------------------------------------------------------------------
def all_tests(self) -> List[TestData]:
"""Returns the list of all sets of tests data extracted from the description"""
return self.tests
# ----------------------------------------------------------------------
def check_support(self):
"""Checks to see if this node contains currently unsupported attributes
Raises:
AttributeError: If any attributes on the node are not going to be supported
UnimplementedError: If any attributes on the node are currently not supported but will be
"""
for attribute in self.all_input_attributes():
try:
attribute.check_support()
except AttributeError as error:
raise AttributeError(f"{self.name} input not supported") from error
except UnimplementedError as error:
raise UnimplementedError(f"{self.name} input not yet supported") from error
for attribute in self.all_output_attributes():
try:
attribute.check_support()
except AttributeError as error:
raise AttributeError(f"{self.name} output not supported") from error
except UnimplementedError as error:
raise UnimplementedError(f"{self.name} output not yet supported") from error
for attribute in self.all_state_attributes():
try:
attribute.check_support()
except AttributeError as error:
raise AttributeError(f"{self.name} state not supported") from error
except UnimplementedError as error:
raise UnimplementedError(f"{self.name} state not yet supported") from error
# ----------------------------------------------------------------------
def can_generate(self, generation_type: str) -> bool:
"""Checks to see if a particular type of output should be generated.
Args:
generation_type: Name of output generation type. Exact values respected are in main.py
        Returns:
True if the generation_type of data is allowed by the node
"""
if generation_type in self.excluded_generators:
return False
if generation_type == "c++" and self.language in [LanguageTypeValues.PYTHON]:
return False
return True
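# For illustration only (hedged - a hypothetical .ogn fragment): a node declaring
#     "exclude": ["tests", "usd"]
# makes can_generate("tests") and can_generate("usd") return False, and a node with
# "language": "python" additionally suppresses "c++" generation.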
# ======================================================================
class NodeInterfaceGenerator:
"""Manage the common functions used by all types of node generators
Override the interface_file_name() and generate_node_interface() methods for a derived generator.
You can also override pre_interface_generation() if you have something to emit at the beginning of the
interface that is not replicated for each node.
Attributes:
base_name: Base name of the file containing the node descriptions
extension: Name of the extension requesting the generation
generator_version: The version information for this extension when it was run
        interface_directory: Path to the directory where the file should be written (None means string output only)
module: Root import for the files of the node
node_interface: Node interface whose interface is being generated
        out: File object which is the destination of the generated output
output_path: Location of output destination, None if only going to a string
target_version: The version information for the omni.graph.core extension the generated code is meant for
verbose: True if extra debugging output is to be added
"""
def __init__(self, configuration: GeneratorConfiguration):
"""Set up the generator and output the Python interface code for the node
Args:
configuration: Information defining how and where the documentation will be generated
Raises:
            NodeGenerationError: When write access to the output destination could not be obtained
"""
self.base_name = configuration.base_name
self.generator_version = configuration.generator_version
self.target_version = configuration.target_version
self.extension = configuration.extension
self.node_interface = configuration.node_interface
self.module = configuration.module
self.node_file_path = configuration.node_file_path
self.interface_directory = configuration.destination_directory
self.needs_directory = configuration.needs_directory
self.verbose = configuration.verbose
self.output_path = None
try:
# Choose the Linux-style newlines to keep output consistent and simple
if self.interface_directory and self.interface_file_name():
self.output_path = os.path.join(self.interface_directory, self.interface_file_name())
# The generated file sizes will never be too large to fit into a string, and generating them to a
# string first is far more efficient so set up the output to buffer into a string first and then
# write it to a file when complete, if requested
self.out = IndentedOutput(io.StringIO())
except IOError as error:
raise NodeGenerationError(f"Could not obtain write access to {self.interface_directory}") from error
# ----------------------------------------------------------------------
def __str__(self) -> str:
"""Return the interface generated as a string, if requested. Otherwise return the interface file path"""
return str(self.out)
# ----------------------------------------------------------------------
def safe_name(self) -> str:
"""Returns the name of the node type, filtered to be safe for Python, USD, or C++ use"""
return self.node_interface.name.replace(".", "_")
# ----------------------------------------------------------------------
def interface_file_name(self) -> Optional[str]:
"""Return the path for the generated file - should be overridden by derived classes"""
logger.info("Generator has not overridden the interface_file_name")
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Create interface output from the given node interface.
This does nothing; it should be overridden in a base class
"""
logger.info("Generator has not overidden the node interface")
# ----------------------------------------------------------------------
def pre_interface_generation(self):
"""Create the information information preceding the node-specific stuff"""
logger.info("Generator has not overridden the pre-node interface")
# ----------------------------------------------------------------------
def post_interface_generation(self):
"""Create the information information following the node-specific stuff"""
logger.info("Generator has not overridden the post-node interface")
# ----------------------------------------------------------------------
def generate_interface(self):
"""Create an interface for the node
Raises:
NodeGenerationError: When there is a failure in the generation of the interface
"""
self.pre_interface_generation()
self.generate_node_interface()
self.post_interface_generation()
# Now that the entire interface has been generated it can be written out to disk.
if self.output_path is not None:
self.__check_interface_directory()
if self.needs_to_write():
with open(self.output_path, "w", newline="\n", encoding="utf-8") as generated_file:
generated_file.write(str(self.out))
else:
# We still need to update the mod-time for the sake of timestamp-based build rules
os.utime(self.output_path)
# --------------------------------------------------------------------------------------------------------------
def needs_to_write(self):
"""Check if the file needs to be written
Returns:
True if the file does not exist or its contents differ from the generated text
"""
if os.path.exists(self.output_path):
with open(self.output_path, newline="\n", encoding="utf-8") as fp:
data = fp.read()
if data == str(self.out):
return False
return True
# --------------------------------------------------------------------------------------------------------------
def __check_interface_directory(self):
"""Check to see if the interface directory is required, creating it if it is.
Raises:
NodeGenerationError if the interface directory was required but did not exist and could not be created
"""
# No directory needed, that's good
if not self.needs_directory:
return
# No directory specified but one is needed, that's bad
if not self.interface_directory:
raise NodeGenerationError(f"Required an interface directory for {self.__class__} but did not specify one")
# Directory is needed and specified, and exists, that's good
directory = Path(self.interface_directory)
if directory.is_dir():
return
# Try to create the missing directory
try:
directory.mkdir(mode=0o777, parents=True, exist_ok=True)
logger.info("Created interface destination directory %s", directory)
except Exception as error:
raise NodeGenerationError(f"Failed to create interface directory '{directory}'") from error
# ======================================================================
class NodeInterfaceWrapper:
"""Converts a JSON node description file into a set of interfaces to the node contained in it
Reads and parses a node interface description file in order to present an interface to the data
that is more specific to the type of data that is in the file.
Attributes:
node_interface: The NodeInterface parsed from the JSON data
"""
def __init__(
self,
node_as_json: Union[str, IO, Dict],
extension: str,
config_directory: Optional[str] = None,
categories_allowed: Dict[str, str] = None,
):
"""Initialize the class by parsing the node description file or the already-retrieved description
Args:
node_as_json: File object, path to file, or raw dictionary containing the node interface description data
extension: Name of the extension in which the node was defined
config_directory: Location of directory in which the attribute type configuration files can be found. If
None then use the directory where this script lives.
categories_allowed: Dictionary of name:description values for legal categories
Raises:
ParseError: If there are any errors parsing the node description - string contains the problem
"""
self.node_interface = None
json_description = None
node_directory = None
if categories_allowed is None:
categories_allowed = {}
if config_directory is None:
config_directory = os.path.dirname(os.path.realpath(__file__))
if isinstance(node_as_json, str):
logger.info("Parsing node interface as string")
# logger.info(json.dumps(node_as_json, indent=4))
try:
json_description = json.loads(node_as_json)
except json.decoder.JSONDecodeError as error:
raise ParseError(f"Invalid JSON formatting in string - {error}\n{node_as_json}") from None
elif isinstance(node_as_json, Dict):
json_description = node_as_json
else:
logger.info("Parsing node interface as a file")
try:
json_description = json.load(node_as_json)
node_directory = os.path.dirname(node_as_json.name)
except json.decoder.JSONDecodeError as error:
raise ParseError(f"Invalid JSON formatting in file {node_as_json.name} - {error}") from None
if not json_description or not json_description.keys():
raise ParseError("Not a valid JSON file")
if len([main_key for main_key in json_description.keys() if not is_comment(main_key)]) > 1:
raise ParseError(f"Only one node definition allowed per file - found {list(json_description.keys())}")
logger.info("Extracting node information")
for node_type_name, node_type_description in json_description.items():
if node_type_name[0] == "$": # Special IDs are not actual nodes
logger.info("Ignoring comment tagged %s", node_type_name)
continue
logger.info("Extracting node data for %s", node_type_name)
check_node_name(node_type_name)
if self.node_interface is not None:
raise ParseError("Only one node per JSON description is supported")
if node_type_name.find(".") < 0:
# If no explicit namespace then prepend the extension name to guarantee uniqueness
node_type_name = f"{extension}.{node_type_name}"
self.node_interface = NodeInterface(
node_type_name, node_type_description, config_directory, categories_allowed, node_directory
)
# ----------------------------------------------------------------------
def can_generate(self, generation_type: str) -> bool:
"""Checks to see if a particular type of output should be generated.
Args:
generation_type: Name of output generation type. Exact values respected are in main.py
        Returns:
True if the generation_type of data is allowed by the node
"""
return self.node_interface.can_generate(generation_type) if self.node_interface else False
# ----------------------------------------------------------------------
def check_support(self):
"""Raises AttributeError if any attributes on the node are currently not supported"""
self.node_interface.check_support()
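# A minimal usage sketch (hedged - the node body is deliberately tiny and the
# extension name is hypothetical):
#
#     wrapper = NodeInterfaceWrapper('{"MyNode": {"description": "Example node"}}', "omni.my.ext")
#     if wrapper.can_generate("docs"):
#         node = wrapper.node_interface  # parsed NodeInterface named "omni.my.ext.MyNode"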
| 62,207 | Python | 48.410643 | 119 | 0.605462 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/OmniGraphExtension.py | """
Support for managing the automatic creation and handling of an OGN-enabled extension.
This manages extensions outside of the build system, using Kit's automatic extension loading capabilities.
It explicitly does not handle running the generation scripts on .ogn files, though it does provide the directories
to which such generated files should be written.
The file structure of a newly created extension, beginning at the root of the extension looks like this:
ROOT/
my.cool.extension/
config/
extension.toml # (Generated) Information required to load the extension
docs/
README.md # Description of your extension
my/cool/extension/
__init__.py # (Generated) Initialization of your extension
nodes/
# Contains the .ogn and .py files implementing your OmniGraph nodes
ogn/
__init__.py # (Generated) Registration of your OmniGraph nodes
docs/
# (Generated) Documentation describing your nodes
include/
# (Generated) C++ Database files, to make it easy for C++ nodes to access data
tests/
# (Generated) Test scripts that exercise your nodes
python/
# (Generated) Python Database files, to make it easy for Python nodes to access data
usd/
# (Generated) Template USD file setting up the attributes in your nodes
"""
import os
import re
from warnings import warn
from .utils import create_symbolic_link, dbg_reg
class OmniGraphExtension:
"""Class handling all of the requirements of an OGN-enabled extension
Properties:
ogn_docs_directory: Path to the directory containing user-written docs
ogn_include_directory: Path to the directory containing .ogn-generated C++ header files
ogn_nodes_directory: Path to the directory containing user-implemented nodes (.ogn and .py/.cpp)
ogn_python_directory: Path to the directory containing .ogn-generated Python interface files
ogn_tests_directory: Path to the directory containing .ogn-generated Python test scripts
ogn_usd_directory: Path to the directory containing .ogn-generated USD template files
Internal:
extension_root: Top level directory where the extension is defined
python_directory: Directory in which the Python import root for this extension is found
import_path: Import path for the Python root of this extension (e.g. omni.my.example)
Also used to determine the extension subdirectory
(e.g. $extension_root/omni.my.example/omni/my/example)
"""
def __init__(self, extension_root: str, import_path: str):
"""Initialize the location information for an extension
Args:
extension_root: Root directory of the extension to be managed.
"""
self._extension_root = extension_root
self._import_path = import_path
self.extension_directory = None
self.python_directory = None
self.ogn_docs_directory = None
self.ogn_include_directory = None
self.ogn_nodes_directory = None
self.ogn_python_directory = None
self.ogn_tests_directory = None
self.ogn_usd_directory = None
self.extension_name = None
self.rebuild_configuration()
# ================================================================================
def __str__(self):
"""Returns a string with the class's information nicely formatted"""
return "\n".join(
[
f"Root = {self._extension_root}",
f"Import = {self._import_path}",
f"Directory = {self.extension_directory}",
f"Python Directory = {self.python_directory}",
f"Name = {self.extension_name}",
]
)
# ================================================================================
def rebuild_configuration(self):
"""Reset all of the internal file paths and variables based on current configurations"""
self.extension_directory = os.path.join(self._extension_root, self._import_path)
self.python_directory = os.path.join(self.extension_directory, *self._import_path.split("."))
# extension_name is an InterCaps version of the import_path, plus the word "Extension"
# e.g. omni.my.example -> OmniMyExampleExtension
self.extension_name = "".join(word.capitalize() for word in self._import_path.split(".")) + "Extension"
self.ogn_nodes_directory = os.path.join(self.python_directory, "nodes")
self.ogn_python_directory = os.path.join(self.python_directory, "ogn")
self.ogn_docs_directory = os.path.join(self.ogn_python_directory, "docs")
self.ogn_include_directory = os.path.join(self.ogn_python_directory, "include")
self.ogn_tests_directory = os.path.join(self.ogn_python_directory, "tests")
self.ogn_usd_directory = os.path.join(self.ogn_python_directory, "usd")
# ================================================================================
@property
def import_path(self):
"""Returns the current value of the extension's import path"""
return self._import_path
@import_path.setter
def import_path(self, new_import_path: str):
"""Sets the import path to the new location, updating all internal file paths and variables
Note that if you call this all of your existing paths will be reconfigured to the default layout.
Args:
new_import_path: Python import path for the extension
Raises:
ValueError if new_import_path is not a valid path
"""
if not self.validate_import_path(new_import_path):
raise ValueError(
"Import path must be a valid Python name with dot-separated components consisting of the uppercase"
" and lowercase letters A through Z, the underscore _ and, except for the first character, the digits"
f" 0 through 9. '{new_import_path}' does not satisfy that requirement."
)
self._import_path = new_import_path
self.rebuild_configuration()
@staticmethod
def validate_import_path(import_path: str) -> bool:
"""Returns True iff the given import path has a legal name"""
re_path_component_name = re.compile("^[_A-Za-z][_0-9A-Za-z]*$")
return all(re_path_component_name.match(path_component) for path_component in import_path.split("."))
# ================================================================================
@property
def extension_root(self):
"""Get the current value of the extension's root directory"""
return self._extension_root
@extension_root.setter
def extension_root(self, new_root_directory: str):
"""Sets the root directory of the extension the new location, updating all internal file paths and variables
Note that if you call this all of your existing paths will be reconfigured to the default layout.
Args:
new_root_directory: Root directory for the extension files
"""
self._extension_root = new_root_directory
self.rebuild_configuration()
# ================================================================================
def create_directory_tree(self):
"""Create all of the directories that comprise the OGN-enabled extension, including locations for new nodes"""
os.makedirs(os.path.join(self.extension_directory, "config"), exist_ok=True)
os.makedirs(os.path.join(self.extension_directory, "docs"), exist_ok=True)
os.makedirs(self.ogn_docs_directory, exist_ok=True)
os.makedirs(self.ogn_include_directory, exist_ok=True)
os.makedirs(self.ogn_nodes_directory, exist_ok=True)
os.makedirs(self.ogn_python_directory, exist_ok=True)
os.makedirs(self.ogn_tests_directory, exist_ok=True)
os.makedirs(self.ogn_usd_directory, exist_ok=True)
# Linking the nodes directory accomplishes the dual goals of keeping the generated and handwritten code
# separated, while still encapsulating everything needed for OGN inside a single directory.
try:
create_symbolic_link(self.ogn_nodes_directory, os.path.join(self.ogn_python_directory, "nodes"))
except Exception as error: # noqa: PLW0703
dbg_reg(f"Could not symlink to {self.ogn_nodes_directory}, will look for directory named 'nodes' - {error}")
# ================================================================================
def __remove_directory_contents(self, directory_path: str, file_pattern: str):
"""Remove the specified files in a directory
Args:
directory_path: Path to the directory from which to remove the files
file_pattern: Regular expression string specifying which files are to be removed
"""
file_matcher = re.compile(file_pattern)
for file_to_check in os.listdir(directory_path):
if file_matcher.match(file_to_check):
try:
os.remove(os.path.join(directory_path, file_to_check))
except Exception as error: # noqa: PLW0703
dbg_reg(f"Could not remove generated file {file_to_check} from {directory_path} - {error}")
# ================================================================================
def remove_generated_files(self):
"""Delete all of the files under the extension that are automatically generated from a .ogn file"""
self.__remove_directory_contents(self.ogn_python_directory, r".*\.py$")
self.__remove_directory_contents(self.ogn_docs_directory, r".*\.rst$")
self.__remove_directory_contents(self.ogn_include_directory, r".*\.h$")
self.__remove_directory_contents(self.ogn_tests_directory, r".*\.py$")
self.__remove_directory_contents(self.ogn_usd_directory, r".*\.usda$")
# ================================================================================
def write_extension_init(self, force: bool):
"""Writes out the extension's main __init__.py file, responsible for setting up the extension"""
init_file_path = os.path.join(self.python_directory, "__init__.py")
if not force and os.path.isfile(init_file_path):
return
try:
with open(init_file_path, "w", newline="\n", encoding="utf-8") as init_fd:
init_fd.write(
f"""
import omni.ext
from .ogn import *
# Any class derived from `omni.ext.IExt` in a top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when the extension is enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() will be called.
class {self.extension_name}(omni.ext.IExt):
# ext_id is the current extension id. It can be used with the extension manager to query additional information,
# such as where this extension is located in the filesystem.
def on_startup(self, ext_id):
print("[{self._import_path}] {self.extension_name} startup", flush=True)
def on_shutdown(self):
print("[{self._import_path}] {self.extension_name} shutdown", flush=True)
"""
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write extension's init file {init_file_path} - {error}")
# ================================================================================
def write_ogn_init(self, force: bool):
"""Write out the OGN __init__.py file that registers all of the nodes in the ogn/ subdirectory"""
init_file_path = os.path.join(self.python_directory, "ogn", "__init__.py")
if not force and os.path.isfile(init_file_path):
return
try:
with open(init_file_path, "w", newline="\n", encoding="utf-8") as init_fd:
init_fd.write(
f'''
"""
Dynamically import every file in a directory tree that looks like a Python OGN node.
This includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.
"""
import omni.graph.core as og
og.register_ogn_nodes(__file__, "{self._import_path}")
'''
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write ogn's init file {init_file_path} - {error}")
# ================================================================================
def write_extension_toml(self, force: bool):
"""Write out the extension definition file, used for configuring it when the extension loads"""
toml_file_path = os.path.join(self.extension_directory, "config", "extension.toml")
if not force and os.path.isfile(toml_file_path):
return
try:
with open(toml_file_path, "w", newline="\n", encoding="utf-8") as toml_fd:
toml_fd.write(
f"""
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = []
# The title and description fields are primarily for displaying extension info in UI
title = "Omniverse Graph Extension Example"
description="Example extension for OmniGraph nodes."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository="https://gitlab-master.nvidia.com/omniverse/kit-extensions/example"
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "omnigraph"]
# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["Ogn*Database.py"]
[dependencies]
"omni.kit.test" = {{}}
"omni.graph" = {{}}
# Main python module this extension provides, it will be publicly available as "import {self._import_path}".
[[python.module]]
name = "{self._import_path}"
# Additional python module with tests, to make them discoverable by test system.
[[python.module]]
name = "{self._import_path}.ogn.tests"
"""
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write extension's configuration file {toml_file_path} - {error}")
# ================================================================================
def write_readme(self, force: bool):
"""Write out the README.md file that the extension uses to identify itself in the extension window"""
readme_path = os.path.join(self.extension_directory, "docs", "README.md")
if not force and os.path.isfile(readme_path):
return
try:
with open(readme_path, "w", newline="\n", encoding="utf-8") as readme_fd:
readme_fd.write(
f"""
# OmniGraph Extension [{self._import_path}]
Extension with implementation of some OmniGraph nodes
"""
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write extension's description file {readme_path} - {error}")
# ================================================================================
def write_all_files(self, force: bool = False):
"""Write all of the manually generated extension's files
Args:
force: If True then write out the files even if they already exist
"""
self.write_extension_init(force)
self.write_ogn_init(force)
self.write_extension_toml(force)
self.write_readme(force)
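# A minimal usage sketch (hedged - the root and import paths are hypothetical):
#
#     ext = OmniGraphExtension("/tmp/exts", "omni.my.example")
#     ext.create_directory_tree()  # builds the config/, docs/, nodes/, and ogn/ subtree
#     ext.write_all_files()        # writes __init__.py, extension.toml, and README.md
#
# Calling write_all_files(force=True) regenerates the files even when they already exist.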
| 15,919 | Python | 46.522388 | 120 | 0.603807 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/code_generation.py | """Interactive access to the .ogn code generator"""
import json
from typing import Dict, Optional, Union
from .generate_cpp import generate_cpp
from .generate_documentation import generate_documentation
from .generate_icon import generate_icon
from .generate_python import generate_python
from .generate_template import generate_template
from .generate_tests import generate_tests
from .generate_usd import generate_usd
from .nodes import NodeInterfaceWrapper
from .utils import OGN_PARSE_DEBUG, GeneratorConfiguration, ParseError, Settings, UnimplementedError, logger
def code_generation(
ogn: Union[str, Dict[str, Dict]], class_name: str, extension: str, module: str, settings: Optional[Settings] = None
) -> Dict[str, str]:
"""Run the code generator on the ogn input, which is in the same JSON format as the .ogn file
Args:
ogn: Raw OGN data, either in string version or in JSON parsed form
class_name: Base name for the OGN generated classes. e.g. OgnMyNode
extension: Extension to which the generated node type will belong
module: Python module from which the generated node type will be imported
settings: Optional code generator settings that will modify the type of code generated
Returns:
Dictionary of the generated code. The key value is the type of code, the value is the actual code.
If no code is generated for a particular key value then it will contain None.
cpp = C++ header defining the database for a node implemented in C++
template = C++ or Python template implementation
docs = .rst format containing the node documentation
icon = path to the icon specified in the node description or None
python = Python database definition, for both C++ and Python nodes
tests = Python code implementing some simple tests on the node
usd = Sample USD that defines the node type as a prim template
node = Node wrapper object
Raises:
ParseError if the ogn dictionary is not parseable as legal OGN data.
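Example:
    A minimal sketch of calling the generator directly; the node, extension, and
    module names here are hypothetical and the .ogn content is reduced to a bare minimum:
        ogn = {"OgnMyNode": {"version": 1, "description": "Example node"}}
        generated = code_generation(ogn, "OgnMyNode", "omni.my.extension", "omni.my.extension")
        cpp_header = generated["cpp"]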
"""
if isinstance(ogn, str):
try:
ogn = json.loads(ogn)
except json.decoder.JSONDecodeError as error:
raise ParseError("Failed to parse dictionary") from error
generated_code = {}
try:
node_interface_wrapper = NodeInterfaceWrapper(ogn, extension)
configuration = GeneratorConfiguration(
None,
node_interface_wrapper.node_interface,
extension,
module,
class_name,
None,
OGN_PARSE_DEBUG,
settings or Settings(),
)
try:
all_supported = True
node_interface_wrapper.check_support()
except UnimplementedError as error:
all_supported = False
logger.warning("Some attributes are not supported. Only documentation will be generated.\n\t%s", error)
generated_code["icon"] = generate_icon(configuration)
generated_code["cpp"] = generate_cpp(configuration, all_supported)
generated_code["docs"] = generate_documentation(configuration)
generated_code["python"] = generate_python(configuration)
generated_code["template"] = generate_template(configuration)
generated_code["tests"] = generate_tests(configuration)
generated_code["usd"] = generate_usd(configuration)
generated_code["node"] = node_interface_wrapper.node_interface
except ParseError as error:
raise ParseError("Failed to parse dictionary") from error
return generated_code
| 3,653 | Python | 43.024096 | 119 | 0.679989 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_icon.py | """Support for generating an icon file representing a node type in the build directory."""
import os
import re
import shutil
from typing import Optional
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, ParseError, ensure_writable_directory, logger
__all__ = ["generate_icon"]
class NodeIconGenerator(NodeInterfaceGenerator):
"""Manage the functions required to install a representative icon for a node type"""
def __init__(self, configuration: GeneratorConfiguration): # noqa: PLW0246
"""Set up the generator to get ready to copy the icon file
Just passes the initialization on to the parent class. See the argument and exception descriptions there.
"""
super().__init__(configuration)
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the node icon file"""
return f"{self.node_interface.name}.svg"
# ----------------------------------------------------------------------
def generate_node_interface(self):
"""Generate the test method for the named node"""
# If there is no place to put the file then the specified path, if it exists, is considered absolute
if self.output_path is None:
self.out.write(self.node_interface.icon_path)
return
# This ensures the file is not overwritten by the base class after returning
output_path = self.output_path
self.output_path = None
potential_path = self.node_interface.icon_path
if self.node_file_path is not None:
# If a file name was not explicit then see if a .svg file with the same name as the node exists
if potential_path is None:
# If no file was specified it's not an error if it doesn't exist
potential_path = self.node_file_path.replace(".ogn", ".svg")
if not os.path.isfile(potential_path):
return
else:
# The specified file name is relative to the node's directory; make it absolute
potential_path = os.path.join(os.path.dirname(self.node_file_path), potential_path)
if not potential_path.endswith(".svg"):
raise ParseError(f"Node icon path must be an SVG file. '{potential_path}' not allowed")
# Copy the file from the source location to the output path
try:
ensure_writable_directory(os.path.dirname(output_path))
shutil.copy(potential_path, output_path)
except Exception as error:
raise ParseError("Failed to copy node icon file") from error
# Find the icon path relative to the extension directory
match = re.match(f".*/{self.extension}/(.*)", output_path.replace("\\", "/"))
if not match:
raise ParseError(f"Icon location '{output_path}' needs to appear under extension '{self.extension}'")
extension_relative_path = match.group(1)
self.out.write(extension_relative_path)
# ======================================================================
def generate_icon(configuration: GeneratorConfiguration) -> Optional[str]:
"""Create support files for the icons defined within the node
Args:
configuration: Information defining how and where the documentation will be generated
Returns:
Relative path of the icon file if it existed and was successfully installed, else None
Raises:
NodeGenerationError: When there is a failure in the generation of the icon files
"""
if not configuration.node_interface.can_generate("icon"):
return None
logger.info("Generating icon")
needed_directory = configuration.needs_directory
configuration.needs_directory = False
generator = NodeIconGenerator(configuration)
generator.generate_interface()
configuration.needs_directory = needed_directory
icon_path = str(generator.out).rstrip()
return icon_path if icon_path else None
| 4,099 | Python | 41.708333 | 113 | 0.632349 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/category_definitions.py | """Handle the parsing and merging of node type category definitions"""
import json
from io import TextIOWrapper
from pathlib import Path
from typing import IO, Dict, List, Union
from .keys import NodeTypeKeys
from .utils import ParseError, is_comment
CategoryListType = Dict[str, List[str]]
# ==============================================================================================================
def get_category_definitions(category_information: Union[str, Dict, IO, Path, None]) -> CategoryListType:
"""Get the set of category definitions specified in the category file
Args:
category_information: Reference to a file containing a category dictionary or the dictionary itself
Returns:
Dictionary of MainCategory:SubCategories of category types found in the file
Raises:
ParseError if the file could not be parsed
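Example:
    A minimal sketch using an in-memory dictionary (the category names are illustrative):
        definitions = get_category_definitions({"examples": ["simple", "complex"]})
        assert definitions == {"examples": ["simple", "complex"]}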
"""
try:
definitions = {}
if category_information is None:
pass
elif isinstance(category_information, str):
definitions = json.loads(category_information)[NodeTypeKeys.CATEGORY_DEFINITIONS]
elif isinstance(category_information, TextIOWrapper):
definitions = json.load(category_information)[NodeTypeKeys.CATEGORY_DEFINITIONS]
elif isinstance(category_information, Path):
definitions = json.load(category_information.open("r"))[NodeTypeKeys.CATEGORY_DEFINITIONS]
elif isinstance(category_information, Dict):
definitions = category_information
else:
raise ParseError(f"Category definition file type not handled - {category_information}")
except OSError as error:
raise ParseError(f"File error when parsing category definitions {category_information} - {error}") from None
except json.decoder.JSONDecodeError as error:
raise ParseError(f"Invalid JSON formatting in file {category_information} - {error}") from None
# Filter out the comments before returning the dictionary
definitions = {key: value for key, value in definitions.items() if not is_comment(key)}
return definitions
# ==============================================================================================================
def merge_category_definitions(
merged_definitions: CategoryListType, definitions_to_merge: Union[str, Dict, IO, Path, None]
):
"""Merge the second set of category definitions with the first one"""
merged_definitions.update(get_category_definitions(definitions_to_merge))
| 2,524 | Python | 43.298245 | 116 | 0.658479 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/FloatAttributeManager.py | """
Contains the support class for managing attributes whose data is single precision numbers
"""
from typing import Any, List
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, is_number_or_list_of_numbers, values_in_range
class FloatAttributeManager(NumericAttributeManager):
"""Support class for attributes of type float"""
OGN_TYPE = "float"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"float": CppConfiguration("float", cast_required=False),
"float[2]": CppConfiguration("pxr::GfVec2f", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
"float[3]": CppConfiguration("pxr::GfVec3f", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
"float[4]": CppConfiguration("pxr::GfVec4f", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
}
CUDA_CONFIGURATION = {
"float": CudaConfiguration("float", cast_required=False),
"float[2]": CudaConfiguration("float3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
"float[3]": CudaConfiguration("float3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
"float[4]": CudaConfiguration("float4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
"""
values = [4.5, 2.5]
if self.tuple_count > 1:
values = [tuple(value + i * 0.125 for i in range(self.tuple_count)) for value in values]
if for_usd:
from pxr import Gf
gf_type = getattr(Gf, f"Vec{self.tuple_count}f")
values = [gf_type(*value) for value in values] # noqa: PLE1133
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
def numerical_type(self) -> int:
"""Returns the numerical TYPE_* value for this attribute's data"""
return NumericAttributeManager.TYPE_FLOAT
def validate_value(self, value):
"""Raises a ParseError if value is not a valid float value"""
if not is_number_or_list_of_numbers(value, self.tuple_count):
raise ParseError(f"Value {value} on a float[{self.tuple_count}] attribute is not a matching type")
# Values not representable exactly due to precision considerations are still accepted
if not values_in_range(value, -3.4028234e38, 3.4028234e38):
raise ParseError(f"Value {value} on a 32-bit float[{self.tuple_count}] attribute is out of range")
super().validate_value(value)
@staticmethod
def tuples_supported() -> List[int]:
"""USD supports only these tuples natively so restrict support to them for now"""
return [1, 2, 3, 4]
def cpp_includes(self) -> List[str]:
"""Tack on the include implementing the USD tuple type"""
regular_includes = super().cpp_includes()
if self.tuple_count in [2, 3, 4]:
regular_includes.append("omni/graph/core/ogn/UsdTypes.h")
return regular_includes
def python_type_name(self):
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("float")
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "Float"
def usd_type_name(self):
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_containers("float")
| 3,972 | Python | 46.297618 | 115 | 0.656093 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/TokenAttributeManager.py | """
Contains the support class for managing attributes whose data is strings represented as tokens
"""
import json
from contextlib import suppress
from typing import Any, Dict, List
from ..utils import IndentedOutput, MetadataKeys, ParseError, check_token_name
from .AttributeManager import AttributeManager, CppConfiguration, CudaConfiguration
from .parsing import is_type_or_list_of_types
class TokenAttributeManager(AttributeManager):
"""Support class for attributes of type curated unique string.
The values passed around are still strings, it is only the internal data type that is different
from a regular string (NameToken instead of std::string)
"""
OGN_TYPE = "token"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"token": CppConfiguration("NameToken", cast_required=False),
}
CUDA_CONFIGURATION = {
"token": CudaConfiguration("NameToken", cast_required=False),
}
def __init__(self, attribute_name: str, attribute_type_name: str):
"""Initialize the token-based attribute information
Args:
attribute_name: Name to use for this attribute
attribute_type_name: Unique name for this attribute type
"""
super().__init__(attribute_name, attribute_type_name)
self.__allowed_tokens = {}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return the values in the form used to set them on USD attributes, else return plain Python values
"""
values = ["Ahsoka", "Tano"]
if self.tuple_count > 1:
values = [tuple(value + "x" * i for i in range(self.tuple_count)) for value in values]
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
@staticmethod
def tuples_supported() -> List[int]:
"""Simple values are supported, no multiple tuples"""
return [1]
def get_allowed_tokens(self) -> Dict[str, str]:
"""Returns a dictionary of tokens allowed on this attribute type, raising a ParseError if they are not legal.
This dictionary will be merged into the main node token dictionary, with duplicates removed. This is done
separately from regular metadata and token parsing since it crosses both worlds with different requirements"""
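# e.g. an allowedTokens value of "red,green" is expected to yield {"red": "red", "green": "green"},
# assuming check_token_name passes these simple names through unchanged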
allowed_tokens = {}
raw_tokens = self.__allowed_tokens
if isinstance(raw_tokens, str):
token_list = raw_tokens.split(",")
allowed_tokens = {check_token_name(token_name): token_name for token_name in token_list}
elif isinstance(raw_tokens, list):
allowed_tokens = {check_token_name(token): token for token in raw_tokens}
elif isinstance(raw_tokens, dict):
allowed_tokens = {check_token_name(token): value for token, value in raw_tokens.items()}
else:
raise ParseError(f"allowedTokens can only be a string, list, or dictionary - '{raw_tokens}'")
return allowed_tokens
def parse_metadata(self, metadata: Dict[str, Any]):
"""Parse the metadata attached to the attribute type.
Overrides to this method can add additional interpretation of special metadata.
"""
super().parse_metadata(metadata)
with suppress(KeyError):
self.__allowed_tokens = metadata[MetadataKeys.ALLOWED_TOKENS]
# The allowed tokens could be a dictionary, the keys of which would be lost if only the processed version
# of the tokens was saved in the metadata so add in the raw data as well.
self.metadata[MetadataKeys.ALLOWED_TOKENS_RAW] = json.dumps(self.__allowed_tokens)
def cpp_default_initializer(self):
"""Default value setting is delayed so only set the array pointers to null if required."""
return "nullptr, 0" if self.array_depth > 0 else None
def cpp_pre_initialization(self, out: IndentedOutput):
"""If there is a default, output the code to initialize the token from the string default"""
super().cpp_pre_initialization(out)
if not self.default:
return
default_variable_name = f"{self.namespace}::{self.cpp_variable_name()}"
size_param = ""
if isinstance(self.default, list):
default_to_set = f"std::array<NameToken, {len(self.default)}>{{"
default_strings = [f'"{value}"' for value in self.default] # noqa: PLE1133
default_to_set += ", ".join([f"iToken.getHandle({value})" for value in default_strings])
default_to_set += "}.data()"
size_param = f", {len(self.default)}"
else:
default_to_set = f'iToken.getHandle("{self.default}")'
out.write(f"{default_variable_name}.setDefault({default_to_set}{size_param});")
def cpp_element_value(self, value, remaining_depth: int = None):
"""String defaults must be quoted - use the json library to do it right"""
return json.dumps(value) if value is not None else None
def cuda_includes(self) -> List[str]:
"""Cuda cannot include iComputeGraph so it directly includes the handle definition file for token access"""
includes = super().cuda_includes()
includes.append("omni/graph/core/Handle.h")
return includes
def validate_value(self, value):
"""Raises a ParseError if value is not a valid string value"""
if not is_type_or_list_of_types(value, str, self.tuple_count):
raise ParseError(f"Value {value} on a token[{self.tuple_count}] attribute is not a matching type")
super().validate_value(value)
def python_value(self, value):
"""Token defaults must be quoted - use the json library to do it right"""
return json.dumps(value) if value is not None else None
def value_for_test(self, value):
"""The test data runs through JSON so there is no need to add outer quotes"""
return value
def python_type_name(self):
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("str")
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "Token"
def usd_type_name(self):
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_containers("token")
def empty_base_value(self) -> str:
"""Return the default for a token, which must include quotes"""
return ""
| 6,798 | Python | 44.630872 | 118 | 0.65431 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/StringAttributeManager.py | """
Contains the support class for managing attributes whose data is strings
"""
import json
from typing import Any, List
from ..keys import CudaPointerValues, MemoryTypeValues
from ..utils import ParseError
from .AttributeManager import AttributeManager, CppConfiguration, CudaConfiguration
from .parsing import is_type_or_list_of_types
class StringAttributeManager(AttributeManager):
"""Support class for attributes of type string"""
OGN_TYPE = "string"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"string": CppConfiguration("char*", cast_required=False, role="eText"),
}
CUDA_CONFIGURATION = {
"string": CudaConfiguration("char*", cast_required=False, role="eText"),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return the values in the form used to set them on USD attributes, else return plain Python values
"""
values = ["Anakin", "Skywalker"]
if self.tuple_count > 1:
values = [tuple(value + "x" * i for i in range(self.tuple_count)) for value in values]
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
@staticmethod
def tuples_supported() -> List[int]:
"""Strings don't have tuples"""
return [1]
@staticmethod
def array_depths_supported() -> List[int]:
"""String arrays are not yet supported by fabric"""
return [0]
def validate_value(self, value):
"""Raises a ParseError if value is not a valid string value"""
if not is_type_or_list_of_types(value, str, self.tuple_count):
raise ParseError(f"Value {value} on a string[{self.tuple_count}] attribute is not a matching type")
super().validate_value(value)
def cpp_includes(self) -> List[str]:
"""Tack on the include implementing the string wrapper types"""
regular_includes = super().cpp_includes()
regular_includes.append("omni/graph/core/ogn/ArrayAttribute.h")
return regular_includes
def cpp_element_value(self, value):
"""String defaults must be quoted - use the json library to do it right"""
return f"{json.dumps(value)}" if value is not None else None
def cpp_default_initializer(self):
"""The string initializer recognizes that a simple string is actual stored as an array."""
# Arrays of strings have the same form as regular arrays
if self.array_depth > 0:
return super().cpp_default_initializer()
# Regular strings look like arrays in that they have a defined length, but unlike true arrays they can be
# represented as an actual string rather than a std::array
raw_value = self.cpp_element_value(self.default)
if not raw_value:
return "nullptr, 0"
return f"{raw_value}, {len(self.default)}"
def cpp_wrapper_class(self) -> str:
"""Returns a string with the wrapper class used to access attribute data in the C++ database along
with the non-default parameters to that class's template"""
wrapper_class = ""
if self.is_read_only():
modifier = "const "
wrapper_class = "ogn::ArrayInput"
else:
modifier = ""
wrapper_class = "ogn::ArrayOutput"
template_arguments = [f"{modifier}char", MemoryTypeValues.CPP[self.memory_type]]
if self.cuda_pointer_type is not None:
template_arguments.append(CudaPointerValues.CPP[self.cuda_pointer_type])
return (wrapper_class, template_arguments)
def fabric_needs_counter(self) -> bool:
"""Even simple strings require counter variables since they are implemented as arrays"""
return True
def python_value(self, value):
"""String values must be quoted - use the json library to do it right"""
return json.dumps(value) if value is not None else None
def value_for_test(self, value):
"""The test data runs through JSON so there is no need to add outer quotes"""
return value
def python_type_name(self):
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("str")
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "String"
def usd_type_name(self):
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_containers("string")
def empty_base_value(self) -> str:
"""Return the default for a string, which must include quotes"""
return ""
| 4,924 | Python | 39.702479 | 119 | 0.649269 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/TimeCodeAttributeManager.py | """
Contains the support class for managing attributes whose data is time codes
"""
from typing import Any, List
from .AttributeManager import CppConfiguration, CudaConfiguration
from .DoubleAttributeManager import DoubleAttributeManager
class TimeCodeAttributeManager(DoubleAttributeManager):
"""Support class for all attributes of type timecode
This is just an alias for double with a time-based interpretation
"""
OGN_TYPE = "timecode"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"timecode": CppConfiguration("pxr::SdfTimeCode", include_files=["pxr/usd/sdf/timeCode.h"], role="eTimeCode"),
}
CUDA_CONFIGURATION = {
"timecode": CudaConfiguration("double", cast_required=False),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return the values in the form used to set them on USD attributes, else return plain Python values
"""
values = [5.0, 6.0]
if self.tuple_count > 1:
values = [tuple(value + i * 0.125 for i in range(self.tuple_count)) for value in values]
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
@staticmethod
def tuples_supported() -> List[int]:
"""Timecodes do not have tuple representation in USD"""
return [1]
def python_role_name(self) -> str:
"""Returns a string with the Python role name for this attribute"""
return "og.Database.ROLE_TIMECODE"
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "TimeCode"
def usd_type_name(self) -> str:
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_arrays("timecode")
| 2,035 | Python | 36.018181 | 117 | 0.660442 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/parsing.py | """
Constants used in parsing attributes in a .ogn file
"""
import ast
from typing import List, Tuple
from ..keys import AttributeKeys
# ======================================================================
# Legacy keyword support - use the values from keys.py for new code
KEY_ATTR_DEFAULT = AttributeKeys.DEFAULT
KEY_ATTR_DESCRIPTION = AttributeKeys.DESCRIPTION
KEY_ATTR_MINIMUM = AttributeKeys.MINIMUM
KEY_ATTR_MAXIMUM = AttributeKeys.MAXIMUM
KEY_ATTR_MEMORY_TYPE = AttributeKeys.MEMORY_TYPE
KEY_ATTR_METADATA = AttributeKeys.METADATA
KEY_ATTR_OPTIONAL = AttributeKeys.OPTIONAL
KEY_ATTR_TYPE = AttributeKeys.TYPE
KEY_ATTR_UI_NAME_METADATA = AttributeKeys.UI_NAME
KEY_ATTR_UNVALIDATED = AttributeKeys.UNVALIDATED
MANDATORY_ATTR_KEYS = AttributeKeys.MANDATORY
PROCESSED_ATTR_KEYS = AttributeKeys.PROCESSED
# ======================================================================
def attributes_as_usd(attribute_info: List[Tuple[str, str]]) -> List[str]:
"""Returns a list of the attribute definitions in the USDA file format
Most attributes are listed as their normal type, with a few exceptions:
prim: Output bundles
any: Extended attribute type with any value
union[a,b,c...] Extended attribute type with any of the types a, b, c...
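e.g. [("float", "inputs:x"), ("bundle", "outputs_data")] yields the lines
'custom float inputs:x' and 'def Output "outputs_data" { }'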
"""
usd_lines = []
metadata = {}
for (attr_type, attr_name) in attribute_info:
if attr_type == "bundle":
usd_lines.append(f'def Output "{attr_name}" {{ }}')
elif attr_type == "any":
usd_lines.append(f'custom token {attr_name} = "any"')
metadata[attr_name] = "ExtendedAttributeType-->Any"
elif attr_type.find("union") == 0:
type_list = ",".join(ast.literal_eval(attr_type[5:]))
usd_lines.append(f'custom token {attr_name} = "union of {type_list}"')
metadata[attr_name] = f"ExtendedAttributeType-->Union->{type_list}"
else:
usd_lines.append(f"custom {attr_type} {attr_name}")
if metadata:
usd_lines.append('def ComputeNodeMetaData "metaData"')
usd_lines.append("{")
for name, information in metadata.items():
usd_lines.append(f' custom token {name} = "{information}"')
usd_lines.append("}")
return usd_lines
# ======================================================================
def is_type_or_list_of_types(value, type_definition, type_count: int):
"""Return True if the value is of the type passed in, or is a list of those types of the defined length"""
if isinstance(value, type_definition) and type_count == 1:
return True
if isinstance(value, list):
if len(value) != type_count:
return False
return all(isinstance(single_value, type_definition) for single_value in value)
return False
# Support for separating roles and types
SUFFIX_TO_TYPE = {"f": "float", "d": "double", "h": "half"}
# ======================================================================
def separate_ogn_role_and_type(raw_type_name: str) -> Tuple[str, str]:
"""Extract the base data type and role name from a raw OGN type, which could include a role"""
if raw_type_name[:-1] in ["quat", "matrix", "normal", "point", "color", "texcoord", "vector"]:
return (SUFFIX_TO_TYPE[raw_type_name[-1]], raw_type_name[:-1])
if raw_type_name in ["frame", "transform", "timecode"]:
return ("double", raw_type_name)
if raw_type_name == "execution":
return ("uint", "execution")
if raw_type_name == "string":
return ("uchar", "text")
if raw_type_name == "path":
return ("uchar", "path")
return (raw_type_name, "none")
# ======================================================================
def usd_type_name(type_name: str, tuple_count: int, is_array: bool) -> str:
"""Returns the USD type_name for the attribute with the given parameters
Args:
type_name: Base type (int, float, ...)
tuple_count: Number of fixed elements (int,2 -> [int, int])
is_array: True if the attribute has a variable number of elements ([int, int, ...])
e.g. (int, 2, False) -> "int2"
(float, 3, True) -> "float3[]"
Returns:
A string containing the USD version of the constructed name
"""
full_type = type_name
if tuple_count > 1:
if type_name[:-1] in ["matrix", "point", "color", "texCoord", "frame", "transform"]:
full_type = f"{type_name[:-1]}{tuple_count}{type_name[-1]}"
elif type_name[:-1] != "quat":
# Quaternions are assumed to be 4 so do not have the tuple count added
full_type += str(tuple_count)
if is_array:
full_type += "[]"
return full_type
# ======================================================================
def sdf_type_name(value_type_name: str, tuple_count: int, is_array: bool) -> str:
"""Returns the SDF ValueTypeName for the attribute with the given parameters
Args:
value_type_name: Base Sdf type
tuple_count: Number of fixed elements
is_array: True if the attribute has a variable number of elements
e.g. (Int, 2, False) -> "Int2"
(Float, 3, True) -> "Float3Array"
Returns:
A string containing the SDF ValueTypeName version corresponding to the attribute parameters
"""
full_type = value_type_name
if tuple_count > 1:
if value_type_name[:-1] in ["Color", "Frame", "Matrix", "Normal", "Point", "TexCoord", "Transform", "Vector"]:
full_type = f"{value_type_name[:-1]}{tuple_count}{value_type_name[-1]}"
elif value_type_name[:-1] != "Quat":
# Quaternions are assumed to be 4 so do not have the tuple count added
full_type += str(tuple_count)
if is_array:
full_type += "Array"
return full_type
| 5,809 | Python | 40.798561 | 118 | 0.585127 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/ObjectIdAttributeManager.py | """
Contains the support class for managing attributes whose data is object identifiers
"""
from .AttributeManager import CppConfiguration, CudaConfiguration
from .UInt64AttributeManager import UInt64AttributeManager
class ObjectIdAttributeManager(UInt64AttributeManager):
"""Support class for attributes of type objectId.
The values passed around are the same as uint64, except that they are interpreted as references to objects
in the scene from a managed object store. The mechanism for interpreting them differently is the metadata
stored on the attribute.
"""
OGN_TYPE = "objectId"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"objectId": CppConfiguration("uint64_t", cast_required=False, role="eObjectId"),
}
CUDA_CONFIGURATION = {
"objectId": CudaConfiguration("uint64_t", cast_required=False),
}
def create_type_name(self) -> str:
"""When creating the attribute this special type name will set up the underlying metadata correctly"""
return "objectId[]" if self.array_depth == 1 else "objectId"
def python_role_name(self) -> str:
"""Returns a string with the Python role name for this attribute"""
return "og.Database.ROLE_OBJECT_ID"
| 1,342 | Python | 40.968749 | 110 | 0.726528 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/naming.py | """This file contains support for the various utilities and constants used to manage attribute naming."""
from __future__ import annotations
import re
from typing import Any, Dict, Optional, Tuple
from ..utils import ParseError
# ======================================================================
# Namespaces for attribute types
INPUT_NS = "inputs"
OUTPUT_NS = "outputs"
STATE_NS = "state"
ALL_NS = [INPUT_NS, OUTPUT_NS, STATE_NS]
# ======================================================================
# Port type enum names corresponding to the namespaces
PORT_NAMES = {
INPUT_NS: "og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT",
OUTPUT_NS: "og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT",
STATE_NS: "og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE",
}
# ======================================================================
# Unique identifier of attribute types (corresponds to the C++ enum value for each type)
INPUT_GROUP = "ogn::kOgnInput"
OUTPUT_GROUP = "ogn::kOgnOutput"
STATE_GROUP = "ogn::kOgnState"
# Pattern for legal attribute names, not including the applied type-namespace prefix
# - starts with a letter or underscore
# - then an arbitrary number of alphanumerics, dots, or colons (colon is namespace separator)
RE_ATTRIBUTE_NAME = re.compile("^[A-Za-z_][A-Za-z0-9_:.]*$")
ATTR_NAME_REQUIREMENT = (
"Attribute name must be a letter or underscore followed by letters, numbers, or"
' the special characters "_", ":", or "."'
)
# Requirements for user-friendly names. Only quotes are prohibited as they are problematic.
RE_ATTRIBUTE_UI_NAME = re.compile("^[^'\"]*$")
ATTR_UI_NAME_REQUIREMENT = "User-friendly attribute name cannot contain a quote"
# ======================================================================
def namespace_of_group(attribute_group: str) -> str:
"""Returns the namespace for the attributes of the given type"""
if attribute_group == INPUT_GROUP:
return INPUT_NS
if attribute_group == OUTPUT_GROUP:
return OUTPUT_NS
if attribute_group == STATE_GROUP:
return STATE_NS
raise ParseError(f"Attribute with unknown type {attribute_group}")
# ======================================================================
def attribute_name_in_namespace(attribute_name: str, namespace: str) -> str:
"""Returns the attribute_name with the namespace prepended - ignores if it is already there
Handles the case of nested namespaces by prepending in those cases as well. Does not look for a degenerate
case like the named namespace nested inside of a different one (e.g. change ("a:b:c", "b") to just "b:c")
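e.g. ("speed", "inputs") -> "inputs:speed", ("inputs:speed", "inputs") -> "inputs:speed",
and ("other:speed", "inputs") -> "inputs:other:speed"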
"""
(current_namespace, current_name) = split_attribute_name(attribute_name)
# Prepend the namespace if there is none, or if there is a non-matching one
if current_namespace is None:
return f"{namespace}:{current_name}"
if current_namespace != namespace:
return f"{namespace}:{attribute_name}"
return attribute_name
# ======================================================================
def is_input_name(attribute_name: str) -> bool:
"""Returns True if the attribute_name lives in the input attribute namespace"""
# Input namespace always appears at the top level so it's only necessary to check the prefix
return attribute_name.startswith(f"{INPUT_NS}:")
# ======================================================================
def is_output_name(attribute_name: str) -> bool:
"""Returns True if the attribute_name lives in the output attribute namespace"""
# Output namespace always appears at the top level so it's only necessary to check the prefix
return attribute_name.startswith(f"{OUTPUT_NS}:") or attribute_name.startswith(f"{OUTPUT_NS}_")
# ======================================================================
def is_state_name(attribute_name: str) -> bool:
"""Returns True if the attribute_name lives in the state attribute namespace"""
# State namespace always appears at the top level so it's only necessary to check the prefix
return attribute_name.startswith(f"{STATE_NS}:")
# ======================================================================
def split_attribute_name(attribute_name: str) -> Tuple[Optional[str], str]:
"""Returns the namespace and basename extracted from the full attribute name"""
name_information = attribute_name.split(":")
if len(name_information) < 2:
return (None, attribute_name)
if len(name_information) > 2:
return (name_information[0], ":".join(name_information[1:]))
return tuple(name_information)
# ======================================================================
def attribute_name_without_port(attribute_name: str) -> str:
"""Returns the attribute name with its port namespace removed"""
for prefix in [f"{OUTPUT_NS}:", f"{OUTPUT_NS}_", f"{INPUT_NS}:", f"{STATE_NS}:"]:
if attribute_name.startswith(prefix):
return attribute_name.replace(prefix, "")
return attribute_name
# ======================================================================
def attribute_name_as_python_property(attribute_name: str) -> str:
"""
Returns the attribute name in a form suitable for a Python property, with the namespace stripped off and
any ":" separators changed to "_"
"""
if attribute_name.startswith(INPUT_NS):
raw_name = attribute_name[len(INPUT_NS) + 1 :]
elif attribute_name.startswith(OUTPUT_NS):
raw_name = attribute_name[len(OUTPUT_NS) + 1 :]
elif attribute_name.startswith(STATE_NS):
raw_name = attribute_name[len(STATE_NS) + 1 :]
else:
raw_name = split_attribute_name(attribute_name)[1]
return raw_name.replace(":", "_")
# ======================================================================
def check_attribute_name(attribute_name: str):
"""Returns a pair of (namespace,base_name) is the attribute name is legal
Raises:
ParseError: Attribute name is not legally constructed
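e.g. check_attribute_name("inputs:speed") -> ("inputs", "speed")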
"""
if not is_input_name(attribute_name) and not is_output_name(attribute_name) and not is_state_name(attribute_name):
raise ParseError(f'Attribute name "{attribute_name}" is not correctly namespaced as input, output, or state')
(actual_namespace, base_name) = split_attribute_name(attribute_name)
if not RE_ATTRIBUTE_NAME.match(base_name):
raise ParseError(ATTR_NAME_REQUIREMENT)
return (actual_namespace, base_name)
# ======================================================================
def check_attribute_ui_name(attribute_ui_name: str):
"""Raises ParseError if the new user-friendly name was illegal, else returns the name itself"""
if not RE_ATTRIBUTE_UI_NAME.match(attribute_ui_name):
raise ParseError(ATTR_UI_NAME_REQUIREMENT)
# ======================================================================
def assemble_attribute_type_name(
type_name: str,
tuple_count: int,
array_depth: int,
extra_info: Optional[Dict[str, Any]] = None,
):
"""Assemble a fully qualified attribute name from its constituent parts.
Basically the reversal of split_attribute_type_name().
This method does no validation; use management.py:validate_attribute_type_name for that
Args:
type_name: Base name of the attribute type
tuple_count: Number of tuple elements in the attribute type
array_depth: Levels of arrays in the attribute type
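extra_info: For union types, a dictionary whose keys are the names of the member types
e.g. ("float", 3, 1) -> "float[3][]", and ("union", 1, 0) with extra_info
keys ["float", "int"] -> ["float", "int"]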
"""
full_type = None
if type_name == "union":
full_type = list(extra_info.keys()) if extra_info else []
else:
full_type = type_name
if tuple_count > 1:
full_type += f"[{tuple_count}]"
full_type += "[]" * array_depth
return full_type
| 7,740 | Python | 41.532967 | 118 | 0.60478 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/DoubleAttributeManager.py | """
Contains the support class for managing attributes whose data is double precision numbers
"""
from typing import Any, List
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, is_number_or_list_of_numbers
class DoubleAttributeManager(NumericAttributeManager):
"""Support class for attributes of type double.
As Python does not really have a double type, float values are used for that portion of the code.
"""
OGN_TYPE = "double"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"double": CppConfiguration("double", cast_required=False),
"double[2]": CppConfiguration("pxr::GfVec2d", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
"double[3]": CppConfiguration("pxr::GfVec3d", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
"double[4]": CppConfiguration("pxr::GfVec4d", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
}
CUDA_CONFIGURATION = {
"double": CudaConfiguration("double", cast_required=False),
"double[2]": CudaConfiguration("double3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
"double[3]": CudaConfiguration("double3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
"double[4]": CudaConfiguration("double4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
"""
values = [4.125, 2.125]
if self.tuple_count > 1:
values = [tuple(value + i * 0.125 for i in range(self.tuple_count)) for value in values]
if for_usd:
from pxr import Gf
gf_type = getattr(Gf, f"Vec{self.tuple_count}d")
values = [gf_type(*value) for value in values] # noqa: PLE1133
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
def numerical_type(self) -> int:
"""Returns the numerical TYPE_* value for this attribute's data"""
return NumericAttributeManager.TYPE_DECIMAL
def validate_value(self, value):
"""Raises a ParseError if value is not a valid double value"""
if not is_number_or_list_of_numbers(value, self.tuple_count):
raise ParseError(f"Value {value} on a double[{self.tuple_count}] attribute is not a matching type")
super().validate_value(value)
@staticmethod
def tuples_supported() -> List[int]:
"""USD supports only these tuples natively so restrict support to them for now"""
return [1, 2, 3, 4]
def python_type_name(self) -> str:
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("float")
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "Double"
def usd_type_name(self) -> str:
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_containers("double")
| 3,527 | Python | 44.818181 | 117 | 0.657499 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/NumericAttributeManager.py | """
Contains the support class for managing attributes whose data is all types of numeric values
Exports:
is_number
is_number_or_list_of_numbers
NumericAttributeManager
values_in_range
"""
from typing import List, Union
from ..utils import ParseError
from .AttributeManager import AttributeManager, PropertySet
from .parsing import KEY_ATTR_MAXIMUM, KEY_ATTR_MINIMUM
# ======================================================================
def is_number(value):
"""Return True if the value is a number, not including booleans"""
if isinstance(value, float) or (isinstance(value, int) and not isinstance(value, bool)):
return True
return False
# ======================================================================
def is_number_or_list_of_numbers(value, type_count: int):
"""Return True if the value is a number or a list of "type_count" numbers, not including booleans"""
if is_number(value) and type_count == 1:
return True
if isinstance(value, list):
if len(value) != type_count:
return False
return all(is_number(single_value) for single_value in value)
return False
# ======================================================================
def values_in_range(value, min_value: float, max_value: float):
"""Return True if the value is a number or list of numbers in the range [min_value,max_value]"""
if isinstance(value, list):
return all(min_value <= single_value <= max_value for single_value in value)
return min_value <= value <= max_value
# ======================================================================
class NumericAttributeManager(AttributeManager):
"""Support class for attributes with simple numeric types
Attributes:
minimum: Minimum allowable value of the numeric attribute. None means no minimum.
maximum: Maximum allowable value of the numeric attribute. None means no maximum.
"""
# Convenience types for determining numerical type information about these attributes with numerical_type()
TYPE_OTHER = 0
TYPE_INTEGER = 1
TYPE_UNSIGNED_INTEGER = 2
TYPE_DECIMAL = 3
TYPE_FLOAT = 4
TYPE_OBJECT_ID = 5
def __init__(self, attribute_name: str, attribute_type_name: str):
"""Initialize the numeric attribute information
Args:
attribute_name: Name to use for this attribute
attribute_type_name: Type of this attribute
"""
super().__init__(attribute_name, attribute_type_name)
self.minimum = None
self.maximum = None
def numerical_type(self) -> int:
"""Returns the numerical TYPE_* value for this attribute's data"""
return NumericAttributeManager.TYPE_OTHER
def cpp_element_value(self, value) -> str:
"""Ensure floating point elements have decimal values so that they are properly recognized."""
if self.numerical_type() == self.TYPE_DECIMAL:
return f"{value * 1.0}"
if self.numerical_type() == self.TYPE_FLOAT:
return f"{value * 1.0}f"
return f"{int(value)}"
def validate_value(self, value):
"""Validate that the given data is a matching numeric type, and in range when min/max are specified
The parse_extra_properties() method should have been called before validating anything.
Args:
value: Data value to verify
Raises:
ParseError if the min/max range is not respected by the value
"""
if not is_number_or_list_of_numbers(value, self.tuple_count):
raise ParseError(f"Value {value} on a double[{self.tuple_count}] attribute is not a matching type")
self.validate_numbers_in_range(value)
def validate_numbers_in_range(self, value):
"""Validate that the given value is in the legal numeric range, if any are specified
Args:
value: Data value to verify
Raises:
ParseError if the min/max range is not respected by the value
"""
# Convert single values to lists for uniform handling
values = value if isinstance(value, list) else [value]
if self.minimum is not None:
minimums = self.minimum if isinstance(self.minimum, list) else [self.minimum]
for single_value, minimum in zip(values, minimums):
if single_value < minimum:
raise ParseError(f"Value of {value} is less than the allowed minimum of {self.minimum}")
if self.maximum is not None:
maximums = self.maximum if isinstance(self.maximum, list) else [self.maximum]
for single_value, maximum in zip(values, maximums):
if single_value > maximum:
raise ParseError(f"Value of {value} is greater than the allowed maximum of {self.maximum}")
super().validate_value(value)
def parse_extra_properties(self, property_set: dict) -> PropertySet:
"""Parse properties specific to numeric attribute types
Args:
property_set: (NAME, VALUE) for properties the attribute type might support
Raises:
ParseError: If any of the extra properties are invalid or not recognized
"""
remaining_properties = self.parse_min_max_properties(property_set)
unchecked_properties = super().parse_extra_properties(remaining_properties)
return unchecked_properties
def parse_min_max_properties(self, property_set: dict) -> PropertySet:
"""Check that the min and max property values in property_set are acceptable values (or not present).
Args:
property_set: (KEY:VALUE) set from which the min and max will be extracted
Returns:
The subset of property_set not checked by this method
"""
unchecked_set = {}
for property_name, property_value in property_set.items():
if property_name in [KEY_ATTR_MINIMUM, KEY_ATTR_MAXIMUM]:
try:
# Min/Max values are applied to all array members, but elements must be specified individually
self.validate_value_nested(property_value, [])
except ParseError as error:
raise ParseError(f"Setting {property_name} on {self.name}") from error
setattr(self, property_name, property_value)
else:
unchecked_set[property_name] = property_value
return unchecked_set
def cpp_includes(self) -> List[str]:
"""Numeric tuples can use the core 'tuple' class to provide some helpful support methods"""
regular_includes = super().cpp_includes()
if self.tuple_count > 1:
regular_includes.append("omni/graph/core/tuple.h")
return regular_includes
def empty_base_value(self) -> Union[float, int]:
"""Returns an empty value of the current attribute type without tuples or arrays"""
return 0.0 if self.numerical_type() in [self.TYPE_DECIMAL, self.TYPE_FLOAT] else 0
| 7,079 | Python | 40.893491 | 114 | 0.623817 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/AnyAttributeManager.py | """
Contains the support class for managing attributes whose data is any type of data, determined at runtime
"""
from typing import List
from ..keys import CudaPointerValues, MemoryTypeValues
from ..utils import _EXTENDED_TYPE_ANY, IndentedOutput, to_usd_docs, value_as_usd
from .AttributeManager import AttributeManager, CppConfiguration, CudaConfiguration
class AnyAttributeManager(AttributeManager):
"""
Support class for attributes whose type is only determined at runtime.
Most of the generated code is removed for this type of attribute since the interface types are not yet known.
"""
OGN_TYPE = "any"
CPP_CONFIGURATION = {
# Type information is overridden, but the include file is still needed for specialization
"any": CppConfiguration(None, include_files=["omni/graph/core/ogn/UsdTypes.h"])
}
CUDA_CONFIGURATION = {"any": CudaConfiguration(None, cast_required=False)}
# ----------------------------------------------------------------------
def requires_default(self):
"""Extended types never need default values as their data types are not known in advance"""
return False
# ----------------------------------------------------------------------
def validate_value(self, value):
"""All values are welcome"""
return True
# ----------------------------------------------------------------------
def validate_value_structure(self, value_to_validate):
"""we can't validate until runtime"""
return True
# ----------------------------------------------------------------------
@staticmethod
def array_depths_supported() -> List[int]:
"""The meaning of an array of mixed union types is unclear and will not be supported at this time"""
return [0]
# ----------------------------------------------------------------------
@staticmethod
def tuples_supported() -> List[int]:
"""USD supports only these tuples natively so restrict support to them for now"""
return [1]
# ----------------------------------------------------------------------
def cpp_configuration(self) -> CppConfiguration:
"""Returns the C++ configuration data that applies to the attribute type implemented by this manager
If no implementation is defined then return a default configuration.
"""
try:
return self.CPP_CONFIGURATION["any"]
except (AttributeError, KeyError):
return CppConfiguration("any")
# ----------------------------------------------------------------------
def cpp_extended_type(self):
"""Returns the extended type identifier for C++ types"""
return "kExtendedAttributeType_Any"
# ----------------------------------------------------------------------
def cpp_base_type_name(self):
"""Returns a string with the C++ type of the attribute data
This value relies on the fact that the group names correspond to the template parameters for RuntimeAttribute.
"""
template_args = [self.attribute_group, MemoryTypeValues.CPP[self.memory_storage()]]
if self.cuda_pointer_type is not None:
template_args.append(CudaPointerValues.CPP[self.cuda_pointer_type])
return f"ogn::RuntimeAttribute<{', '.join(template_args)}>"
# ----------------------------------------------------------------------
def cpp_element_type_name(self) -> str:
"""The configuration is all manual here so override the default method"""
return self.cpp_base_type_name()
# ----------------------------------------------------------------------
def cpp_includes(self) -> List[str]:
"""Tack on the include implementing the runtime attribute wrappers"""
regular_includes = super().cpp_includes()
regular_includes.append("omni/graph/core/ogn/RuntimeAttribute.h")
return regular_includes
# ----------------------------------------------------------------------
def cpp_accessor_on_cpu(self) -> bool:
"""Extended type wrappers provide a type-casting accessor that will always live on the CPU"""
return True
# ----------------------------------------------------------------------
def fabric_data_variable_name(self) -> str:
"""This type uses a local variable to encapsulate the Fabric data so reference that instead."""
return f"{self.cpp_variable_name()}_"
# ----------------------------------------------------------------------
def datamodel_accessor_constructor_args(self) -> List[str]:
"""The runtime attribute is a local variable so the pointer has to be added to the constructor"""
return [f"&{self.fabric_data_variable_name()}"] + super().datamodel_accessor_constructor_args()
# ----------------------------------------------------------------------
def datamodel_local_variables(self):
"""The runtime attributes require a wrapper for data access (RuntimeAttribute). Create one here to avoid
the overhead of recreating it every time access to it is needed, and to avoid needing a parallel set of
accessor classes just for extended attribute types.
"""
return [f"{self.fabric_raw_type()} {self.fabric_data_variable_name()}{{}};"]
# ----------------------------------------------------------------------
def has_fixed_type(self) -> bool:
"""Variable typed attributes have runtime type identification"""
return False
# ----------------------------------------------------------------------
def ogn_type(self) -> str:
"""Returns a string containing the fully expanded name of this attribute type in a .ogn file"""
return "any"
# ----------------------------------------------------------------------
def python_extended_type(self):
"""Returns the extended type identifier for Python attribute types"""
return (_EXTENDED_TYPE_ANY, "any")
# ----------------------------------------------------------------------
def python_imports(self) -> List[str]:
"""Return a list of modules to import in the Python header for proper parsing of this type"""
return super().python_imports() + ["from typing import Any"]
# ----------------------------------------------------------------------
def python_type_name(self):
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("Any")
# ----------------------------------------------------------------------
def generate_python_property_code(self, out: IndentedOutput):
"""Emits the generated code implementing a readable property for this extended attribute.
This class overrides the default behaviour because it needs a wrapper class to access the internal
functionality of the runtime data.
"""
property_name = self.python_property_name()
out.write()
out.write("@property")
if out.indent(f"def {property_name}(self) -> og.RuntimeAttribute:"):
out.write(f'"""Get the runtime wrapper class for the attribute {self.namespace}.{property_name}"""')
out.write(
f"return og.RuntimeAttribute(self._attributes.{property_name}.get_attribute_data(),"
f" self._context, {self.is_read_only()})"
)
out.exdent()
# For this type of attribute a setter can forward the assignment to the value of the attribute, where legal
out.write()
out.write(f"@{property_name}.setter")
if out.indent(f"def {property_name}(self, value_to_set: Any):"):
out.write(f'"""Assign another attribute\'s value to outputs.{property_name}"""')
if out.indent("if isinstance(value_to_set, og.RuntimeAttribute):"):
out.write(f"self.{property_name}.value = value_to_set.value")
out.exdent()
if out.indent("else:"):
out.write(f"self.{property_name}.value = value_to_set")
out.exdent()
out.exdent()
# ----------------------------------------------------------------------
def generate_python_validation(self, out: IndentedOutput):
"""Emit code that checks to make sure the attribute type is resolved before computing"""
if self.do_validation:
name = f"{self.namespace}.{self.python_property_name()}"
if out.indent(f"if db.{name}.type.base_type == og.BaseDataType.UNKNOWN:"):
out.write(f"db.log_warning('Required extended attribute {self.name} is not resolved, compute skipped')")
out.write("return False")
out.exdent()
# ----------------------------------------------------------------------
def usd_type_name(self):
"""As the type of the attribute is not known at load time use a token value to describe the accepted types"""
return "token"
# ----------------------------------------------------------------------
def usd_type_accepted_description(self) -> str:
"""Returns a string that will be the default value of the USD token, describing accepted types"""
return "any"
# ----------------------------------------------------------------------
def emit_usd_declaration(self, out):
"""USD declaration for extended types use a placeholder type of token so the code path must be replaced
Args:
out: Output handler where the USD will be emitted
"""
usd_name = self.usd_name()
usd_type = self.usd_type_name()
docs = to_usd_docs(self.description)
if self.array_depth == 0:
default = value_as_usd(self.usd_type_accepted_description())
else:
default = value_as_usd([self.usd_type_accepted_description()] * self.array_depth)
if out.indent(f"custom {usd_type} {usd_name} = {default} ("):
out.write(docs)
out.exdent(")")
| 10,050 | Python | 48.029268 | 120 | 0.532139 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/FrameAttributeManager.py | """
Contains the support class for managing attributes whose data is cartesian frames
"""
from typing import Any, List
from .AttributeManager import CppConfiguration, CudaConfiguration
from .MatrixAttributeManager import MatrixAttributeManager
class FrameAttributeManager(MatrixAttributeManager):
"""Support class for the attribute with role "frame" or "transform"
These are aliases for "matrixd[4]", with support for different USD naming
"""
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"frame[4]": CppConfiguration(
"pxr::GfMatrix4d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eFrame"
),
"transform[4]": CppConfiguration(
"pxr::GfMatrix4d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eTransform"
),
}
CUDA_CONFIGURATION = {
"frame[4]": CudaConfiguration("Matrix4d", include_files=["omni/graph/core/cuda/Matrix4d.h"], role="eFrame"),
"transform[4]": CudaConfiguration(
"Matrix4d", include_files=["omni/graph/core/cuda/Matrix4d.h"], role="eTransform"
),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
"""
if self.attribute_type_name == "frame":
a = 1.0
b = 0.0
c = 2.0
d = 3.0
else:
a = 1.5
b = 0.5
c = 2.5
d = 3.5
values = [
tuple(tuple(a if i == j else b for i in range(self.tuple_count)) for j in range(self.tuple_count)),
tuple(tuple(c if i == j else d for i in range(self.tuple_count)) for j in range(self.tuple_count)),
]
if for_usd:
from pxr import Gf
gf_type = getattr(Gf, f"Matrix{self.tuple_count}d")
values = [gf_type(*values[0]), gf_type(*values[1])]
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
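    # Worked example: for "frame" (tuple_count is always 4) the first sample above evaluates to the
    # identity matrix ((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, 0.0, 0.0, 1.0)),
    # and the second to the same pattern with 2.0 on the diagonal and 3.0 elsewhere.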
@staticmethod
def roles():
"""Return a list of valid role names for this type"""
return ["frame", "transform"]
@classmethod
def is_matrix_type(cls) -> bool:
"""Frames and Transforms are matrix types"""
return True
def suffix(self):
"""Always uses matrix4d"""
return "d"
def python_role_name(self) -> str:
"""Returns a string with the Python role name for this attribute"""
return "og.Database.ROLE_FRAME" if self.attribute_type_name == "frame" else "og.Database.ROLE_TRANSFORM"
@staticmethod
def tuples_supported() -> List[int]:
"""This type of matrix can only be 4d"""
return [4]
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return f"Frame{self.suffix()}"
def usd_type_name(self) -> str:
"""Returns a string with the data type the attribute would use in a USD file"""
# transform4d is not a valid USD type, but for our purposes it's the same as frame4d
return self.usd_add_arrays(f"frame{self.tuple_count}{self.suffix()}")
| 3,489 | Python | 37.351648 | 119 | 0.610777 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/NormalAttributeManager.py | """
Contains the support class for managing attributes whose data is arrays interpreted as surface normals
"""
from typing import Any, List
from .AttributeManager import CppConfiguration, CudaConfiguration
from .RoleAttributeManager import RoleAttributeManager
class NormalAttributeManager(RoleAttributeManager):
"""Support class for all attributes of type normal
This encompasses all legal USD types of normal(3|4)(d|f|h)
"""
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"normald[3]": CppConfiguration(
"pxr::GfVec3d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eNormal"
),
"normalf[3]": CppConfiguration(
"pxr::GfVec3f", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eNormal"
),
"normalh[3]": CppConfiguration(
"pxr::GfVec3h", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eNormal"
),
}
CUDA_CONFIGURATION = {
"normald[3]": CudaConfiguration(
"double3", include_files=["cuda_fp16.h", "omni/graph/core/CUDAUtils.h"], role="eNormal"
),
"normalf[3]": CudaConfiguration(
"float3", include_files=["cuda_fp16.h", "omni/graph/core/CUDAUtils.h"], role="eNormal"
),
"normalh[3]": CudaConfiguration(
"__half3", include_files=["cuda_fp16.h", "omni/graph/core/CUDAUtils.h"], role="eNormal"
),
"normald[4]": CudaConfiguration(
"double4", include_files=["cuda_fp16.h", "omni/graph/core/CUDAUtils.h"], role="eNormal"
),
"normalf[4]": CudaConfiguration(
"float4", include_files=["cuda_fp16.h", "omni/graph/core/CUDAUtils.h"], role="eNormal"
),
"normalh[4]": CudaConfiguration(
"__half4", include_files=["cuda_fp16.h", "omni/graph/core/CUDAUtils.h"], role="eNormal"
),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
"""
values = {
"d": [0.01625, 0.14125],
"f": [0.125, 0.1625],
"h": [0.5, 0.25],
}[self.suffix()]
values = [tuple((value + 0.125 * i) for i in range(self.tuple_count)) for value in values]
if for_usd:
from pxr import Gf
gf_type = getattr(Gf, f"Vec{self.tuple_count}{self.suffix()}")
values = [gf_type(*value) for value in values]
if self.array_depth > 0:
values = [[value, value[::-1]] for value in values]
return [[value] for value in values] if for_usd else values
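    # Worked example: for "normalf" (suffix "f", tuple_count 3) the samples above evaluate to
    # (0.125, 0.25, 0.375) and (0.1625, 0.2875, 0.4125) before any USD or array wrapping.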
@staticmethod
def roles():
"""Return a list of valid role names for this type"""
return ["normald", "normalf", "normalh"]
def python_role_name(self) -> str:
"""Returns a string with the Python role name for this attribute"""
return "og.Database.ROLE_NORMAL"
@staticmethod
def tuples_supported() -> List[int]:
"""This type can only have 3 members, not 1"""
return [3]
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return f"Normal{self.suffix()}"
def usd_type_name(self) -> str:
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_arrays(f"normal{self.tuple_count}{self.suffix()}")
| 3,690 | Python | 39.560439 | 119 | 0.607588 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/VectorAttributeManager.py | """
Contains the support class for managing attributes whose data is arrays interpreted as cartesian vectors
"""
from typing import Any, List
from .AttributeManager import CppConfiguration, CudaConfiguration
from .RoleAttributeManager import RoleAttributeManager
class VectorAttributeManager(RoleAttributeManager):
"""Support class for all attributes of type vector
This encompasses all legal USD types of (vector3d|vector3f|vector3h)
"""
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"vectord[3]": CppConfiguration(
"pxr::GfVec3d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eVector"
),
"vectorf[3]": CppConfiguration(
"pxr::GfVec3f", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eVector"
),
"vectorh[3]": CppConfiguration(
"pxr::GfVec3h", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eVector"
),
}
CUDA_CONFIGURATION = {
"vectord[3]": CudaConfiguration(
"double3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eVector"
),
"vectorf[3]": CudaConfiguration(
"float3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eVector"
),
"vectorh[3]": CudaConfiguration(
"__half3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eVector"
),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
"""
values = {
"d": [0.01625, 0.14125],
"f": [0.125, 0.1625],
"h": [0.5, 0.25],
}[self.suffix()]
values = [tuple((value + 0.125 * i) for i in range(self.tuple_count)) for value in values]
if for_usd:
from pxr import Gf
gf_type = getattr(Gf, f"Vec{self.tuple_count}{self.suffix()}")
values = [gf_type(*value) for value in values]
if self.array_depth > 0:
values = [[value, value[::-1]] for value in values]
return [[value] for value in values] if for_usd else values
@staticmethod
def roles():
"""Return a list of valid role names for this type"""
return ["vectord", "vectorf", "vectorh"]
def python_role_name(self) -> str:
"""Returns a string with the Python role name for this attribute"""
return "og.Database.ROLE_VECTOR"
@staticmethod
def tuples_supported() -> List[int]:
"""This type can only have 3 members, not 1"""
return [3]
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return f"Vector{self.suffix()}"
def usd_type_name(self) -> str:
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_arrays(f"vector{self.tuple_count}{self.suffix()}")
| 3,262 | Python | 38.792682 | 119 | 0.616493 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/Int64AttributeManager.py | """
Contains the support class for managing attributes whose data is 64 bit integers
"""
from typing import Any
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, values_in_range
from .parsing import is_type_or_list_of_types
class Int64AttributeManager(NumericAttributeManager):
"""Support class for attributes of type 64-bit integer"""
OGN_TYPE = "int64"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"int64": CppConfiguration("int64_t", cast_required=False),
}
CUDA_CONFIGURATION = {
"int64": CudaConfiguration("int64_t", include_files=["stdint.h"], cast_required=False),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: Ignored - values are set as-is in Python
"""
values = [-46, -64]
if self.tuple_count > 1:
values = [tuple(value + i for i in range(self.tuple_count)) for value in values]
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
def numerical_type(self) -> int:
"""Returns the numerical TYPE_* value for this attribute's data"""
return NumericAttributeManager.TYPE_INTEGER
def validate_value(self, value):
"""Raises a ParseError if value is not a valid integer value"""
if not is_type_or_list_of_types(value, int, self.tuple_count):
raise ParseError(f"Value {value} on an int64[{self.tuple_count}] attribute is not a matching type")
if not values_in_range(value, -9223372036854775808, 9223372036854775807):
raise ParseError(f"Value {value} on a 64-bit integer[{self.tuple_count}] attribute is out of range")
super().validate_value(value)
def python_type_name(self):
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("int")
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "Int64"
def usd_type_name(self):
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_containers("int64")
| 2,585 | Python | 41.393442 | 112 | 0.671954 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/UIntAttributeManager.py | """
Contains the support class for managing attributes whose data is unsigned 32 bit integers
"""
from typing import Any, List
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, values_in_range
from .parsing import is_type_or_list_of_types
class UIntAttributeManager(NumericAttributeManager):
"""Support class for attributes of type unsigned 32-bit integer"""
OGN_TYPE = "uint"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
"uint": CppConfiguration("uint32_t", cast_required=False),
}
CUDA_CONFIGURATION = {
"uint": CudaConfiguration("uint32_t", cast_required=False),
}
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: Ignored - values are set as-is in Python
"""
values = [32, 23]
if self.tuple_count > 1:
values = [tuple(value + i for i in range(self.tuple_count)) for value in values]
if self.array_depth > 0:
values = [values, [values[1], values[0]]]
return [[value] for value in values] if for_usd else values
def numerical_type(self) -> int:
"""Returns the numerical TYPE_* value for this attribute's data"""
return NumericAttributeManager.TYPE_UNSIGNED_INTEGER
def validate_value(self, value):
"""Raises a ParseError if value is not a valid integer value"""
if not is_type_or_list_of_types(value, int, self.tuple_count):
raise ParseError(f"Value {value} on a uint[{self.tuple_count}] attribute is not a matching type")
if not values_in_range(value, 0, 4294967295):
raise ParseError(f"Value {value} on a uint[{self.tuple_count}] attribute is out of range")
super().validate_value(value)
@staticmethod
def tuples_supported() -> List[int]:
"""USD only supports a single unsigned value, no tuples"""
return [1]
def python_type_name(self):
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("int")
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "UInt"
def usd_type_name(self):
"""Returns a string with the data type the attribute would use in a USD file"""
return self.usd_add_containers("uint")
def cuda_includes(self) -> str:
"""Add on the include for this data type for CUDA compilation"""
includes = super().cuda_includes()
includes.append("stdint.h")
return includes
| 2,903 | Python | 39.333333 | 109 | 0.662074 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/ExecutionAttributeManager.py | """
Contains the support class for managing attributes whose data represents execution state
"""
from typing import Any, List, Optional
from ..utils import ParseError, to_usd_docs
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, values_in_range
from .parsing import is_type_or_list_of_types
from .RoleAttributeManager import RoleAttributeManager
class ExecutionAttributeManager(RoleAttributeManager):
"""Support class for attributes of type execution
    These are uint types with the internal Execution role
"""
OGN_TYPE = "execution"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {"execution": CppConfiguration("uint32_t", cast_required=False, role="eExecution")}
CUDA_CONFIGURATION = {"execution": CudaConfiguration("int", cast_required=False, role="eExecution")}
@staticmethod
def roles():
"""Return a list of valid role names for this type"""
return ["execution"]
def numerical_type(self) -> int:
"""Returns the numerical TYPE_* value for this attribute's data"""
return NumericAttributeManager.TYPE_UNSIGNED_INTEGER
@staticmethod
def array_depths_supported() -> List[int]:
"""Arrays of execution values doesn't make any sense"""
return [0]
@staticmethod
def tuples_supported() -> List[int]:
"""Executions don't have tuples"""
return [1]
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: Ignored - values are set as-is in Python
"""
# execution must be in the range of the enum ExecutionAttributeState
values = [0, 1, 2]
return [[value] for value in values] if for_usd else values
def validate_value(self, value):
"""Raises a ParseError if value is not a valid uint value"""
if not is_type_or_list_of_types(value, int, self.tuple_count):
raise ParseError(f"Value {value} on a uint[{self.tuple_count}] attribute is not a matching type")
if not values_in_range(value, 0, 4294967295):
raise ParseError(f"Value {value} on a uint[{self.tuple_count}] attribute is out of range")
super().validate_value(value)
def requires_default(self):
"""Execution attributes are transient by nature, we shouldn't really store them at all"""
return False
def python_role_name(self) -> str:
"""Returns a string with the Python role name for this attribute"""
return "og.Database.ROLE_EXECUTION"
def cuda_base_type_name(self) -> str:
"""Returns a string with the CUDA base type of the attribute data"""
return "uint32_t"
    def sdf_type_name(self) -> Optional[str]:
        """Execution attributes have no pxr::SdfValueTypeName"""
        return None
def sdf_base_type(self) -> str:
"""Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
return "UInt"
def python_type_name(self) -> str:
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("int")
def usd_type_name(self) -> str:
"""Returns a string with the data type the attribute would use in a USD file"""
# FIXME: This isn't accurate for execution case, the usd type is 'uint' but we are passing in 'execution' when
# creating the attribute so that it can be recognized as a uint with eExecution role.
# (see emit_usd_declaration())
return "execution"
def emit_usd_declaration(self, out) -> List[str]:
"""Print a declaration for this attribute in USD
Args:
out: Output handler where the USD will be emitted
"""
try:
usd_name = self.usd_name()
# Overriding default behavior of using self.usd_type_name()
usd_type = "uint"
except ParseError:
# Attributes without USD representations can be skipped
return
docs = to_usd_docs(self.description)
default_value = self.usd_default_value()
if out.indent(f"custom {usd_type} {usd_name}{default_value} ("):
out.write(docs)
out.exdent(")")
| 4,478 | Python | 38.637168 | 118 | 0.656543 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/BundleAttributeManager.py | """
Support for handling attributes of type "bundle" - i.e. attributes whose job it is to encapsulate arbitrary
collections of other attributes, including other bundle attributes.
"""
from typing import List, Tuple
from ..keys import CudaPointerValues, MemoryTypeValues
from ..utils import IndentedOutput, ParseError, ensure_quoted, to_usd_docs
from .AttributeManager import AttributeManager, CppConfiguration
from .naming import INPUT_GROUP, OUTPUT_GROUP, STATE_GROUP
class BundleAttributeManager(AttributeManager):
"""
Support class for attributes of type attribute bundle.
This type of attribute is more complex than standard attributes since it has many more features to handle.
"""
OGN_TYPE = "bundle"
# Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
CPP_CONFIGURATION = {
# Type information is overridden but the include file is important to specialize
"bundle": CppConfiguration(None, include_files=["omni/graph/core/ogn/UsdTypes.h"])
}
def requires_default(self):
"""Bundles never need default values as nothing other than an empty bundle makes sense"""
return False
@staticmethod
def array_depths_supported() -> List[int]:
"""Bundle arrays are not yet supported in Fabric"""
return [0]
def memory_storage(self) -> str:
"""Bundle handles will always be stored on the CPU as that is where Fabric forces them"""
return MemoryTypeValues.CPU
def cpp_base_type_name(self):
"""This type name switches based on read status so this has to override the default method"""
return "ConstBundleHandle" if self.is_read_only() else "BundleHandle"
def cpp_element_type_name(self) -> str:
"""The configuration is all manual here so override the default method"""
return self.cpp_base_type_name()
def cpp_default_initializer(self):
"""The bundle doesn't really have a default possible so initialize it to an invalid handle or empty array."""
return "nullptr, 0" if self.array_depth > 0 else "BundleHandle::invalidValue()"
def cpp_includes(self) -> List[str]:
"""Tack on the include implementing the bundle wrappers"""
includes = super().cpp_includes()
includes.append("omni/graph/core/ogn/Bundle.h")
return includes
def cpp_accessor_on_cpu(self) -> bool:
"""Bundle wrappers provide a type-casting accessor that will always live on the CPU"""
return True
    def cpp_wrapper_class(self) -> Tuple[str, List[str]]:
"""Returns the bundle-specific wrapper class name used to access attribute data in the C++ database"""
template_args = [self.attribute_group, MemoryTypeValues.CPP[self.memory_type]]
if self.cuda_pointer_type is not None:
template_args.append(CudaPointerValues.CPP[self.cuda_pointer_type])
return ("ogn::BundleAttribute", template_args)
def cpp_set_handle_at_runtime(self) -> bool:
"""Bundle types do not use attribute handles"""
return False
def validate_value(self, value):
"""Raises a ParseError if value is not a valid bundle value"""
raise ParseError("Bundles do not have values")
def cuda_includes(self) -> List[str]:
"""The bundle data is the same type of data in CUDA as it is in C++"""
includes = super().cuda_includes()
includes.append("omni/graph/core/Handle.h")
return includes
def cuda_base_type_name(self) -> str:
"""Returns a string with the CUDA base type of the attribute data"""
return "omni::graph::core::ConstBundleHandle" if self.is_read_only() else "omni::graph::core::BundleHandle"
def cuda_element_type_name(self) -> str:
"""The configuration is all manual here so override the default method"""
return self.cuda_base_type_name()
def fabric_data_variable_name(self) -> str:
"""Returns a string containing the generated name of the Fabric pointer for this attribute."""
return f"{self.cpp_variable_name()}.m_bundleHandle"
def fabric_pointer_exists(self) -> str:
"""Return a string that checks for the existence of the Fabric pointer variable value"""
return f"{self.fabric_data_variable_name()}.isValid()"
def python_role_name(self) -> str:
"""Returns a string with the Python role name for this attribute"""
return "og.Database.ROLE_BUNDLE"
def usd_name(self) -> str:
"""Bundled output and state attributes are represented as virtual prims in USD so they cannot have namespaces"""
# Output and state bundles are prims so they have to follow the prim naming restrictions (no colons allowed)
if self.attribute_group == INPUT_GROUP:
return self.name
return self.name.replace(":", "_")
def create_type_name(self) -> str:
"""Bundled attributes have a special name when creating so that they can be instantiated differently"""
return "bundle"
# ----------------------------------------------------------------------
def generate_python_property_code(self, out: IndentedOutput):
"""Emits the generated code implementing a property for this bundle attribute.
This class overrides the default behaviour because it needs a wrapper class to access the internal
functionality of the bundle.
"""
property_name = self.python_property_name()
out.write()
out.write("@property")
if out.indent(f"def {property_name}(self) -> og.BundleContents:"):
out.write(f'"""Get the bundle wrapper class for the attribute {self.namespace}.{property_name}"""')
out.write(f"return self.__bundles.{property_name}")
out.exdent()
# No setters at all for read only bundles
if self.is_read_only():
return
property_name = self.python_property_name()
out.write()
out.write(f"@{property_name}.setter")
if out.indent(f"def {property_name}(self, bundle: og.BundleContents):"):
out.write(f'"""Overwrite the bundle attribute {self.namespace}.{property_name} with a new bundle"""')
if out.indent("if not isinstance(bundle, og.BundleContents):"):
out.write('carb.log_error("Only bundle attributes can be assigned to another bundle attribute")')
out.exdent()
out.write(f"self.__bundles.{property_name}.bundle = bundle")
out.exdent()
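    # A sketch of the emitted accessor pair, assuming a hypothetical writable attribute "outputs:bundle"
    # whose python_property_name() resolves to "bundle":
    #
    #     @property
    #     def bundle(self) -> og.BundleContents:
    #         """Get the bundle wrapper class for the attribute outputs.bundle"""
    #         return self.__bundles.bundle
    #
    #     @bundle.setter
    #     def bundle(self, bundle: og.BundleContents):
    #         """Overwrite the bundle attribute outputs.bundle with a new bundle"""
    #         if not isinstance(bundle, og.BundleContents):
    #             carb.log_error("Only bundle attributes can be assigned to another bundle attribute")
    #         self.__bundles.bundle.bundle = bundle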
# ----------------------------------------------------------------------
def python_imports(self) -> List[str]:
"""Return a list of modules to import in the Python header for proper parsing of this type"""
return super().python_imports() + ["import carb"]
# ----------------------------------------------------------------------
def emit_usd_declaration(self, out: IndentedOutput):
"""Print a declaration for this attribute in USD
Args:
out: Output handler where the USD will be emitted
"""
try:
usd_name = self.usd_name()
except ParseError:
# Attributes without USD representations can be skipped
return
docs = to_usd_docs(self.description)
# Bundle attributes are stored as virtual prims, not actual USD attributes, so they require a
# different type of declaration. Inputs are relationships, outputs are defined as nested prims.
if self.attribute_group == INPUT_GROUP:
if out.indent(f"custom rel {usd_name} ("):
out.write(docs)
out.exdent(")")
else:
# Prim names cannot have colons in them
usd_name = usd_name.replace(":", "_")
if self.attribute_group == OUTPUT_GROUP:
side = "Output"
elif self.attribute_group == STATE_GROUP:
side = "State"
else:
side = "Unknown"
if out.indent(f"def {side} {ensure_quoted(usd_name)} ("):
out.write(docs)
out.exdent(")")
out.write("{")
out.write("}")
| 8,205 | Python | 44.087912 | 120 | 0.628154 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/RoleAttributeManager.py | """
Contains the support class for managing attributes whose data is values with specific interpretations
"""
from typing import List
from ..utils import ParseError
from .NumericAttributeManager import NumericAttributeManager
# ======================================================================
class RoleAttributeManager(NumericAttributeManager):
"""Base class for all attribute types that assume different names to tag their special roles"""
def __init__(self, attribute_name: str, attribute_type_name: str):
"""Initialize the role-based attribute information
Args:
attribute_name: Name to use for this attribute
attribute_type_name: Unique name for this attribute type
"""
super().__init__(attribute_name, attribute_type_name)
if attribute_type_name not in self.roles():
raise ParseError(f"Only {'|'.join(self.roles())} are legal - {attribute_type_name} is not")
def suffix(self):
"""Returns the role suffix, for easy type construction"""
return self.attribute_type_name[-1]
def tuples_allowed(self) -> List[int]:
"""No tuples are currently supported for this type"""
return []
def ogn_base_type(self) -> str:
"""Returns a string containing the fully expanded name of this attribute type in a .ogn file"""
return self.attribute_type_name
def ogn_type(self) -> str:
"""Returns a string containing the fully expanded name of this attribute type in a .ogn file"""
full_type = self.attribute_type_name
if self.tuple_count > 1:
full_type += f"[{self.tuple_count}]"
full_type += "[]" * self.array_depth
return full_type
def numerical_type(self) -> int:
"""Returns the numerical TYPE_* value for this attribute's data"""
return NumericAttributeManager.TYPE_DECIMAL if self.suffix() == "d" else NumericAttributeManager.TYPE_FLOAT
@staticmethod
def roles():
"""Return a list of valid role names for this type"""
return []
def python_type_name(self) -> str:
"""Returns a string with the Python "typing" type-checking declaration for the attribute data"""
return self.python_add_containers("float")
| 2,271 | Python | 38.859648 | 115 | 0.641127 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/management.py | """
Collection of utilities and constants to handle interaction with the set of all attribute managers
.. data:: ATTRIBUTE_MANAGERS
Dictionary of {BASE_TYPE: AttributeManager} for all supported root attribute types. A root type is
"int" but not "int[2]". Tuple counts and array depths are parsed by the manager's constructor.
"""
import json
import re
from contextlib import suppress
from typing import Dict, List, Optional, Tuple, Union
from ..keys import AttributeKeys
from ..utils import MetadataKeys, ParseError, UnimplementedError, attrib_description_to_string, check_memory_type
from .AnyAttributeManager import AnyAttributeManager
from .AttributeManager import AttributeManager
from .BoolAttributeManager import BoolAttributeManager
from .BundleAttributeManager import BundleAttributeManager
from .ColorAttributeManager import ColorAttributeManager
from .DoubleAttributeManager import DoubleAttributeManager
from .ExecutionAttributeManager import ExecutionAttributeManager
from .FloatAttributeManager import FloatAttributeManager
from .FrameAttributeManager import FrameAttributeManager
from .HalfAttributeManager import HalfAttributeManager
from .Int64AttributeManager import Int64AttributeManager
from .IntAttributeManager import IntAttributeManager
from .MatrixAttributeManager import MatrixAttributeManager
from .NormalAttributeManager import NormalAttributeManager
from .ObjectIdAttributeManager import ObjectIdAttributeManager
from .PathAttributeManager import PathAttributeManager
from .PointAttributeManager import PointAttributeManager
from .QuaternionAttributeManager import QuaternionAttributeManager
from .StringAttributeManager import StringAttributeManager
from .TexCoordAttributeManager import TexCoordAttributeManager
from .TimeCodeAttributeManager import TimeCodeAttributeManager
from .TokenAttributeManager import TokenAttributeManager
from .UCharAttributeManager import UCharAttributeManager
from .UInt64AttributeManager import UInt64AttributeManager
from .UIntAttributeManager import UIntAttributeManager
from .UnionAttributeManager import UnionAttributeManager
from .VectorAttributeManager import VectorAttributeManager
# ======================================================================
# Collection of all supported attribute type classes
ATTRIBUTE_MANAGERS = {
support_class.OGN_TYPE: support_class
for support_class in [
AnyAttributeManager,
BoolAttributeManager,
BundleAttributeManager,
DoubleAttributeManager,
FloatAttributeManager,
HalfAttributeManager,
IntAttributeManager,
Int64AttributeManager,
ObjectIdAttributeManager,
PathAttributeManager,
StringAttributeManager,
TimeCodeAttributeManager,
TokenAttributeManager,
UCharAttributeManager,
UIntAttributeManager,
UInt64AttributeManager,
UnionAttributeManager,
]
}
# Role-based attributes have more than one possible type name so they have to be iterated
ATTRIBUTE_MANAGERS.update({role: ColorAttributeManager for role in ColorAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: MatrixAttributeManager for role in MatrixAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: FrameAttributeManager for role in FrameAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: NormalAttributeManager for role in NormalAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: PointAttributeManager for role in PointAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: QuaternionAttributeManager for role in QuaternionAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: TexCoordAttributeManager for role in TexCoordAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: VectorAttributeManager for role in VectorAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: ExecutionAttributeManager for role in ExecutionAttributeManager.roles()})
ALL_ATTRIBUTE_TYPES = ATTRIBUTE_MANAGERS # Backward compatibility
# Attribute types that are OGN shorthand for a list of types, usable within union type definition list
ATTRIBUTE_UNION_GROUPS = {
"integral_scalers": ["uchar", "int", "uint", "uint64", "int64"],
"integral_tuples": ["int[2]", "int[3]", "int[4]"],
"decimal_scalers": ["double", "float", "half", "timecode"],
"decimal_tuples": [
"double[2]",
"double[3]",
"double[4]",
"float[2]",
"float[3]",
"float[4]",
"half[2]",
"half[3]",
"half[4]",
"colord[3]",
"colord[4]",
"colorf[3]",
"colorf[4]",
"colorh[3]",
"colorh[4]",
"normald[3]",
"normalf[3]",
"normalh[3]",
"pointd[3]",
"pointf[3]",
"pointh[3]",
"texcoordd[2]",
"texcoordd[3]",
"texcoordf[2]",
"texcoordf[3]",
"texcoordh[2]",
"texcoordh[3]",
"quatd[4]",
"quatf[4]",
"quath[4]",
"vectord[3]",
"vectorf[3]",
"vectorh[3]",
],
"matrices": ["matrixd[3]", "matrixd[4]", "transform[4]", "frame[4]"],
}
# Add meta-level union groups
ATTRIBUTE_UNION_GROUPS["integral_array_elements"] = (
ATTRIBUTE_UNION_GROUPS["integral_scalers"] + ATTRIBUTE_UNION_GROUPS["integral_tuples"]
)
ATTRIBUTE_UNION_GROUPS["integral_arrays"] = [tp + "[]" for tp in ATTRIBUTE_UNION_GROUPS["integral_array_elements"]]
ATTRIBUTE_UNION_GROUPS["integrals"] = (
ATTRIBUTE_UNION_GROUPS["integral_array_elements"] + ATTRIBUTE_UNION_GROUPS["integral_arrays"]
)
ATTRIBUTE_UNION_GROUPS["decimal_array_elements"] = (
ATTRIBUTE_UNION_GROUPS["decimal_scalers"] + ATTRIBUTE_UNION_GROUPS["decimal_tuples"]
)
ATTRIBUTE_UNION_GROUPS["decimal_arrays"] = [tp + "[]" for tp in ATTRIBUTE_UNION_GROUPS["decimal_array_elements"]]
ATTRIBUTE_UNION_GROUPS["decimals"] = (
ATTRIBUTE_UNION_GROUPS["decimal_array_elements"] + ATTRIBUTE_UNION_GROUPS["decimal_arrays"]
)
ATTRIBUTE_UNION_GROUPS["numeric_scalers"] = (
ATTRIBUTE_UNION_GROUPS["integral_scalers"] + ATTRIBUTE_UNION_GROUPS["decimal_scalers"]
)
ATTRIBUTE_UNION_GROUPS["numeric_tuples"] = (
ATTRIBUTE_UNION_GROUPS["integral_tuples"] + ATTRIBUTE_UNION_GROUPS["decimal_tuples"]
)
ATTRIBUTE_UNION_GROUPS["numeric_array_elements"] = (
ATTRIBUTE_UNION_GROUPS["numeric_scalers"]
+ ATTRIBUTE_UNION_GROUPS["numeric_tuples"]
+ ATTRIBUTE_UNION_GROUPS["matrices"]
)
ATTRIBUTE_UNION_GROUPS["numeric_arrays"] = [tp + "[]" for tp in ATTRIBUTE_UNION_GROUPS["numeric_array_elements"]]
ATTRIBUTE_UNION_GROUPS["numerics"] = (
ATTRIBUTE_UNION_GROUPS["numeric_array_elements"] + ATTRIBUTE_UNION_GROUPS["numeric_arrays"]
)
ATTRIBUTE_UNION_GROUPS["array_elements"] = ATTRIBUTE_UNION_GROUPS["numeric_array_elements"] + ["token"]
ATTRIBUTE_UNION_GROUPS["arrays"] = ATTRIBUTE_UNION_GROUPS["numeric_arrays"] + ["token[]"]
# Pattern match for the attribute type names
# group(1) = Base name of the attribute type (e.g. double, float, ...)
# group(2) = Element count (None if no element count specified)
# group(3) = String with matching array brackets - len(group(3))/2 = array depth
RE_ATTRIBUTE_TYPE = re.compile(r"([^\[\]]+)(?:\[([1-9][0-9]{0,2})\])?((?:\[\]){0,2})$")
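# For example (matches derived from the pattern above):
#   RE_ATTRIBUTE_TYPE.match("double[3][]").groups() -> ("double", "3", "[]")
#   RE_ATTRIBUTE_TYPE.match("token").groups()       -> ("token", None, "")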
# ======================================================================
def expand_attribute_union_groups(union_types: List) -> List:
"""Expands any group names in a union type declaration, in-place. The
list will have duplicate types removed.
Args:
union_types: The list of union types
"""
union_type_set = set()
for tp in union_types:
for literal_type in ATTRIBUTE_UNION_GROUPS.get(tp, [tp]):
union_type_set.add(literal_type)
return list(union_type_set)
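# Usage sketch: group names expand to their members and literal type names pass through, e.g.
#   expand_attribute_union_groups(["integral_tuples", "token"])
# yields the set {"int[2]", "int[3]", "int[4]", "token"} as a list (ordering is not guaranteed).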
# ======================================================================
def split_attribute_type_name(
full_type_name: Union[str, List]
) -> Tuple[str, int, int, Optional[Dict[str, AttributeManager]]]:
"""Split a fully qualified attribute type name into its identifying parts
Args:
full_type_name: Fully qualified attribute type consisting of type name
followed by optional element count as "[X]" and optional arrays as "[]"
Returns:
(base name, element count, array depth, extra_info) extracted from the full name.
e.g. "int[5][]" returns ("int", 5, 1) and "float[][]" returns ("float", 1, 2)
If the type manager requires any construction parameters they will be in the last tuple
element (None means no construction parameters need be passed)
Raises:
ParseError: the name passed in doesn't follow the naming pattern
"""
extra_info = None
if isinstance(full_type_name, list):
attribute_name = "union"
        # Special syntax for annotating "lists of union types" encloses the entire type list in [] rather
        # than appending them to the end. e.g. "type": [["float", "double"]], versus "type": "float[]"
if full_type_name and isinstance(full_type_name[0], list):
array_depth = 1
types_accepted = full_type_name[0]
else:
array_depth = 0
types_accepted = full_type_name
extra_info = {}
for attribute_type in expand_attribute_union_groups(types_accepted):
extra_info[attribute_type] = get_attribute_manager_type(attribute_type)
tuple_count = 1
else:
type_match = RE_ATTRIBUTE_TYPE.match(full_type_name)
if type_match is None:
raise ParseError(f"Attribute type name {full_type_name} does not match pattern TYPE{{[X]}}{{[]{{[]}}}}")
attribute_name = type_match.group(1)
tuple_count = 1
if type_match.group(2) is not None:
tuple_count = int(type_match.group(2))
array_depth = 0
if type_match.group(3) is not None:
array_depth = int(len(type_match.group(3)) / 2)
return (attribute_name, tuple_count, array_depth, extra_info)
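# Usage sketch, following the parsing rules above:
#   split_attribute_type_name("pointf[3][]")       -> ("pointf", 3, 1, None)
#   split_attribute_type_name(["float", "double"]) -> ("union", 1, 0, extra_info), where extra_info
#   maps each accepted type name to a default-constructed attribute manager.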
# ======================================================================
def validate_attribute_type_name(type_name: str, tuple_count: int, array_depth: int):
"""Validate a fully qualified attribute name from its constituent parts.
Use naming.py:assemble_attribute_type_name to give you the full name once it is validated.
It would be done here except that would create a circular import problem. Normal usage is
try:
validate_attribute_type_name(type_name, tuple_count, array_depth)
full_name = assemble_attribute_type_name(type_name, tuple_count, array_depth)
except AttributeError:
pass
Args:
type_name: Base name of the attribute type
tuple_count: Number of tuple elements in the attribute type
array_depth: Levels of arrays in the attribute type
Raises:
AttributeError: the constituent parts cannot be assembled into a legal attribute type
"""
try:
manager_type = ATTRIBUTE_MANAGERS[type_name]
except KeyError as error:
raise AttributeError(
f"Type name {type_name} is not on the recognized list {list(ATTRIBUTE_MANAGERS.keys())}"
) from error
tuples_supported = manager_type.tuples_supported()
array_depths_supported = manager_type.array_depths_supported()
def __get_legal_type_names() -> List[str]:
"""Returns the list of valid names for a legal type_name"""
legal_tuple_names = [type_name] if 1 in tuples_supported else []
legal_tuple_names = [f"{type_name}[{tuple_value}]" for tuple_value in tuples_supported if tuple_value > 1]
all_legal_names = []
for legal_depth in array_depths_supported:
array_tag = "[]" * legal_depth
for tuple_name in legal_tuple_names:
all_legal_names.append(f"{type_name}{tuple_name}{array_tag}")
return all_legal_names
if tuple_count not in tuples_supported:
raise AttributeError(
f"Tuple count {tuple_count} is not supported. Attribute type must be one of {__get_legal_type_names()}"
)
if array_depth not in array_depths_supported:
raise AttributeError(
f"Array depth {array_depth} is not supported. Attribute type must be one of {__get_legal_type_names()}"
)
# ======================================================================
def get_attribute_manager_type(attribute_type: str, attribute_name: str = "inputs:default"):
"""Returns an attribute manager that matches the name and type, with no other data.
The attribute manager returned will be incomplete, and may not be valid. It is meant to use for things
like verifying legal values for a type, avoiding the chicken-and-egg scenario of needing the manager to
supply a legal value in order to set the legal value on that manager.
Args:
        attribute_type: Fully encoded attribute type value, e.g. "int[3][]"
        attribute_name: Name to give the attribute on the manager; a placeholder default is used if omitted
Returns:
Populated attribute manager for the given type (all other required values will be set to defaults)
Raises:
ParseError if the attribute type is not a recognized legal type
"""
# Find the manager for this attribute's type
try:
base_type_name, tuple_count, array_depth, extra_info = split_attribute_type_name(attribute_type)
except KeyError as error:
raise ParseError(
f'Could not decode attribute type "{attribute_type}" for attribute "{attribute_name}"'
) from error
try:
validate_attribute_type_name(base_type_name, tuple_count, array_depth)
except AttributeError as e:
raise ParseError(f'Unsupported attribute type "{attribute_type}" for attribute "{attribute_name}"') from e
if extra_info is None:
attribute_manager = ATTRIBUTE_MANAGERS[base_type_name](attribute_name, base_type_name)
else:
attribute_manager = ATTRIBUTE_MANAGERS[base_type_name](attribute_name, base_type_name, extra_info)
attribute_manager.tuple_count = tuple_count
attribute_manager.array_depth = array_depth
return attribute_manager
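# Usage sketch: the returned manager carries only the decoded type information, e.g.
#   manager = get_attribute_manager_type("float[3][]")
#   assert manager.tuple_count == 3 and manager.array_depth == 1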
# ======================================================================
def get_attribute_manager(attribute_name: str, attribute_data: dict) -> AttributeManager:
"""Deciphers, validates, and provides consistent access to attributes described by a dictionary.
This function deciphers the type of attribute it contains and then runs a sub-parser appropriate to that type of
attribute which performs semantic validation (e.g. that a default value is within a min/max range).
Args:
attribute_name: Fully namespaced name of the attribute being accessed
attribute_data: Dictionary containing the attribute interface data, as extracted from the JSON
Raise:
ParseError: If there are any errors parsing the attribute description - string contains the problem
Returns:
Object that provides an interface to the parsed attribute
"""
if not isinstance(attribute_data, dict):
raise ParseError(f"Value of node name key {attribute_name} must be a dictionary")
# Find the manager for this attribute's type
attribute_manager = get_attribute_manager_type(attribute_data[AttributeKeys.TYPE], attribute_name=attribute_name)
# Set the mandatory attribute values in a generic way
for attr_key in AttributeKeys.MANDATORY:
try:
setattr(attribute_manager, attr_key, attribute_data[attr_key])
except KeyError:
raise ParseError(f'"{attr_key}" value is mandatory for attribute "{attribute_name}"') from None
# Check to see if the attribute is optional
with suppress(KeyError):
attribute_manager.is_required = not attribute_data[AttributeKeys.OPTIONAL]
# Check to see if the attribute has been deprecated
with suppress(KeyError):
attribute_manager.deprecation_msg = attribute_data[AttributeKeys.DEPRECATED]
if isinstance(attribute_manager.deprecation_msg, list):
attribute_manager.deprecation_msg = " ".join(attribute_manager.deprecation_msg)
attribute_manager.is_deprecated = True
# Check to see if the attribute can go into compute without validation
with suppress(KeyError):
attribute_manager.do_validation = not attribute_data[AttributeKeys.UNVALIDATED]
# Check to see if the attribute has metadata
with suppress(KeyError):
attribute_manager.parse_metadata(attribute_data[AttributeKeys.METADATA])
# Store the attribute description in the metadata as there is no direct ABI for it
attribute_manager.metadata[MetadataKeys.DESCRIPTION] = attrib_description_to_string(attribute_manager.description)
# Check to see if the attribute is using the shorter definition of uiName metadata
with suppress(KeyError):
attribute_manager.metadata[MetadataKeys.UI_NAME] = attribute_data[AttributeKeys.UI_NAME]
# Check to see if the attribute is overriding its memory type
with suppress(KeyError):
attribute_manager.memory_type = check_memory_type(attribute_data[AttributeKeys.MEMORY_TYPE])
attribute_manager.metadata[MetadataKeys.MEMORY_TYPE] = attribute_manager.memory_type
# Check to see if the attribute has a default value set
try:
attribute_manager.default = attribute_data[AttributeKeys.DEFAULT]
# Store the default value as metadata so that it can be retrieved to regenerate the file
attribute_manager.metadata[MetadataKeys.DEFAULT] = json.dumps(attribute_manager.default)
except KeyError:
if attribute_manager.requires_default():
attribute_manager.default = attribute_manager.empty_value()
# Process the keys that are not mandatory for the specific discovered attribute type
extra_properties = {key: value for key, value in attribute_data.items() if key not in AttributeKeys.PROCESSED}
try:
unparsed_properties = attribute_manager.parse_extra_properties(extra_properties)
unparsed_errors = [_prop for _prop in unparsed_properties if _prop[0] != "$"]
if unparsed_errors:
raise ParseError(f"Unparsed fields {unparsed_errors}")
except ParseError as error:
raise ParseError(f"Attribute {attribute_name}") from error
return attribute_manager
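# A minimal usage sketch, assuming AttributeKeys.MANDATORY covers the standard .ogn "type" and
# "description" keys (other keys shown here are optional):
#   manager = get_attribute_manager(
#       "inputs:count", {"type": "int", "description": "An example input", "default": 0}
#   )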
# ======================================================================
def supported_attribute_type_names(do_formatting: bool = False) -> List[str]:
"""Returns a list of the OGN type names of all currently supported attribute types (e.g. "int[3]", not "int3")
Args:
do_formatting: If True then group together like tuples and arrays
Returns:
List of string representing all currently supported attribute types
"""
supported_type_names = []
for attribute_type_name, attribute_manager in ATTRIBUTE_MANAGERS.items():
# USD no longer supports the transformX attribute types so filter them out
if attribute_type_name.startswith("transform"):
continue
try:
# Create a temporary to check support
manager = attribute_manager("inputs:temp", attribute_type_name)
supported_tuples = manager.tuples_supported()
supported_array_depths = manager.array_depths_supported()
supported_list = []
for array_depth in supported_array_depths:
for tuple_count in supported_tuples:
full_name = attribute_type_name
full_name = f"{full_name}[{tuple_count}]" if tuple_count > 1 else full_name
full_name += "[]" * array_depth
supported_list.append(full_name)
# Add any supported types found in a single list
if supported_list:
if do_formatting:
supported_type_names.append(", ".join(supported_list))
else:
supported_type_names += supported_list
except TypeError:
# This is hit when you try to get a union type, which should be reported differently anyway
pass
except (AttributeError, UnimplementedError):
pass
supported_type_names.sort()
return supported_type_names
# ======================================================================
def formatted_supported_attribute_type_names() -> List[str]:
"""Returns a list of the names of all currently supported attribute types, formatted in lines for easy reading"""
supported_type_names = supported_attribute_type_names(do_formatting=True)
supported_type_names.append('["A", "B", "C"... = Any one of the listed types]')
return supported_type_names
# ======================================================================
def split_attribute_list(
attributes: List[AttributeManager],
) -> Tuple[List[AttributeManager], List[AttributeManager], List[AttributeManager]]:
"""Split a list of attributes into three sets of sublists based on type of data the attribute holds.
Args:
attributes: List of attribute managers encapsulating the list of attributes to be split
Returns:
Tuple(SingleAttributes, BundleAttributes, RuntimeAttributes)
SingleAttributes: Attributes containing a single piece of data, including tuples and arrays
BundleAttributes: Attributes which are a bundle of other attributes, not having any actual data
RuntimeAttributes: Attributes whose data type is only known at runtime
"""
single_attributes = []
bundle_attributes = []
runtime_attributes = []
for attribute in attributes:
if attribute is None:
continue
if not attribute.has_fixed_type():
runtime_attributes.append(attribute)
elif attribute.create_type_name() == "bundle":
bundle_attributes.append(attribute)
else:
single_attributes.append(attribute)
return (single_attributes, bundle_attributes, runtime_attributes)
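# Usage sketch, assuming "managers" is a list of AttributeManager instances (None entries are skipped):
#   singles, bundles, runtimes = split_attribute_list(managers)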
# ======================================================================
def list_without_runtime_attributes(attributes: List[AttributeManager]) -> List[AttributeManager]:
"""Return the attribute list, filtered to remove all attributes whose data type is determined at runtime.
Args:
attributes: List of attribute managers encapsulating the list of attributes to be filtered
Returns:
List of attributes whose types are known at compile time
"""
known_attributes = []
for attribute in attributes:
if attribute is None:
continue
if attribute.has_fixed_type():
known_attributes.append(attribute)
return known_attributes
| 22,695 | Python | 44.482966 | 118 | 0.674994 |