file_path
stringlengths 21
202
| content
stringlengths 12
1.02M
| size
int64 12
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 3.33
100
| max_line_length
int64 10
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/nodes.py | # noqa: PLC0302
"""
Support for the various attribute types used in a node description file.
Provides classes that allow generic calls to check attribute information retrieved from the JSON node description
data. The main interface class AttributeManager is used to decipher, validate, and provide access to all
attribute data in the dictionary passed into it.
"""
import io
import json
import os
import re
from contextlib import suppress
from dataclasses import dataclass
from pathlib import Path
from typing import IO, Any, Dict, List, Optional, Tuple, Union
from .attributes.AttributeManager import AttributeManager
from .attributes.management import get_attribute_manager
from .attributes.naming import (
INPUT_GROUP,
INPUT_NS,
OUTPUT_GROUP,
OUTPUT_NS,
STATE_GROUP,
STATE_NS,
attribute_name_in_namespace,
check_attribute_name,
is_input_name,
is_output_name,
is_state_name,
split_attribute_name,
)
from .category_definitions import merge_category_definitions
from .keys import (
CudaPointerValues,
ExclusionTypeValues,
GraphSetupKeys,
GraphSetupKeys_V1,
LanguageTypeValues,
MemoryTypeValues,
NodeTypeKeys,
TestKeys,
)
from .parse_scheduling import SchedulingHints
from .type_definitions import apply_type_definitions
from .utils import (
GeneratorConfiguration,
IndentedOutput,
MetadataKeys,
ParseError,
UnimplementedError,
check_icon_information,
check_memory_type,
check_token_name,
get_metadata_dictionary,
is_comment,
logger,
)
# ======================================================================
# Deprecated - Use the definitions in keys.NodeTypeKeys interface instead
KEY_NODE_DESCRIPTION = NodeTypeKeys.DESCRIPTION
KEY_NODE_EXCLUDE = NodeTypeKeys.EXCLUDE
KEY_NODE_ICON = NodeTypeKeys.ICON
KEY_NODE_INPUTS = NodeTypeKeys.INPUTS
KEY_NODE_LANGUAGE = NodeTypeKeys.LANGUAGE
KEY_NODE_MEMORY_TYPE = NodeTypeKeys.MEMORY_TYPE
KEY_NODE_METADATA = NodeTypeKeys.METADATA
KEY_NODE_OUTPUTS = NodeTypeKeys.OUTPUTS
KEY_NODE_SCHEDULING = NodeTypeKeys.SCHEDULING
KEY_NODE_SINGLETON_METADATA = NodeTypeKeys.SINGLETON
KEY_NODE_STATE = NodeTypeKeys.STATE
KEY_NODE_TAGS_METADATA = NodeTypeKeys.TAGS
KEY_NODE_TESTS = NodeTypeKeys.TESTS
KEY_NODE_TOKENS = NodeTypeKeys.TOKENS
KEY_NODE_UI_NAME_METADATA = NodeTypeKeys.UI_NAME
KEY_NODE_VERSION = NodeTypeKeys.VERSION

# Deprecated - Use the definitions in keys.TestKeys interface instead
KEY_TEST_DESCRIPTION = TestKeys.DESCRIPTION
KEY_TEST_GPU_ATTRIBUTES = TestKeys.GPU_ATTRIBUTES
KEY_TEST_INPUTS = TestKeys.INPUTS
KEY_TEST_OUTPUTS = TestKeys.OUTPUTS
KEY_TEST_SETUP = TestKeys.SETUP
KEY_TEST_STATE = TestKeys.STATE
KEY_TEST_STATE_GET = TestKeys.STATE_GET
KEY_TEST_STATE_SET = TestKeys.STATE_SET

# Deprecated - use the keys.LanguageTypeValues interface instead
LANGUAGE_CPP = LanguageTypeValues.CPP
LANGUAGE_PYTHON = LanguageTypeValues.PYTHON
ALL_LANGUAGES = LanguageTypeValues.ALL

# Legal values for the exclusion and graph-setup keys, harvested from the public members of their key classes
EXCLUSION_TYPES = [value for key, value in vars(ExclusionTypeValues).items() if not key.startswith("__")]
V1_GRAPH_SETUP_KEYS = [value for key, value in vars(GraphSetupKeys_V1).items() if not key.startswith("__")]
GRAPH_SETUP_KEYS_ALLOWED = [value for key, value in vars(GraphSetupKeys).items() if not key.startswith("__")]

# Pattern for legal node names
# - starts with a letter or underscore
# - then an arbitrary number of alphanumerics or underscores
# - other special characters cause problems in USD and so are disallowed
RE_NODE_NAME = re.compile(r"^[A-Za-z_][A-Za-z0-9_\.]*$")
RE_NODE_NAME_NORMAL = re.compile(r"^[A-Z][A-Za-z0-9_]*$")
NODE_NAME_REQUIREMENT = (
    "Node name '{}' should be CamelCase with letters, numbers, underscores,"
    " with optional '.' to override the namespace"
)
# UI names can contain pretty much anything - quotes are problematic though so those are disallowed
RE_NODE_UI_NAME = re.compile("^[^'\"]*$")
NODE_UI_NAME_REQUIREMENT = "User-friendly node name cannot contain quotes"
# Helper for namespace related messages
USE_NAMESPACE = f'must begin with "{INPUT_NS}", "{OUTPUT_NS}", or "{STATE_NS}"'
# ======================================================================
def check_node_language(node_language: str):
    """Raises a ParseError if the given language name is not legal, else returns the corresponding language key"""
    try:
        return LanguageTypeValues.key_from_text(node_language)
    except ValueError as error:
        # Include the underlying reason; a bare ParseError() gave the caller no diagnostic at all
        raise ParseError(f'Unrecognized node language "{node_language}": {error}') from error
# ======================================================================
def check_node_name(node_name: str):
    """Raises a ParseError if the given node name has an illegal pattern, else returns the node name"""
    if RE_NODE_NAME.match(node_name) is None:
        raise ParseError(NODE_NAME_REQUIREMENT.format(node_name))
    return node_name
# ======================================================================
def check_node_ui_name(node_ui_name: str):
    """Raises a ParseError if the given user-friendly node name has an illegal pattern, else returns the node name"""
    if RE_NODE_UI_NAME.match(node_ui_name) is None:
        raise ParseError(NODE_UI_NAME_REQUIREMENT)
    return node_ui_name
# ======================================================================
def check_node_version(node_version):
    """Raises a ParseError if the given node version is not an integer"""
    if isinstance(node_version, int):
        return
    raise ParseError(f'Node version "{node_version}" is not an integer')
# ======================================================================
class NodeGenerationError(Exception):
    """Raised when generation of the node interface, tests, or documentation fails"""
# ==============================================================================================================
@dataclass
class AllAttributes:
    """Container class holding the inputs, outputs, and state attributes in a common structure"""

    # One entry per attribute in each of the three namespaces.
    # NOTE(review): construct_tests() populates instances with bare attribute-name strings from
    # split_attribute_name(), not AttributeManager objects - confirm which the declared type should be
    inputs: List[AttributeManager]
    outputs: List[AttributeManager]
    state: List[AttributeManager]
# ======================================================================
class TestData:
    """Holds the information required to run a single generated test.

    Attributes:
        graph_setup: og.Controller.edit-style dictionary used to build the test graph;
            None means reuse the previous setup without changing anything
        input_values: Mapping of INPUT_ATTR:INPUT_VALUE to apply before the test runs
        state_initial_values: Mapping of STATE_ATTR:STATE_VALUE to apply before the test runs
        state_final_values: Mapping of STATE_ATTR:STATE_VALUE to verify after the test runs
        expected_outputs: Mapping of OUTPUT_ATTR:EXPECTED_VALUE to verify after the test runs
        gpu_outputs: Output attributes expected to be found on the GPU at runtime
        uses_v1_setup: True when the setup data uses the obsolete OmniGraphHelper (V1) syntax
    """

    def __init__(self):
        """Create an empty test configuration; the add_*/set_* methods fill it in incrementally"""
        self.input_values = {}
        self.expected_outputs = {}
        self.state_initial_values = {}
        self.state_final_values = {}
        self.gpu_outputs = []
        self.graph_setup = None
        self.uses_v1_setup = False

    def add_input(self, input_name: str, input_value):
        """Record a value to set on the named input attribute before the test runs"""
        self.input_values[input_name] = input_value

    def add_output(self, output_name: str, output_value):
        """Record the value expected on the named output attribute after the test runs"""
        self.expected_outputs[output_name] = output_value

    def add_set_state(self, state_name: str, state_value):
        """Record a value to initialize the named state attribute with before the test runs"""
        self.state_initial_values[state_name] = state_value

    def add_get_state(self, state_name: str, state_value):
        """Record the value expected on the named state attribute after the test runs"""
        self.state_final_values[state_name] = state_value

    def set_gpu_outputs(self, gpu_outputs: List[str]):
        """Set the output attributes that must be read from the GPU when the memory location is decided at runtime"""
        self.gpu_outputs = gpu_outputs

    def set_graph_setup(self, setup: Dict, uses_v1_setup: bool = False):
        """Set the graph construction data for the test, flagging whether it uses the obsolete V1 syntax"""
        self.graph_setup = setup
        self.uses_v1_setup = uses_v1_setup
# ======================================================================
class NodeInterface:
"""Class constructed from a node interface description to provide an easier method of extracting information
Attributes:
name: Name of the node (mandatory)
__categories_allowed: List of categories the node can legally accept (loaded from the main parser and the node)
cuda_pointer_type: Where the pointers to GPU arrays are retrieved to
description: Description of what the node does (mandatory)
excluded_generators: List of generators the node does not want to run
has_cuda_attributes: True if there is at least one input or output attribute that will be accessed from CUDA
has_inputs: True if __inputs has size > 0 (cached for performance)
has_outputs: True if __outputs has size > 0 (cached for performance)
has_state: True if __state has size > 0 (cached for performance)
icon_path: Location of the node type's icon, relative to the extension's directory (None means no icon)
__inputs: Dictionary of input attributes as (attribute name, AttributeManager)
language: Language in which the node will be implemented
memory_type: Default location for attribute memory
__outputs: Dictionary of output attributes as (attribute name, AttributeManager)
__state: Dictionary of state attributes as (attribute name, AttributeManager)
tests: List of TestData containing information describing the set of tests in the file
version: Version of the described node, as an integer
config_directory: Path of directory in which to find any configuration files
"""
    def __init__(
        self,
        node_name: str,
        node_data: dict,
        config_directory: str,
        categories_allowed: Dict[str, str] = None,
        node_directory: Optional[str] = None,
    ):
        """
        Rearrange the node interface description to optimize access for code and documentation generation.

        Args:
            node_name: Name of the node being accessed
            node_data: Dictionary containing the node interface data, as extracted from the JSON
            config_directory: Path to the directory containing the system configuration files
            categories_allowed: Dictionary of name:description of all categories found in the configuration files
            node_directory: Directory in which the node definition lives, None if it does not live in the file system

        Raises:
            ParseError: If there are any errors parsing the node description - string contains the problem
        """
        # Establish defaults first; each optional JSON section below overrides its default when present
        self.name = node_name
        self.__categories_allowed = categories_allowed if categories_allowed is not None else {}
        self.__node_directory = node_directory
        self.categories = []
        self.description = None
        self.version = 1
        self.has_cuda_attributes = False
        self.cuda_pointer_type = None
        self.has_inputs = False
        self.has_outputs = False
        self.has_state = False
        self.icon_path = None
        self.memory_type = MemoryTypeValues.CPU
        self.metadata = {}
        self.__inputs = {}
        self.__outputs = {}
        self.__state = {}
        self.tests = []
        self.tokens = {}
        self.excluded_generators = []
        self.language = LanguageTypeValues.CPP
        self.config_directory = config_directory
        self.scheduling_hints = None
        logger.info("Extracting node interface for %s", node_name)
        if not isinstance(node_data, dict):
            raise ParseError(f"Value of node name key {node_name} must be a dictionary")
        # Parse the mandatory description
        try:
            self.description = node_data[NodeTypeKeys.DESCRIPTION]
            logger.info("Extracted description %s", self.description)
        except KeyError:
            raise ParseError(f'"description" value is mandatory for node "{node_name}"') from None
        # Parse the node version number. Each "with suppress(KeyError)" section below makes the
        # corresponding JSON key optional - a missing key silently keeps the default.
        with suppress(KeyError):
            check_node_version(node_data[NodeTypeKeys.VERSION])
            self.version = node_data[NodeTypeKeys.VERSION]
            logger.info("Extracted node version -> %s", self.version)
        # Parse the node metadata
        with suppress(KeyError):
            self.metadata = get_metadata_dictionary(node_data[NodeTypeKeys.METADATA])
            logger.info("Extracted node metadata")
        # Parse the node memory type
        with suppress(KeyError):
            self.memory_type = check_memory_type(node_data[NodeTypeKeys.MEMORY_TYPE])
            self.metadata[MetadataKeys.MEMORY_TYPE] = self.memory_type
            logger.info("Extracted node memory type -> %s", self.memory_type)
        # Parse the node override icon path, if any
        with suppress(KeyError):
            (self.icon_path, color, background_color, border_color) = check_icon_information(
                node_data[NodeTypeKeys.ICON]
            )
            if color is not None:
                self.metadata[MetadataKeys.ICON_COLOR] = color
            if background_color is not None:
                self.metadata[MetadataKeys.ICON_BACKGROUND_COLOR] = background_color
            if border_color is not None:
                self.metadata[MetadataKeys.ICON_BORDER_COLOR] = border_color
            logger.info("Extracted override icon path -> %s", self.icon_path)
        # See if the node uses the shortcut for the singleton metadata
        with suppress(KeyError):
            singleton = node_data[NodeTypeKeys.SINGLETON]
            logger.info("Extracted %s flag", NodeTypeKeys.SINGLETON)
            if not isinstance(singleton, bool):
                raise ParseError("Singleton value must be a boolean")
            # Metadata can only be a string so change the boolean to a 0/1 value
            if singleton:
                self.metadata[MetadataKeys.SINGLETON] = "1"
        # See if the node has a definition for cuda pointer locations
        with suppress(KeyError):
            self.cuda_pointer_type = node_data[NodeTypeKeys.CUDA_POINTERS]
            if not hasattr(CudaPointerValues, self.cuda_pointer_type.upper()):
                allowed = [value for value in dir(CudaPointerValues) if not value.startswith("_")]
                raise ParseError(f"{NodeTypeKeys.CUDA_POINTERS} is {self.cuda_pointer_type}, must be one of {allowed}")
            logger.info("Extracted %s flag", NodeTypeKeys.CUDA_POINTERS)
        # See if the node uses the shortcut for the tags metadata
        with suppress(KeyError):
            tags = node_data[NodeTypeKeys.TAGS]
            logger.info("Extracted node tags")
            if not isinstance(tags, list) and not isinstance(tags, str):
                raise ParseError("Tags must be a comma-separated string or a list of strings")
            # Metadata can only be a string so flatten a list with commas
            if isinstance(tags, list):
                tags = ",".join(tags)
            self.metadata[MetadataKeys.TAGS] = tags
        # See if the node uses the shortcut for the uiName metadata
        with suppress(KeyError):
            ui_name = node_data[NodeTypeKeys.UI_NAME]
            logger.info("Extracted node uiName")
            if not isinstance(ui_name, str):
                raise ParseError("UI Name must be a single string")
            self.metadata[MetadataKeys.UI_NAME] = ui_name
        # See if any token names are to be hardcoded for the node. Three spellings are accepted:
        # a comma-separated string, a list of names, or a dictionary of name:value.
        with suppress(KeyError):
            raw_tokens = node_data[NodeTypeKeys.TOKENS]
            logger.info("Extracted tokens")
            if isinstance(raw_tokens, str):
                token_list = raw_tokens.split(",")
                self.tokens = {check_token_name(token_name): token_name for token_name in token_list}
            elif isinstance(raw_tokens, list):
                self.tokens = {check_token_name(token): token for token in raw_tokens}
            elif isinstance(raw_tokens, dict):
                self.tokens = {check_token_name(token): value for token, value in raw_tokens.items()}
            else:
                raise ParseError(f"Unknown type of tokens to handle - '{raw_tokens}'")
            # Store the raw tokens as metadata so that they can be retrieved to regenerate the file
            self.metadata[MetadataKeys.TOKENS] = json.dumps(raw_tokens)
        # See if the node is overriding any of the type definitions.
        with suppress(KeyError):
            type_definitions = node_data[NodeTypeKeys.TYPE_DEFINITIONS]
            logger.info("Extracted type definitions")
            # If the data is just a string then assume it is a file and try to load it, checking the configuration
            # directory if it exists.
            if isinstance(type_definitions, str):
                type_definition_path = Path(type_definitions)
                if type_definition_path.is_file():
                    apply_type_definitions(type_definition_path)
                elif not type_definition_path.is_absolute() and self.config_directory is not None:
                    config_dir_type_path = Path(self.config_directory, type_definition_path)
                    if config_dir_type_path.is_file():
                        apply_type_definitions(config_dir_type_path)
                    else:
                        raise ParseError(
                            f"Type definitions file '{type_definitions}' not found in config directory"
                            f" '{self.config_directory}'"
                        )
                else:
                    raise ParseError(f"Type definitions file '{type_definitions}' not found")
            # If the data is a dictionary assume it contains the type definitions directly (should be rare)
            elif isinstance(type_definitions, dict):
                apply_type_definitions({NodeTypeKeys.TYPE_DEFINITIONS: type_definitions})
            else:
                raise ParseError(f"Type definitions only recognize a string or dictionary type - '{type_definitions}'")
        # See if the node is using any extra category definitions.
        with suppress(KeyError):
            category_definitions = node_data[NodeTypeKeys.CATEGORY_DEFINITIONS]
            logger.info("Extracted category definitions")

            def __add_categories(category_spec):
                # NOTE(review): this closure reads the enclosing category_definitions rather than its
                # category_spec parameter, so when the list branch below calls it once per element the
                # element itself is never inspected (a list always reaches the final ParseError).
                # Confirm whether list-valued category definitions are meant to be supported.
                # If the data is just a string then assume it is a file and try to load it, checking the configuration
                # directory if it exists.
                if isinstance(category_definitions, str):
                    category_definition_path = Path(category_definitions)
                    # If the absolute path exists, prefer that
                    if category_definition_path.is_file():
                        merge_category_definitions(self.__categories_allowed, category_definition_path)
                        return
                    if not category_definition_path.is_absolute():
                        # Check if the path exists relative to the .ogn file's directory
                        if self.__node_directory is not None:
                            config_dir_type_path = Path(self.__node_directory, category_definition_path)
                            if config_dir_type_path.is_file():
                                merge_category_definitions(self.__categories_allowed, config_dir_type_path)
                                return
                        # Check if the path exists relative to the specified config directory
                        if self.config_directory is not None:
                            config_dir_type_path = Path(self.config_directory, category_definition_path)
                            if config_dir_type_path.is_file():
                                merge_category_definitions(self.__categories_allowed, config_dir_type_path)
                                return
                        node_directory_error = (
                            "" if self.__node_directory is None else f" or node file directory '{node_directory}'"
                        )
                        raise ParseError(
                            f"Category definitions file '{category_definitions}' not found in config directory"
                            f" '{self.config_directory}'{node_directory_error}"
                        )
                    raise ParseError(f"Category definitions file '{category_definitions}' not found")
                if isinstance(category_definitions, dict):
                    merge_category_definitions(self.__categories_allowed, category_definitions)
                    return
                raise ParseError(
                    f"Category definitions only recognize a string or dictionary type - '{category_definitions}'"
                )

            if isinstance(category_definitions, list):
                _ = [__add_categories(category_spec) for category_spec in category_definitions]
            else:
                __add_categories(category_definitions)
        # Categories have to be parsed after category definitions
        with suppress(KeyError):
            categories = node_data[NodeTypeKeys.CATEGORIES]

            def __verify_category(category_to_verify: str):
                """Raise an error if the category is not one of the allowed ones"""
                if category_to_verify not in self.__categories_allowed:
                    raise ParseError(
                        f"Category {category_to_verify} not in the allowed list {self.__categories_allowed}"
                    )

            category_metadata = None
            new_categories = {}
            # Accept a comma-separated string, a list of strings/dictionaries, or a name:description dictionary
            if isinstance(categories, str):
                category_metadata = categories
                for category in categories.split(","):
                    __verify_category(category)
            elif isinstance(categories, list):
                category_list = []
                for category_item in categories:
                    if isinstance(category_item, str):
                        category_list.append(category_item)
                    elif isinstance(category_item, dict):
                        category_list += list(category_item.keys())
                        new_categories.update(category_item)
                        merge_category_definitions(self.__categories_allowed, category_item)
                    else:
                        raise ParseError(
                            f"Category description must be a string, dictionary, or list of them - saw {categories}"
                        )
                category_metadata = ",".join(category_list)
                for category in category_list:
                    __verify_category(category)
            elif isinstance(categories, dict):
                new_categories.update(
                    {
                        name: description
                        for name, description in categories.items()
                        if name not in self.__categories_allowed
                    }
                )
                merge_category_definitions(self.__categories_allowed, categories)
                category_metadata = ",".join(sorted(categories.keys()))
            if category_metadata:
                self.metadata[MetadataKeys.CATEGORIES] = category_metadata
            if new_categories:
                # Use a tab as separator and filter them out of the description
                combined_metadata = []
                for name, info in new_categories.items():
                    if name.find(",") >= 0 or name.find("\t") >= 0:
                        raise ParseError(f"Category name '{name}' cannot contain a comma or tab character")
                    safe_info = info.replace("\t", " ")
                    combined_metadata.append(f"{name},{safe_info}")
                self.metadata[MetadataKeys.CATEGORY_DESCRIPTIONS] = "\t".join(combined_metadata)
            # NOTE(review): this message is missing an f-prefix (or a lazy %s argument) so the literal
            # text "{category_metadata}" is what gets logged - should be logger.info("... -> %s", category_metadata)
            logger.info("Added node type categories -> {category_metadata}")
        # Parse the generated type exclusions, if any
        with suppress(KeyError):
            self.excluded_generators += node_data[NodeTypeKeys.EXCLUDE]
            logger.info("Extracted generator inclusions -> %s", self.excluded_generators)
        # Parse the input attributes, if any
        with suppress(KeyError):
            self.__inputs = self.construct_attributes(node_data[NodeTypeKeys.INPUTS], INPUT_NS)
            self.has_inputs = bool(self.__inputs)
            logger.info("Extracted input attributes")
        # Parse the output attributes, if any
        with suppress(KeyError):
            self.__outputs = self.construct_attributes(node_data[NodeTypeKeys.OUTPUTS], OUTPUT_NS)
            self.has_outputs = bool(self.__outputs)
            logger.info("Extracted output attributes")
        # Parse the state attributes, if any
        try:
            self.__state = self.construct_attributes(node_data[NodeTypeKeys.STATE], STATE_NS)
            # Even if no state attributes were constructed, the existence of the state section flags to the
            # node that state information will be used, so that scheduling can take that into account.
            self.has_state = True
            logger.info("Extracted state attributes")
        except KeyError:
            self.has_state = False
        # Parse the attribute allowedToken metadata to include in hardcoded token names
        for attrib in self.all_attributes():
            self.tokens.update(attrib.get_allowed_tokens())
        # Parse the language specification, if any (C++ is the default)
        if NodeTypeKeys.LANGUAGE in node_data:
            logger.info("Extracting the language information")
            self.language = check_node_language(node_data[NodeTypeKeys.LANGUAGE])
            logger.info(" --> Language set to %s", self.language)
        # Parse the node scheduling hints
        with suppress(KeyError):
            self.scheduling_hints = SchedulingHints(node_data[NodeTypeKeys.SCHEDULING])
            logger.info("Extracted scheduler hints")
        # Read in the test configurations. Make sure this happens after all attributes are constructed
        try:
            test_list = node_data[NodeTypeKeys.TESTS]
            self.construct_tests(test_list)
            logger.info("Extracted %s node tests", len(test_list))
        except (KeyError, UnimplementedError):
            self.tests = []  # Remove any partially constructed tests
        # For long strings the description will be a list to be concatenated (due to the
        # limited ways JSON can represent long strings). If that's the case convert back to a single string.
        if isinstance(self.description, list):
            self.description = " ".join(self.description)
        self.metadata[MetadataKeys.DESCRIPTION] = self.description
        if self.excluded_generators:
            self.metadata[MetadataKeys.EXCLUSIONS] = ",".join(self.excluded_generators)
        if self.language != LanguageTypeValues.CPP:
            self.metadata[MetadataKeys.LANGUAGE] = self.language
        # Empty descriptions are anti-social
        if not self.description:
            warning = "Node description should not be empty"
            if os.getenv("OGN_STRICT_DEBUG"):
                raise ParseError(warning)
            print(f"WARNING: {warning}", flush=True)
# ----------------------------------------------------------------------
def add_test(
self, test_data: TestData, attribute_name: str, attribute_namespace: str, attribute_value, in_set: bool
):
"""Extract Python-compatible information for an attribute based on its JSON name, type, and value.
Args:
test_data: Object containing the current test data - updated based on the information passed in
attribute_name: Raw attribute name, may or may not include the namespace
attribute_namespace: Expected namespace of the attribute
attribute_value: Value to be read or written to the attribute - must be compatible to the attribute type
in_set: If True and the namespace is STATE_GROUP then put the value in the set of values to be set on state
attributes before the test begins, otherwise put it on the set of values to check after the test ends
"""
# Rely on called methods to properly raise formatting exceptions, if any
name_in_namespace = attribute_name_in_namespace(attribute_name, attribute_namespace)
(attribute, attribute_group) = self.attribute_by_name(name_in_namespace)
attribute.validate_value_structure(attribute_value)
value_for_test = attribute.value_for_test(attribute_value)
if attribute_group == INPUT_GROUP:
test_data.add_input(attribute, value_for_test)
elif attribute_group == OUTPUT_GROUP:
test_data.add_output(attribute, value_for_test)
elif in_set:
test_data.add_set_state(attribute, value_for_test)
else:
test_data.add_get_state(attribute, value_for_test)
# --------------------------------------------------------------------------------------------------------------
def __add_value_to_test(
self, test_data: TestData, raw_attribute_name: str, attribute_value: Any, attributes: AllAttributes
):
"""Add a single value for getting or setting to the test.
Args:
test_data: Test to be amended
raw_attribute_name: Full specification of attribute to be in the test
attribute_value: Value to bet set or tested on the attribute
attributes: Attribute managers of all types participating in the test
"""
# Strip the suffix from the state namespace and set the flag to indicate if it is get or set
is_setting = False
attribute_name = raw_attribute_name
if raw_attribute_name.startswith(f"{STATE_NS}_get"):
attribute_name = raw_attribute_name.replace("_get", "")
elif raw_attribute_name.startswith(f"{STATE_NS}_set"):
attribute_name = raw_attribute_name.replace("_set", "")
is_setting = True
# This special key is used to tag output attributes whose value will be available on the GPU.
# This is only necessary when the attribute defines its memory location at runtime.
# The name isn't modified since it doesn't correspond to a real attribute, and the values must
# have the fully qualified attribute name (e.g. outputs.X)
if attribute_name == TestKeys.GPU_ATTRIBUTES:
test_data.set_gpu_outputs(attribute_value)
return
# Allow specification of the bare attribute name, so long as there are no conflicts with the same name
# in multiple namespaces
if (
not is_input_name(attribute_name)
and not is_output_name(attribute_name)
and not is_state_name(attribute_name)
):
if attribute_name in attributes.inputs and attribute_name in attributes.outputs:
raise ParseError(f'Test attribute "{attribute_name}" is both an input and an output {USE_NAMESPACE}')
if attribute_name in attributes.inputs and attribute_name in attributes.state:
raise ParseError(f'Test attribute "{attribute_name}" is both an input and a state {USE_NAMESPACE}')
if attribute_name in attributes.outputs and attribute_name in attributes.state:
raise ParseError(f'Test attribute "{attribute_name}" is both an output and a state {USE_NAMESPACE}')
if attribute_name in attributes.inputs:
self.add_test(test_data, attribute_name, INPUT_NS, attribute_value, False)
elif attribute_name in attributes.outputs:
self.add_test(test_data, attribute_name, OUTPUT_NS, attribute_value, False)
elif attribute_name in attributes.state:
# Using this shortcut assume the state value is to be checked, not set
self.add_test(test_data, attribute_name, STATE_NS, attribute_value, is_setting)
else:
raise ParseError(f"Test attribute {attribute_name} not recognized")
else:
(namespace, base_name) = split_attribute_name(attribute_name)
# Assume the namespace is correct if specified
if namespace == INPUT_NS and base_name not in attributes.inputs:
raise ParseError(f'Namespaced attribute "{attribute_name}" not an input')
if namespace == OUTPUT_NS and base_name not in attributes.outputs:
raise ParseError(f'Namespaced attribute "{attribute_name}" not an output')
if namespace.startswith(STATE_NS) and base_name not in attributes.state:
raise ParseError(f'Namespaced attribute "{attribute_name}" not a state')
if namespace not in [INPUT_NS, OUTPUT_NS, STATE_NS]:
raise ParseError(f'Test attribute "{attribute_name}" has illegal namespace "{namespace}"')
self.add_test(test_data, attribute_name, namespace, attribute_value, is_setting)
# ----------------------------------------------------------------------
def create_test_from_raw_data(self, test_info: Dict, attributes: AllAttributes):
"""Return a normalized set of test data as parsed from the two allowed formats.
Test data can appear in either expanded or compressed formats. In expanded format it has inputs, outputs,
and state in separate dictionaries:
{
"description": "Optional, and to be ignored",
"inputs": {
"InputAttr": "ValueToSet"
},
"outputs": {
"OutputAttr": "ExpectedValue"
},
"state_set": {
"StateAttr": "InitialState"
},
"state_get": {
"StateAttr": "ExpectedState"
},
"setup": {
"nodes": ["TestNode", "omni.examples.myNode"]
}
}
or in simplified format where attributes use their full namespace in a single dictionary, with state attributes
splitting into "state" or "state_out" for expected values and "state_in" for initial values
{
"inputs:InputAttr": "ValueToSet"
"outputs:OutputAttr": "ExpectedValue",
"state_in:StateAttr": "InitialState",
"state:StateAttr": "ExpectedState",
}
A mix of both will be accepted, though there is no reason to use that approach.
It is also acceptable if the first format specifies names as "inputs:InputAttr", though self-defeating for
shortening the input data.
In addition, if the attribute names are unique the namespace can be omitted. i.e. "inputs:x1" and "outputs:x2"
can be shortened to "x1" and "x2", but "inputs:a1" and "outputs:a1" must be fully qualified.
The optional "description" field is removed and the format is modified if necessary to the simplified form.
Args:
test_info: Test data in one of the two allowed forms
attributes: List of all legal input, output, and state attribute managers
Note:
For the purposes of this test only the initial state values can be checked. If you wish to check for
state changes you must write a separate test script that evaluates multiple times.
Returns:
test_info normalized to remove ignored fields and put into the simplified format
Raises:
ParseError: If the formatting of the test clause was not correct
"""
test_data = TestData()
if TestKeys.INPUTS in test_info:
for input_name, input_value in test_info[TestKeys.INPUTS].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if input_name[0] == "$":
continue
self.add_test(test_data, input_name, INPUT_NS, input_value, False)
if TestKeys.OUTPUTS in test_info:
for output_name, output_value in test_info[TestKeys.OUTPUTS].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if output_name[0] == "$":
continue
self.add_test(test_data, output_name, OUTPUT_NS, output_value, False)
with suppress(KeyError):
setup = test_info[TestKeys.SETUP]
uses_v1_setup = False
for key in setup:
if key not in GRAPH_SETUP_KEYS_ALLOWED:
if key in V1_GRAPH_SETUP_KEYS:
logger.warning(
"'%s' graph setup is from the obsolete OmniGraphHelper. Update to use og.Controller.", key
)
uses_v1_setup = True
else:
raise ParseError(f"Graph setup key '{key}' not in the allowed set {GRAPH_SETUP_KEYS_ALLOWED}")
test_data.set_graph_setup(setup, uses_v1_setup)
if TestKeys.STATE in test_info:
for state_name, state_value in test_info[TestKeys.STATE].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if state_name[0] == "$":
continue
self.add_test(test_data, state_name, STATE_NS, state_value, False)
if TestKeys.STATE_GET in test_info:
for state_name, state_value in test_info[TestKeys.STATE_GET].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if state_name[0] == "$":
continue
self.add_test(test_data, state_name, STATE_NS, state_value, False)
if TestKeys.STATE_SET in test_info:
for state_name, state_value in test_info[TestKeys.STATE_SET].items():
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if state_name[0] == "$":
continue
self.add_test(test_data, state_name, STATE_NS, state_value, True)
for raw_attribute_name, attribute_value in test_info.items():
# Check to make sure this isn't the attribute grouping rather than an actual name, to parse the simple form
if raw_attribute_name not in [
TestKeys.INPUTS,
TestKeys.OUTPUTS,
TestKeys.SETUP,
TestKeys.STATE,
TestKeys.STATE_GET,
TestKeys.STATE_SET,
TestKeys.DESCRIPTION,
]:
# Comments are marked by a leading "$" and can appear anywhere, even among attribute data
if raw_attribute_name[0] == "$":
continue
self.__add_value_to_test(test_data, raw_attribute_name, attribute_value, attributes)
return test_data
# ----------------------------------------------------------------------
def construct_tests(self, test_list: List[Dict]):
    """Construct the internal list of test configurations from their description

    Args:
        test_list: Raw test dictionaries as parsed from the .ogn "tests" section

    Populates self.tests with one TestData entry per element of test_list.
    """
    # Only the base attribute names (namespace stripped) are needed here, so iterate
    # the dictionary keys directly instead of unpacking .items() and discarding values
    attributes = AllAttributes(
        [split_attribute_name(attribute)[1] for attribute in self.__inputs],
        [split_attribute_name(attribute)[1] for attribute in self.__outputs],
        [split_attribute_name(attribute)[1] for attribute in self.__state],
    )
    # Convert the JSON-based test data to something that can be output as Python
    for test_information in test_list:
        self.tests.append(self.create_test_from_raw_data(test_information, attributes))
# ----------------------------------------------------------------------
def construct_attributes(self, attribute_interfaces: dict, namespace: str) -> Dict[str, AttributeManager]:
    """Create attribute interface classes for every attribute in the description dictionary

    Args:
        attribute_interfaces: Dictionary of (attribute name, dictionary) from which to extract interfaces
        namespace: Prefix for attribute names in this interface; will be prepended to the name if missing
    Returns:
        A dictionary of (attribute name, AttributeManager) extracted from the attribute interface list
    Raises:
        ParseError: If an attribute name is duplicated or its configuration fails validation
    """
    extracted_interfaces = {}
    logger.info("Construct attributes from interface %s", attribute_interfaces)
    for raw_attribute_name, attribute_data in attribute_interfaces.items():
        if raw_attribute_name[0] == "$":  # Special IDs are not actual attributes
            logger.info("Ignoring comment tagged %s", raw_attribute_name)
            continue
        # Allow namespace to be already on the name, and ensure it is present either way
        attribute_name = attribute_name_in_namespace(raw_attribute_name, namespace)
        # Called for its validation side effect (raises on an illegal name); the split result is unused
        check_attribute_name(attribute_name)
        if attribute_name in extracted_interfaces:
            raise ParseError(f'Attribute "{raw_attribute_name}" appears more than once in the node definition')
        attribute_manager = get_attribute_manager(attribute_name, attribute_data)
        extracted_interfaces[attribute_name] = attribute_manager
        # Attributes inherit the node-level memory type unless they override it themselves
        if attribute_manager.memory_type is None:
            attribute_manager.memory_type = self.memory_type
        if attribute_manager.memory_type != MemoryTypeValues.CPU:
            # Truthy flag; True is backward-compatible with the previous value of 1
            self.has_cuda_attributes = True
            attribute_manager.cuda_pointer_type = self.cuda_pointer_type
        # After creation, ensure that the attribute manager has a valid configuration
        try:
            attribute_manager.validate_configuration()
            if attribute_manager.ogn_base_type().startswith("transform"):
                logger.warning(
                    "'%s' is being deprecated by USD. Use 'framed[4]' or 'matrixd[4]' instead",
                    attribute_manager.ogn_type(),
                )
        except Exception as error:
            # Re-raise with the attribute name attached so the user can locate the problem
            raise ParseError(f"Attribute {attribute_name}") from error
    return extracted_interfaces
# ----------------------------------------------------------------------
def attribute_by_name(self, attribute_name: str) -> Tuple[AttributeManager, str]:
    """Look up an attribute on the node by name.

    Args:
        attribute_name: Name of the attribute to find; may be fully namespaced or in short form
    Returns:
        (Manager of the named attribute, type of the attribute)
    Raises:
        AttributeError: If the attribute does not exist on the node, or a short-form name
            matches attributes in more than one of the input/output/state groups.
    """
    # Fully namespaced names resolve directly to their group
    if attribute_name in self.__inputs:
        return (self.__inputs[attribute_name], INPUT_GROUP)
    if attribute_name in self.__outputs:
        return (self.__outputs[attribute_name], OUTPUT_GROUP)
    if attribute_name in self.__state:
        return (self.__state[attribute_name], STATE_GROUP)
    # Handle the case of short-form names, which must match exactly one namespace
    match_found = None
    attribute_as_input = attribute_name_in_namespace(attribute_name, INPUT_NS)
    if attribute_as_input in self.__inputs:
        match_found = (self.__inputs[attribute_as_input], INPUT_GROUP)
    attribute_as_output = attribute_name_in_namespace(attribute_name, OUTPUT_NS)
    if attribute_as_output in self.__outputs:
        if match_found is not None:
            raise AttributeError(f'"{attribute_name}" ambiguously matched multiple types')
        match_found = (self.__outputs[attribute_as_output], OUTPUT_GROUP)
    attribute_as_state = attribute_name_in_namespace(attribute_name, STATE_NS)
    if attribute_as_state in self.__state:
        if match_found is not None:
            raise AttributeError(f'"{attribute_name}" ambiguously matched multiple types')
        match_found = (self.__state[attribute_as_state], STATE_GROUP)
    if match_found is not None:
        return match_found
    # Fixed: the original message contained a stray trailing quotation mark
    raise AttributeError(f'"{attribute_name}" was not found in the node')
# ----------------------------------------------------------------------
def all_input_attributes(self) -> List[AttributeManager]:
    """Get the list of all input attributes extracted from the description

    Returns:
        The attribute interfaces for the node's inputs, ordered by attribute name
    """
    return self.sorted_values(self.__inputs)
# ----------------------------------------------------------------------
def all_output_attributes(self) -> List[AttributeManager]:
    """Get the list of all output attributes extracted from the description

    Returns:
        The attribute interfaces for the node's outputs, ordered by attribute name
    """
    return self.sorted_values(self.__outputs)
# ----------------------------------------------------------------------
def all_state_attributes(self) -> List[AttributeManager]:
    """Get the list of all state attributes extracted from the description

    Returns:
        The attribute interfaces for the node's state attributes, ordered by attribute name
    """
    return self.sorted_values(self.__state)
# ----------------------------------------------------------------------
def all_attributes(self) -> List[AttributeManager]:
    """Get the list of all attributes of all types extracted from the description

    Returns:
        The attribute interfaces for every attribute defined on the node,
        inputs first, then outputs, then state
    """
    combined = []
    for group in (self.all_input_attributes(), self.all_output_attributes(), self.all_state_attributes()):
        combined.extend(group)
    return combined
# ----------------------------------------------------------------------
def has_attributes(self) -> bool:
    """Returns True if this node type has any attributes.

    This provides a quick check so that code generators can skip attribute sections when none exist.
    For code sections containing only a single type of attribute use, e.g., if node.all_state_attributes():

    Returns:
        True if the node defines at least one input, output, or state attribute
    """
    # Coerce to bool so the return value matches the annotated contract instead of
    # leaking one of the internal attribute dictionaries to the caller
    return bool(self.__inputs or self.__outputs or self.__state)
# ----------------------------------------------------------------------
@staticmethod
def sorted_values(attributes: dict) -> List[AttributeManager]:
"""Get the list of dictionary values sorted by the dictionary keys
Args:
attributes: A dictionary with sortable keys
Returns:
The list of dictionary values sorted by the dictionary keys
"""
return [attributes[key] for key in sorted(attributes.keys())]
# ----------------------------------------------------------------------
def all_tests(self) -> List[TestData]:
    """Returns the list of all sets of tests data extracted from the description

    Returns:
        The live list of TestData objects built by construct_tests(); callers share this list.
    """
    return self.tests
# ----------------------------------------------------------------------
def check_support(self):
    """Checks to see if this node contains currently unsupported attributes

    Raises:
        AttributeError: If any attributes on the node are not going to be supported
        UnimplementedError: If any attributes on the node are currently not supported but will be
    """
    # The same support check applies to every attribute group; only the group
    # name mentioned in the error message differs, so drive one loop with data
    # instead of maintaining three copy-pasted loops.
    attribute_groups = (
        (self.all_input_attributes(), "input"),
        (self.all_output_attributes(), "output"),
        (self.all_state_attributes(), "state"),
    )
    for attributes, group_name in attribute_groups:
        for attribute in attributes:
            try:
                attribute.check_support()
            except AttributeError as error:
                raise AttributeError(f"{self.name} {group_name} not supported") from error
            except UnimplementedError as error:
                raise UnimplementedError(f"{self.name} {group_name} not yet supported") from error
# ----------------------------------------------------------------------
def can_generate(self, generation_type: str) -> bool:
    """Checks to see if a particular type of output should be generated.

    Args:
        generation_type: Name of output generation type. Exact values respected are in main.py
    Return:
        True if the generation_type of data is allowed by the node
    """
    # Generation is blocked either by an explicit exclusion on the node or by
    # asking for C++ output from a node implemented purely in Python
    if generation_type in self.excluded_generators:
        return False
    cpp_blocked = generation_type == "c++" and self.language in [LanguageTypeValues.PYTHON]
    return not cpp_blocked
# ======================================================================
class NodeInterfaceGenerator:
    """Manage the common functions used by all types of node generators

    Override the interface_file_name() and generate_node_interface() methods for a derived generator.
    You can also override pre_interface_generation() if you have something to emit at the beginning of the
    interface that is not replicated for each node.

    Attributes:
        base_name: Base name of the file containing the node descriptions
        extension: Name of the extension requesting the generation
        generator_version: The version information for this extension when it was run
        interface_directory: Path to the directory where the file should be written (None is as string)
        module: Root import for the files of the node
        node_interface: Node interface whose interface is being generated
        out: File object which is the destination of the test output
        output_path: Location of output destination, None if only going to a string
        target_version: The version information for the omni.graph.core extension the generated code is meant for
        verbose: True if extra debugging output is to be added
    """

    def __init__(self, configuration: GeneratorConfiguration):
        """Set up the generator and output the Python interface code for the node

        Args:
            configuration: Information defining how and where the documentation will be generated
        Raises:
            NodeGenerationError: When for some reason the Python code could not be generated
        """
        self.base_name = configuration.base_name
        self.generator_version = configuration.generator_version
        self.target_version = configuration.target_version
        self.extension = configuration.extension
        self.node_interface = configuration.node_interface
        self.module = configuration.module
        self.node_file_path = configuration.node_file_path
        self.interface_directory = configuration.destination_directory
        self.needs_directory = configuration.needs_directory
        self.verbose = configuration.verbose
        self.output_path = None
        try:
            # Choose the Linux-style newlines to keep output consistent and simple
            if self.interface_directory and self.interface_file_name():
                self.output_path = os.path.join(self.interface_directory, self.interface_file_name())
            # The generated file sizes will never be too large to fit into a string, and generating them to a
            # string first is far more efficient so set up the output to buffer into a string first and then
            # write it to a file when complete, if requested
            self.out = IndentedOutput(io.StringIO())
        except IOError as error:
            raise NodeGenerationError(f"Could not obtain write access to {self.interface_directory}") from error

    # ----------------------------------------------------------------------
    def __str__(self) -> str:
        """Return the interface generated so far as a string (the in-memory buffer contents)"""
        return str(self.out)

    # ----------------------------------------------------------------------
    def safe_name(self) -> str:
        """Returns the name of the node type, filtered to be safe for Python, USD, or C++ use"""
        return self.node_interface.name.replace(".", "_")

    # ----------------------------------------------------------------------
    def interface_file_name(self) -> Optional[str]:
        """Return the file name for the generated output - should be overridden by derived classes

        Returns None by default, which means the output only goes to the in-memory buffer.
        """
        logger.info("Generator has not overridden the interface_file_name")

    # ----------------------------------------------------------------------
    def generate_node_interface(self):
        """Create interface output from the given node interface.

        This does nothing; it should be overridden in a base class
        """
        logger.info("Generator has not overidden the node interface")

    # ----------------------------------------------------------------------
    def pre_interface_generation(self):
        """Emit the output that precedes the node-specific content - may be overridden by derived classes"""
        logger.info("Generator has not overridden the pre-node interface")

    # ----------------------------------------------------------------------
    def post_interface_generation(self):
        """Emit the output that follows the node-specific content - may be overridden by derived classes"""
        logger.info("Generator has not overridden the post-node interface")

    # ----------------------------------------------------------------------
    def generate_interface(self):
        """Create an interface for the node

        Runs the pre/node/post generation steps into the in-memory buffer, then writes the result
        to self.output_path if one was configured.

        Raises:
            NodeGenerationError: When there is a failure in the generation of the interface
        """
        self.pre_interface_generation()
        self.generate_node_interface()
        self.post_interface_generation()
        # Now that the entire interface has been generated it can be written out to disk.
        if self.output_path is not None:
            self.__check_interface_directory()
            if self.needs_to_write():
                with open(self.output_path, "w", newline="\n", encoding="utf-8") as generated_file:
                    generated_file.write(str(self.out))
            else:
                # We still need to update the mod-time for the sake of timestamp-based build rules
                os.utime(self.output_path)

    # --------------------------------------------------------------------------------------------------------------
    def needs_to_write(self) -> bool:
        """Check if the file needs to be written

        Returns:
            True if the file does not exist or its contents differ from the generated text
        """
        if os.path.exists(self.output_path):
            with open(self.output_path, newline="\n", encoding="utf-8") as fp:
                data = fp.read()
            if data == str(self.out):
                return False
        return True

    # --------------------------------------------------------------------------------------------------------------
    def __check_interface_directory(self):
        """Check to see if the interface directory is required, creating it if it is.

        Raises:
            NodeGenerationError if the interface directory was required but did not exist and could not be created
        """
        # No directory needed, that's good
        if not self.needs_directory:
            return
        # No directory specified but one is needed, that's bad
        if not self.interface_directory:
            raise NodeGenerationError(f"Required an interface directory for {self.__class__} but did not specify one")
        # Directory is needed and specified, and exists, that's good
        directory = Path(self.interface_directory)
        if directory.is_dir():
            return
        # Try to create the missing directory
        try:
            directory.mkdir(mode=0o777, parents=True, exist_ok=True)
            logger.info("Created interface destination directory %s", directory)
        except Exception as error:
            raise NodeGenerationError(f"Failed to create interface directory '{directory}'") from error
# ======================================================================
class NodeInterfaceWrapper:
    """Converts a JSON node description file into a set of interfaces to the node contained in it

    Reads and parses a node interface description file in order to present an interface to the data
    that is more specific to the type of data that is in the file.

    Attributes:
        node_interface: The NodeInterface parsed from the JSON data
    """

    def __init__(
        self,
        node_as_json: Union[str, IO, Dict],
        extension: str,
        config_directory: Optional[str] = None,
        categories_allowed: Dict[str, str] = None,
    ):
        """Initialize the class by parsing the node description file or the already-retrieved description

        Args:
            node_as_json: File object, path to file, or raw dictionary containing the node interface description data
            extension: Name of the extension in which the node was defined
            config_directory: Location of directory in which the attribute type configuration files can be found. If
                None then use the directory where this script lives.
            categories_allowed: Dictionary of name:description values for legal categories
        Raises:
            ParseError: If there are any errors parsing the node description - string contains the problem
        """
        self.node_interface = None
        node_directory = None
        categories_allowed = {} if categories_allowed is None else categories_allowed
        config_directory = (
            os.path.dirname(os.path.realpath(__file__)) if config_directory is None else config_directory
        )
        # The description can arrive as a raw JSON string, an already-parsed dictionary, or an open file
        if isinstance(node_as_json, str):
            logger.info("Parsing node interface as string")
            try:
                json_description = json.loads(node_as_json)
            except json.decoder.JSONDecodeError as error:
                raise ParseError(f"Invalid JSON formatting in string - {error}\n{node_as_json}") from None
        elif isinstance(node_as_json, Dict):
            json_description = node_as_json
        else:
            logger.info("Parsing node interface as a file")
            try:
                json_description = json.load(node_as_json)
                node_directory = os.path.dirname(node_as_json.name)
            except json.decoder.JSONDecodeError as error:
                raise ParseError(f"Invalid JSON formatting in file {node_as_json.name} - {error}") from None
        if not json_description or not json_description.keys():
            raise ParseError("Not a valid JSON file")
        # Keys starting with "$" are comments; at most one real node definition may remain
        real_node_keys = [main_key for main_key in json_description.keys() if not is_comment(main_key)]
        if len(real_node_keys) > 1:
            raise ParseError(f"Only one node definition allowed per file - found {list(json_description.keys())}")
        logger.info("Extracting node information")
        for node_type_name, node_type_description in json_description.items():
            if node_type_name[0] == "$":  # Special IDs are not actual nodes
                logger.info("Ignoring comment tagged %s", node_type_name)
                continue
            logger.info("Extracting node data for %s", node_type_name)
            check_node_name(node_type_name)
            if self.node_interface is not None:
                raise ParseError("Only one node per JSON description is supported")
            if "." not in node_type_name:
                # If no explicit namespace then prepend the extension name to guarantee uniqueness
                node_type_name = f"{extension}.{node_type_name}"
            self.node_interface = NodeInterface(
                node_type_name, node_type_description, config_directory, categories_allowed, node_directory
            )

    # ----------------------------------------------------------------------
    def can_generate(self, generation_type: str) -> bool:
        """Checks to see if a particular type of output should be generated.

        Args:
            generation_type: Name of output generation type. Exact values respected are in main.py
        Return:
            True if the generation_type of data is allowed by the node
        """
        if not self.node_interface:
            return False
        return self.node_interface.can_generate(generation_type)

    # ----------------------------------------------------------------------
    def check_support(self):
        """Raises AttributeError if any attributes on the node are currently not supported"""
        self.node_interface.check_support()
| 62,207 | Python | 48.410643 | 119 | 0.605462 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/OmniGraphExtension.py | """
Support for managing the automatic creation and handling of an OGN-enabled extension.
This manages extensions outside of the build system, using Kit's automatic extension loading capabilities.
It explicitly does not handle running the generation scripts on .ogn files, though it does provide the directories
to which such generated files should be written.
The file structure of a newly created extension, beginning at the root of the extension looks like this:
ROOT/
my.cool.extension/
config/
extension.toml # (Generated) Information required to load the extension
docs/
README.md # Description of your extension
my/cool/extension/
__init__.py # (Generated) Initialization of your extension
nodes/
# Contains the .ogn and .py files implementing your OmniGraph nodes
ogn/
__init__.py # (Generated) Registration of your OmniGraph nodes
docs/
# (Generated) Documentation describing your nodes
include/
# (Generated) C++ Database files, to make it easy for C++ nodes to access data
tests/
# (Generated) Test scripts that exercise your nodes
python/
# (Generated) Python Database files, to make it easy for Python nodes to access data
usd/
# (Generated) Template USD file setting up the attributes in your nodes
"""
import os
import re
from warnings import warn
from .utils import create_symbolic_link, dbg_reg
class OmniGraphExtension:
"""Class handling all of the requirements of an OGN-enabled extension
Properties:
ogn_docs_directory: Path to the directory containing user-written docs
ogn_include_directory: Path to the directory containing .ogn-generated C++ header files
ogn_nodes_directory: Path to the directory containing user-implemented nodes (.ogn and .py/.cpp)
ogn_python_directory: Path to the directory containing .ogn-generated Python interface files
ogn_tests_directory: Path to the directory containing .ogn-generated Python test scripts
ogn_usd_directory: Path to the directory containing .ogn-generated USD template files
Internal:
extension_root: Top level directory where the extension is defined
python_directory: Directory in which the Python import root for this extension is found
import_path: Import path for the Python root of this extension (e.g. omni.my.example)
Also used to determine the extension subdirectory
(e.g. $extension_root/omni.my.example/omni/my/example)
"""
def __init__(self, extension_root: str, import_path: str):
"""Initialize the location information for an extension
Args:
extension_root: Root directory of the extension to be managed.
"""
self._extension_root = extension_root
self._import_path = import_path
self.extension_directory = None
self.python_directory = None
self.ogn_docs_directory = None
self.ogn_include_directory = None
self.ogn_nodes_directory = None
self.ogn_python_directory = None
self.ogn_tests_directory = None
self.ogn_usd_directory = None
self.extension_name = None
self.rebuild_configuration()
# ================================================================================
def __str__(self):
"""Returns a string with the class's information nicely formatted"""
return "\n".join(
[
f"Root = {self._extension_root}",
f"Import = {self._import_path}",
f"Directory = {self.extension_directory}",
f"Python Directory = {self.python_directory}",
f"Name = {self.extension_name}",
]
)
# ================================================================================
def rebuild_configuration(self):
"""Reset all of the internal file paths and variables based on current configurations"""
self.extension_directory = os.path.join(self._extension_root, self._import_path)
self.python_directory = os.path.join(self.extension_directory, *self._import_path.split("."))
# extension_name is an InterCaps version of the import_path, plus the word "Extension"
# e.g. omni.my.example -> OmniMyExampleExtension
self.extension_name = "".join(word.capitalize() for word in self._import_path.split(".")) + "Extension"
self.ogn_nodes_directory = os.path.join(self.python_directory, "nodes")
self.ogn_python_directory = os.path.join(self.python_directory, "ogn")
self.ogn_docs_directory = os.path.join(self.ogn_python_directory, "docs")
self.ogn_include_directory = os.path.join(self.ogn_python_directory, "include")
self.ogn_tests_directory = os.path.join(self.ogn_python_directory, "tests")
self.ogn_usd_directory = os.path.join(self.ogn_python_directory, "usd")
# ================================================================================
@property
def import_path(self):
"""Returns the current value of the extension's import path"""
return self._import_path
@import_path.setter
def import_path(self, new_import_path: str):
"""Sets the import path to the new location, updating all internal file paths and variables
Note that if you call this all of your existing paths will be reconfigured to the default layout.
Args:
new_import_path: Python import path for the extension
Raises:
ValueError if new_import_path is not a valid path
"""
if not self.validate_import_path(new_import_path):
raise ValueError(
"Import path must be a valid Python name with dot-separated components consisting of the uppercase"
" and lowercase letters A through Z, the underscore _ and, except for the first character, the digits"
f" 0 through 9. '{new_import_path}' does not satisfy that requirement."
)
self._import_path = new_import_path
self.rebuild_configuration()
@staticmethod
def validate_import_path(import_path: str) -> bool:
"""Returns True iff the given import path has a legal name"""
re_path_component_name = re.compile("^[_A-Za-z][_0-9A-Za-z]*$")
return all(re_path_component_name.match(path_component) for path_component in import_path.split("."))
# ================================================================================
@property
def extension_root(self):
"""Get the current value of the extension's root directory"""
return self._extension_root
@extension_root.setter
def extension_root(self, new_root_directory: str):
"""Sets the root directory of the extension the new location, updating all internal file paths and variables
Note that if you call this all of your existing paths will be reconfigured to the default layout.
Args:
new_root_directory: Root directory for the extension files
"""
self._extension_root = new_root_directory
self.rebuild_configuration()
# ================================================================================
def create_directory_tree(self):
"""Create all of the directories that comprise the OGN-enabled extension, including locations for new nodes"""
os.makedirs(os.path.join(self.extension_directory, "config"), exist_ok=True)
os.makedirs(os.path.join(self.extension_directory, "docs"), exist_ok=True)
os.makedirs(self.ogn_docs_directory, exist_ok=True)
os.makedirs(self.ogn_include_directory, exist_ok=True)
os.makedirs(self.ogn_nodes_directory, exist_ok=True)
os.makedirs(self.ogn_python_directory, exist_ok=True)
os.makedirs(self.ogn_tests_directory, exist_ok=True)
os.makedirs(self.ogn_usd_directory, exist_ok=True)
# Linking the nodes directory accomplishes the dual goals of keeping the generated and handwritten code
# separated, while still encapsulating everything needed for OGN inside a single directory.
try:
create_symbolic_link(self.ogn_nodes_directory, os.path.join(self.ogn_python_directory, "nodes"))
except Exception as error: # noqa: PLW0703
dbg_reg(f"Could not symlink to {self.ogn_nodes_directory}, will look for directory named 'nodes' - {error}")
# ================================================================================
def __remove_directory_contents(self, directory_path: str, file_pattern: str):
"""Remove the specified files in a directory
Args:
directory_path: Path to the directory from which to remove the files
file_pattern: Regular expression string specifying which files are to be removed
"""
file_matcher = re.compile(file_pattern)
for file_to_check in os.listdir(directory_path):
if file_matcher.match(file_to_check):
try:
os.remove(os.path.join(directory_path, file_to_check))
except Exception as error: # noqa: PLW0703
dbg_reg(f"Could not remove generated file {file_to_check} from {directory_path} - {error}")
# ================================================================================
def remove_generated_files(self):
"""Delete all of the files under the extension that are automatically generated from a .ogn file"""
self.__remove_directory_contents(self.ogn_python_directory, r".*\.py$")
self.__remove_directory_contents(self.ogn_docs_directory, r".*\.rst$")
self.__remove_directory_contents(self.ogn_include_directory, r".*\.h$")
self.__remove_directory_contents(self.ogn_tests_directory, r".*\.py$")
self.__remove_directory_contents(self.ogn_usd_directory, r".*\.usda$")
# ================================================================================
def write_extension_init(self, force: bool):
"""Writes out the extension's main __init__.py file, responsible for setting up the extension"""
init_file_path = os.path.join(self.python_directory, "__init__.py")
if not force and os.path.isfile(init_file_path):
return
try:
with open(init_file_path, "w", newline="\n", encoding="utf-8") as init_fd:
init_fd.write(
f"""
import omni.ext
from .ogn import *
# Any class derived from `omni.ext.IExt` in a top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when the extension is enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() will be called.
class {self.extension_name}(omni.ext.IExt):
# ext_id is the current extension id. It can be used with the extension manager to query additional information,
# such as where this extension is located in the filesystem.
def on_startup(self, ext_id):
print("[{self._import_path}] {self.extension_name} startup", flush=True)
def on_shutdown(self):
print("[{self._import_path}] {self.extension_name} shutdown", flush=True)
"""
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write extension's init file {init_file_path} - {error}")
# ================================================================================
def write_ogn_init(self, force: bool):
"""Write out the OGN __init__.py file that registers all of the nodes in the ogn/ subdirectory"""
init_file_path = os.path.join(self.python_directory, "ogn", "__init__.py")
if not force and os.path.isfile(init_file_path):
return
try:
with open(init_file_path, "w", newline="\n", encoding="utf-8") as init_fd:
init_fd.write(
f'''
"""
Dynamically import every file in a directory tree that looks like a Python Ogn Node.
This includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.
"""
import omni.graph.core as og
og.register_ogn_nodes(__file__, "{self._import_path}")
'''
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write ogn's init file {init_file_path} - {error}")
# ================================================================================
def write_extension_toml(self, force: bool):
"""Write out the extension definition file, used for configuring it when the extension loads"""
toml_file_path = os.path.join(self.extension_directory, "config", "extension.toml")
if not force and os.path.isfile(toml_file_path):
return
try:
with open(toml_file_path, "w", newline="\n", encoding="utf-8") as toml_fd:
toml_fd.write(
f"""
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = []
# The title and description fields are primarly for displaying extension info in UI
title = "Omniverse Graph Extension Example"
description="Example extension for OmniGraph nodes."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository="https://gitlab-master.nvidia.com/omniverse/kit-extensions/example"
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["kit", "omnigraph"]
# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["Ogn*Database.py"]
[dependencies]
"omni.kit.test" = {{}}
"omni.graph" = {{}}
# Main python module this extension provides, it will be publicly available as "import {self._import_path}".
[[python.module]]
name = "{self._import_path}"
# Additional python module with tests, to make them discoverable by test system.
[[python.module]]
name = "{self._import_path}.ogn.tests"
"""
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write extension's configuration file {toml_file_path} - {error}")
# ================================================================================
def write_readme(self, force: bool):
"""Write out the README.md file that the extension uses to identify itself in the extension window"""
readme_path = os.path.join(self.extension_directory, "docs", "README.md")
if not force and os.path.isfile(readme_path):
return
try:
with open(readme_path, "w", newline="\n", encoding="utf-8") as readme_fd:
readme_fd.write(
f"""
# OmniGraph Extension [{self._import_path}]
Extension with implementation of some OmniGraph nodes
"""
)
except Exception as error: # noqa: PLW0703
warn(f"Could not write extension's description file {readme_path} - {error}")
# ================================================================================
def write_all_files(self, force: bool = False):
    """Write all of the manually generated extension's files

    Args:
        force: If True then write out the files even if they already exist
    """
    # Emit every manually-generated file in a fixed order
    for file_writer in (
        self.write_extension_init,
        self.write_ogn_init,
        self.write_extension_toml,
        self.write_readme,
    ):
        file_writer(force)
| 15,919 | Python | 46.522388 | 120 | 0.603807 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/code_generation.py | """Interactive access to the .ogn code generator"""
import json
from typing import Dict, Optional, Union
from .generate_cpp import generate_cpp
from .generate_documentation import generate_documentation
from .generate_icon import generate_icon
from .generate_python import generate_python
from .generate_template import generate_template
from .generate_tests import generate_tests
from .generate_usd import generate_usd
from .nodes import NodeInterfaceWrapper
from .utils import OGN_PARSE_DEBUG, GeneratorConfiguration, ParseError, Settings, UnimplementedError, logger
def code_generation(
    ogn: Union[str, Dict[str, Dict]], class_name: str, extension: str, module: str, settings: Optional[Settings] = None
) -> Dict[str, str]:
    """Run the code generator on the ogn input, which is in the same JSON format as the .ogn file

    Args:
        ogn: Raw OGN data, either as a JSON string or as an already-parsed dictionary
        class_name: Base name for the OGN generated classes. e.g. OgnMyNode
        extension: Extension to which the generated node type will belong
        module: Python module from which the generated node type will be imported
        settings: Optional code generator settings that will modify the type of code generated

    Returns:
        Dictionary keyed by the kind of generated code; a key maps to None when nothing was generated for it.
            cpp      = C++ header defining the database for a node implemented in C++
            template = C++ or Python template implementation
            docs     = .rst format containing the node documentation
            icon     = path to the icon specified in the node description or None
            python   = Python database definition, for both C++ and Python nodes
            tests    = Python code implementing some simple tests on the node
            usd      = Sample USD that defines the node type as a prim template
            node     = Node wrapper object

    Raises:
        ParseError if the ogn dictionary is not parseable as legal OGN data.
    """
    # Accept either raw JSON text or a pre-parsed dictionary
    if isinstance(ogn, str):
        try:
            parsed_ogn = json.loads(ogn)
        except json.decoder.JSONDecodeError as error:
            raise ParseError("Failed to parse dictionary") from error
    else:
        parsed_ogn = ogn

    results = {}
    try:
        wrapper = NodeInterfaceWrapper(parsed_ogn, extension)
        config = GeneratorConfiguration(
            None,
            wrapper.node_interface,
            extension,
            module,
            class_name,
            None,
            OGN_PARSE_DEBUG,
            settings or Settings(),
        )
        # Unsupported attributes limit generation to documentation-only output
        everything_supported = True
        try:
            wrapper.check_support()
        except UnimplementedError as error:
            everything_supported = False
            logger.warning("Some attributes are not supported. Only documentation will be generated.\n\t%s", error)
        results["icon"] = generate_icon(config)
        results["cpp"] = generate_cpp(config, everything_supported)
        results["docs"] = generate_documentation(config)
        results["python"] = generate_python(config)
        results["template"] = generate_template(config)
        results["tests"] = generate_tests(config)
        results["usd"] = generate_usd(config)
        results["node"] = wrapper.node_interface
    except ParseError as error:
        raise ParseError("Failed to parse dictionary") from error
    return results
| 3,653 | Python | 43.024096 | 119 | 0.679989 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_icon.py | """Support for generating an icon file representing a node type in the build directory."""
import os
import re
import shutil
from typing import Optional
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, ParseError, ensure_writable_directory, logger
__all__ = ["generate_icon"]
class NodeIconGenerator(NodeInterfaceGenerator):
    """Manage the functions required to install a representative icon for a node type"""

    def __init__(self, configuration: GeneratorConfiguration):  # noqa: PLW0246
        """Set up the generator to get ready to copy the icon file

        Just passes the initialization on to the parent class. See the argument and exception descriptions there.
        """
        super().__init__(configuration)

    # ----------------------------------------------------------------------
    def interface_file_name(self) -> str:
        """Return the path to the name of the node icon file"""
        return f"{self.node_interface.name}.svg"

    # ----------------------------------------------------------------------
    def generate_node_interface(self):
        """Locate the node's icon file, copy it to the output location, and emit its extension-relative path"""
        # If there is no place to put the file then the specified path, if it exists, is considered absolute
        if self.output_path is None:
            self.out.write(self.node_interface.icon_path)
            return
        # This ensures the file is not overwritten by the base class after returning
        output_path = self.output_path
        self.output_path = None
        potential_path = self.node_interface.icon_path
        if self.node_file_path is not None:
            # If a file name was not explicit then see if a .svg file with the same name as the node exists
            if potential_path is None:
                # If no file was specified it's not an error if it doesn't exist
                potential_path = self.node_file_path.replace(".ogn", ".svg")
                if not os.path.isfile(potential_path):
                    return
            else:
                # The specified file name is relative to the node's directory; make it absolute
                potential_path = os.path.join(os.path.dirname(self.node_file_path), potential_path)
        # Only SVG icons are accepted; anything else is a .ogn authoring error
        if not potential_path.endswith(".svg"):
            raise ParseError(f"Node icon path must be an SVG file. '{potential_path}' not allowed")
        # Copy the file from the source location to the output path
        try:
            ensure_writable_directory(os.path.dirname(output_path))
            shutil.copy(potential_path, output_path)
        except Exception as error:
            raise ParseError("Failed to copy node icon file") from error
        # Find the icon path relative to the extension directory; slashes normalized so the
        # regex works on both Windows and POSIX style paths
        match = re.match(f".*/{self.extension}/(.*)", output_path.replace("\\", "/"))
        if not match:
            raise ParseError(f"Icon location '{output_path}' needs to appear under extension '{self.extension}'")
        extension_relative_path = match.group(1)
        self.out.write(extension_relative_path)
# ======================================================================
def generate_icon(configuration: GeneratorConfiguration) -> Optional[str]:
    """Create support files for the icons defined within the node

    Args:
        configuration: Information defining how and where the documentation will be generated

    Returns:
        Relative path of the icon file if it existed and was successfully installed, else None

    Raises:
        NodeGenerationError: When there is a failure in the generation of the icon files
    """
    if not configuration.node_interface.can_generate("icon"):
        return None
    logger.info("Generating icon")
    # The icon generator must not create the output directory itself. The flag is temporarily
    # disabled and restored in a finally block so the caller's configuration is left unchanged
    # even when icon generation raises (the original code leaked the modified flag on error).
    needed_directory = configuration.needs_directory
    configuration.needs_directory = False
    try:
        generator = NodeIconGenerator(configuration)
        generator.generate_interface()
    finally:
        configuration.needs_directory = needed_directory
    icon_path = str(generator.out).rstrip()
    return icon_path if icon_path else None
| 4,099 | Python | 41.708333 | 113 | 0.632349 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/category_definitions.py | """Handle the mapping of OGN types onto the various generated code types"""
import json
from io import TextIOWrapper
from pathlib import Path
from typing import IO, Dict, List, Union
from .keys import NodeTypeKeys
from .utils import ParseError, is_comment
CategoryListType = Dict[str, List[str]]
# ==============================================================================================================
def get_category_definitions(category_information: Union[str, Dict, IO, Path, None]) -> CategoryListType:
    """Get the set of category definitions specified in the category file

    Args:
        category_information: Reference to a file containing a category dictionary or the dictionary itself.
            May be raw JSON text, an open text file, a Path to a JSON file, an already-parsed
            dictionary, or None (which yields an empty result).

    Returns:
        Dictionary of MainCategory:SubCategories of category types found in the file

    Raises:
        ParseError if the file could not be parsed
    """
    try:
        definitions = {}
        if category_information is None:
            pass
        elif isinstance(category_information, str):
            # Raw JSON text containing the category dictionary
            definitions = json.loads(category_information)[NodeTypeKeys.CATEGORY_DEFINITIONS]
        elif isinstance(category_information, TextIOWrapper):
            # An already-opened file object; the caller owns its lifetime
            definitions = json.load(category_information)[NodeTypeKeys.CATEGORY_DEFINITIONS]
        elif isinstance(category_information, Path):
            # Use a context manager so the file handle is closed promptly
            # (the original opened the file without ever closing it)
            with category_information.open("r") as category_fd:
                definitions = json.load(category_fd)[NodeTypeKeys.CATEGORY_DEFINITIONS]
        elif isinstance(category_information, Dict):
            definitions = category_information
        else:
            raise ParseError(f"Category definition file type not handled - {category_information}")
    except OSError as error:
        raise ParseError(f"File error when parsing category definitions {category_information} - {error}") from None
    except json.decoder.JSONDecodeError as error:
        raise ParseError(f"Invalid JSON formatting in file {category_information} - {error}") from None
    # Filter out the comments before returning the dictionary
    definitions = {key: value for key, value in definitions.items() if not is_comment(key)}
    return definitions
# ==============================================================================================================
def merge_category_definitions(
    merged_definitions: CategoryListType, definitions_to_merge: Union[str, Dict, IO, Path, None]
):
    """Merge the second set of category definitions with the first one"""
    # Parse the incoming definitions first, then fold them into the accumulated dictionary
    incoming = get_category_definitions(definitions_to_merge)
    merged_definitions.update(incoming)
| 2,524 | Python | 43.298245 | 116 | 0.658479 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/FloatAttributeManager.py | """
Contains the support class for managing attributes whose data is single precision numbers
"""
from typing import Any, List
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, is_number_or_list_of_numbers, values_in_range
class FloatAttributeManager(NumericAttributeManager):
    """Support class for attributes of type float (32-bit single precision)"""

    OGN_TYPE = "float"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "float": CppConfiguration("float", cast_required=False),
        "float[2]": CppConfiguration("pxr::GfVec2f", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "float[3]": CppConfiguration("pxr::GfVec3f", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "float[4]": CppConfiguration("pxr::GfVec4f", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
    }
    # NOTE(review): "float[2]" maps to CUDA "float3", same as "float[3]" — this looks like a
    # copy/paste slip ("float2" expected) but is left unchanged pending confirmation against
    # the generated CUDA code.
    CUDA_CONFIGURATION = {
        "float": CudaConfiguration("float", cast_required=False),
        "float[2]": CudaConfiguration("float3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "float[3]": CudaConfiguration("float3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "float[4]": CudaConfiguration("float4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True then wrap values for USD test files, using Gf vector types for tuples
        """
        values = [4.5, 2.5]
        if self.tuple_count > 1:
            values = [tuple(value + i * 0.125 for i in range(self.tuple_count)) for value in values]
            if for_usd:
                from pxr import Gf

                # e.g. tuple_count 3 -> Gf.Vec3f
                gf_type = getattr(Gf, f"Vec{self.tuple_count}f")
                values = [gf_type(*value) for value in values]  # noqa: PLE1133
        if self.array_depth > 0:
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        return NumericAttributeManager.TYPE_FLOAT

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid float value"""
        if not is_number_or_list_of_numbers(value, self.tuple_count):
            raise ParseError(f"Value {value} on a float[{self.tuple_count}] attribute is not a matching type")
        # Values not representable exactly due to precision considerations are still accepted.
        # Bounds are the 32-bit IEEE-754 maximum magnitude (~3.402823466e38). The previous
        # constant 3402823400e38 evaluated to ~3.4e47, a factor of 1e9 too permissive, so
        # out-of-range 32-bit values slipped through validation.
        if not values_in_range(value, -3.402823466e38, 3.402823466e38):
            raise ParseError(f"Value {value} on a 32-bit float[{self.tuple_count}] attribute is out of range")
        super().validate_value(value)

    @staticmethod
    def tuples_supported() -> List[int]:
        """USD supports only these tuples natively so restrict support to them for now"""
        return [1, 2, 3, 4]

    def cpp_includes(self) -> List[str]:
        """Tack on the include implementing the USD tuple type"""
        regular_includes = super().cpp_includes()
        if self.tuple_count in [2, 3, 4]:
            regular_includes.append("omni/graph/core/ogn/UsdTypes.h")
        return regular_includes

    def python_type_name(self):
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("float")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "Float"

    def usd_type_name(self):
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("float")
| 3,972 | Python | 46.297618 | 115 | 0.656093 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/TokenAttributeManager.py | """
Contains the support class for managing attributes whose data is strings represented as tokens
"""
import json
from contextlib import suppress
from typing import Any, Dict, List
from ..utils import IndentedOutput, MetadataKeys, ParseError, check_token_name
from .AttributeManager import AttributeManager, CppConfiguration, CudaConfiguration
from .parsing import is_type_or_list_of_types
class TokenAttributeManager(AttributeManager):
    """Support class for attributes of type curated unique string.

    The values passed around are still strings, it is only the internal data type that is different
    from a regular string (NameToken instead of std::string)
    """

    OGN_TYPE = "token"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "token": CppConfiguration("NameToken", cast_required=False),
    }
    CUDA_CONFIGURATION = {
        "token": CudaConfiguration("NameToken", cast_required=False),
    }

    def __init__(self, attribute_name: str, attribute_type_name: str):
        """Initialize the token-based attribute information

        Args:
            attribute_name: Name to use for this attribute
            attribute_type_name: Unique name for this attribute type
        """
        super().__init__(attribute_name, attribute_type_name)
        # Raw allowedTokens value from metadata; may be a str, list, or dict (see get_allowed_tokens)
        self.__allowed_tokens = {}

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: Ignored - values are set as-is in Python
        """
        values = ["Ahsoka", "Tano"]
        if self.tuple_count > 1:
            # Differentiate tuple members by appending an increasing number of "x" characters
            values = [tuple(value + "x" * i for i in range(self.tuple_count)) for value in values]
        if self.array_depth > 0:
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    @staticmethod
    def tuples_supported() -> List[int]:
        """Simple values are supported, no multiple tuples"""
        return [1]

    def get_allowed_tokens(self) -> Dict[str, str]:
        """Returns a dictionary of tokens allowed on this attribute type, raising a ParseError if they are not legal.

        This dictionary will be merged into the main node token dictionary, with duplicates removed. This is done
        separately from regular metadata and token parsing since it crosses both worlds with different requirements"""
        allowed_tokens = {}
        raw_tokens = self.__allowed_tokens
        if isinstance(raw_tokens, str):
            # A comma-separated string of token names
            token_list = raw_tokens.split(",")
            allowed_tokens = {check_token_name(token_name): token_name for token_name in token_list}
        elif isinstance(raw_tokens, list):
            allowed_tokens = {check_token_name(token): token for token in raw_tokens}
        elif isinstance(raw_tokens, dict):
            # Dictionary form maps a safe name onto the actual token value
            allowed_tokens = {check_token_name(token): value for token, value in raw_tokens.items()}
        else:
            raise ParseError(f"allowedTokens can only be a string, list, or dictionary - '{raw_tokens}'")
        return allowed_tokens

    def parse_metadata(self, metadata: Dict[str, Any]):
        """Parse the metadata attached to the attribute type.

        Overrides to this method can add additional interpretation of special metadata.
        """
        super().parse_metadata(metadata)
        with suppress(KeyError):
            self.__allowed_tokens = metadata[MetadataKeys.ALLOWED_TOKENS]
            # The allowed tokens could be a dictionary, the keys of which would be lost if only the processed version
            # of the tokens was saved in the metadata so add in the raw data as well.
            self.metadata[MetadataKeys.ALLOWED_TOKENS_RAW] = json.dumps(self.__allowed_tokens)

    def cpp_default_initializer(self):
        """Default value setting is delayed so only set the array pointers to null if required."""
        return "nullptr, 0" if self.array_depth > 0 else None

    def cpp_pre_initialization(self, out: IndentedOutput):
        """If there is a default, output the code to initialize the token from the string default"""
        super().cpp_pre_initialization(out)
        if not self.default:
            return
        default_variable_name = f"{self.namespace}::{self.cpp_variable_name()}"
        size_param = ""
        if isinstance(self.default, list):
            # Array defaults become a std::array of token handles obtained from the string values
            default_to_set = f"std::array<NameToken, {len(self.default)}>{{"
            default_strings = [f'"{value}"' for value in self.default]  # noqa: PLE1133
            default_to_set += ", ".join([f"iToken.getHandle({value})" for value in default_strings])
            default_to_set += "}.data()"
            size_param = f", {len(self.default)}"
        else:
            default_to_set = f'iToken.getHandle("{self.default}")'
        out.write(f"{default_variable_name}.setDefault({default_to_set}{size_param});")

    def cpp_element_value(self, value, remaining_depth: int = None):
        """String defaults must be quoted - use the json library to do it right"""
        return json.dumps(value) if value is not None else None

    def cuda_includes(self) -> List[str]:
        """Cuda cannot include iComputeGraph so it directly includes the handle definition file for token access"""
        includes = super().cuda_includes()
        includes.append("omni/graph/core/Handle.h")
        return includes

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid string value"""
        if not is_type_or_list_of_types(value, str, self.tuple_count):
            raise ParseError(f"Value {value} on a token[{self.tuple_count}] attribute is not a matching type")
        super().validate_value(value)

    def python_value(self, value):
        """Token defaults must be quoted - use the json library to do it right"""
        return json.dumps(value) if value is not None else None

    def value_for_test(self, value):
        """The test data runs through JSON so there is no need to add outer quotes"""
        return value

    def python_type_name(self):
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("str")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "Token"

    def usd_type_name(self):
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("token")

    def empty_base_value(self) -> str:
        """Return the default value used when a token has no explicit default"""
        # NOTE(review): returns a plain empty string; the original docstring mentioned quotes
        # but none are added here — confirm whether quoting is expected by callers
        return ""
| 6,798 | Python | 44.630872 | 118 | 0.65431 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/StringAttributeManager.py | """
Contains the support class for managing attributes whose data is strings
"""
import json
from typing import Any, List
from ..keys import CudaPointerValues, MemoryTypeValues
from ..utils import ParseError
from .AttributeManager import AttributeManager, CppConfiguration, CudaConfiguration
from .parsing import is_type_or_list_of_types
class StringAttributeManager(AttributeManager):
    """Support class for attributes of type string"""

    OGN_TYPE = "string"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "string": CppConfiguration("char*", cast_required=False, role="eText"),
    }
    CUDA_CONFIGURATION = {
        "string": CudaConfiguration("char*", cast_required=False, role="eText"),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: Ignored - values are set as-is in Python
        """
        values = ["Anakin", "Skywalker"]
        if self.tuple_count > 1:
            # Differentiate tuple members by appending an increasing number of "x" characters
            values = [tuple(value + "x" * i for i in range(self.tuple_count)) for value in values]
        if self.array_depth > 0:
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    @staticmethod
    def tuples_supported() -> List[int]:
        """Strings don't have tuples"""
        return [1]

    @staticmethod
    def array_depths_supported() -> List[int]:
        """String arrays are not yet supported by fabric"""
        return [0]

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid string value"""
        if not is_type_or_list_of_types(value, str, self.tuple_count):
            raise ParseError(f"Value {value} on a string[{self.tuple_count}] attribute is not a matching type")
        super().validate_value(value)

    def cpp_includes(self) -> List[str]:
        """Tack on the include implementing the string wrapper types"""
        regular_includes = super().cpp_includes()
        regular_includes.append("omni/graph/core/ogn/ArrayAttribute.h")
        return regular_includes

    def cpp_element_value(self, value):
        """String defaults must be quoted - use the json library to do it right"""
        return f"{json.dumps(value)}" if value is not None else None

    def cpp_default_initializer(self):
        """The string initializer recognizes that a simple string is actual stored as an array."""
        # Arrays of strings have the same form as regular arrays
        if self.array_depth > 0:
            return super().cpp_default_initializer()
        # Regular strings look like arrays in that they have a defined length, yet not, because they can be represented
        # as an actual string rather than a std::array
        raw_value = self.cpp_element_value(self.default)
        if not raw_value:
            # No default at all - leave the data pointer null with zero length
            return "nullptr, 0"
        return f"{raw_value}, {len(self.default)}"

    def cpp_wrapper_class(self) -> tuple:
        """Returns a (wrapper class, template argument list) pair used to access attribute data
        in the C++ database; strings are wrapped as character arrays"""
        wrapper_class = ""
        if self.is_read_only():
            modifier = "const "
            wrapper_class = "ogn::ArrayInput"
        else:
            modifier = ""
            wrapper_class = "ogn::ArrayOutput"
        template_arguments = [f"{modifier}char", MemoryTypeValues.CPP[self.memory_type]]
        if self.cuda_pointer_type is not None:
            template_arguments.append(CudaPointerValues.CPP[self.cuda_pointer_type])
        return (wrapper_class, template_arguments)

    def fabric_needs_counter(self) -> bool:
        """Even simple strings require counter variables since they are implemented as arrays"""
        return True

    def python_value(self, value):
        """String values must be quoted - use the json library to do it right"""
        return json.dumps(value) if value is not None else None

    def value_for_test(self, value):
        """The test data runs through JSON so there is no need to add outer quotes"""
        return value

    def python_type_name(self):
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("str")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "String"

    def usd_type_name(self):
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("string")

    def empty_base_value(self) -> str:
        """Return the default value used when a string has no explicit default"""
        # NOTE(review): returns a plain empty string; the original docstring mentioned quotes
        # but none are added here — confirm whether quoting is expected by callers
        return ""
| 4,924 | Python | 39.702479 | 119 | 0.649269 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/TimeCodeAttributeManager.py | """
Contains the support class for managing attributes whose data is time codes
"""
from typing import Any, List
from .AttributeManager import CppConfiguration, CudaConfiguration
from .DoubleAttributeManager import DoubleAttributeManager
class TimeCodeAttributeManager(DoubleAttributeManager):
    """Support class for all attributes of type timecode

    This is just an alias for double with a time-based interpretation
    """

    OGN_TYPE = "timecode"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "timecode": CppConfiguration("pxr::SdfTimeCode", include_files=["pxr/usd/sdf/timeCode.h"], role="eTimeCode"),
    }
    CUDA_CONFIGURATION = {
        "timecode": CudaConfiguration("double", cast_required=False),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: Ignored - values are set as-is in Python
        """
        samples = [5.0, 6.0]
        if self.tuple_count > 1:
            samples = [
                tuple(base + index * 0.125 for index in range(self.tuple_count)) for base in samples
            ]
        if self.array_depth > 0:
            samples = [samples, [samples[1], samples[0]]]
        if for_usd:
            return [[sample] for sample in samples]
        return samples

    @staticmethod
    def tuples_supported() -> List[int]:
        """Timecodes do not have tuple representation in USD"""
        return [1]

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_TIMECODE"

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "TimeCode"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_arrays("timecode")
| 2,035 | Python | 36.018181 | 117 | 0.660442 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/parsing.py | """
Constants used in parsing attributes in a .ogn file
"""
import ast
from typing import List, Tuple
from ..keys import AttributeKeys
# ======================================================================
# Legacy keyword support - use the values from keys.py for new code
KEY_ATTR_DEFAULT = AttributeKeys.DEFAULT
KEY_ATTR_DESCRIPTION = AttributeKeys.DESCRIPTION
KEY_ATTR_MINIMUM = AttributeKeys.MINIMUM
KEY_ATTR_MAXIMUM = AttributeKeys.MAXIMUM
KEY_ATTR_MEMORY_TYPE = AttributeKeys.MEMORY_TYPE
KEY_ATTR_METADATA = AttributeKeys.METADATA
KEY_ATTR_OPTIONAL = AttributeKeys.OPTIONAL
KEY_ATTR_TYPE = AttributeKeys.TYPE
KEY_ATTR_UI_NAME_METADATA = AttributeKeys.UI_NAME
KEY_ATTR_UNVALIDATED = AttributeKeys.UNVALIDATED
MANDATORY_ATTR_KEYS = AttributeKeys.MANDATORY
PROCESSED_ATTR_KEYS = AttributeKeys.PROCESSED
# ======================================================================
def attributes_as_usd(attribute_info: List[Tuple[str, str]]) -> List[str]:
    """Returns a list of the attribute definitions in the USDA file format

    Most attributes are listed as their normal type, with a few exceptions:
        prim:            Output bundles
        any:             Extended attribute type with any value
        union[a,b,c...]  Extended attribute type with any of the types a, b, c...
    """
    usd_lines = []
    extended_metadata = {}
    for attr_type, attr_name in attribute_info:
        if attr_type == "bundle":
            usd_lines.append(f'def Output "{attr_name}" {{ }}')
        elif attr_type == "any":
            usd_lines.append(f'custom token {attr_name} = "any"')
            extended_metadata[attr_name] = "ExtendedAttributeType-->Any"
        elif attr_type.startswith("union"):
            # The text after "union" is a Python-style list of accepted type names
            accepted_types = ",".join(ast.literal_eval(attr_type[5:]))
            usd_lines.append(f'custom token {attr_name} = "union of {accepted_types}"')
            extended_metadata[attr_name] = f"ExtendedAttributeType-->Union->{accepted_types}"
        else:
            usd_lines.append(f"custom {attr_type} {attr_name}")
    # Extended attribute types need a metadata prim to record their accepted types
    if extended_metadata:
        usd_lines.append('def ComputeNodeMetaData "metaData"')
        usd_lines.append("{")
        usd_lines.extend(
            f'    custom token {name} = "{information}"' for name, information in extended_metadata.items()
        )
        usd_lines.append("}")
    return usd_lines
# ======================================================================
def is_type_or_list_of_types(value, type_definition, type_count: int):
    """Return True if the value is of the type passed in, or is a list of those types of the defined length"""
    # A single value only matches when exactly one element was expected
    if type_count == 1 and isinstance(value, type_definition):
        return True
    # Otherwise the value must be a list of the expected length with every member matching
    if not isinstance(value, list) or len(value) != type_count:
        return False
    return all(isinstance(member, type_definition) for member in value)
# Support for separating roles and types
SUFFIX_TO_TYPE = {"f": "float", "d": "double", "h": "half"}


# ======================================================================
def separate_ogn_role_and_type(raw_type_name: str) -> Tuple[str, str]:
    """Extract the base data type and role name from a raw OGN type, which could include a role"""
    # Roles spelled with a precision suffix, e.g. "quatf" -> float data with a quat role
    role_stems = ("quat", "matrix", "normal", "point", "color", "texcoord", "vector")
    if raw_type_name[:-1] in role_stems:
        return (SUFFIX_TO_TYPE[raw_type_name[-1]], raw_type_name[:-1])
    # Fixed aliases whose underlying data type is implied by the role name itself
    fixed_roles = {
        "frame": ("double", "frame"),
        "transform": ("double", "transform"),
        "timecode": ("double", "timecode"),
        "execution": ("uint", "execution"),
        "string": ("uchar", "text"),
        "path": ("uchar", "path"),
    }
    return fixed_roles.get(raw_type_name, (raw_type_name, "none"))
# ======================================================================
def usd_type_name(type_name: str, tuple_count: int, is_array: bool) -> str:
    """Returns the USD type_name for the attribute with the given parameters

    Args:
        type_name: Base type (int, float, ...)
        tuple_count: Number of fixed elements (int,2 -> [int, int])
        is_array: True if the attribute has a variable number of elements ([int, int, ...])

    e.g. (int, 2, False)  -> "int2"
         (float, 3, True) -> "float3[]"

    Returns:
        A string containing the USD version of the constructed name
    """
    result = type_name
    if tuple_count > 1:
        stem = type_name[:-1]
        if stem in ("matrix", "point", "color", "texCoord", "frame", "transform"):
            # Role types carry the count between the role name and the precision suffix
            result = f"{stem}{tuple_count}{type_name[-1]}"
        elif stem != "quat":
            # Quaternions are assumed to be 4 so do not have the tuple count added
            result = f"{type_name}{tuple_count}"
    return f"{result}[]" if is_array else result
# ======================================================================
def sdf_type_name(value_type_name: str, tuple_count: int, is_array: bool) -> str:
    """Returns the SDF ValueTypeName for the attribute with the given parameters

    Args:
        value_type_name: Base Sdf type
        tuple_count: Number of fixed elements
        is_array: True if the attribute has a variable number of elements

    e.g. (Int, 2, False)  -> "Int2"
         (Float, 3, True) -> "Float3Array"

    Returns:
        A string containing the SDF ValueTypeName version corresponding to the attribute parameters
    """
    result = value_type_name
    if tuple_count > 1:
        stem = value_type_name[:-1]
        if stem in ("Color", "Frame", "Matrix", "Normal", "Point", "TexCoord", "Transform", "Vector"):
            # Role types carry the count between the role name and the precision suffix
            result = f"{stem}{tuple_count}{value_type_name[-1]}"
        elif stem != "Quat":
            # Quaternions are assumed to be 4 so do not have the tuple count added
            result = f"{value_type_name}{tuple_count}"
    return f"{result}Array" if is_array else result
| 5,809 | Python | 40.798561 | 118 | 0.585127 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/ObjectIdAttributeManager.py | """
Contains the support class for managing attributes whose data is strings represented as tokens
"""
from .AttributeManager import CppConfiguration, CudaConfiguration
from .UInt64AttributeManager import UInt64AttributeManager
class ObjectIdAttributeManager(UInt64AttributeManager):
    """Support class for attributes of type objectId.

    The values passed around are the same as uint64, except that they are interpreted as references to objects
    in the scene from a managed object store. The mechanism for interpreting them differently is the metadata
    stored on the attribute.
    """

    OGN_TYPE = "objectId"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "objectId": CppConfiguration("uint64_t", cast_required=False, role="eObjectId"),
    }
    CUDA_CONFIGURATION = {
        "objectId": CudaConfiguration("uint64_t", cast_required=False),
    }

    def create_type_name(self) -> str:
        """When creating the attribute this special type name will set up the underlying metadata correctly"""
        if self.array_depth == 1:
            return "objectId[]"
        return "objectId"

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_OBJECT_ID"
| 1,342 | Python | 40.968749 | 110 | 0.726528 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/naming.py | """This file contains support for the various utilities and constants used to manage attribute naming."""
from __future__ import annotations
import re
from typing import Any, Dict, Optional, Tuple
from ..utils import ParseError
# ======================================================================
# Namespaces for attribute types - the prefix that appears before the ":" in a fully qualified attribute name
INPUT_NS = "inputs"
OUTPUT_NS = "outputs"
STATE_NS = "state"
# All recognized port namespaces, in canonical order
ALL_NS = [INPUT_NS, OUTPUT_NS, STATE_NS]
# ======================================================================
# Port type enum names corresponding to the namespaces (emitted verbatim into generated Python code)
PORT_NAMES = {
    INPUT_NS: "og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT",
    OUTPUT_NS: "og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT",
    STATE_NS: "og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE",
}
# ======================================================================
# Unique identifier of attribute types (corresponds to the C++ enum value for each type)
INPUT_GROUP = "ogn::kOgnInput"
OUTPUT_GROUP = "ogn::kOgnOutput"
STATE_GROUP = "ogn::kOgnState"
# Pattern for legal attribute names, not including the applied type-namespace prefix
# - starts with a letter or underscore
# - then an arbitrary number of alphanumerics, underscores, dots, or colons (colon is namespace separator)
RE_ATTRIBUTE_NAME = re.compile("^[A-Za-z_][A-Za-z0-9_:.]*$")
# Human-readable description of RE_ATTRIBUTE_NAME, used in error messages
ATTR_NAME_REQUIREMENT = (
    "Attribute name must be a letter or underscore followed by letters, numbers, or"
    ' the special characters "_", ":", or "."'
)
# Requirements for user-friendly names. Only quotes are prohibited as they are problematic.
RE_ATTRIBUTE_UI_NAME = re.compile("^[^'\"]*$")
ATTR_UI_NAME_REQUIREMENT = "User-friendly attribute name cannot contain a quote"
# ======================================================================
def namespace_of_group(attribute_group: str) -> str:
    """Returns the namespace for the attributes of the given type"""
    group_to_namespace = {
        INPUT_GROUP: INPUT_NS,
        OUTPUT_GROUP: OUTPUT_NS,
        STATE_GROUP: STATE_NS,
    }
    try:
        return group_to_namespace[attribute_group]
    except KeyError:
        # "from None" keeps the ParseError unchained, matching a plain raise
        raise ParseError(f"Attribute with unknown type {attribute_group}") from None
# ======================================================================
def attribute_name_in_namespace(attribute_name: str, namespace: str) -> str:
    """Returns the attribute_name with the namespace prepended - ignores if it is already there

    Handles the case of nested namespaces by prepending in those cases as well. Does not look for a degenerate
    case like the named namespace nested inside of a different one (e.g. change ("a:b:c", "b") to just "b:c")
    """
    (existing_namespace, base_name) = split_attribute_name(attribute_name)
    if existing_namespace == namespace:
        # Already correctly namespaced - return unchanged
        return attribute_name
    if existing_namespace is None:
        return f"{namespace}:{base_name}"
    # A different namespace is present; nest the entire original name under the requested one
    return f"{namespace}:{attribute_name}"
# ======================================================================
def is_input_name(attribute_name: str) -> bool:
    """Returns True if the attribute_name lives in the input attribute namespace"""
    # The input namespace only ever appears at the top level so a prefix test is sufficient
    input_prefix = f"{INPUT_NS}:"
    return attribute_name.startswith(input_prefix)
# ======================================================================
def is_output_name(attribute_name: str) -> bool:
    """Returns True if the attribute_name lives in the output attribute namespace"""
    # The output namespace only appears at the top level; both the ":" and "_" separators are accepted
    return attribute_name.startswith((f"{OUTPUT_NS}:", f"{OUTPUT_NS}_"))
# ======================================================================
def is_state_name(attribute_name: str) -> bool:
    """Returns True if the attribute_name lives in the state attribute namespace"""
    # The state namespace only ever appears at the top level so a prefix test is sufficient
    state_prefix = f"{STATE_NS}:"
    return attribute_name.startswith(state_prefix)
# ======================================================================
def split_attribute_name(attribute_name: str) -> Tuple[Optional[str], str]:
    """Split a fully namespaced attribute name into its (namespace, base_name) parts.

    The namespace is everything before the first ":"; a name with no ":" yields None for the
    namespace. Nested namespaces remain attached to the base name, e.g. "inputs:a:b" -> ("inputs", "a:b").
    """
    namespace, separator, base_name = attribute_name.partition(":")
    if not separator:
        # No namespace separator present at all
        return (None, attribute_name)
    return (namespace, base_name)
# ======================================================================
def attribute_name_without_port(attribute_name: str) -> str:
    """Returns the attribute name with its leading port namespace removed.

    Only the leading port prefix is stripped. The previous implementation used str.replace(),
    which also removed any *later* occurrence of the prefix embedded inside the name (attribute
    names may legally contain colons, so nested occurrences are possible).
    """
    for prefix in [f"{OUTPUT_NS}:", f"{OUTPUT_NS}_", f"{INPUT_NS}:", f"{STATE_NS}:"]:
        if attribute_name.startswith(prefix):
            # Slice off the prefix rather than replace() so interior matches are preserved
            return attribute_name[len(prefix) :]
    return attribute_name
# ======================================================================
def attribute_name_as_python_property(attribute_name: str) -> str:
    """
    Returns the attribute name in a form suitable for a Python property, with the namespace stripped off and
    any ":" separators changed to "_"

    Note that the prefix test is deliberately loose (no trailing ":") so that the character after the
    namespace - whether ":" or "_" - is stripped along with it.
    """
    for port_namespace in (INPUT_NS, OUTPUT_NS, STATE_NS):
        if attribute_name.startswith(port_namespace):
            raw_name = attribute_name[len(port_namespace) + 1 :]
            break
    else:
        # No recognized port namespace - drop any generic namespace prefix instead
        raw_name = split_attribute_name(attribute_name)[1]
    return raw_name.replace(":", "_")
# ======================================================================
def check_attribute_name(attribute_name: str):
    """Returns a pair of (namespace, base_name) if the attribute name is legal

    Raises:
        ParseError: Attribute name is not legally constructed
    """
    properly_namespaced = (
        is_input_name(attribute_name) or is_output_name(attribute_name) or is_state_name(attribute_name)
    )
    if not properly_namespaced:
        raise ParseError(f'Attribute name "{attribute_name}" is not correctly namespaced as input, output, or state')
    (actual_namespace, base_name) = split_attribute_name(attribute_name)
    if RE_ATTRIBUTE_NAME.match(base_name) is None:
        raise ParseError(ATTR_NAME_REQUIREMENT)
    return (actual_namespace, base_name)
# ======================================================================
def check_attribute_ui_name(attribute_ui_name: str):
    """Raises ParseError if the user-friendly attribute name is illegal (contains a quote)"""
    if RE_ATTRIBUTE_UI_NAME.match(attribute_ui_name) is None:
        raise ParseError(ATTR_UI_NAME_REQUIREMENT)
# ======================================================================
def assemble_attribute_type_name(
    type_name: str,
    tuple_count: int,
    array_depth: int,
    extra_info: Optional[Dict[str, Any]] = None,
):
    """Assemble a fully qualified attribute name from its constituent parts.

    Basically the reversal of split_attribute_type_name().
    This method does no validation; use management.py:validate_attribute_type_name for that

    Args:
        type_name: Base name of the attribute type; "union" returns the list of accepted types instead
        tuple_count: Number of tuple elements in the attribute type
        array_depth: Levels of arrays in the attribute type
        extra_info: For unions, the dictionary whose keys are the accepted member type names

    Returns:
        The assembled type name string, or for unions the list of accepted type names
    """
    if type_name == "union":
        # A union is represented by its list of accepted types. Tuple/array suffixes are not
        # applied here - the old code did full_type += "[N]" on a *list*, which appended the
        # suffix one character at a time and corrupted the result.
        return list(extra_info.keys()) if extra_info else []
    full_type = type_name
    if tuple_count > 1:
        full_type += f"[{tuple_count}]"
    full_type += "[]" * array_depth
    return full_type
| 7,740 | Python | 41.532967 | 118 | 0.60478 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/DoubleAttributeManager.py | """
Contains the support class for managing attributes whose data is double precision numbers
"""
from typing import Any, List
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, is_number_or_list_of_numbers
class DoubleAttributeManager(NumericAttributeManager):
    """Support class for attributes of type double.

    As Python does not really have a double type, float values are used for that portion of the code.
    """

    OGN_TYPE = "double"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "double": CppConfiguration("double", cast_required=False),
        "double[2]": CppConfiguration("pxr::GfVec2d", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "double[3]": CppConfiguration("pxr::GfVec3d", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "double[4]": CppConfiguration("pxr::GfVec4d", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
    }
    CUDA_CONFIGURATION = {
        "double": CudaConfiguration("double", cast_required=False),
        # BUGFIX: double[2] previously mapped to the CUDA "double3" type; the 2-component
        # CUDA vector type is double2 (matching the double[3]->double3, double[4]->double4 pattern)
        "double[2]": CudaConfiguration("double2", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "double[3]": CudaConfiguration("double3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "double[4]": CudaConfiguration("double4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True return as the data type used to set the value in USD attributes, else Python values
        """
        values = [4.125, 2.125]
        if self.tuple_count > 1:
            values = [tuple(value + i * 0.125 for i in range(self.tuple_count)) for value in values]
            if for_usd:
                from pxr import Gf

                gf_type = getattr(Gf, f"Vec{self.tuple_count}d")
                values = [gf_type(*value) for value in values]  # noqa: PLE1133
        if self.array_depth > 0:
            values = [values, [values[1], values[0]]]
        # USD test values are wrapped in a list to represent time samples
        return [[value] for value in values] if for_usd else values

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        return NumericAttributeManager.TYPE_DECIMAL

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid double value"""
        if not is_number_or_list_of_numbers(value, self.tuple_count):
            raise ParseError(f"Value {value} on a double[{self.tuple_count}] attribute is not a matching type")
        super().validate_value(value)

    @staticmethod
    def tuples_supported() -> List[int]:
        """USD supports only these tuples natively so restrict support to them for now"""
        return [1, 2, 3, 4]

    def python_type_name(self) -> str:
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("float")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "Double"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("double")
| 3,527 | Python | 44.818181 | 117 | 0.657499 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/NumericAttributeManager.py | """
Contains the support class for managing attributes whose data is all types of numeric values
Exports:
is_number
is_number_or_list_of_numbers
NumericAttributeManager
values_in_range
"""
from typing import List, Union
from ..utils import ParseError
from .AttributeManager import AttributeManager, PropertySet
from .parsing import KEY_ATTR_MAXIMUM, KEY_ATTR_MINIMUM
# ======================================================================
def is_number(value):
    """Return True if the value is an int or float, explicitly excluding booleans"""
    # bool is a subclass of int so it must be filtered out explicitly
    return isinstance(value, (int, float)) and not isinstance(value, bool)
# ======================================================================
def is_number_or_list_of_numbers(value, type_count: int):
    """Return True if the value is a number or a list of "type_count" numbers, not including booleans"""

    def _scalar_is_number(candidate):
        # bool is a subclass of int so it must be rejected explicitly
        return isinstance(candidate, (int, float)) and not isinstance(candidate, bool)

    if isinstance(value, list):
        return len(value) == type_count and all(_scalar_is_number(member) for member in value)
    return type_count == 1 and _scalar_is_number(value)
# ======================================================================
def values_in_range(value, min_value: int, max_value: int):
    """Return True if the value is a number or list of numbers in the range [min_value,max_value]"""
    # Normalize a scalar to a one-element list so both cases share the same check
    members = value if isinstance(value, list) else [value]
    return all(min_value <= member <= max_value for member in members)
# ======================================================================
class NumericAttributeManager(AttributeManager):
    """Support class for attributes with simple numeric types

    Attributes:
        minimum: Minimum allowable value of the numeric attribute. None means no minimum.
        maximum: Maximum allowable value of the numeric attribute. None means no maximum.
    """

    # Convenience types for determining numerical type information about these attributes with numerical_type()
    TYPE_OTHER = 0
    TYPE_INTEGER = 1
    TYPE_UNSIGNED_INTEGER = 2
    TYPE_DECIMAL = 3
    TYPE_FLOAT = 4
    TYPE_OBJECT_ID = 5

    def __init__(self, attribute_name: str, attribute_type_name: str):
        """Initialize the numeric attribute information

        Args:
            attribute_name: Name to use for this attribute
            attribute_type_name: Type of this attribute
        """
        super().__init__(attribute_name, attribute_type_name)
        self.minimum = None
        self.maximum = None

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        return NumericAttributeManager.TYPE_OTHER

    def cpp_element_value(self, value) -> str:
        """Ensure floating point elements have decimal values so that they are properly recognized."""
        if self.numerical_type() == self.TYPE_DECIMAL:
            return f"{value * 1.0}"
        if self.numerical_type() == self.TYPE_FLOAT:
            return f"{value * 1.0}f"
        return f"{int(value)}"

    def validate_value(self, value):
        """Validate that the given data is a matching numeric type, and in range when min/max are specified

        The parse_extra_properties() method should have been called before validating anything.

        Args:
            value: Data value to verify
        Raises:
            ParseError if the min/max range is not respected by the value
        """
        if not is_number_or_list_of_numbers(value, self.tuple_count):
            # BUGFIX: the message previously hard-coded "double" even though this base class
            # serves every numeric type; report the actual attribute type instead
            raise ParseError(
                f"Value {value} on a {self.attribute_type_name}[{self.tuple_count}] attribute"
                " is not a matching type"
            )
        self.validate_numbers_in_range(value)

    def validate_numbers_in_range(self, value):
        """Validate that the given value is in the legal numeric range, if any are specified

        Args:
            value: Data value to verify
        Raises:
            ParseError if the min/max range is not respected by the value
        """
        # Convert single values to lists for uniform handling
        values = value if isinstance(value, list) else [value]
        if self.minimum is not None:
            # BUGFIX: broadcast a scalar limit across all elements - zipping a one-element
            # [minimum] against a tuple value previously checked only the first element
            minimums = self.minimum if isinstance(self.minimum, list) else [self.minimum] * len(values)
            for single_value, minimum in zip(values, minimums):
                if single_value < minimum:
                    raise ParseError(f"Value of {value} is less than the allowed minimum of {self.minimum}")
        if self.maximum is not None:
            maximums = self.maximum if isinstance(self.maximum, list) else [self.maximum] * len(values)
            for single_value, maximum in zip(values, maximums):
                if single_value > maximum:
                    raise ParseError(f"Value of {value} is greater than the allowed maximum of {self.maximum}")
        super().validate_value(value)

    def parse_extra_properties(self, property_set: dict) -> PropertySet:
        """Parse properties specific to numeric attribute types

        Args:
            property_set: (NAME, VALUE) for properties the attribute type might support
        Raises:
            ParseError: If any of the extra properties are invalid or not recognized
        """
        remaining_properties = self.parse_min_max_properties(property_set)
        unchecked_properties = super().parse_extra_properties(remaining_properties)
        return unchecked_properties

    def parse_min_max_properties(self, property_set: dict) -> PropertySet:
        """Check that the min and max property values in property_set are acceptable values (or not present).

        Args:
            property_set: (KEY:VALUE) set from which the min and max will be extracted
        Returns:
            The subset of property_set not checked by this method
        """
        unchecked_set = {}
        for property_name, property_value in property_set.items():
            if property_name in [KEY_ATTR_MINIMUM, KEY_ATTR_MAXIMUM]:
                try:
                    # Min/Max values are applied to all array members, but elements must be specified individually
                    self.validate_value_nested(property_value, [])
                except ParseError as error:
                    raise ParseError(f"Setting {property_name} on {self.name}") from error
                setattr(self, property_name, property_value)
            else:
                unchecked_set[property_name] = property_value
        return unchecked_set

    def cpp_includes(self) -> List[str]:
        """Numeric tuples can use the core 'tuple' class to provide some helpful support methods"""
        regular_includes = super().cpp_includes()
        if self.tuple_count > 1:
            regular_includes.append("omni/graph/core/tuple.h")
        return regular_includes

    def empty_base_value(self) -> Union[float, int]:
        """Returns an empty value of the current attribute type without tuples or arrays"""
        return 0.0 if self.numerical_type() in [self.TYPE_DECIMAL, self.TYPE_FLOAT] else 0
| 7,079 | Python | 40.893491 | 114 | 0.623817 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/AnyAttributeManager.py | """
Contains the support class for managing attributes whose data is any type of data, determined at runtime
"""
from typing import List
from ..keys import CudaPointerValues, MemoryTypeValues
from ..utils import _EXTENDED_TYPE_ANY, IndentedOutput, to_usd_docs, value_as_usd
from .AttributeManager import AttributeManager, CppConfiguration, CudaConfiguration
class AnyAttributeManager(AttributeManager):
    """
    Support class for attributes whose type is only determined at runtime.

    Most of the generated code is removed for this type of attribute since the interface types are not yet known.
    """

    OGN_TYPE = "any"

    CPP_CONFIGURATION = {
        # Type information is overridden but the include file is important to specialize
        "any": CppConfiguration(None, include_files=["omni/graph/core/ogn/UsdTypes.h"])
    }
    CUDA_CONFIGURATION = {"any": CudaConfiguration(None, cast_required=False)}

    # ----------------------------------------------------------------------
    def requires_default(self):
        """Extended types never need default values as their data types are not known in advance"""
        return False

    # ----------------------------------------------------------------------
    def validate_value(self, value):
        """All values are welcome as the real type is not known until runtime"""
        return True

    # ----------------------------------------------------------------------
    def validate_value_structure(self, value_to_validate):
        """The structure cannot be validated until runtime when the type is resolved"""
        return True

    # ----------------------------------------------------------------------
    @staticmethod
    def array_depths_supported() -> List[int]:
        """The meaning of an array of mixed union types is unclear and will not be supported at this time"""
        return [0]

    # ----------------------------------------------------------------------
    @staticmethod
    def tuples_supported() -> List[int]:
        """USD supports only these tuples natively so restrict support to them for now"""
        return [1]

    # ----------------------------------------------------------------------
    def cpp_configuration(self) -> CppConfiguration:
        """Returns the C++ configuration data that applies to the attribute type implemented by this manager

        If no implementation is defined then return a minimal default configuration.
        """
        try:
            return self.CPP_CONFIGURATION["any"]
        except (AttributeError, KeyError):
            # BUGFIX: also catch KeyError - a subclass that redefines CPP_CONFIGURATION without an
            # "any" entry raises KeyError, not AttributeError, and would previously escape the fallback
            return CppConfiguration("any")

    # ----------------------------------------------------------------------
    def cpp_extended_type(self):
        """Returns the extended type identifier for C++ types"""
        return "kExtendedAttributeType_Any"

    # ----------------------------------------------------------------------
    def cpp_base_type_name(self):
        """Returns a string with the C++ type of the attribute data

        This value relies on the fact that the group names correspond to the template parameters for RuntimeAttribute.
        """
        template_args = [self.attribute_group, MemoryTypeValues.CPP[self.memory_storage()]]
        if self.cuda_pointer_type is not None:
            template_args.append(CudaPointerValues.CPP[self.cuda_pointer_type])
        return f"ogn::RuntimeAttribute<{', '.join(template_args)}>"

    # ----------------------------------------------------------------------
    def cpp_element_type_name(self) -> str:
        """The configuration is all manual here so override the default method"""
        return self.cpp_base_type_name()

    # ----------------------------------------------------------------------
    def cpp_includes(self) -> List[str]:
        """Tack on the include implementing the runtime attribute wrappers"""
        regular_includes = super().cpp_includes()
        regular_includes.append("omni/graph/core/ogn/RuntimeAttribute.h")
        return regular_includes

    # ----------------------------------------------------------------------
    def cpp_accessor_on_cpu(self) -> bool:
        """Extended type wrappers provide a type-casting accessor that will always live on the CPU"""
        return True

    # ----------------------------------------------------------------------
    def fabric_data_variable_name(self) -> str:
        """This type uses a local variable to encapsulate the Fabric data so reference that instead."""
        return f"{self.cpp_variable_name()}_"

    # ----------------------------------------------------------------------
    def datamodel_accessor_constructor_args(self) -> List[str]:
        """The runtime attribute is a local variable so the pointer has to be added to the constructor"""
        return [f"&{self.fabric_data_variable_name()}"] + super().datamodel_accessor_constructor_args()

    # ----------------------------------------------------------------------
    def datamodel_local_variables(self):
        """The runtime attributes require a wrapper for data access (RuntimeAttribute). Create one here to avoid
        the overhead of recreating it every time access to it is needed, and to avoid needing a parallel set of
        accessor classes just for extended attribute types.
        """
        return [f"{self.fabric_raw_type()} {self.fabric_data_variable_name()}{{}};"]

    # ----------------------------------------------------------------------
    def has_fixed_type(self) -> bool:
        """Variable typed attributes have runtime type identification"""
        return False

    # ----------------------------------------------------------------------
    def ogn_type(self) -> str:
        """Returns a string containing the fully expanded name of this attribute type in a .ogn file"""
        # Annotation fixed from List to str - this method always returns the literal "any"
        return "any"

    # ----------------------------------------------------------------------
    def python_extended_type(self):
        """Returns the extended type identifier for Python attribute types"""
        return (_EXTENDED_TYPE_ANY, "any")

    # ----------------------------------------------------------------------
    def python_imports(self) -> List[str]:
        """Return a list of modules to import in the Python header for proper parsing of this type"""
        return super().python_imports() + ["from typing import Any"]

    # ----------------------------------------------------------------------
    def python_type_name(self):
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("Any")

    # ----------------------------------------------------------------------
    def generate_python_property_code(self, out: IndentedOutput):
        """Emits the generated code implementing a readable property for this extended attribute.

        This class overrides the default behaviour because it needs a wrapper class to access the internal
        functionality of the runtime data.
        """
        property_name = self.python_property_name()
        out.write()
        out.write("@property")
        if out.indent(f"def {property_name}(self) -> og.RuntimeAttribute:"):
            out.write(f'"""Get the runtime wrapper class for the attribute {self.namespace}.{property_name}"""')
            out.write(
                f"return og.RuntimeAttribute(self._attributes.{property_name}.get_attribute_data(),"
                f" self._context, {self.is_read_only()})"
            )
            out.exdent()
        # For this type of attribute a setter can forward the assignment to the value of the attribute, where legal
        out.write()
        out.write(f"@{property_name}.setter")
        if out.indent(f"def {property_name}(self, value_to_set: Any):"):
            out.write(f'"""Assign another attribute\'s value to outputs.{property_name}"""')
            if out.indent("if isinstance(value_to_set, og.RuntimeAttribute):"):
                out.write(f"self.{property_name}.value = value_to_set.value")
                out.exdent()
            if out.indent("else:"):
                out.write(f"self.{property_name}.value = value_to_set")
                out.exdent()
            out.exdent()

    # ----------------------------------------------------------------------
    def generate_python_validation(self, out: IndentedOutput):
        """Emit code that checks to make sure the attribute type is resolved before computing"""
        if self.do_validation:
            name = f"{self.namespace}.{self.python_property_name()}"
            if out.indent(f"if db.{name}.type.base_type == og.BaseDataType.UNKNOWN:"):
                out.write(f"db.log_warning('Required extended attribute {self.name} is not resolved, compute skipped')")
                out.write("return False")
                out.exdent()

    # ----------------------------------------------------------------------
    def usd_type_name(self):
        """As the type of the attribute is not known at load time use a token value to describe the accepted types"""
        return "token"

    # ----------------------------------------------------------------------
    def usd_type_accepted_description(self) -> str:
        """Returns a string that will be the default value of the USD token, describing accepted types"""
        return "any"

    # ----------------------------------------------------------------------
    def emit_usd_declaration(self, out):
        """USD declaration for extended types use a placeholder type of token so the code path must be replaced

        Args:
            out: Output handler where the USD will be emitted
        """
        usd_name = self.usd_name()
        usd_type = self.usd_type_name()
        docs = to_usd_docs(self.description)
        if self.array_depth == 0:
            default = value_as_usd(self.usd_type_accepted_description())
        else:
            default = value_as_usd([self.usd_type_accepted_description()] * self.array_depth)
        if out.indent(f"custom {usd_type} {usd_name} = {default} ("):
            out.write(docs)
            out.exdent(")")
| 10,050 | Python | 48.029268 | 120 | 0.532139 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/FrameAttributeManager.py | """
Contains the support class for managing attributes whose data is cartesian frames
"""
from typing import Any, List
from .AttributeManager import CppConfiguration, CudaConfiguration
from .MatrixAttributeManager import MatrixAttributeManager
class FrameAttributeManager(MatrixAttributeManager):
    """Support class for the attribute with role "frame" or "transform"

    These are aliases for "matrixd[4]", with support for different USD naming
    """

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "frame[4]": CppConfiguration(
            "pxr::GfMatrix4d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eFrame"
        ),
        "transform[4]": CppConfiguration(
            "pxr::GfMatrix4d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eTransform"
        ),
    }
    CUDA_CONFIGURATION = {
        "frame[4]": CudaConfiguration("Matrix4d", include_files=["omni/graph/core/cuda/Matrix4d.h"], role="eFrame"),
        "transform[4]": CudaConfiguration(
            "Matrix4d", include_files=["omni/graph/core/cuda/Matrix4d.h"], role="eTransform"
        ),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True return as the data type used to set the value in USD attributes, else Python values
        """
        # Frames and transforms use distinct sample numbers so the two roles can be told apart in tests
        if self.attribute_type_name == "frame":
            diagonal, off_diagonal, alt_diagonal, alt_off_diagonal = 1.0, 0.0, 2.0, 3.0
        else:
            diagonal, off_diagonal, alt_diagonal, alt_off_diagonal = 1.5, 0.5, 2.5, 3.5
        size = self.tuple_count
        values = [
            tuple(tuple(diagonal if row == column else off_diagonal for column in range(size)) for row in range(size)),
            tuple(
                tuple(alt_diagonal if row == column else alt_off_diagonal for column in range(size))
                for row in range(size)
            ),
        ]
        if for_usd:
            from pxr import Gf

            matrix_type = getattr(Gf, f"Matrix{size}d")
            values = [matrix_type(*values[0]), matrix_type(*values[1])]
        if self.array_depth > 0:
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    @staticmethod
    def roles():
        """Return a list of valid role names for this type"""
        return ["frame", "transform"]

    @classmethod
    def is_matrix_type(cls) -> bool:
        """Frames and Transforms are matrix types"""
        return True

    def suffix(self):
        """Always uses matrix4d"""
        return "d"

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        if self.attribute_type_name == "frame":
            return "og.Database.ROLE_FRAME"
        return "og.Database.ROLE_TRANSFORM"

    @staticmethod
    def tuples_supported() -> List[int]:
        """This type of matrix can only be 4d"""
        return [4]

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return f"Frame{self.suffix()}"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        # transform4d is not a valid USD type, but for our purposes it's the same as frame4d
        return self.usd_add_arrays(f"frame{self.tuple_count}{self.suffix()}")
| 3,489 | Python | 37.351648 | 119 | 0.610777 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/NormalAttributeManager.py | """
Contains the support class for managing attributes whose data is arrays interpreted as surface normals
"""
from typing import Any, List
from .AttributeManager import CppConfiguration, CudaConfiguration
from .RoleAttributeManager import RoleAttributeManager
class NormalAttributeManager(RoleAttributeManager):
    """Support class for all attributes of type normal

    This encompasses all legal USD types of normal(3|4)(d|f|h)
    """

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "normald[3]": CppConfiguration(
            "pxr::GfVec3d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eNormal"
        ),
        "normalf[3]": CppConfiguration(
            "pxr::GfVec3f", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eNormal"
        ),
        "normalh[3]": CppConfiguration(
            "pxr::GfVec3h", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eNormal"
        ),
    }
    # BUGFIX: the CUDAUtils.h include path was missing the "cuda/" directory segment used by every
    # other manager in this package (e.g. DoubleAttributeManager, FrameAttributeManager).
    # NOTE(review): the [4] entries are currently unreachable because tuples_supported() only
    # allows 3 members - kept in case 4-component normals are enabled later.
    CUDA_CONFIGURATION = {
        "normald[3]": CudaConfiguration(
            "double3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eNormal"
        ),
        "normalf[3]": CudaConfiguration(
            "float3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eNormal"
        ),
        "normalh[3]": CudaConfiguration(
            "__half3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eNormal"
        ),
        "normald[4]": CudaConfiguration(
            "double4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eNormal"
        ),
        "normalf[4]": CudaConfiguration(
            "float4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eNormal"
        ),
        "normalh[4]": CudaConfiguration(
            "__half4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eNormal"
        ),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True return as the data type used to set the value in USD attributes, else Python values
        """
        # Base sample pairs differ per precision suffix so each variation is distinguishable in tests
        values = {
            "d": [0.01625, 0.14125],
            "f": [0.125, 0.1625],
            "h": [0.5, 0.25],
        }[self.suffix()]
        values = [tuple((value + 0.125 * i) for i in range(self.tuple_count)) for value in values]
        if for_usd:
            from pxr import Gf

            gf_type = getattr(Gf, f"Vec{self.tuple_count}{self.suffix()}")
            values = [gf_type(*value) for value in values]
        if self.array_depth > 0:
            values = [[value, value[::-1]] for value in values]
        return [[value] for value in values] if for_usd else values

    @staticmethod
    def roles():
        """Return a list of valid role names for this type"""
        return ["normald", "normalf", "normalh"]

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_NORMAL"

    @staticmethod
    def tuples_supported() -> List[int]:
        """This type can only have 3 members, not 1"""
        return [3]

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return f"Normal{self.suffix()}"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_arrays(f"normal{self.tuple_count}{self.suffix()}")
| 3,690 | Python | 39.560439 | 119 | 0.607588 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/Int64AttributeManager.py | """
Contains the support class for managing attributes whose data is 64 bit integers
"""
from typing import Any
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, values_in_range
from .parsing import is_type_or_list_of_types
class Int64AttributeManager(NumericAttributeManager):
    """Support class for attributes of type 64-bit integer"""

    OGN_TYPE = "int64"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "int64": CppConfiguration("int64_t", cast_required=False),
    }
    CUDA_CONFIGURATION = {
        "int64": CudaConfiguration("int64_t", include_files=["stdint.h"], cast_required=False),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: When True each sample is wrapped in a single-element list for USD assignment
        """
        samples = [-46, -64]
        if self.tuple_count > 1:
            samples = [tuple(base + offset for offset in range(self.tuple_count)) for base in samples]
        if self.array_depth > 0:
            # Two array samples: the values in order, then the same values reversed
            samples = [samples, [samples[1], samples[0]]]
        if for_usd:
            return [[sample] for sample in samples]
        return samples

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        return NumericAttributeManager.TYPE_INTEGER

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid integer value"""
        if not is_type_or_list_of_types(value, int, self.tuple_count):
            raise ParseError(f"Value {value} on an int64[{self.tuple_count}] attribute is not a matching type")
        # Inclusive bounds of a signed 64-bit integer
        if not values_in_range(value, -(2 ** 63), 2 ** 63 - 1):
            raise ParseError(f"Value {value} on a 64-bit integer[{self.tuple_count}] attribute is out of range")
        super().validate_value(value)

    def python_type_name(self):
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("int")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "Int64"

    def usd_type_name(self):
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("int64")
| 2,585 | Python | 41.393442 | 112 | 0.671954 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/ExecutionAttributeManager.py | """
Contains the support class for managing attributes whose data represents execution (evaluation trigger) state
"""
from typing import Any, List
from ..utils import ParseError, to_usd_docs
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, values_in_range
from .parsing import is_type_or_list_of_types
from .RoleAttributeManager import RoleAttributeManager
class ExecutionAttributeManager(RoleAttributeManager):
    """Support class for attributes of type execution

    These are uint and uint[] types with the internal Execution Role
    """

    OGN_TYPE = "execution"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {"execution": CppConfiguration("uint32_t", cast_required=False, role="eExecution")}
    CUDA_CONFIGURATION = {"execution": CudaConfiguration("int", cast_required=False, role="eExecution")}

    @staticmethod
    def roles():
        """Return a list of valid role names for this type"""
        return ["execution"]

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        return NumericAttributeManager.TYPE_UNSIGNED_INTEGER

    @staticmethod
    def array_depths_supported() -> List[int]:
        """Arrays of execution values don't make any sense so only depth 0 is legal"""
        return [0]

    @staticmethod
    def tuples_supported() -> List[int]:
        """Executions don't have tuples"""
        return [1]

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: When True each sample is wrapped in a single-element list for USD assignment
        """
        # execution must be in the range of the enum ExecutionAttributeState
        state_values = [0, 1, 2]
        if for_usd:
            return [[state] for state in state_values]
        return state_values

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid uint value"""
        if not is_type_or_list_of_types(value, int, self.tuple_count):
            raise ParseError(f"Value {value} on a uint[{self.tuple_count}] attribute is not a matching type")
        if not values_in_range(value, 0, 4294967295):
            raise ParseError(f"Value {value} on a uint[{self.tuple_count}] attribute is out of range")
        super().validate_value(value)

    def requires_default(self):
        """Execution attributes are transient by nature, we shouldn't really store them at all"""
        return False

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_EXECUTION"

    def cuda_base_type_name(self) -> str:
        """Returns a string with the CUDA base type of the attribute data"""
        return "uint32_t"

    def sdf_type_name(self) -> str:
        """Execution attribs have no pxr::SdfValueTypeName"""
        return None

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "UInt"

    def python_type_name(self) -> str:
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("int")

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        # FIXME: This isn't accurate for execution case, the usd type is 'uint' but we are passing in 'execution' when
        # creating the attribute so that it can be recognized as a uint with eExecution role.
        # (see emit_usd_declaration())
        return "execution"

    def emit_usd_declaration(self, out) -> List[str]:
        """Print a declaration for this attribute in USD

        Args:
            out: Output handler where the USD will be emitted
        """
        try:
            attribute_usd_name = self.usd_name()
        except ParseError:
            # Attributes without USD representations can be skipped
            return
        # Overriding default behavior of using self.usd_type_name() - the attribute is declared as a plain "uint"
        docs_text = to_usd_docs(self.description)
        default_text = self.usd_default_value()
        if out.indent(f"custom uint {attribute_usd_name}{default_text} ("):
            out.write(docs_text)
        out.exdent(")")
| 4,478 | Python | 38.637168 | 118 | 0.656543 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/BundleAttributeManager.py | """
Support for handling attributes of type "bundle" - i.e. attributes whose job it is to encapsulate arbitrary
collections of other attributes, including other bundle attributes.
"""
from typing import List
from ..keys import CudaPointerValues, MemoryTypeValues
from ..utils import IndentedOutput, ParseError, ensure_quoted, to_usd_docs
from .AttributeManager import AttributeManager, CppConfiguration
from .naming import INPUT_GROUP, OUTPUT_GROUP, STATE_GROUP
class BundleAttributeManager(AttributeManager):
    """
    Support class for attributes of type attribute bundle.
    This type of attribute is more complex than standard attributes since it has many more features to handle.
    """

    OGN_TYPE = "bundle"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        # Type information is overridden but the include file is important to specialize
        "bundle": CppConfiguration(None, include_files=["omni/graph/core/ogn/UsdTypes.h"])
    }

    def requires_default(self):
        """Bundles never need default values as nothing other than an empty bundle makes sense"""
        return False

    @staticmethod
    def array_depths_supported() -> List[int]:
        """Bundle arrays are not yet supported in Fabric"""
        return [0]

    def memory_storage(self) -> str:
        """Bundle handles will always be stored on the CPU as that is where Fabric forces them"""
        return MemoryTypeValues.CPU

    def cpp_base_type_name(self):
        """This type name switches based on read status so this has to override the default method"""
        return "ConstBundleHandle" if self.is_read_only() else "BundleHandle"

    def cpp_element_type_name(self) -> str:
        """The configuration is all manual here so override the default method"""
        return self.cpp_base_type_name()

    def cpp_default_initializer(self):
        """The bundle doesn't really have a default possible so initialize it to an invalid handle or empty array."""
        return "nullptr, 0" if self.array_depth > 0 else "BundleHandle::invalidValue()"

    def cpp_includes(self) -> List[str]:
        """Tack on the include implementing the bundle wrappers"""
        includes = super().cpp_includes()
        includes.append("omni/graph/core/ogn/Bundle.h")
        return includes

    def cpp_accessor_on_cpu(self) -> bool:
        """Bundle wrappers provide a type-casting accessor that will always live on the CPU"""
        return True

    def cpp_wrapper_class(self) -> str:
        """Returns a (wrapper class name, template argument list) pair used to access bundle data in C++"""
        template_args = [self.attribute_group, MemoryTypeValues.CPP[self.memory_type]]
        if self.cuda_pointer_type is not None:
            template_args.append(CudaPointerValues.CPP[self.cuda_pointer_type])
        return ("ogn::BundleAttribute", template_args)

    def cpp_set_handle_at_runtime(self) -> bool:
        """Bundle types do not use attribute handles"""
        return False

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid bundle value"""
        raise ParseError("Bundles do not have values")

    def cuda_includes(self) -> List[str]:
        """The bundle data is the same type of data in CUDA as it is in C++"""
        includes = super().cuda_includes()
        includes.append("omni/graph/core/Handle.h")
        return includes

    def cuda_base_type_name(self) -> str:
        """Returns a string with the CUDA base type of the attribute data"""
        return "omni::graph::core::ConstBundleHandle" if self.is_read_only() else "omni::graph::core::BundleHandle"

    def cuda_element_type_name(self) -> str:
        """The configuration is all manual here so override the default method"""
        return self.cuda_base_type_name()

    def fabric_data_variable_name(self) -> str:
        """Returns a string containing the generated name of the Fabric pointer for this attribute."""
        return f"{self.cpp_variable_name()}.m_bundleHandle"

    def fabric_pointer_exists(self) -> str:
        """Return a string that checks for the existence of the Fabric pointer variable value"""
        return f"{self.fabric_data_variable_name()}.isValid()"

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_BUNDLE"

    def usd_name(self) -> str:
        """Bundled output and state attributes are represented as virtual prims in USD so they cannot have namespaces"""
        # Output and state bundles are prims so they have to follow the prim naming restrictions (no colons allowed)
        if self.attribute_group == INPUT_GROUP:
            return self.name
        return self.name.replace(":", "_")

    def create_type_name(self) -> str:
        """Bundled attributes have a special name when creating so that they can be instantiated differently"""
        return "bundle"

    # ----------------------------------------------------------------------
    def generate_python_property_code(self, out: IndentedOutput):
        """Emits the generated code implementing a property for this bundle attribute.
        This class overrides the default behaviour because it needs a wrapper class to access the internal
        functionality of the bundle.

        Args:
            out: Output handler into which the generated Python property code is written
        """
        property_name = self.python_property_name()
        out.write()
        out.write("@property")
        if out.indent(f"def {property_name}(self) -> og.BundleContents:"):
            out.write(f'"""Get the bundle wrapper class for the attribute {self.namespace}.{property_name}"""')
            out.write(f"return self.__bundles.{property_name}")
            out.exdent()
        # No setters at all for read only bundles
        if self.is_read_only():
            return
        # Fix: property_name was being redundantly recomputed here; it is unchanged from the getter above
        out.write()
        out.write(f"@{property_name}.setter")
        if out.indent(f"def {property_name}(self, bundle: og.BundleContents):"):
            out.write(f'"""Overwrite the bundle attribute {self.namespace}.{property_name} with a new bundle"""')
            if out.indent("if not isinstance(bundle, og.BundleContents):"):
                out.write('carb.log_error("Only bundle attributes can be assigned to another bundle attribute")')
                out.exdent()
            out.write(f"self.__bundles.{property_name}.bundle = bundle")
            out.exdent()

    # ----------------------------------------------------------------------
    def python_imports(self) -> List[str]:
        """Return a list of modules to import in the Python header for proper parsing of this type"""
        return super().python_imports() + ["import carb"]

    # ----------------------------------------------------------------------
    def emit_usd_declaration(self, out: IndentedOutput):
        """Print a declaration for this attribute in USD

        Args:
            out: Output handler where the USD will be emitted
        """
        try:
            usd_name = self.usd_name()
        except ParseError:
            # Attributes without USD representations can be skipped
            return
        docs = to_usd_docs(self.description)
        # Bundle attributes are stored as virtual prims, not actual USD attributes, so they require a
        # different type of declaration. Inputs are relationships, outputs are defined as nested prims.
        if self.attribute_group == INPUT_GROUP:
            if out.indent(f"custom rel {usd_name} ("):
                out.write(docs)
            out.exdent(")")
        else:
            # Prim names cannot have colons in them (usd_name() already stripped them for non-inputs;
            # this is kept as a defensive no-op safeguard)
            usd_name = usd_name.replace(":", "_")
            if self.attribute_group == OUTPUT_GROUP:
                side = "Output"
            elif self.attribute_group == STATE_GROUP:
                side = "State"
            else:
                side = "Unknown"
            if out.indent(f"def {side} {ensure_quoted(usd_name)} ("):
                out.write(docs)
            out.exdent(")")
            out.write("{")
            out.write("}")
| 8,205 | Python | 44.087912 | 120 | 0.628154 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/RoleAttributeManager.py | """
Contains the support class for managing attributes whose data is values with specific interpretations
"""
from typing import List
from ..utils import ParseError
from .NumericAttributeManager import NumericAttributeManager
# ======================================================================
class RoleAttributeManager(NumericAttributeManager):
    """Base class for all attribute types that assume different names to tag their special roles"""

    def __init__(self, attribute_name: str, attribute_type_name: str):
        """Initialize the role-based attribute information

        Args:
            attribute_name: Name to use for this attribute
            attribute_type_name: Unique name for this attribute type

        Raises:
            ParseError: The type name is not one of the roles this class supports
        """
        super().__init__(attribute_name, attribute_type_name)
        legal_roles = self.roles()
        if attribute_type_name not in legal_roles:
            raise ParseError(f"Only {'|'.join(legal_roles)} are legal - {attribute_type_name} is not")

    def suffix(self):
        """Returns the role suffix, for easy type construction"""
        return self.attribute_type_name[-1]

    def tuples_allowed(self) -> List[int]:
        """No tuples are currently supported for this type"""
        return []

    def ogn_base_type(self) -> str:
        """Returns a string containing the fully expanded name of this attribute type in a .ogn file"""
        return self.attribute_type_name

    def ogn_type(self) -> str:
        """Returns a string containing the fully expanded name of this attribute type in a .ogn file"""
        name_pieces = [self.attribute_type_name]
        if self.tuple_count > 1:
            name_pieces.append(f"[{self.tuple_count}]")
        name_pieces.append("[]" * self.array_depth)
        return "".join(name_pieces)

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        if self.suffix() == "d":
            return NumericAttributeManager.TYPE_DECIMAL
        return NumericAttributeManager.TYPE_FLOAT

    @staticmethod
    def roles():
        """Return a list of valid role names for this type"""
        return []

    def python_type_name(self) -> str:
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("float")
| 2,271 | Python | 38.859648 | 115 | 0.641127 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/management.py | """
Collection of utilities and constants to handle interaction with the set of all attribute managers
.. data:: ATTRIBUTE_MANAGERS
Dictionary of {BASE_TYPE: AttributeManager} for all supported root attribute types. A root type is
"int" but not "int[2]". Tuple counts and array depths are parsed by the manager's constructor.
"""
import json
import re
from contextlib import suppress
from typing import Dict, List, Optional, Tuple, Union
from ..keys import AttributeKeys
from ..utils import MetadataKeys, ParseError, UnimplementedError, attrib_description_to_string, check_memory_type
from .AnyAttributeManager import AnyAttributeManager
from .AttributeManager import AttributeManager
from .BoolAttributeManager import BoolAttributeManager
from .BundleAttributeManager import BundleAttributeManager
from .ColorAttributeManager import ColorAttributeManager
from .DoubleAttributeManager import DoubleAttributeManager
from .ExecutionAttributeManager import ExecutionAttributeManager
from .FloatAttributeManager import FloatAttributeManager
from .FrameAttributeManager import FrameAttributeManager
from .HalfAttributeManager import HalfAttributeManager
from .Int64AttributeManager import Int64AttributeManager
from .IntAttributeManager import IntAttributeManager
from .MatrixAttributeManager import MatrixAttributeManager
from .NormalAttributeManager import NormalAttributeManager
from .ObjectIdAttributeManager import ObjectIdAttributeManager
from .PathAttributeManager import PathAttributeManager
from .PointAttributeManager import PointAttributeManager
from .QuaternionAttributeManager import QuaternionAttributeManager
from .StringAttributeManager import StringAttributeManager
from .TexCoordAttributeManager import TexCoordAttributeManager
from .TimeCodeAttributeManager import TimeCodeAttributeManager
from .TokenAttributeManager import TokenAttributeManager
from .UCharAttributeManager import UCharAttributeManager
from .UInt64AttributeManager import UInt64AttributeManager
from .UIntAttributeManager import UIntAttributeManager
from .UnionAttributeManager import UnionAttributeManager
from .VectorAttributeManager import VectorAttributeManager
# ======================================================================
# Collection of all supported attribute type classes
# Dictionary of {OGN_BASE_TYPE_NAME: AttributeManager subclass}; keys come from each manager's OGN_TYPE
ATTRIBUTE_MANAGERS = {
    support_class.OGN_TYPE: support_class
    for support_class in [
        AnyAttributeManager,
        BoolAttributeManager,
        BundleAttributeManager,
        DoubleAttributeManager,
        FloatAttributeManager,
        HalfAttributeManager,
        IntAttributeManager,
        Int64AttributeManager,
        ObjectIdAttributeManager,
        PathAttributeManager,
        StringAttributeManager,
        TimeCodeAttributeManager,
        TokenAttributeManager,
        UCharAttributeManager,
        UIntAttributeManager,
        UInt64AttributeManager,
        UnionAttributeManager,
    ]
}
# Role-based attributes have more than one possible type name so they have to be iterated
# (e.g. the color manager registers under "colord", "colorf", and "colorh")
ATTRIBUTE_MANAGERS.update({role: ColorAttributeManager for role in ColorAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: MatrixAttributeManager for role in MatrixAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: FrameAttributeManager for role in FrameAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: NormalAttributeManager for role in NormalAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: PointAttributeManager for role in PointAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: QuaternionAttributeManager for role in QuaternionAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: TexCoordAttributeManager for role in TexCoordAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: VectorAttributeManager for role in VectorAttributeManager.roles()})
ATTRIBUTE_MANAGERS.update({role: ExecutionAttributeManager for role in ExecutionAttributeManager.roles()})
ALL_ATTRIBUTE_TYPES = ATTRIBUTE_MANAGERS  # Backward compatibility alias for the old public name
# Attribute types that are OGN shorthand for a list of types, usable within union type definition list
# NOTE(review): the "scalers" spelling (vs "scalars") is part of the published group-name strings that
# .ogn files reference directly, so it must not be corrected without a deprecation path.
ATTRIBUTE_UNION_GROUPS = {
    "integral_scalers": ["uchar", "int", "uint", "uint64", "int64"],
    "integral_tuples": ["int[2]", "int[3]", "int[4]"],
    "decimal_scalers": ["double", "float", "half", "timecode"],
    "decimal_tuples": [
        "double[2]",
        "double[3]",
        "double[4]",
        "float[2]",
        "float[3]",
        "float[4]",
        "half[2]",
        "half[3]",
        "half[4]",
        "colord[3]",
        "colord[4]",
        "colorf[3]",
        "colorf[4]",
        "colorh[3]",
        "colorh[4]",
        "normald[3]",
        "normalf[3]",
        "normalh[3]",
        "pointd[3]",
        "pointf[3]",
        "pointh[3]",
        "texcoordd[2]",
        "texcoordd[3]",
        "texcoordf[2]",
        "texcoordf[3]",
        "texcoordh[2]",
        "texcoordh[3]",
        "quatd[4]",
        "quatf[4]",
        "quath[4]",
        "vectord[3]",
        "vectorf[3]",
        "vectorh[3]",
    ],
    "matrices": ["matrixd[3]", "matrixd[4]", "transform[4]", "frame[4]"],
}
# Add meta-level union groups, composed from the basic groups above so the definitions stay in sync
ATTRIBUTE_UNION_GROUPS["integral_array_elements"] = (
    ATTRIBUTE_UNION_GROUPS["integral_scalers"] + ATTRIBUTE_UNION_GROUPS["integral_tuples"]
)
ATTRIBUTE_UNION_GROUPS["integral_arrays"] = [tp + "[]" for tp in ATTRIBUTE_UNION_GROUPS["integral_array_elements"]]
ATTRIBUTE_UNION_GROUPS["integrals"] = (
    ATTRIBUTE_UNION_GROUPS["integral_array_elements"] + ATTRIBUTE_UNION_GROUPS["integral_arrays"]
)
ATTRIBUTE_UNION_GROUPS["decimal_array_elements"] = (
    ATTRIBUTE_UNION_GROUPS["decimal_scalers"] + ATTRIBUTE_UNION_GROUPS["decimal_tuples"]
)
ATTRIBUTE_UNION_GROUPS["decimal_arrays"] = [tp + "[]" for tp in ATTRIBUTE_UNION_GROUPS["decimal_array_elements"]]
ATTRIBUTE_UNION_GROUPS["decimals"] = (
    ATTRIBUTE_UNION_GROUPS["decimal_array_elements"] + ATTRIBUTE_UNION_GROUPS["decimal_arrays"]
)
ATTRIBUTE_UNION_GROUPS["numeric_scalers"] = (
    ATTRIBUTE_UNION_GROUPS["integral_scalers"] + ATTRIBUTE_UNION_GROUPS["decimal_scalers"]
)
ATTRIBUTE_UNION_GROUPS["numeric_tuples"] = (
    ATTRIBUTE_UNION_GROUPS["integral_tuples"] + ATTRIBUTE_UNION_GROUPS["decimal_tuples"]
)
ATTRIBUTE_UNION_GROUPS["numeric_array_elements"] = (
    ATTRIBUTE_UNION_GROUPS["numeric_scalers"]
    + ATTRIBUTE_UNION_GROUPS["numeric_tuples"]
    + ATTRIBUTE_UNION_GROUPS["matrices"]
)
ATTRIBUTE_UNION_GROUPS["numeric_arrays"] = [tp + "[]" for tp in ATTRIBUTE_UNION_GROUPS["numeric_array_elements"]]
ATTRIBUTE_UNION_GROUPS["numerics"] = (
    ATTRIBUTE_UNION_GROUPS["numeric_array_elements"] + ATTRIBUTE_UNION_GROUPS["numeric_arrays"]
)
ATTRIBUTE_UNION_GROUPS["array_elements"] = ATTRIBUTE_UNION_GROUPS["numeric_array_elements"] + ["token"]
ATTRIBUTE_UNION_GROUPS["arrays"] = ATTRIBUTE_UNION_GROUPS["numeric_arrays"] + ["token[]"]
# Pattern match for the attribute type names
#   group(1) = Base name of the attribute type (e.g. double, float, ...)
#   group(2) = Element count (None if no element count specified)
#   group(3) = String with matching array brackets - len(group(3))/2 = array depth
RE_ATTRIBUTE_TYPE = re.compile(r"([^\[\]]+)(?:\[([1-9][0-9]{0,2})\])?((?:\[\]){0,2})$")
# ======================================================================
def expand_attribute_union_groups(union_types: List) -> List:
    """Expand any group names appearing in a union type declaration.

    Args:
        union_types: The list of union type names, possibly containing group names

    Returns:
        A new list with every group name replaced by its member types; duplicates are removed
        (ordering is unspecified, as with the set the result is built from)
    """
    expanded_types = set()
    for type_name in union_types:
        expanded_types.update(ATTRIBUTE_UNION_GROUPS.get(type_name, [type_name]))
    return list(expanded_types)
# ======================================================================
def split_attribute_type_name(
    full_type_name: Union[str, List]
) -> Tuple[str, int, int, Optional[Dict[str, AttributeManager]]]:
    """Split a fully qualified attribute type name into its identifying parts.

    Args:
        full_type_name: Fully qualified attribute type consisting of type name
            followed by optional element count as "[X]" and optional arrays as "[]".
            A list denotes a union type; a nested list (e.g. [["float", "double"]])
            denotes an array of union types.

    Returns:
        (base name, element count, array depth, extra_info) extracted from the full name.
        e.g. "int[5][]" returns ("int", 5, 1) and "float[][]" returns ("float", 1, 2).
        If the type manager requires any construction parameters they will be in the last tuple
        element (None means no construction parameters need be passed)

    Raises:
        ParseError: the name passed in doesn't follow the naming pattern
    """
    if isinstance(full_type_name, list):
        # Union declaration - the accepted member types become the manager's construction parameter
        if full_type_name and isinstance(full_type_name[0], list):
            # Nested list syntax means "array of union types"
            union_array_depth = 1
            accepted_types = full_type_name[0]
        else:
            union_array_depth = 0
            accepted_types = full_type_name
        union_managers = {
            union_member: get_attribute_manager_type(union_member)
            for union_member in expand_attribute_union_groups(accepted_types)
        }
        return ("union", 1, union_array_depth, union_managers)
    type_match = RE_ATTRIBUTE_TYPE.match(full_type_name)
    if type_match is None:
        raise ParseError(f"Attribute type name {full_type_name} does not match pattern TYPE{{[X]}}{{[]{{[]}}}}")
    base_name = type_match.group(1)
    tuple_count = int(type_match.group(2)) if type_match.group(2) is not None else 1
    bracket_text = type_match.group(3)
    array_depth = len(bracket_text) // 2 if bracket_text is not None else 0
    return (base_name, tuple_count, array_depth, None)
# ======================================================================
def validate_attribute_type_name(type_name: str, tuple_count: int, array_depth: int):
    """Validate a fully qualified attribute name from its constituent parts.
    Use naming.py:assemble_attribute_type_name to give you the full name once it is validated.
    It would be done here except that would create a circular import problem. Normal usage is
        try:
            validate_attribute_type_name(type_name, tuple_count, array_depth)
            full_name = assemble_attribute_type_name(type_name, tuple_count, array_depth)
        except AttributeError:
            pass

    Args:
        type_name: Base name of the attribute type
        tuple_count: Number of tuple elements in the attribute type
        array_depth: Levels of arrays in the attribute type

    Raises:
        AttributeError: the constituent parts cannot be assembled into a legal attribute type
    """
    try:
        manager_type = ATTRIBUTE_MANAGERS[type_name]
    except KeyError as error:
        raise AttributeError(
            f"Type name {type_name} is not on the recognized list {list(ATTRIBUTE_MANAGERS.keys())}"
        ) from error
    tuples_supported = manager_type.tuples_supported()
    array_depths_supported = manager_type.array_depths_supported()

    def __get_legal_type_names() -> List[str]:
        """Returns the list of valid names for a legal type_name"""
        # Bug fixes: the second assignment previously overwrote the first, dropping the bare type
        # name when a tuple count of 1 is legal, and the type name was being duplicated since the
        # tuple names already embed it (producing e.g. "intint[3]").
        legal_tuple_names = [type_name] if 1 in tuples_supported else []
        legal_tuple_names += [f"{type_name}[{tuple_value}]" for tuple_value in tuples_supported if tuple_value > 1]
        all_legal_names = []
        for legal_depth in array_depths_supported:
            array_tag = "[]" * legal_depth
            for tuple_name in legal_tuple_names:
                all_legal_names.append(f"{tuple_name}{array_tag}")
        return all_legal_names

    if tuple_count not in tuples_supported:
        raise AttributeError(
            f"Tuple count {tuple_count} is not supported. Attribute type must be one of {__get_legal_type_names()}"
        )
    if array_depth not in array_depths_supported:
        raise AttributeError(
            f"Array depth {array_depth} is not supported. Attribute type must be one of {__get_legal_type_names()}"
        )
# ======================================================================
def get_attribute_manager_type(attribute_type: str, attribute_name: str = "inputs:default"):
    """Returns an attribute manager that matches the name and type, with no other data.

    The attribute manager returned will be incomplete, and may not be valid. It is meant to use for things
    like verifying legal values for a type, avoiding the chicken-and-egg scenario of needing the manager to
    supply a legal value in order to set the legal value on that manager.

    Args:
        attribute_type: Fully encoded attribute type value, e.g. "int[3][]"
        attribute_name: Name given to the constructed manager (defaults to a placeholder)

    Returns:
        Populated attribute manager for the given type (all other required values will be set to defaults)

    Raises:
        ParseError: the attribute type is not a recognized legal type
    """
    # Decompose the encoded type name into its constituent parts
    try:
        base_type_name, tuple_count, array_depth, extra_info = split_attribute_type_name(attribute_type)
    except KeyError as error:
        raise ParseError(
            f'Could not decode attribute type "{attribute_type}" for attribute "{attribute_name}"'
        ) from error
    # Confirm the combination of parts denotes a supported type
    try:
        validate_attribute_type_name(base_type_name, tuple_count, array_depth)
    except AttributeError as e:
        raise ParseError(f'Unsupported attribute type "{attribute_type}" for attribute "{attribute_name}"') from e
    manager_class = ATTRIBUTE_MANAGERS[base_type_name]
    if extra_info is None:
        manager = manager_class(attribute_name, base_type_name)
    else:
        # Union managers take the accepted member types as an extra construction parameter
        manager = manager_class(attribute_name, base_type_name, extra_info)
    manager.tuple_count = tuple_count
    manager.array_depth = array_depth
    return manager
# ======================================================================
def get_attribute_manager(attribute_name: str, attribute_data: dict) -> AttributeManager:
    """Deciphers, validates, and provides consistent access to attributes described by a dictionary.
    This function deciphers the type of attribute it contains and then runs a sub-parser appropriate to that type of
    attribute which performs semantic validation (e.g. that a default value is within a min/max range).
    Args:
        attribute_name: Fully namespaced name of the attribute being accessed
        attribute_data: Dictionary containing the attribute interface data, as extracted from the JSON
    Raises:
        ParseError: If there are any errors parsing the attribute description - string contains the problem
    Returns:
        Object that provides an interface to the parsed attribute
    """
    if not isinstance(attribute_data, dict):
        raise ParseError(f"Value of node name key {attribute_name} must be a dictionary")
    # Find the manager for this attribute's type
    attribute_manager = get_attribute_manager_type(attribute_data[AttributeKeys.TYPE], attribute_name=attribute_name)
    # Set the mandatory attribute values in a generic way
    for attr_key in AttributeKeys.MANDATORY:
        try:
            setattr(attribute_manager, attr_key, attribute_data[attr_key])
        except KeyError:
            raise ParseError(f'"{attr_key}" value is mandatory for attribute "{attribute_name}"') from None
    # Check to see if the attribute is optional
    with suppress(KeyError):
        attribute_manager.is_required = not attribute_data[AttributeKeys.OPTIONAL]
    # Check to see if the attribute has been deprecated (the message may be split over a list of lines)
    with suppress(KeyError):
        attribute_manager.deprecation_msg = attribute_data[AttributeKeys.DEPRECATED]
        if isinstance(attribute_manager.deprecation_msg, list):
            attribute_manager.deprecation_msg = " ".join(attribute_manager.deprecation_msg)
        attribute_manager.is_deprecated = True
    # Check to see if the attribute can go into compute without validation
    with suppress(KeyError):
        attribute_manager.do_validation = not attribute_data[AttributeKeys.UNVALIDATED]
    # Check to see if the attribute has metadata
    with suppress(KeyError):
        attribute_manager.parse_metadata(attribute_data[AttributeKeys.METADATA])
    # Store the attribute description in the metadata as there is no direct ABI for it
    attribute_manager.metadata[MetadataKeys.DESCRIPTION] = attrib_description_to_string(attribute_manager.description)
    # Check to see if the attribute is using the shorter definition of uiName metadata
    with suppress(KeyError):
        attribute_manager.metadata[MetadataKeys.UI_NAME] = attribute_data[AttributeKeys.UI_NAME]
    # Check to see if the attribute is overriding its memory type
    with suppress(KeyError):
        attribute_manager.memory_type = check_memory_type(attribute_data[AttributeKeys.MEMORY_TYPE])
        attribute_manager.metadata[MetadataKeys.MEMORY_TYPE] = attribute_manager.memory_type
    # Check to see if the attribute has a default value set
    try:
        attribute_manager.default = attribute_data[AttributeKeys.DEFAULT]
        # Store the default value as metadata so that it can be retrieved to regenerate the file
        attribute_manager.metadata[MetadataKeys.DEFAULT] = json.dumps(attribute_manager.default)
    except KeyError:
        # No explicit default - only synthesize an empty one when the type requires a default
        if attribute_manager.requires_default():
            attribute_manager.default = attribute_manager.empty_value()
    # Process the keys that are not mandatory for the specific discovered attribute type
    extra_properties = {key: value for key, value in attribute_data.items() if key not in AttributeKeys.PROCESSED}
    try:
        unparsed_properties = attribute_manager.parse_extra_properties(extra_properties)
        # Keys beginning with "$" are tolerated as comment entries; anything else left over is an error
        unparsed_errors = [_prop for _prop in unparsed_properties if _prop[0] != "$"]
        if unparsed_errors:
            raise ParseError(f"Unparsed fields {unparsed_errors}")
    except ParseError as error:
        raise ParseError(f"Attribute {attribute_name}") from error
    return attribute_manager
# ======================================================================
def supported_attribute_type_names(do_formatting: bool = False) -> List[str]:
    """Returns a list of the OGN type names of all currently supported attribute types (e.g. "int[3]", not "int3")

    Args:
        do_formatting: If True then group together like tuples and arrays
    Returns:
        List of string representing all currently supported attribute types
    """
    all_type_names = []
    for type_name, manager_class in ATTRIBUTE_MANAGERS.items():
        # USD no longer supports the transformX attribute types so filter them out
        if type_name.startswith("transform"):
            continue
        try:
            # A throwaway manager is instantiated purely to query what it supports
            probe = manager_class("inputs:temp", type_name)
            tuple_counts = probe.tuples_supported()
            array_depths = probe.array_depths_supported()
            # Build every tuple/array combination, e.g. "int", "int[3]", "int[3][]"
            variants = [
                (type_name if count <= 1 else f"{type_name}[{count}]") + "[]" * depth
                for depth in array_depths
                for count in tuple_counts
            ]
        except TypeError:
            # This is hit when you try to get a union type, which should be reported differently anyway
            continue
        except (AttributeError, UnimplementedError):
            continue
        if variants:
            # Either one comma-separated entry per base type, or every variant individually
            if do_formatting:
                all_type_names.append(", ".join(variants))
            else:
                all_type_names.extend(variants)
    all_type_names.sort()
    return all_type_names
# ======================================================================
def formatted_supported_attribute_type_names() -> List[str]:
    """Returns a list of the names of all currently supported attribute types, formatted in lines for easy reading"""
    # The union pseudo-type is appended by hand since it is not reported by the type name query
    return supported_attribute_type_names(do_formatting=True) + ['["A", "B", "C"... = Any one of the listed types]']
# ======================================================================
def split_attribute_list(
    attributes: List[AttributeManager],
) -> Tuple[List[AttributeManager], List[AttributeManager], List[AttributeManager]]:
    """Split a list of attributes into three sets of sublists based on type of data the attribute holds.

    Args:
        attributes: List of attribute managers encapsulating the list of attributes to be split
    Returns:
        Tuple(SingleAttributes, BundleAttributes, RuntimeAttributes)
        SingleAttributes: Attributes containing a single piece of data, including tuples and arrays
        BundleAttributes: Attributes which are a bundle of other attributes, not having any actual data
        RuntimeAttributes: Attributes whose data type is only known at runtime
    """
    # Index 0 = single-data, 1 = bundle, 2 = runtime-typed
    buckets = ([], [], [])
    for attribute in attributes:
        # None entries are silently dropped
        if attribute is None:
            continue
        if not attribute.has_fixed_type():
            bucket_index = 2
        elif attribute.create_type_name() == "bundle":
            bucket_index = 1
        else:
            bucket_index = 0
        buckets[bucket_index].append(attribute)
    return buckets
# ======================================================================
def list_without_runtime_attributes(attributes: List[AttributeManager]) -> List[AttributeManager]:
    """Return the attribute list, filtered to remove all attributes whose data type is determined at runtime.

    Args:
        attributes: List of attribute managers encapsulating the list of attributes to be filtered
    Returns:
        List of attributes whose types are known at compile time
    """
    # None entries are dropped along with the runtime-typed attributes
    return [attribute for attribute in attributes if attribute is not None and attribute.has_fixed_type()]
| 22,695 | Python | 44.482966 | 118 | 0.674994 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/BoolAttributeManager.py | """
Support for handling attributes of type "bool" - i.e. attributes whose data is boolean true/false
values, singly, in tuples, or in arrays.
"""
from typing import Any
from ..utils import ParseError
from .AttributeManager import AttributeManager, CppConfiguration, CudaConfiguration
from .parsing import is_type_or_list_of_types
# ======================================================================
class BoolAttributeManager(AttributeManager):
    """Attribute manager handling data of the boolean type"""

    OGN_TYPE = "bool"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {"bool": CppConfiguration("bool", cast_required=False)}
    CUDA_CONFIGURATION = {"bool": CudaConfiguration("bool", cast_required=False)}

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: Ignored - values are set as-is in Python
        """
        samples = [False, True]
        if self.array_depth > 0:
            # Two sample arrays, the second with the element order reversed
            samples = [samples, [True, False]]
        if for_usd:
            return [[sample] for sample in samples]
        return samples

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid boolean value"""
        if is_type_or_list_of_types(value, bool, self.tuple_count):
            super().validate_value(value)
            return
        raise ParseError(f"Value {value} on a boolean[{self.tuple_count}] attribute is not a matching type")

    def cpp_element_value(self, value):
        """C++ uses different capitalization for boolean values."""
        return str(bool(value)).lower()

    def python_type_name(self) -> str:
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("bool")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "Bool"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("bool")

    def empty_base_value(self):
        """Return the value a boolean should take when no particular value is specified"""
        return False
| 2,438 | Python | 41.051723 | 112 | 0.655865 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/PathAttributeManager.py | """
Contains the support class for managing attributes whose data is strings represented as tokens
"""
from typing import Any
from .AttributeManager import CppConfiguration, CudaConfiguration
from .StringAttributeManager import StringAttributeManager
class PathAttributeManager(StringAttributeManager):
    """Support class for attributes of type path.

    There is no USD support for Sdf.Path values so these paths have to be stored as strings. This is acceptable as
    it is trivial to cast a string to an Sdf.Path if you wish to use that API to manipulate the string value.
    """

    OGN_TYPE = "path"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    # Paths are plain C strings tagged with the ePath role.
    CPP_CONFIGURATION = {
        "path": CppConfiguration("char*", cast_required=False, role="ePath"),
    }
    CUDA_CONFIGURATION = {
        "path": CudaConfiguration("char*", cast_required=False, role="ePath"),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: Ignored - values are set as-is in Python
        """
        values = ["/This/Is", "/The/Way"]
        if self.tuple_count > 1:
            # Tuple samples append 0, 1, 2... trailing "x" characters so every tuple member is distinct
            values = [tuple(value + "x" * i for i in range(self.tuple_count)) for value in values]
        if self.array_depth > 0:
            # Array samples are two arrays, the second with the element order reversed
            values = [values, [values[1], values[0]]]
        # USD values are each wrapped in one extra list level
        return [[value] for value in values] if for_usd else values

    def create_type_name(self) -> str:
        """Path attributes have a special name when creating so that they can be instantiated differently"""
        return "path"

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_PATH"
| 1,856 | Python | 39.369564 | 114 | 0.66972 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/UnionAttributeManager.py | """
Contains the support class for managing attributes whose data type is a union of several accepted types
"""
from typing import Dict, List
from ..utils import _EXTENDED_TYPE_UNION, ParseError, to_usd_docs
from .AnyAttributeManager import AnyAttributeManager
from .AttributeManager import AttributeManager
class UnionAttributeManager(AnyAttributeManager):
    """Support class for attributes of type union

    The union attribute type is different from all of the others in that it doesn't have a fixed data type, it is
    merely a placeholder for an attribute that accepts a variety of data types. It can be thought of as an "any"
    attribute with restrictions on exactly what kind of connections can be made. (e.g. if the union accepts float
    and double then it cannot accept a connection from a string attribute)

    Members:
        __types_accepted: Dictionary of {"typeName": AttributeManager} for all of the data types that this union
            attribute can accept.
    """

    # This is just a keyword for the attribute manager, this doesn't appear in the type name directly
    OGN_TYPE = "union"

    def __init__(self, attribute_name: str, attribute_type_name: str, types_accepted: Dict[str, AttributeManager]):
        """Set up the empty attribute values for population from the JSON description

        Args:
            attribute_name: Unique name for this attribute
            attribute_type_name: Unique name for this attribute type
            types_accepted: Same type as member __types_accepted
        Raises:
            ParseError: If any of the accepted types are not legal
        """
        super().__init__(attribute_name, attribute_type_name)
        self.__types_accepted = types_accepted

    def cpp_extended_type(self):
        """Returns the extended type identifier for C++ types"""
        return "kExtendedAttributeType_Union"

    def ogn_type(self) -> str:
        """Returns a string containing the fully expanded name of this attribute type in a .ogn file"""
        # Accepted type names are sorted so the .ogn representation is deterministic
        type_information = sorted(self.__types_accepted.keys())
        if self.array_depth == 1:
            type_information = [type_information]
        return f"{type_information}"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        return ",".join(sorted(self.__types_accepted.keys()))

    def python_extended_type(self):
        """Returns the extended type identifier and descriptor for Python attribute types"""
        return (_EXTENDED_TYPE_UNION, ",".join(sorted(self.__types_accepted.keys())))

    def usd_type_accepted_description(self) -> str:
        """Returns a string that will be the default value of the USD token, describing accepted types"""
        # NOTE(review): unlike usd_type_name() the keys are not sorted here - confirm whether the description
        # is meant to preserve declaration order or should be deterministic like the other methods
        return f"union of {','.join(self.__types_accepted.keys())}"

    def emit_usd_declaration(self, out) -> None:
        """Print a declaration for this attribute in USD. Nothing is returned.

        Unions have no single fixed data type so the attribute is declared as a "token" whose default
        value describes the accepted types.

        Args:
            out: Output handler where the USD will be emitted
        """
        try:
            usd_name = self.usd_name()
            # Overriding default behavior of using self.usd_type_name()
            usd_type = "token"
        except ParseError:
            # Attributes without USD representations can be skipped
            return
        docs = to_usd_docs(self.description)
        default_value = self.usd_default_value()
        if out.indent(f"custom {usd_type} {usd_name}{default_value} ("):
            out.write(docs)
        out.exdent(")")
| 3,521 | Python | 40.928571 | 115 | 0.660324 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/MatrixAttributeManager.py | """
Contains the support class for managing attributes whose data is matrixes of numbers
"""
from contextlib import suppress
from typing import Any, List, Union
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import is_number_or_list_of_numbers
from .RoleAttributeManager import RoleAttributeManager
class MatrixAttributeManager(RoleAttributeManager):
    """Support class for the attribute with role matrix (i.e. matrixd)

    Note that unlike other role classes the tuple values for a matrix indicate the row/column sizes, not the
    total number of values. So a matrixd[4] has 16 elements.
    """

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "matrixd[2]": CppConfiguration(
            "pxr::GfMatrix2d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eMatrix"
        ),
        "matrixd[3]": CppConfiguration(
            "pxr::GfMatrix3d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eMatrix"
        ),
        "matrixd[4]": CppConfiguration(
            "pxr::GfMatrix4d", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eMatrix"
        ),
    }
    CUDA_CONFIGURATION = {
        "matrixd[2]": CudaConfiguration("Matrix2d", include_files=["omni/graph/core/cuda/Matrix2d.h"], role="eMatrix"),
        "matrixd[3]": CudaConfiguration("Matrix3d", include_files=["omni/graph/core/cuda/Matrix3d.h"], role="eMatrix"),
        "matrixd[4]": CudaConfiguration("Matrix4d", include_files=["omni/graph/core/cuda/Matrix4d.h"], role="eMatrix"),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
        """
        # First sample is the identity matrix, second has 2.0 on the diagonal and 3.0 everywhere else
        values = [
            tuple(tuple(1.0 if i == j else 0.0 for i in range(self.tuple_count)) for j in range(self.tuple_count)),
            tuple(tuple(2.0 if i == j else 3.0 for i in range(self.tuple_count)) for j in range(self.tuple_count)),
        ]
        if for_usd:
            from pxr import Gf

            # Gf matrix constructors take the values in flattened form
            gf_type = getattr(Gf, f"Matrix{self.tuple_count}d")
            values = [gf_type(*self.flattened_value(values[0])), gf_type(*self.flattened_value(values[1]))]
        if self.array_depth > 0:
            # Array samples are two arrays, the second with the element order reversed
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    @staticmethod
    def roles():
        """Return a list of valid role names for this type"""
        return ["matrixd"]

    @classmethod
    def is_matrix_type(cls) -> bool:
        """This is obviously a matrix type"""
        return True

    @staticmethod
    def tuples_supported() -> List[int]:
        """This type can only have 2, 3 or 4 dimensions (squared for actual data count), not 1"""
        return [2, 3, 4]

    def tuple_argument(self):
        """Return a string with the declaration of a tuple count argument for constructors"""
        # The emitted count is the total element count (rows * columns), not the dimension
        tuple_count_arg = "" if self.tuple_count < 2 else f", {self.tuple_count * self.tuple_count}"
        return tuple_count_arg

    def empty_value(self):
        """Having a 2d tuple count means the matrix needs a different shape for its empty values"""
        # Arrays of matrices are empty when they contain no elements at all
        if self.array_depth == 1:
            return []
        # The empty matrix is the identity matrix, not all 0's as it is for other numeric types.
        identity = []
        for i in range(self.tuple_count):
            row = []
            for j in range(self.tuple_count):
                row.append(1.0 if j == i else 0.0)
            identity.append(row)
        return identity

    @staticmethod
    def flattened_value(value: Any) -> List[float]:
        """Returns a flattened version of the 2d list, required by the Python bindings

        Args:
            value: A list of list of values or tuple of tuple of values representing a matrix

        Raises:
            ParseError: If the value is not a list or tuple at all
        """
        if isinstance(value, (list, tuple)):
            flattened_value = list(value)
            # The index error captures the empty list case, which flattens to itself
            with suppress(IndexError):
                if isinstance(flattened_value[0], (list, tuple)):
                    flattened_value = [item for sublist in flattened_value for item in sublist]
                else:
                    # The value is already flattened so just return it directly
                    return value
        else:
            raise ParseError(f"Value {value} should be a matrix")
        return flattened_value

    def square_tuple(self, value: Union[List[float], List[List[float]]]) -> List[float]:
        """Returns a square 2d list version of the passed in list, required for USD setting"""
        # Note: despite the annotation the successful returns are tuples of tuples
        # Case 1: already square - just verify each row length and convert to nested tuples
        if len(value) == self.tuple_count:
            for element in value:
                if len(element) != self.tuple_count:
                    raise ParseError(
                        f"Value {value} should be a 2d or flattened matrix of dimension {self.tuple_count}"
                    )
            return tuple(tuple(element) for element in value)
        # Case 2: flattened - carve out one row of tuple_count elements at a time
        if len(value) == self.tuple_count * self.tuple_count:
            square = []
            for row in range(self.tuple_count):
                square.append(
                    tuple(
                        column for column in value[row * self.tuple_count : row * self.tuple_count + self.tuple_count]
                    )
                )
            return tuple(square)
        raise ParseError(f"Value {value} should be a 2d or flattened matrix of dimension {self.tuple_count}")

    def validate_numbers_in_range(self, value):
        """Validate that the given value is in the legal numeric range, if any are specified

        Args:
            value: Data value to verify, in flattened form
        Raises:
            ParseError if the min/max range is not respected by the value
        """
        # min/max may themselves be 2d or flattened matrices so compare element-wise in flattened form
        if self.minimum is not None:
            for single_value, minimum in zip(value, self.flattened_value(self.minimum)):
                if single_value < minimum:
                    raise ParseError(f"Value of {value} is less than the allowed minimum of {self.minimum}")
        if self.maximum is not None:
            for single_value, maximum in zip(value, self.flattened_value(self.maximum)):
                if single_value > maximum:
                    raise ParseError(f"Value of {value} is greater than the allowed maximum of {self.maximum}")

    def validate_value(self, value):
        """Raises a ParseError if the value is not a valid matrix value of the correct dimensions.

        Unlike other tuples the matrix types use a single tuple count to represent both dimensions of the data,
        so a matrix2d can be [[0.0, 0.0], [0.0, 0.0]] or [0.0, 0.0, 0.0, 0.0]
        """
        flattened_value = self.flattened_value(value)
        if not is_number_or_list_of_numbers(flattened_value, self.tuple_count * self.tuple_count):
            raise ParseError(f"Value {value} on a {self.ogn_type()} attribute is not a {self.tuple_count}d matrix")
        self.validate_numbers_in_range(flattened_value)

    def cpp_tuple_value(self, value) -> str:
        """Returns the string for the C++ representation of the single value passed in.

        Matrix values can be passed in either as flattened lists or as square 2d lists.
        The form passed in is assumed to be the one required for the constructor, except for the pxr::GfMatrixXd
        types, which require a flattened list but for historical reasons may not get it, so the list is flattened
        for them.
        """
        flattened_size = self.tuple_count * self.tuple_count
        if len(value) != self.tuple_count and len(value) != flattened_size:
            raise ParseError(f"Tuple count initializer expected matrix of size {self.tuple_count}, got `{value}`")
        # In the case of a flattened list the value is returned as a simple flattened list as well
        if len(value) == flattened_size:
            return f"{{{','.join([RoleAttributeManager.cpp_element_value(self, element) for element in value])}}}"
        # len(value) == tuple_count here, so every row must also be tuple_count long for a square matrix
        expected_shape = [self.tuple_count] * self.tuple_count
        actual_shape = [len(row) for row in value]
        if expected_shape != actual_shape:
            raise ParseError(f"Matrix default must be size {self.tuple_count} x {self.tuple_count}, flat or square")
        if self.cpp_configuration().base_type_name.startswith("pxr"):
            # Backward compatibility for pxr::GfMatrix types - they should have flattened lists for construction
            # but before both types were accepted. Now the type in the file should correspond with the type needed
            # by the type's constructor.
            flattened_value = self.flattened_value(value)
            return f"{{{','.join([RoleAttributeManager.cpp_element_value(self, elem) for elem in flattened_value])}}}"
        # Non-pxr types are emitted as a brace-enclosed initializer per row, e.g. {{1.0,0.0},{0.0,1.0}}
        row_values = []
        for row_element in value:
            row_values.append(
                f"{{{','.join([RoleAttributeManager.cpp_element_value(self, element) for element in row_element])}}}"
            )
        return f"{{{','.join(row for row in row_values)}}}"

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_MATRIX"

    def python_value(self, value):
        """Returns the value for this attribute in a Python-compatible format, None for no default.

        The Python bindings require the list be flattened so do that first.
        """
        if not value:
            return value
        if self.array_depth > 0:
            # Each array element is itself a matrix needing flattening
            return [self.flattened_value(element) for element in value]
        return self.flattened_value(value)

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return f"Matrix{self.suffix()}"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_arrays(f"matrix{self.tuple_count}{self.suffix()}")

    def usd_value(self, value):
        """The matrix values require two levels of parentheses so this override is necessary."""
        if value is None:
            return None
        if self.array_depth > 0:
            if not isinstance(value, list):
                raise ParseError(f"Expected list for USD array value on {self.name} - got {value}")
            array_output = []
            for element in value:
                array_output.append(self.square_tuple(element))
            return array_output
        return self.square_tuple(value)
| 10,939 | Python | 46.982456 | 119 | 0.62428 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/IntAttributeManager.py | """
Contains the support class for managing attributes whose data is 32 bit integers
"""
from typing import Any, List
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, values_in_range
from .parsing import is_type_or_list_of_types
class IntAttributeManager(NumericAttributeManager):
    """Support class for attributes of type 32-bit integer"""

    OGN_TYPE = "int"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "int": CppConfiguration("int", cast_required=False),
        "int[2]": CppConfiguration("pxr::GfVec2i", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "int[3]": CppConfiguration("pxr::GfVec3i", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "int[4]": CppConfiguration("pxr::GfVec4i", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
    }
    # NOTE(review): "int[2]" maps to the CUDA type "int3", identical to the "int[3]" entry. CUDA provides
    # an "int2" type, so confirm whether this is deliberate (e.g. alignment/padding) or a copy-paste error.
    CUDA_CONFIGURATION = {
        "int": CudaConfiguration("int", cast_required=False),
        "int[2]": CudaConfiguration("int3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "int[3]": CudaConfiguration("int3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "int[4]": CudaConfiguration("int4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True return as the Gf data type used to set the value in USD attributes, else Python ints
        """
        values = [-32, -23]
        if self.tuple_count > 1:
            # Tuple samples offset each member by its index so all members are distinct
            values = [tuple(value + i for i in range(self.tuple_count)) for value in values]
        if for_usd:
            from pxr import Gf

            gf_type = getattr(Gf, f"Vec{self.tuple_count}i")
            values = [gf_type(*value) for value in values]  # noqa: PLE1133
        if self.array_depth > 0:
            # Array samples are two arrays, the second with the element order reversed
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        return NumericAttributeManager.TYPE_INTEGER

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid integer value"""
        if not is_type_or_list_of_types(value, int, self.tuple_count):
            raise ParseError(f"Value {value} on an int[{self.tuple_count}] attribute is not a matching type")
        # The bounds are the inclusive limits of a signed 32-bit integer
        if not values_in_range(value, -2147483648, 2147483647):
            raise ParseError(f"Value {value} on a 32-bit integer[{self.tuple_count}] attribute is out of range")
        super().validate_value(value)

    @staticmethod
    def tuples_supported() -> List[int]:
        """USD supports only these tuples natively so restrict support to them for now"""
        return [1, 2, 3, 4]

    def cuda_base_type_name(self) -> str:
        """Returns a string with the CUDA base type of the attribute data"""
        return "int"

    def python_type_name(self):
        """Returns a string with the Python "typing" type-checking declaration for the attribute data"""
        return self.python_add_containers("int")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "Int"

    def usd_type_name(self):
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("int")
| 3,689 | Python | 44.555555 | 112 | 0.650854 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/HalfAttributeManager.py | """
Contains the support class for managing attributes whose data is half precision numbers
"""
from typing import Any, List
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .NumericAttributeManager import NumericAttributeManager, values_in_range
from .parsing import is_type_or_list_of_types
class HalfAttributeManager(NumericAttributeManager):
    """Support class for attributes of type 16-bit floating point value"""

    OGN_TYPE = "half"

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "half": CppConfiguration("pxr::GfHalf", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "half[2]": CppConfiguration("pxr::GfVec2h", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "half[3]": CppConfiguration("pxr::GfVec3h", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
        "half[4]": CppConfiguration("pxr::GfVec4h", include_files=["omni/graph/core/ogn/UsdTypes.h"]),
    }
    # NOTE(review): "half[2]" maps to the CUDA type "__half3", identical to the "half[3]" entry. CUDA
    # provides "__half2", so confirm whether this is deliberate (e.g. alignment) or a copy-paste error.
    CUDA_CONFIGURATION = {
        "half": CudaConfiguration("__half", cast_required=False),
        "half[2]": CudaConfiguration("__half3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "half[3]": CudaConfiguration("__half3", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
        "half[4]": CudaConfiguration("__half4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"]),
    }

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True return as the Gf data type used to set the value in USD attributes, else Python floats
        """
        # 2.5, 4.5, and the 0.125 offsets are all exactly representable in 16-bit floating point
        values = [2.5, 4.5]
        if self.tuple_count > 1:
            values = [tuple(value + i * 0.125 for i in range(self.tuple_count)) for value in values]
        if for_usd:
            from pxr import Gf

            gf_type = getattr(Gf, f"Vec{self.tuple_count}h")
            values = [gf_type(*value) for value in values]  # noqa: PLE1133
        if self.array_depth > 0:
            # Array samples are two arrays, the second with the element order reversed
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    def numerical_type(self) -> int:
        """Returns the numerical TYPE_* value for this attribute's data"""
        return NumericAttributeManager.TYPE_FLOAT

    def validate_value(self, value):
        """Raises a ParseError if value is not a valid half-float value"""
        if not is_type_or_list_of_types(value, float, self.tuple_count):
            raise ParseError(f"Value {value} on a half[{self.tuple_count}] attribute is not a matching type")
        # Values not representable exactly due to precision considerations are still accepted;
        # +/-65504 is the largest finite value a 16-bit float can hold
        if not values_in_range(value, -65504, 65504):
            raise ParseError(f"Value {value} on a 16-bit float[{self.tuple_count}] attribute is out of range")
        super().validate_value(value)

    @staticmethod
    def tuples_supported() -> List[int]:
        """USD supports only these tuples natively so restrict support to them for now"""
        return [1, 2, 3, 4]

    def python_type_name(self):
        """In Python the 16-bit float can only be represented as a regular float"""
        return self.python_add_containers("float")

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return "Half"

    def usd_type_name(self):
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_containers("half")
| 3,703 | Python | 46.487179 | 115 | 0.654874 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/AttributeManager.py | # noqa: PLC0302
"""
Support for handling the parsing and generation for generic attributes.
Specific attribute types will override some of this behaviour in their own subclasses.
Exported Classes:
AttributeManager
Exported Constants:
PropertySet
"""
import os
import re
from contextlib import suppress
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple
from ..keys import AttributeKeys, CudaPointerValues, MemoryTypeValues
from ..utils import (
_EXTENDED_TYPE_REGULAR,
IndentedOutput,
MetadataKeys,
ParseError,
UnimplementedError,
ensure_quoted,
get_metadata_dictionary,
to_usd_docs,
)
from .naming import (
INPUT_GROUP,
OUTPUT_GROUP,
STATE_GROUP,
assemble_attribute_type_name,
is_input_name,
is_output_name,
is_state_name,
split_attribute_name,
)
from .parsing import sdf_type_name
# ======================================================================
# Syntactic sugar for type validation of a property set
PropertySet = Tuple[bool, Dict]

# ======================================================================
# C++ type matchers that pull apart array-suffixed type declarations like "int[2]" into "int" and "[2]".
# The reason is so that they can be put back together into legal types when adding
# references, "int(&)[2]", or pointers, "int(*)[2]". Other types can simply append a "&" or "*" to be legal.
# RE_CPP_MATRIX matches the two-dimensional suffix form "type[N][M]"; RE_CPP_TUPLE matches "type[N]".
RE_CPP_MATRIX = re.compile(r"(.*)(\[[0-9]+\]\[[0-9]+\])")
RE_CPP_TUPLE = re.compile(r"(.*)(\[[0-9]+\])")
# ======================================================================
@dataclass
class CppConfiguration:
    """Contains the information required to configure the C++ types for an attribute.

    Attributes:
        base_type_name (str): Data type for one of the attribute values (e.g. "double")
        include_files (List[str]): List of files to include to define the above types (e.g. ["pxr/gf/vec3d.h"])
        cast_required (bool): If True then a cast is required for construction (e.g. "true" versus "pxr::GfHalf(1.0)")
        role (str): Name of the AttributeRole enum value that marks this attribute's type (override not allowed)
    """

    base_type_name: str
    include_files: List[str] = field(default_factory=list)
    cast_required: bool = True
    role: str = "eNone"
# ======================================================================
@dataclass
class CudaConfiguration:
    """Contains the information required to configure the CUDA types for an attribute.

    Attributes:
        base_type_name (str): Data type for one of the attribute values (e.g. "double")
        include_files (List[str]): List of files to include to define the above types (e.g. ["cuda_fp16.h"])
        cast_required (bool): If True then a cast is required for construction (e.g. "true" versus "__half(1.0)")
        role (str): Name of the AttributeRole enum value that marks this attribute's type (override not allowed)
    """

    base_type_name: str
    include_files: List[str] = field(default_factory=list)
    cast_required: bool = True
    role: str = "eNone"
# ======================================================================
class AttributeManager:
    """
    Base class that provides support methods common to all types of attributes

    The members of this class implementing the mandatory values must be named the same as AttributeKeys.MANDATORY

    Attributes:
        attribute_type_name: Role interpretation of the attribute type (e.g. colorf for float[3])
        array_depth: How many array levels are on this attribute (optional, range 0-2 - default 0)
        base_name: Name of the attribute with the namespace removed
        cuda_pointer_type: Location of CUDA array pointers
        default: Value for the attribute when none is explicitly specified at runtime (mandatory)
        description: Description of what the attribute does (mandatory)
        do_validation: Is the attribute required to be valid before compute can be called?
        element_count: How many members of an array attribute are specified in the default value?
        attribute_group: Group of attribute this is (INPUT_GROUP, OUTPUT_GROUP, STATE_GROUP)
        is_deprecated: Has the attribute been deprecated? (optional - default False)
        deprecation_msg: Optional message describing what users must to do deal with the deprecated attribute
        is_required: Is the attribute required for the node to operate? (optional - default True)
        manager: Object created to manage data for this attribute's type
        memory_type: Key indicating where the attribute's memory is stored (optional - default MemoryTypeValues.CPU)
        metadata: Dictionary of Key/Value strings attached to the attribute (optional - default {})
        name: Name of the attribute (mandatory)
        namespace: Namespace of the attribute (split from the full name)
        tuple_count: How many members of the same type are in this attribute as a tuple? (optional - default 1)
            e.g. float[3] has a tuple count of 3
        type: Type of data contained by this attribute (mandatory)
        unimplemented_error: None if attribute is valid, else a string indicating what is unimplemented

    Abstract Methods To Override:
        These methods do not have usable definitions and must be overridden specifically for each attribute type.
            cpp_base_type_name(self) -> str
            cuda_base_type_name(self) -> str
            python_type_name(self) -> str
            sdf_type_name(self) -> str
            usd_name(self) -> str
            usd_type_name(self) -> str
        Usually you can add element counts and arrays using the associated FOO_add_containers() method, however
        you are free to elevate a type to array and element amounts in any way you like.

    Other Methods To Override:
        These methods add information, and should include the results from the super() call in their result
            python_imports(self) -> List[str]
            cpp_includes(self) -> List[str]
            cpp_element_type_name(self) -> str
            cuda_includes(self) -> List[str]
            cuda_element_type_name(self) -> str
            tuples_allowed(self) -> List[int]

    The derived classes must also implement the class variable OGN_TYPE, which defines how the base type of
    the attribute is declared in the .ogn file (e.g. "float" for float, float[3], float[], etc.)
    """

    # Per-root-type configuration overrides, keyed by ogn_root_type(); shared at class level
    CPP_CONFIGURATION = {}
    CUDA_CONFIGURATION = {}
    # Base .ogn type name; None here, must be defined by every concrete subclass
    OGN_TYPE = None
    def __init__(self, attribute_name: str, attribute_type_name: str):
        """Set up the empty attribute values for population from the JSON description

        Args:
            attribute_name: Unique name for this attribute
            attribute_type_name: Unique name for this attribute type

        Raises:
            ParseError: The attribute name is not in a recognized input/output/state namespace
        """
        self.cuda_pointer_type = None
        self.default = None
        self.description = None
        self.array_depth = 0
        self.tuple_count = 0
        self.element_count = 0
        self.is_deprecated = False
        self.deprecation_msg = ""
        self.is_required = True
        self.do_validation = True
        self.memory_type = None  # Lets the node set the value if no override is present
        self.metadata = {}
        self.name = attribute_name
        # The namespace prefix (e.g. "inputs") determines which attribute group this belongs to
        self.namespace, self.base_name = split_attribute_name(attribute_name)
        if is_input_name(attribute_name):
            self.attribute_group = INPUT_GROUP
        elif is_output_name(attribute_name):
            self.attribute_group = OUTPUT_GROUP
        elif is_state_name(attribute_name):
            self.attribute_group = STATE_GROUP
        else:
            raise ParseError(f"Attribute '{attribute_name}' has unrecognized type")
        self.role = None
        self.attribute_type_name = attribute_type_name
        self.unimplemented_error = None
# ----------------------------------------------------------------------
def sample_values(self, for_usd: bool = False) -> Any:
"""A set of sample values of the attribute's type for testing - None if samples are not supported.
Args:
for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
Values are returned as a list of constructor arguments; e.g. if a call to set a value looks like
this: Set(float, float, float) then the parameters returned would be [float, float, float]
"""
return
# ----------------------------------------------------------------------
@classmethod
def is_matrix_type(cls) -> bool:
"""Returns true iff the attribute is a matrix type, where the tuple_count must be squared"""
return False
# ----------------------------------------------------------------------
    def ogn_base_type(self) -> str:
        """Returns a string containing the base name of this attribute type in a .ogn file (no tuples or arrays)"""
        return self.OGN_TYPE
# ----------------------------------------------------------------------
    def ogn_type(self) -> str:
        """Returns a string containing the fully expanded name of this attribute type in a .ogn file,
        including any tuple and array decorations (e.g. "float[3][]")"""
        return assemble_attribute_type_name(self.ogn_base_type(), self.tuple_count, self.array_depth)
# ----------------------------------------------------------------------
    def ogn_root_type(self) -> str:
        """Returns a string containing the fully expanded name of this attribute type in a .ogn file, without arrays.

        This is the key used for the CPP_CONFIGURATION/CUDA_CONFIGURATION lookup tables."""
        return assemble_attribute_type_name(self.ogn_base_type(), self.tuple_count, 0)
# ----------------------------------------------------------------------
    def is_read_only(self) -> bool:
        """Returns True if this attribute should not be written to - only input attributes are read-only"""
        return self.attribute_group == INPUT_GROUP
# ----------------------------------------------------------------------
def supports_metadata(self) -> bool:
"""Returns True if this type of attribute can handle metadata output"""
return True
# ----------------------------------------------------------------------
    def memory_storage(self) -> str:
        """Returns the memory location where the attribute's data will be stored - usually the same as its
        declared memory type; subclasses may differ"""
        return self.memory_type
# ----------------------------------------------------------------------
def override_cpp_configuration(self, base_type_name: str, include_files: List[str], cast_required: bool):
"""Override the type definitions applicable to the attribute type implemented by this manager.
Args:
base_type_name (str): Data type for one of the attribute values (e.g. "double")
cast_required (bool): Is a cast required for construction? (e.g. "true" versus "pxr::GfHalf(1.0)")
include_files (List[str]): List of files to include to define the above types (e.g. ["pxr/gf/vec3d.h"])
"""
old_configuration = self.cpp_configuration()
old_configuration.base_type_name = base_type_name
old_configuration.include_files = include_files
old_configuration.cast_required = cast_required
self.CPP_CONFIGURATION[self.ogn_root_type()] = old_configuration
# ----------------------------------------------------------------------
    def cpp_configuration(self) -> CppConfiguration:
        """Returns the C++ configuration that applies to the attribute type implemented by this manager.

        Falls back to a default-constructed CppConfiguration named after the root type when no
        class-level table applies.
        """
        # NOTE(review): a missing table entry raises KeyError, which this clause does not catch;
        # callers such as cpp_base_type_name appear to rely on that to report an "unimplemented"
        # ParseError - confirm before widening this except clause.
        try:
            return self.CPP_CONFIGURATION[self.ogn_root_type()]
        except AttributeError:
            return CppConfiguration(self.ogn_root_type())
# ----------------------------------------------------------------------
def cpp_includes(self) -> List[str]:
"""Return a list of files to include in the C++ header for proper compilation of this type
Note that the list entries should include the <> or "" surrounding the file name so that the proper include
can be generated (e.g. '<carb/Types.h>' versus '"../myIncludes/myFile.h"')
"""
includes = ["omni/graph/core/Type.h"]
if self.array_depth > 0:
includes.append("omni/graph/core/ogn/ArrayAttribute.h")
includes.append("array")
else:
includes.append("omni/graph/core/ogn/SimpleAttribute.h")
# Any include files in the configuration can be automatically added here, avoiding having each class
# make a special override function just for that.
with suppress(AttributeError, KeyError):
includes += self.cpp_configuration().include_files
return includes
# ----------------------------------------------------------------------
def cpp_declarations(self) -> List[str]:
"""Return a list of declarations to include in the C++ header for proper compilation of this type
This will be included after the declarations.
"""
return []
# ----------------------------------------------------------------------
def cpp_post_initialization(self, out: IndentedOutput):
"""Outputs any attribute-specific code needing to run at the end of the initializeType method"""
return
# ----------------------------------------------------------------------
def cpp_pre_initialization(self, out: IndentedOutput):
"""Outputs any attribute-specific code needing to run at the top of in the initializeType method"""
return
# ----------------------------------------------------------------------
def cpp_add_arrays(self, base_type: str) -> str:
"""Common method for transforming a base C++ type to add any appropriate arrays.
Call this method directly when the tuple type is not the standard std::array
Args:
base_type: String containing the type name of the single-value, e.g. "float"
"""
final_type = base_type
for _level in range(0, self.array_depth):
final_type = f"{final_type}*"
return final_type
# ----------------------------------------------------------------------
    def cpp_base_type_name(self) -> str:
        """Returns a string with the base C++ type of the attribute data.

        Raises:
            ParseError: No C++ configuration is registered for this attribute type
        """
        # A missing configuration entry surfaces here as a KeyError from the table lookup
        try:
            return self.cpp_configuration().base_type_name
        except (AttributeError, KeyError) as error:
            raise ParseError(f"Attribute {self.name} has an unimplemented C++ base type name") from error
# ----------------------------------------------------------------------
    def cpp_role_name(self) -> str:
        """Returns a string with the role name for this attribute - "AttributeRole::eNone" by default.

        Raises:
            ParseError: The role could not be read from the C++ configuration
        """
        try:
            return f"AttributeRole::{self.cpp_configuration().role}"
        except (AttributeError, KeyError) as error:
            raise ParseError(f"Attribute {self.name} has an unimplemented C++ role name") from error
# ----------------------------------------------------------------------
    def cpp_element_type_name(self) -> str:
        """Returns a string with the full C++ type of a single element of the attribute data (no arrays).

        Any KeyError from a missing configuration entry propagates to the caller.
        """
        return self.cpp_configuration().base_type_name
# ----------------------------------------------------------------------
def _cpp_add_array_accessor(self, raw_type: str) -> str:
"""Returns a string that adds the appropriate array accessor to the given raw type, unchanged if not an array"""
if self.array_depth > 0:
if self.is_read_only():
return f"ogn::const_array<{raw_type}>"
return f"ogn::array<{raw_type}>"
return raw_type
# ----------------------------------------------------------------------
def _cpp_accessor_type(self, include_arrays: bool) -> str:
"""Returns a string with the full C++ type of the data returned by the attribute's accessor"""
modifier = "const " if self.is_read_only() else ""
accessor_type = self.cpp_element_type_name()
if include_arrays:
accessor_type = self._cpp_add_array_accessor(accessor_type)
return f"{modifier}{accessor_type}"
# ----------------------------------------------------------------------
def cpp_extended_type(self):
"""Returns the extended type identifier for C++ types"""
return "kExtendedAttributeType_Regular"
# ----------------------------------------------------------------------
def cpp_accessor_on_cpu(self) -> bool:
"""Returns True if the accessor constructed for the attribute's data is always on the CPU, else it will be
wherever the attribute's memory type dictates.
"""
return False
# ----------------------------------------------------------------------
def cpp_wrapper_class(self) -> Tuple[str, List[str]]:
"""Returns a string with the wrapper class used to access attribute data in the C++ database along
with the non-default parameters to that class's template"""
wrapper_class = "ogn::{}{}".format(
"Array" if self.array_depth > 0 else "Simple",
{INPUT_GROUP: "Input", OUTPUT_GROUP: "Output", STATE_GROUP: "State"}[self.attribute_group],
)
memory_type = MemoryTypeValues.CPU if self.cpp_accessor_on_cpu() else self.memory_type
template_arguments = [self.fabric_element_type(), MemoryTypeValues.CPP[memory_type]]
if self.array_depth > 0 and self.cuda_pointer_type is not None:
template_arguments.append(CudaPointerValues.CPP[self.cuda_pointer_type])
return (wrapper_class, template_arguments)
# ----------------------------------------------------------------------
def cpp_typedef_name(self, use_namespace: bool = True):
"""Returns a string with the typedef for this attribute's raw type as it appears in Fabric.
Use this typedef name instead of the raw type to avoid C++'s problem with things like "float[3]&"
"""
safe_base_name = self.base_name.replace(":", "_")
name = f"{self.namespace}::{safe_base_name}" if use_namespace else safe_base_name
return f"{name}_t"
# ----------------------------------------------------------------------
    def cpp_initializer(self) -> Tuple[str, str]:
        """Generate the code for the static attribute initializer.

        The initializer contains any compile-time values for the attribute, like name and type name.

        Returns:
            (NAME, DECLARATION)
            NAME: Name of the initializer object, which should be file-static
            DECLARATION: Full declaration of the initializer object
        """
        # Uses the logic in generate_attribute_static_data to get the right name for the initializer
        initializer_type = f"ogn::AttributeInitializer<{self.fabric_default_data_typedef()}, {self.attribute_group}>"
        initializer_name = self.cpp_variable_name()
        # Constructor arguments: USD name, creation type name, extended type, then optional defaults
        initializer_args = f'"{self.usd_name()}"'
        initializer_args += f', "{self.create_type_name()}"'
        initializer_args += f", {self.cpp_extended_type()}"
        if self.default is not None:
            initializer = self.cpp_default_initializer()
            if initializer is not None:
                initializer_args += f", {initializer}"
        # Array types must always be initialized
        elif self.array_depth > 0:
            initializer_args += ", nullptr, 0"
        return (initializer_name, f"{initializer_type} {initializer_name}({initializer_args});")
# ----------------------------------------------------------------------
def create_type_name(self) -> str:
"""Returns the type of this attribute as expected by the attribute creation methods"""
return self.usd_type_name()
# ----------------------------------------------------------------------
def has_fixed_type(self) -> bool:
"""Returns True if the attribute's type is fixed at compile time"""
return True
# ----------------------------------------------------------------------
def get_allowed_tokens(self) -> Dict[str, str]:
"""Returns the dictionary of tokens allowed on this attribute type, raising ParseError if they are not legal"""
try:
return self.metadata[MetadataKeys.ALLOWED_TOKENS]
except KeyError:
return {}
# ----------------------------------------------------------------------
def cpp_set_handle_at_runtime(self) -> bool:
"""Returns True if the C++ initializer class requires the handle to be set at runtime"""
return True
# ----------------------------------------------------------------------
def override_cuda_configuration(self, base_type_name: str, include_files: List[str], cast_required: bool):
"""Override the type definitions applicable to the attribute type implemented by this manager.
Args:
base_type_name (str): Data type for one of the attribute values (e.g. "double")
cast_required (bool): If True then a cast is required for construction (e.g. "true" versus "__half(1.0)")
include_files (List[str]): List of files to include to define the above types (e.g. ["cuda_fp16.h"])
"""
old_configuration = self.cuda_configuration()
old_configuration.base_type_name = base_type_name
old_configuration.include_files = include_files
old_configuration.cast_required = cast_required
self.CUDA_CONFIGURATION[self.ogn_root_type()] = old_configuration
# ----------------------------------------------------------------------
    def cuda_configuration(self) -> CudaConfiguration:
        """Returns the CUDA configuration that applies to the attribute type implemented by this manager.

        Falls back to a default-constructed CudaConfiguration named after the root type when no
        class-level table entry applies.
        """
        # NOTE(review): a missing table entry raises KeyError, which this clause does not catch;
        # callers such as cuda_base_type_name appear to rely on that to report an "unimplemented"
        # ParseError - confirm before widening this except clause.
        try:
            return self.CUDA_CONFIGURATION[self.ogn_root_type()]
        except AttributeError:
            return CudaConfiguration(self.ogn_root_type())
# ----------------------------------------------------------------------
def cuda_includes(self) -> List[str]:
"""Return a list of include statements to add to the CUDA section of the C++ header
The strings should be full paths, like "omni/graph/core/iComputeGraph.h"
"""
includes = []
# Any include files in the configuration can be automatically added here, avoiding having each class
# make a special override function just for that.
with suppress(AttributeError, KeyError):
includes += self.cuda_configuration().include_files
return includes
# ----------------------------------------------------------------------
    def cuda_base_type_name(self) -> str:
        """Returns a string with the base CUDA type of the attribute data.

        Raises:
            ParseError: No CUDA configuration is registered for this attribute type
        """
        # A missing configuration entry surfaces here as a KeyError from the table lookup
        try:
            return self.cuda_configuration().base_type_name
        except (AttributeError, KeyError) as error:
            raise ParseError(f"Attribute {self.name} has an unimplemented CUDA base type name") from error
# ----------------------------------------------------------------------
def cuda_role_name(self) -> str:
"""Returns a string with the role name for this attribute - none by default"""
return f"AttributeRole::{self.cuda_configuration().role}"
# ----------------------------------------------------------------------
    def cuda_element_type_name(self) -> str:
        """Returns a string with the full CUDA type of the attribute element data, not counting arrays.

        Any KeyError from a missing configuration entry propagates to the caller.
        """
        return self.cuda_configuration().base_type_name
# ----------------------------------------------------------------------
def cuda_type_name(self) -> str:
"""Returns a string with the full CUDA type of the attribute data.
The CUDA type adds an extra pointer indirection because the data is passed through from the CPU,
and can not be dereferenced since the memory is on the GPU.
"""
cuda_type = self.cuda_element_type_name()
if cuda_type is None:
return cuda_type
# If the memory is coming from the CPU then the data can only be passed by reference, otherwise it must
# be passed as a pointer to the actual (potentially GPU) data.
ptr_suffix = "" if self.memory_storage() == MemoryTypeValues.CPU else "*"
return f"{self.cpp_add_arrays(cuda_type)}{ptr_suffix}"
# ----------------------------------------------------------------------
def datamodel_accessor_constructor_args(self) -> List[str]:
"""Returns a list of declarations used by the datamodel accessor constructor"""
role_name = self.cpp_role_name()
return [] if role_name.endswith("eNone") else [role_name]
# ----------------------------------------------------------------------
def datamodel_accessor_declaration(self):
"""Returns a string containing the declaration of the datamodel accessor variable for this attribute"""
variable_name = self.cpp_variable_name()
(wrapper_class, template_parameters) = self.cpp_wrapper_class()
type_declaration = f"{wrapper_class}<{','.join(template_parameters)}>"
constructor_args = self.datamodel_accessor_constructor_args()
# For now the access methods just return a dereferenced pointer to the data. By encapsulating these in
# methods now we leave open the option of doing things like delaying value reads if we want to.
declarations = [f"{type_declaration} {variable_name}{{ {','.join(constructor_args)} }};"]
if not self.is_required:
declarations.append(f"bool has_{variable_name}() const {{ return {self.fabric_pointer_exists()}; }};")
return declarations
# ----------------------------------------------------------------------
    def cpp_typedef_definitions(self) -> List[str]:
        """Return a list of strings containing the definitions of C++ typedefs facilitating the access of this
        attribute's Fabric data. The typedefs returned will vary, based on the memory type of the attribute.

        The base typedef "X_t" will always refer to the type of data you get from the attribute's accessor; i.e.
        the return value from "db.inputs.X()". The typedef will be an appropriate one for the memory type.

        For "ANY" memory types there will be two typedefs, "X_t_cpu" and "X_t_gpu", indicating the typedefs to
        use when the data is extracted on the CPU and GPU respectively.
        """
        typedefs = []
        typedef = self.cpp_typedef_name(use_namespace=False)
        # Some typedefs are not conducive to adding pointers or references (e.g. float[3]) so for those there will be
        # an extra layer of type indirection to use as a bridge (using X_t_raw = float[3]; using X_t = X_t_raw&;)
        needs_raw_type = self.cpp_element_type_name().find("[") >= 0
        if needs_raw_type:
            raw_data_type = self._cpp_accessor_type(include_arrays=False)
            typedefs.append(f"using {typedef}_raw = {raw_data_type};")
            cpp_data_type = self._cpp_add_array_accessor(f"{typedef}_raw")
        else:
            cpp_data_type = self._cpp_accessor_type(include_arrays=True)
        # If the attribute can be accessed from CPU or GPU then it will have a different type when
        # passed across the boundary, as the CPU side can only have references to GPU memory pointers.
        if self.memory_storage() == MemoryTypeValues.CPU:
            typedefs.append(f"using {typedef} = {cpp_data_type}&;")
        else:
            typedefs.append(f"using {typedef}_cpu = {cpp_data_type}&;")
            indirection = "**" if self.array_depth > 0 else "*"
            suffix = "_gpu" if self.memory_storage() == MemoryTypeValues.ANY else ""
            cuda_type = self._cpp_accessor_type(include_arrays=False)
            # NOTE(review): when needs_raw_type is True this emits a second "_raw" alias identical to
            # the one added above. Redeclaring an identical alias is legal at namespace scope but not
            # as a class member - confirm where these typedefs end up being emitted.
            if needs_raw_type:
                typedefs.append(f"using {typedef}_raw = {cuda_type};")
                cuda_type = f"{typedef}_raw"
            typedefs.append(f"using {typedef}{suffix} = {cuda_type}{indirection};")
        return typedefs
# ----------------------------------------------------------------------
def _needs_intermediate_typedef(self) -> bool:
"""Returns True if the attribute's data type requires an intermediate typedef in order to form legal pointers
and references to it. e.g. array of double[3] since "double[3]*" is not a valid type"""
pointer_type = self.cpp_element_type_name()
return pointer_type.find("[") >= 0
# ----------------------------------------------------------------------
    def _intermediate_typedef(self) -> str:
        """Returns the name of the intermediate "_raw" typedef; only meaningful when
        _needs_intermediate_typedef() is True."""
        return f"{self.cpp_typedef_name(use_namespace=True)}_raw"
# ----------------------------------------------------------------------
    def fabric_default_data_typedef(self) -> str:
        """Returns a string representing the raw data type stored in Fabric for this attribute's default values.

        Uses the predefined raw type for attributes whose type requires it (e.g. array of double[3], since
        "double[3]*" is not a valid type.)
        """
        return self.fabric_raw_type()
# ----------------------------------------------------------------------
def fabric_element_type(self) -> str:
"""Return a string corresponding to the type of element data this attribute points to in Fabric. This only
differs from fabric_raw_type() in that arrays do not get an extra level of indirection.
"""
prefix = "const " if self.is_read_only() else ""
return f"{prefix}{self.cpp_element_type_name()}"
# ----------------------------------------------------------------------
def fabric_raw_type(self) -> str:
"""Return a string corresponding to the type of data this attribute points to in Fabric. This is used
for size information, in particular for default values and for adding attributes to Fabric so using the
C++ type is good enough.
"""
if self._needs_intermediate_typedef():
# This type already has any required "const" baked into it
pointer_type = self._intermediate_typedef()
else:
pointer_type = self.fabric_element_type()
indirection = "*" if self.array_depth > 0 else ""
return f"{pointer_type}{indirection}"
# ----------------------------------------------------------------------
    def fabric_pointer_exists(self) -> str:
        """Return a C++ expression string that checks for the existence of the fabric pointer variable value"""
        return f"nullptr != {self.fabric_data_variable_name()}"
# ----------------------------------------------------------------------
    def fabric_data_variable_name(self) -> str:
        """Returns a string containing the generated name of the fabric data reference for this attribute
        (the m_ptrToData member of its accessor)."""
        return f"{self.cpp_variable_name()}.m_ptrToData"
# ----------------------------------------------------------------------
    def fabric_pointer(self) -> str:
        """Returns a string containing the generated, namespace-qualified name of the fabric pointer
        for this attribute."""
        return f"{self.namespace}.{self.fabric_data_variable_name()}"
# ----------------------------------------------------------------------
    def fabric_counter_variable_name(self) -> str:
        """Returns a string containing the generated name of the fabric element counter for this attribute
        (the m_arrayMemberCount member of its accessor)."""
        return f"{self.cpp_variable_name()}.m_arrayMemberCount"
# ----------------------------------------------------------------------
def fabric_needs_counter(self) -> bool:
"""Returns true if the attribute's data type requires a separate element count variable"""
return self.array_depth > 0
# ----------------------------------------------------------------------
def _fabric_data_typedef(self) -> str:
"""Returns a string with the type or typedef reference to use for this attribute's Fabric data"""
modifier = "const " if self.is_read_only() else ""
accessor_type = self.cpp_element_type_name()
if self.array_depth > 0:
if self.is_read_only():
accessor_type = f"ogn::const_array<{accessor_type}>"
else:
accessor_type = f"ogn::array<{accessor_type}>"
return f"{modifier}{accessor_type}"
# ----------------------------------------------------------------------
    def fabric_base_typedef(self, use_namespace: bool = True):
        """Returns the typedef that references data stored in Fabric elements - None in this base class.

        Args:
            use_namespace: Unused in this base implementation
        """
        return
# ----------------------------------------------------------------------
    def fabric_data_reference_type(self, use_namespace: bool = True):
        """Returns a string containing the declaration of the data type used for internal references to fabric"""
        # See the typedefs in generate_cpp:generate_attribute_static_data for why this decision is made.
        # CUDA-resident data uses the "_cpu" typedef since the internal reference itself lives on the CPU.
        if self.memory_type == MemoryTypeValues.CUDA and not self.cpp_accessor_on_cpu():
            return f"{self.cpp_typedef_name(use_namespace)}_cpu"
        return self.cpp_typedef_name(use_namespace)
# ----------------------------------------------------------------------
def datamodel_local_variables(self):
"""Returns a list containing the declaration of local variables required for accessor initialization.
Simple and array objects have their fabric pointers in their accessors so nothing is needed here"""
return []
# ----------------------------------------------------------------------
    def python_extended_type(self):
        """Returns the extended type identifier for Python attribute types, paired with None for regular types"""
        return (_EXTENDED_TYPE_REGULAR, None)
# ----------------------------------------------------------------------
def python_imports(self) -> List[str]:
"""Return a list of modules to import in the Python header for proper parsing of this type"""
imports = []
if self.tuple_count > 1 or self.array_depth > 0:
imports.append("import numpy")
return imports
# ----------------------------------------------------------------------
def python_role_name(self) -> str:
"""Returns a string with the role name for this attribute usable in Python code - none by default"""
return ""
# ----------------------------------------------------------------------
    def python_type_name(self) -> str:
        """Returns a string with the Python "typing" type-checking declaration for the attribute data.

        Raises:
            ParseError: Always, in this base class; listed as an abstract method subclasses must override
        """
        raise ParseError(f"Attribute {self.name} has an unimplemented Python type name")
# ----------------------------------------------------------------------
def python_add_containers(self, base_type: str) -> str:
"""Common method for transforming a base Python type to add any appropriate array or component values"""
final_type = base_type
if self.tuple_count > 1 or self.array_depth > 0:
final_type = "numpy.ndarray"
return final_type
# ----------------------------------------------------------------------
def python_property_name(self) -> str:
"""Return the name of a python property that will correspond to this attribute.
Input attributes have a parent object "inputs" and output attributes have a parent object "outputs" in order to
avoid name clashes and be consistent with the appearance of the C++ equivalent code
"""
return self.base_name.replace(":", "_")
# ----------------------------------------------------------------------
def python_attribute_name(self) -> str:
"""Return the name of a python name that will correspond to this attribute.
The attribute name itself is used as a property to represent the value. This name is for storing the
actual og.Attribute.
"""
return f"_attributes.{self.python_property_name()}"
# ----------------------------------------------------------------------
    def __generate_python_property_get_code(
        self, attribute_name: str, property_name: str, out: IndentedOutput, on_gpu: bool, has_parent: bool = False
    ):
        """Emits the generated code implementing a property read accessor for the attribute.

        Args:
            attribute_name: Name of the attribute being accessed
            property_name: Name of the property to define for the attribute to be accessed
            out: Output location for the generated code
            on_gpu: If True then the accessor should get data pointers from GPU memory
            has_parent: If True then this is a nested accessor and it has to go to the parent for some data
        """
        owner = "self._parent" if has_parent else "self"
        out.write()
        out.write("@property")
        if out.indent(f"def {property_name}(self):"):
            # The underlying og.Attribute is always looked up by the real attribute name, which may
            # differ from the property name for nested cpu/gpu accessors
            out.write(f"data_view = og.AttributeValueHelper({owner}._attributes.{attribute_name})")
            args = []
            if self.fabric_needs_counter():
                # State attributes pre-calculate the size to be the current attribute value size
                if self.attribute_group == STATE_GROUP:
                    out.write(f"{owner}.{attribute_name}_size = data_view.get_array_size()")
                elif self.attribute_group != INPUT_GROUP:
                    args.append(f"reserved_element_count={owner}.{attribute_name}_size")
            if on_gpu:
                args.append("on_gpu=True")
                if self.cuda_pointer_type == CudaPointerValues.CPU:
                    out.write(f"data_view.gpu_ptr_kind = {CudaPointerValues.PYTHON[self.cuda_pointer_type]}")
            out.write(f"return data_view.get({', '.join(args)})")
            out.exdent()
# ----------------------------------------------------------------------
def __generate_python_property_set_code(
self, attribute_name: str, property_name: str, out: IndentedOutput, on_gpu: bool, has_parent: bool = False
):
"""Emits the generated code implementing a property write accessor for the attribute.
Args:
attribute_name: Name of the attribute being accessed
property_name: Name of the property to define for the attribute to be accessed
out: Output location for the generated code
on_gpu: If True then the accessor should set data pointers from GPU memory
has_parent: If True then this is a nested accessor and it has to go to the parent for some data
"""
owner = "self._parent" if has_parent else "self"
attribute_member = f"{owner}._attributes.{property_name}"
out.write()
out.write(f"@{property_name}.setter")
if out.indent(f"def {property_name}(self, value):"):
# Inputs cannot be modified when locked, e.g. during a compute() call
if self.is_read_only() and out.indent(f"if {owner}._setting_locked:"):
# Indent this code manually relative to itself; extra indentation will be handled by the caller
out.write(f"raise og.ReadOnlyError({attribute_member})")
out.exdent()
out.write(f"data_view = og.AttributeValueHelper({attribute_member})")
args = ["value"]
if self.memory_storage() == MemoryTypeValues.CUDA:
args.append("on_gpu=True")
if self.cuda_pointer_type == CudaPointerValues.CPU:
out.write(f"data_view.gpu_ptr_kind = {CudaPointerValues.PYTHON[self.cuda_pointer_type]}")
out.write(f"data_view.set({', '.join(args)})")
# The set_attr_value call will handle setting the size, keep it sychronized with the local variable
if self.fabric_needs_counter():
out.write(f"{owner}.{property_name}_size = data_view.get_array_size()")
out.exdent()
# ----------------------------------------------------------------------
    def generate_python_property_code(self, out: IndentedOutput):
        """Emits the generated code implementing a readable property for this attribute.
        The properties will take one of two forms, depending on where the attribute's memory resides.
        For hardcoded CPU or GPU versions the code will be a simple property, e.g. "db.inputs.attributeName":
            @property
            def attributeName(self):
                ...
        When the memory location is determined at runtime then a secondary level is installed to give access to
        both as either "db.inputs.attributeName.cpu" or "db.inputs.attributeName.gpu"
            class __attributeName:
                @property
                def cpu(self):
                    ...
                @property
                def gpu(self):
                    ...
            @property
            def attributeName(self):
                return __attributeName()
        """
        property_name = self.python_property_name()
        if self.memory_storage() == MemoryTypeValues.ANY:
            # Memory location only known at runtime: emit a nested helper class exposing .cpu and .gpu
            out.write()
            if out.indent(f"class __{property_name}:"):
                if out.indent("def __init__(self, parent):"):
                    out.write("self._parent = parent")
                    out.exdent()
                # NOTE(review): these calls pass the property name as the "attribute_name" parameter and
                # "cpu"/"gpu" as "property_name" - confirm the accessor helpers expect that ordering
                self.__generate_python_property_get_code(property_name, "cpu", out, on_gpu=False, has_parent=True)
                self.__generate_python_property_set_code(property_name, "cpu", out, on_gpu=False, has_parent=True)
                self.__generate_python_property_get_code(property_name, "gpu", out, on_gpu=True, has_parent=True)
                self.__generate_python_property_set_code(property_name, "gpu", out, on_gpu=True, has_parent=True)
                out.exdent()
            # The flat property returns an instance of the nested helper bound to this database wrapper
            out.write()
            out.write("@property")
            if out.indent(f"def {property_name}(self):"):
                out.write(f"return self.__class__.__{property_name}(self)")
                out.exdent()
        elif self.memory_storage() == MemoryTypeValues.CUDA:
            self.__generate_python_property_get_code(property_name, property_name, out, on_gpu=True)
            self.__generate_python_property_set_code(property_name, property_name, out, on_gpu=True)
        else:
            self.__generate_python_property_get_code(property_name, property_name, out, on_gpu=False)
            self.__generate_python_property_set_code(property_name, property_name, out, on_gpu=False)
# ----------------------------------------------------------------------
    def __generate_python_batched_property_get_code(
        self, attribute_name: str, property_name: str, index: int, out: IndentedOutput
    ):
        """Emits the generated code implementing a property read accessor for the batched attribute.
        Args:
            attribute_name: Name of the attribute being accessed
            property_name: Name of the property to define for the attribute to be accessed
            index: Attribute index within the batch list. Used only for inputs.
            out: Output location for the generated code
        """
        out.write()
        out.write("@property")
        if out.indent(f"def {property_name}(self):"):
            if self.attribute_group == INPUT_GROUP:
                # Inputs are pre-fetched into a list indexed by declaration order
                out.write(f"return self._batchedReadValues[{index}]")
            elif self.attribute_group == OUTPUT_GROUP:
                out.write(f"value = self._batchedWriteValues.get(self._attributes.{attribute_name})")
                # NOTE(review): the emitted check is truthiness-based, so a previously staged falsy value
                # (0, "", empty array) would be refetched from fabric rather than returned - confirm intended
                if out.indent("if value:"):  # if value was already set...return it
                    out.write("return value")
                    out.exdent()
                if out.indent(
                    "else:"
                ):  # otherwise, we fetch it from fabric (issue discovered with OgnIntCounter and OgnCountTo)
                    out.write(f"data_view = og.AttributeValueHelper(self._attributes.{attribute_name})")
                    out.write("return data_view.get()")
                    out.exdent()
            out.exdent()
# ----------------------------------------------------------------------
def __generate_python_batched_property_set_code(
self, attribute_name: str, property_name: str, index: int, out: IndentedOutput
):
"""Emits the generated code implementing a property write accessor for the batched attribute.
Args:
attribute_name: Name of the attribute being accessed
property_name: Name of the property to define for the attribute to be accessed
index: Attribute index within the batch list. Used only for inputs.
out: Output location for the generated code
"""
out.write()
out.write(f"@{property_name}.setter")
if out.indent(f"def {property_name}(self, value):"):
if self.attribute_group == INPUT_GROUP:
out.write(f"self._batchedReadValues[{index}] = value")
elif self.attribute_group == OUTPUT_GROUP:
out.write(f"self._batchedWriteValues[self._attributes.{attribute_name}] = value")
out.exdent()
# ----------------------------------------------------------------------
def generate_python_batched_property_code(self, index: int, out: IndentedOutput):
"""Emits the generated code implementing a batched property for this attribute.
The properties are currently only designed to read/write the data from CPU batch.
The code will be a simple property, e.g. "db.inputs.attributeName":
@property
def attributeName(self):
...
"""
property_name = self.python_property_name()
self.__generate_python_batched_property_get_code(property_name, property_name, index, out)
self.__generate_python_batched_property_set_code(property_name, property_name, index, out)
# ----------------------------------------------------------------------
def sdf_type_name(self) -> str:
"""Returns a string with the pxr::SdfValueTypeName of the attribute data"""
return sdf_type_name(self.sdf_base_type(), self.tuple_count, self.array_depth > 0)
# ----------------------------------------------------------------------
def sdf_base_type(self) -> Optional[str]:
"""By default no SDF base type exists"""
return
# ----------------------------------------------------------------------
def usd_name(self) -> str:
"""Returns a string with the name of the attribute in a USD file"""
return self.name
# ----------------------------------------------------------------------
    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file.
        Raises:
            ParseError: Always - derived classes must override this to provide a real USD type name
        """
        raise ParseError(f"Attribute {self.name} has an unimplemented USD type name")
# ----------------------------------------------------------------------
def usd_add_arrays(self, base_type: str) -> str:
"""Common method for transforming a base USD type to add any appropriate array values"""
return base_type + "[]" * self.array_depth
# ----------------------------------------------------------------------
def usd_add_containers(self, base_type: str) -> str:
"""Common method for transforming a base USD type to add any appropriate array or component values"""
final_type = base_type
if self.tuple_count > 1:
final_type += str(self.tuple_count)
return self.usd_add_arrays(final_type)
# ----------------------------------------------------------------------
@staticmethod
def tuples_supported() -> List[int]:
"""Returns a list of tuple numbers supported by the attribute type, by default only single values"""
return [1]
# ----------------------------------------------------------------------
@staticmethod
def array_depths_supported() -> List[int]:
"""Returns a list of array depths supported by the attribute type, by default up to 1 level deep"""
return [0, 1]
# ----------------------------------------------------------------------
def check_support(self):
"""Checks to see if this attribute is currently supported.
This is different from the attribute being valid as this only raises an exception if the
attribute is a legal type, it just hasn't had proper code generation added for it yet.
Raises:
AttributeError: If any attributes on the node are currently not supported
"""
# This is the limit of values for the tuple in the C++ implementation of the array type
if self.tuple_count > 255 or self.tuple_count < 1:
raise AttributeError(f"Element count {self.tuple_count} is not in the allowed range of 1 - 255")
# This is the limit of array depth support in the Fabric
if self.array_depth not in self.array_depths_supported():
raise UnimplementedError(
f"Array depth of {self.array_depth} for attribute type"
f" {self.cpp_base_type_name()} not in supported list {self.array_depths_supported()}"
)
# Certain types only support a limited subset of tuple counts (for now), check those here
if self.tuple_count not in self.tuples_supported():
raise UnimplementedError(
f"Tuple count of {self.tuple_count} for attribute type"
f" {self.cpp_base_type_name()} not in supported list {self.tuples_supported()}"
)
if self.unimplemented_error is not None:
error = self.unimplemented_error
self.unimplemented_error = None
raise UnimplementedError(error)
# ----------------------------------------------------------------------
def requires_default(self):
"""Returns True if this type of attribute needs a default value"""
return self.is_required and self.default is None and self.is_read_only()
# ----------------------------------------------------------------------
    def validate_configuration(self):
        """Validate that the current state of the object is legal for use
        Raises:
            ParseError if the current configuration is missing mandatory information or has illegal values
        """
        for key in AttributeKeys.MANDATORY:
            if not hasattr(self, key):
                raise ParseError(f"Attribute type is missing mandatory parameter {key}")
        # Every attribute manager must report its type somehow - either a fixed OGN_TYPE or a roles() lookup
        if not hasattr(self, "OGN_TYPE") and not hasattr(self, "roles"):
            raise ParseError("Attribute type must have OGN_TYPE or roles() method")
        # Default values must have compatible type structures
        if self.default is not None:
            try:
                self.validate_value_structure(self.default)
            except UnimplementedError as error:
                # If something is unimplemented there may be further processing to do so note it and move on
                self.unimplemented_error = error
        elif self.requires_default():
            raise ParseError(f'Default value is mandatory for required attribute "{self.name}"')
        # Empty descriptions are anti-social
        if not self.description:
            warning = f"Attribute '{self.name}' description should not be empty"
            # OGN_STRICT_DEBUG escalates the empty-description warning into a hard failure
            if os.getenv("OGN_STRICT_DEBUG"):
                raise ParseError(warning)
            print(f"WARNING: {warning}", flush=True)
        # Memory type must be legal
        if self.memory_type not in MemoryTypeValues.ALL:
            raise ParseError(f"Memory type {self.memory_type} not in legal list {MemoryTypeValues.ALL}")
# ----------------------------------------------------------------------
    def validate_value(self, value):
        """Raises a ParseError if value is not a legal value; the base class accepts everything.
        This only checks a single value for the attribute type, not things like arrays of values or elements.
        For those checks call validate_value_structure().
        """
# ----------------------------------------------------------------------
def validate_value_structure(self, value_to_validate):
"""Validate a value being set on the attribute.
This checks to make sure there is a match between the structure of the value and that expected by the
attribute type. (e.g that float[3] contains 3 values, all floats). For a single value check that only
verifies matching simple data type use validate_value()
"""
legal_level_counts = [0] * self.array_depth
if isinstance(value_to_validate, list):
if not legal_level_counts and self.tuple_count < 2:
raise ParseError(f'Value of simple attribute "{self.name}" cannot be an array')
elif legal_level_counts or self.tuple_count > 1:
raise ParseError(
f"Value {value_to_validate} of type {type(value_to_validate)} on "
f'multiple-element attribute "{self.name}" must be an array'
)
self.validate_value_nested(value_to_validate, legal_level_counts)
# ----------------------------------------------------------------------
def validate_value_nested(self, array_data: List, array_levels: List[int]):
"""Validate that the given data matches an array of this attribute's type
Args:
array_data: List of data elements to validate.
array_levels: List of number of members allowed in nested arrays. 0 means any number.
Until this is [] the members of array_data must themselves be lists.
Raises:
ParseError if the structure of the data does not match the structure and values of the attribute
"""
if not array_levels:
self.validate_value(array_data)
else:
if array_levels[0] > 0 and len(array_data) != array_levels[0]:
raise ParseError(f"Attribute {self.name} default expects {array_levels[0]} elements")
for data in array_data:
self.validate_value_nested(data, array_levels[1:])
# ----------------------------------------------------------------------
    def parse_extra_properties(self, property_set: dict) -> PropertySet:
        """Parse any extra properties specific to certain attribute types
        Args:
            property_set: (NAME, VALUE) for properties the attribute type might support
        Returns:
            The unconsumed properties; the base class recognizes no extras so everything is returned
        """
        # Base class does not recognize any extra properties, that's what makes them "extra"
        return property_set
# ----------------------------------------------------------------------
    def parse_metadata(self, metadata: Dict[str, Any]):
        """Parse the metadata attached to the attribute type into self.metadata.
        Overrides to this method can add additional interpretation of special metadata.
        """
        self.metadata = get_metadata_dictionary(metadata)
# ----------------------------------------------------------------------
def python_default_value(self):
"""Returns the current default value of this attribute in a Python-compatible format, None for no default."""
return self.python_value(self.default)
# ----------------------------------------------------------------------
def python_value(self, value):
"""Returns the value of this attribute in a Python-compatible format, None for no default."""
return value
# ----------------------------------------------------------------------
def value_for_test(self, value):
"""Returns the value of this attribute in a format suitable for test output, None for no default."""
return self.python_value(value)
# ----------------------------------------------------------------------
def generate_python_validation(self, out: IndentedOutput):
"""Adds any code required to validate the value of an attribute before a Python compute method is called"""
return
# ----------------------------------------------------------------------
def __usd_element(self, element, bare: bool = False) -> str:
"""Get the value of a single USD element as a string (e.g. float, int, string, token... no arrays or tuples)"""
# Strings inside arrays will already be quoted so only add quotes if the element is "bare" (i.e. by itself)
if isinstance(element, str):
return ensure_quoted(element) if bare else element.replace('"', '\\"')
if isinstance(element, bool):
return "true" if element else "false"
if isinstance(element, (int, float)):
return element
return element
# ----------------------------------------------------------------------
def usd_value(self, value):
"""Returns the current default value of this attribute in a USD-compatible format, None for no default."""
if value is None:
return None
if self.array_depth > 0:
if not isinstance(value, list):
raise ParseError(f"Expected list for USD array value on {self.name} - got {value}")
array_output = []
if self.tuple_count > 1:
for element in value:
if len(element) != self.tuple_count:
raise ParseError(f"USD value expected tuple[{self.tuple_count}] - got {element}")
array_output.append(tuple(self.__usd_element(child) for child in element))
else:
array_output = [self.__usd_element(child) for child in value]
return array_output
if self.tuple_count > 1:
return tuple(self.__usd_element(child) for child in value)
return self.__usd_element(value, bare=True)
# ----------------------------------------------------------------------
def usd_default_value(self):
"""Returns the current default value of this attribute in a USD-compatible format, None for no default."""
if self.default is None:
return ""
return f" = {self.usd_value(self.default)}"
# ----------------------------------------------------------------------
    def emit_usd_declaration(self, out) -> None:
        """Print a declaration for this attribute in USD
        Attributes without a USD representation (whose usd_name/usd_type_name raise ParseError) are skipped.
        Args:
            out: Output handler where the USD will be emitted
        """
        try:
            usd_name = self.usd_name()
            usd_type = self.usd_type_name()
        except ParseError:
            # Attributes without USD representations can be skipped
            return
        docs = to_usd_docs(self.description)
        default_value = self.usd_default_value()
        if out.indent(f"custom {usd_type} {usd_name}{default_value} ("):
            out.write(docs)
            out.exdent(")")
# ----------------------------------------------------------------------
def tuple_argument(self):
"""Return a string with the declaration of a tuple count argument for constructors"""
tuple_count_arg = "" if self.tuple_count < 2 else f", {self.tuple_count}"
return tuple_count_arg
# ----------------------------------------------------------------------
    def cpp_element_value(self, value) -> str:
        """Return a string with the simple Python 'value' translated to the C++ equivalent as an initializer.
        This method assumes the value is not a list, which should be handled in cpp_constructor_value or
        cpp_tuple_value.
        Args:
            value: Python value to convert - e.g. True -> "true"
        Raises:
            ParseError: The derived classes must override this method to provide a value of the correct format
        """
        # Lists must be rejected before the generic failure so the caller gets the more specific message
        if isinstance(value, list):
            raise ParseError(f"Initializer of non-array non-tuple should not be a list : {self.name} = {value}")
        raise ParseError(f"Attribute manager {self} failed to override cpp_element_value()")
# ----------------------------------------------------------------------
def cpp_tuple_value(self, value) -> str:
"""Return a string with the simple Python 'value' translated to the C++ equivalent as an initializer.
This method checks to make sure the list elements match the tuple count, if any.
Args:
value: Python value to convert - e.g. True -> "true"
Raises:
ParseError: The derived classes must override this method to provide a value of the correct format
"""
if self.tuple_count > 1:
if not isinstance(value, list):
raise ParseError(f"Tuple initialization expected list[{self.tuple_count}], got '{value}'")
if len(value) != self.tuple_count:
raise ParseError(f"Tuple count initializer expected list of size {self.tuple_count}, got `{value}`")
return f"{{{','.join([self.cpp_element_value(element) for element in value])}}}"
if isinstance(value, list):
raise ParseError(f"Initializer of non-array non-tuple should not be a list : {self.name} = {value}")
return self.cpp_element_value(value)
# ----------------------------------------------------------------------
def cpp_constructor_value(self, value) -> str:
"""Return a string with the Python 'value' translated to the C++ equivalent as an initializer
This recurses to handle arrays and tuples. For example an array of float[2] would look like this in Python:
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
and this in C++
{{1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f, 6.0f}}
Args:
value: Python value to convert - e.g. True -> "true", [] -> {}
Returns:
A string representing the given values in C++ form
"""
if value is None:
return None
if self.array_depth > 0:
# Empty array is treated the same as no default array
if not value:
return None
if not isinstance(value, list):
raise ParseError(f"Array initialization expected list, got '{value}'")
return f"{{{','.join([self.cpp_tuple_value(element) for element in value])}}}"
return self.cpp_tuple_value(value)
# ----------------------------------------------------------------------
    def cpp_default_initializer(self):
        """Returns the current default value of this attribute in a C++-compatible format, None for no default."""
        try:
            raw_value = self.cpp_constructor_value(self.default)
        except ParseError:
            # A default that cannot be expressed as a C++ initializer is treated as having no default
            return None
        array_length = len(self.default) if isinstance(self.default, list) else 0
        if self.array_depth > 0:
            # Empty or missing array defaults must still initialize to an empty array
            if raw_value is None or raw_value == "{}":
                raw_value = "nullptr"
                array_length = 0
            else:
                # Strip pointer decoration to get the element type for the std::array wrapper
                # NOTE(review): this emits .data() on a temporary std::array - confirm the generated C++
                # copies the contents before the temporary's lifetime ends
                array_member_type = self.fabric_default_data_typedef().replace("*", "")
                raw_value = f"std::array<{array_member_type}, {array_length}>{{{raw_value}}}.data()"
        if raw_value is None:
            return None
        # Array construction must include the number of elements in the array
        array_count_arg = f", {array_length}" if self.array_depth > 0 else ""
        return f"{raw_value}{array_count_arg}"
# ----------------------------------------------------------------------
def is_required_as_string(self):
"""Return the C++ version of true/false that tells if this attribute is required"""
return "true" if self.is_required else "false"
# ----------------------------------------------------------------------
def cpp_variable_name(self, use_namespace: bool = False) -> str:
"""Return the name of the attribute in a form suitable for use as a C++ variable name.
No attempt is made to remove the cases where an attribute has a keyword name. That is left
up to the calling code if necessary.
Args:
use_namespace: If True then include "inputs:" or "outputs:" as part of the name
"""
clean_name = self.name if use_namespace else self.base_name
clean_name = clean_name.replace(":", "_")
return clean_name
# ----------------------------------------------------------------------
def empty_value(self) -> Any:
"""Returns an empty value compatible with the current attribute type"""
if self.array_depth == 1:
return []
base_value = self.empty_base_value()
return [base_value] * self.tuple_count if self.tuple_count > 1 else base_value
# ----------------------------------------------------------------------
    def empty_base_value(self) -> Any:
        """Attribute types must define their single empty value.
        Raises:
            UnimplementedError: Always - the base class has no concept of a single empty value
        """
        raise UnimplementedError(f"Attribute {self.name} of type {self.attribute_type_name} has no empty value")
| 65,410 | Python | 50.142299 | 120 | 0.560679 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/attributes/QuaternionAttributeManager.py | """
Contains the support class for managing attributes whose data is arrays interpreted as quaternions
"""
from typing import Any, List, Union
from ..utils import ParseError
from .AttributeManager import CppConfiguration, CudaConfiguration
from .RoleAttributeManager import RoleAttributeManager
QUAT_OR_QUAT_LIST = Union[List[float], List[List[float]]]
class QuaternionAttributeManager(RoleAttributeManager):
    """Support class for all attributes of type quaternion

    This encompasses all legal USD types of quat(d|f|h).

    Note that for all quaternion values showing up in OGN the order of values will be (i, j, k, real), even though
    internally the Gf.Quat constructor uses (real, i, j, k). This means you have to be careful when extracting the
    quaternion values, using a cast rather than element-wise construction to maintain the proper ordering.
    """

    # Hardcoded type configuration using USD. Dictionary key is the OGN attribute type name (without arrays).
    CPP_CONFIGURATION = {
        "quatd[4]": CppConfiguration(
            "pxr::GfQuatd", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eQuaternion"
        ),
        "quatf[4]": CppConfiguration(
            "pxr::GfQuatf", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eQuaternion"
        ),
        "quath[4]": CppConfiguration(
            "pxr::GfQuath", include_files=["omni/graph/core/ogn/UsdTypes.h"], role="eQuaternion"
        ),
    }
    CUDA_CONFIGURATION = {
        # BUGFIX: this key was "quatd[3]", which could never match - quaternions always have exactly
        # 4 components (see tuples_supported), so the CUDA configuration lookup for quatd failed
        "quatd[4]": CudaConfiguration(
            "double4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eQuaternion"
        ),
        "quatf[4]": CudaConfiguration(
            "float4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eQuaternion"
        ),
        "quath[4]": CudaConfiguration(
            "__half4", include_files=["cuda_fp16.h", "omni/graph/core/cuda/CUDAUtils.h"], role="eQuaternion"
        ),
    }

    def __constructor_ordering(self, value: QUAT_OR_QUAT_LIST) -> QUAT_OR_QUAT_LIST:
        """Converts the ordering of a quaternion value so that the vector appears in constructor parameter ordering.

        Args:
            value: Quaternion or list of quaternions to convert, assumed to be in [i, j, k, r] order
                   Values are lists, not tuples, because that's all the .ogn passes in.

        Returns:
            Quaternion or list of quaternions in constructor ordering [r, i, j, k]

        Raises:
            ParseError if the value was not a legal quaternion value specification
        """
        if value is None:
            return None
        if not isinstance(value, list):
            raise ParseError(f"Value passed was not a legal quaternion construction value - '{value}'")
        # An empty list needs no conversion
        if not value:
            if self.array_depth == 0:
                raise ParseError("An empty list cannot be passed to a simple quaternion constructor")
            return []
        # Handle lists of lists
        if isinstance(value[0], list):
            real_value = []
            for item in value:
                if not isinstance(item, list) or len(item) != 4:
                    raise ParseError(f"Quaternion array member needs 4 members, got '{item}'")
                real_value.append([item[3], item[0], item[1], item[2]])
        elif len(value) != 4:
            raise ParseError(f"Quaternion value must have four elements, got '{value}'")
        else:
            real_value = [value[3], value[0], value[1], value[2]]
        return real_value

    def sample_values(self, for_usd: bool = False) -> Any:
        """A set of sample values of the attribute's type for testing - None if samples are not supported.

        Args:
            for_usd: If True return as the data type used to set the value in USD attributes, else return Python values
        """
        values = {
            "d": [(0.01625, 0.14125, 0.26625, 0.78), (0.14125, 0.26625, 0.39125, 0.51625)],
            "f": [(0.125, 0.25, 0.375, 0.5), (0.25, 0.375, 0.5, 0.625)],
            "h": [(0.0, 0.25, 0.5, 0.75), (0.125, 0.375, 0.625, 0.875)],
        }[self.suffix()]
        if for_usd:
            from pxr import Gf

            gf_quat_type = getattr(Gf, f"Quat{self.suffix()}")
            # The reordering ensures consistency in the order of parameters exposed to the interface to match the
            # order stored in memory rather than the order in the constructor.
            values = [gf_quat_type(real, i, j, k) for (i, j, k, real) in values]
        if self.array_depth > 0:
            values = [values, [values[1], values[0]]]
        return [[value] for value in values] if for_usd else values

    @staticmethod
    def roles():
        """Return a list of valid role names for this type"""
        return ["quatd", "quatf", "quath"]

    def cpp_constructor_value(self, value: QUAT_OR_QUAT_LIST) -> str:
        """The Gf.Quat classes construct in a different order (r, i, j, k) than the user-facing order (i, j, k, r) so
        generate the initializer in the rearranged order.

        Args:
            value: Python value to convert

        Returns:
            A string representing the given values in C++ form
        """
        # Non-USD (CUDA) types store the components in memory order, so no reordering is needed
        if not self.cpp_configuration().base_type_name.startswith("pxr"):
            return super().cpp_constructor_value(value)
        return super().cpp_constructor_value(self.__constructor_ordering(value))

    def python_role_name(self) -> str:
        """Returns a string with the Python role name for this attribute"""
        return "og.Database.ROLE_QUATERNION"

    @staticmethod
    def tuples_supported() -> List[int]:
        """This type can only have 4 members"""
        return [4]

    def sdf_base_type(self) -> str:
        """Returns a string with the base type of the pxr.Sdf.ValueTypeName of the attribute data"""
        return f"Quat{self.suffix()}"

    def usd_type_name(self) -> str:
        """Returns a string with the data type the attribute would use in a USD file"""
        return self.usd_add_arrays(f"quat{self.suffix()}")

    def usd_value(self, value):
        """Reorder the value to be in Gf.Quat construction order before the standard USD conversion."""
        return super().usd_value(self.__constructor_ordering(value))
| 6,290 | Python | 41.506756 | 119 | 0.618601 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/scripts/debugging.py | import carb
carb.log_error(
"""Import of omni.graph.tools.scripts.debugging is deprecated. Use this import sequence instead:
import omni.graph.tools as ogt
ogt.debugging_function()"""
)
| 199 | Python | 23.999997 | 100 | 0.728643 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_node_generator.py | """
Contains support for testing the ../generate_node.py script.
Comprehensive data type testing is in TestNodeGeneratorDataTypes.py
The framework is set up so that the tests can be run synchronously through main.py, or
asynchronously through the Kit testing framework.
TODO: Tests for capabilities to be added
- Nodes with multiline descriptions
- Attributes with multiline descriptions
- Include single or double quotes in descriptions
"""
import json
from pathlib import Path
import omni.graph.tools.ogn as ogn
import omni.kit.test
from omni.graph.tools._impl.internal.extension_contents_1_18 import ExtensionContentsV118
class TestNodeGenerator(omni.kit.test.AsyncTestCase):
"""Unit test class for this script"""
attr_index = 0
# ======================================================================
def create_attribute(self, attribute_type: str, attribute_default):
"""
Create an attribute JSON structure and text with a generic description
:param attribute_type: Attribute type
:param attribute_default: Default value of the attribute
:return: (Attribute Name, Attribute Info as JSON, Attribute as string) output for the attribute definition.
Both versions are returned as the string attribute omits the enclosing curly braces so that
attribute definitions can more easily be embedded in the JSON attribute lists
"""
attribute_name = f"attr{TestNodeGenerator.attr_index}"
TestNodeGenerator.attr_index += 1
attribute_as_json = {
f"{ogn.AttributeKeys.DESCRIPTION}": f"This is attribute {attribute_name}",
f"{ogn.AttributeKeys.TYPE}": attribute_type,
f"{ogn.AttributeKeys.DEFAULT}": attribute_default,
}
attribute_as_string = f'"{attribute_name}" : {json.dumps(attribute_as_json)}'
return attribute_name, attribute_as_json, attribute_as_string
# ======================================================================
def validate_node(self, node_wrapper: ogn.NodeInterfaceWrapper, node_name: str) -> ogn.NodeInterface:
"""
Confirms that a named node exists and is a valid node in the interface wrapper.
:param node_wrapper: ogn.NodeInterfaceWrapper class generated from the node description
:param node_name: Name of the node to check
:return: ogn.NodeInterface object for the named node
"""
self.assertEqual(node_wrapper.node_interface.name, node_name, f"Expected node {node_name} not listed")
return node_wrapper.node_interface
# ======================================================================
def validate_node_description(self, node_interface: ogn.NodeInterface, expected_description: str):
"""
Validate that the node interface has parsed a description with the expected name
:param node_interface: ogn.NodeInterface class of the node being checked
:param expected_description: Description string the node is expected to have
"""
self.assertEqual(
node_interface.description, expected_description, f"Description for node {node_interface.name} not correct"
)
# ======================================================================
    def test_attribute_defaults(self):
        """Test for successful parsing of defaults values for optional and array attributes"""
        # Test data consisting of values for array, optional, and default values.
        # When a value is "None" the property is omitted. Attribute is assumed to be an integer type.
        array_combinations = [
            [1, True, [1]],
            [1, True, None],
            [1, False, [1]],
            [1, None, [1]],
            [0, True, 1],
            [0, True, None],
            [0, False, 1],
            [0, None, 1],
        ]
        for array_value, optional_value, default_value in array_combinations:
            # array_value of 1 makes the attribute an "int[]", 0 a plain "int"
            attr_type = f"int{'[]' * array_value}"
            if optional_value is None:
                optional_property = ""
            elif optional_value:
                optional_property = '"optional": true,'
            else:
                optional_property = '"optional": false,'
            if default_value is None:
                default_property = ""
            else:
                default_property = f'"default": {default_value},'
            arrays_description = f"""{{
                "Arrays" : {{
                    "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with optional or array types",
                    "{ogn.NodeTypeKeys.INPUTS}" : {{
                        "a": {{
                            "description": "This is an a",
                            {optional_property}
                            {default_property}
                            "type": "{attr_type}"
                        }}
                    }}
                }}
            }}"""
            try:
                arrays = ogn.NodeInterfaceWrapper(arrays_description, "test")
                node_interface = self.validate_node(arrays, "test.Arrays")
                self.validate_node_description(node_interface, "This is a node with optional or array types")
            except Exception as error:  # pylint: disable=broad-except
                raise Exception(f"Failed parsing node description {arrays_description}") from error
# ======================================================================
def all_role_tuple_combinations(self):
"""Return a list of all tuple combinations allowed for all of the roles.
If a role supports an arbitrary list of tuples then choose "2" as a representative example.
Returns:
List of (role, tupleList)
role: Name of the attribute role
tupleList: List of all tuples the role supports
"""
all_roles = []
for attribute_type, attribute_manager in ogn.ALL_ATTRIBUTE_TYPES.items():
if getattr(attribute_manager, "roles", None) is not None:
tuples_supported = attribute_manager.tuples_supported()
if tuples_supported is None:
all_roles.append([attribute_type, 2])
else:
for tuple_count in tuples_supported:
all_roles.append([attribute_type, tuple_count])
return all_roles
    # ======================================================================
    def test_minmax_defaults(self):
        """Test the set of minimum and maximum value combinations that are allowed"""
        # Test data consisting of values for attribute type, minimum, maximum, and default values.
        # When a value is "None" the property is omitted.
        minmax_combinations = []
        # Add range tests for every type supporting min/max, arrays of them, and pairs of them.
        # Tuple types take per-component limits; array-of-tuple defaults nest one level deeper.
        for type_with_minmax in ["double", "float", "half"]:
            minmax_combinations.append([type_with_minmax, 1.0, 3.0, 2.0])
            minmax_combinations.append([type_with_minmax, 1.0, None, 2.0])
            minmax_combinations.append([type_with_minmax, None, 3.0, 2.0])
            minmax_combinations.append([f"{type_with_minmax}[]", 1.0, 3.0, [2.0, 2.0]])
            minmax_combinations.append([f"{type_with_minmax}[]", 1.0, None, [2.0, 2.0]])
            minmax_combinations.append([f"{type_with_minmax}[]", None, 3.0, [2.0, 2.0]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [1.0, 5.0], [3.0, 7.0], [2.0, 6.0]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [1.0, 5.0], None, [2.0, 6.0]])
            minmax_combinations.append([f"{type_with_minmax}[2]", None, [3.0, 7.0], [2.0, 6.0]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [1.0, 5.0], [3.0, 7.0], [[2.0, 6.0]]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [1.0, 5.0], None, [[2.0, 6.0]]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", None, [3.0, 7.0], [[2.0, 6.0]]])
        for type_with_minmax in ["int", "int64"]:
            minmax_combinations.append([type_with_minmax, 1, 3, 2])
            minmax_combinations.append([type_with_minmax, 1, None, 2])
            minmax_combinations.append([type_with_minmax, None, 3, 2])
            minmax_combinations.append([f"{type_with_minmax}[]", 1, 3, [2, 2]])
            minmax_combinations.append([f"{type_with_minmax}[]", 1, None, [2, 2]])
            minmax_combinations.append([f"{type_with_minmax}[]", None, 3, [2, 2]])
            # Tuple variations are only added for "int" -- presumably there is no int64 tuple
            # type; confirm against the attribute type definitions
            if type_with_minmax == "int":
                minmax_combinations.append([f"{type_with_minmax}[2]", [1, 5], [3, 7], [2, 6]])
                minmax_combinations.append([f"{type_with_minmax}[2]", [1, 5], None, [2, 6]])
                minmax_combinations.append([f"{type_with_minmax}[2]", None, [3, 7], [2, 6]])
                minmax_combinations.append([f"{type_with_minmax}[2][]", [1, 5], [3, 7], [[2, 6]]])
                minmax_combinations.append([f"{type_with_minmax}[2][]", [1, 5], None, [[2, 6]]])
                minmax_combinations.append([f"{type_with_minmax}[2][]", None, [3, 7], [[2, 6]]])
        # Role-based types (color, point, quat, ...) get one min/max test per supported tuple count
        for role_type, tuple_count in self.all_role_tuple_combinations():
            if role_type == "execution":
                # execution can only take on the values of the enum ExecutionAttributeState
                min_value = 0
                max_value = 2
                actual_value = 1
                minmax_combinations.append([f"{role_type}", min_value, max_value, actual_value])
            else:
                # Matrix types use only one dimension for their tuple count
                if role_type in ["matrixd", "matrixf", "matrixh", "frame", "transform"]:
                    min_value = []
                    max_value = []
                    actual_value = []
                    for _i in range(tuple_count):
                        min_value.append([1] * tuple_count)
                        max_value.append([5] * tuple_count)
                        actual_value.append([3] * tuple_count)
                else:
                    min_value = [1] * tuple_count
                    max_value = [5] * tuple_count
                    actual_value = [3] * tuple_count
                minmax_combinations.append([f"{role_type}[{tuple_count}]", min_value, max_value, actual_value])
                minmax_combinations.append([f"{role_type}[{tuple_count}][]", min_value, max_value, [actual_value]])
        # Build a one-attribute node description for each combination, omitting any property
        # whose value is None, and confirm that it parses successfully
        for attribute_type, minimum_value, maximum_value, default_value in minmax_combinations:
            if minimum_value is None:
                minimum_property = ""
            else:
                minimum_property = f'"minimum": {minimum_value},'
            if maximum_value is None:
                maximum_property = ""
            else:
                maximum_property = f'"maximum": {maximum_value},'
            if default_value is None:
                default_property = ""
            else:
                default_property = f'"default": {default_value},'
            minmax_description = f"""{{
                "MinMax" : {{
                    "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with all valid min/max combinations",
                    "{ogn.NodeTypeKeys.INPUTS}" : {{
                        "a": {{
                            "description": "This is an a",
                            {minimum_property}
                            {maximum_property}
                            {default_property}
                            "type": "{attribute_type}"
                        }}
                    }}
                }}
            }}"""
            try:
                minmax = ogn.NodeInterfaceWrapper(minmax_description, "test")
                node_interface = self.validate_node(minmax, "test.MinMax")
                self.validate_node_description(node_interface, "This is a node with all valid min/max combinations")
            except Exception as error:  # pylint: disable=broad-except
                # Re-raise with the full description so the failing combination is identifiable
                raise Exception(minmax_description) from error
    # ======================================================================
    def test_comments(self):
        """Test for successful parsing of a trivial node description with comment fields"""
        # Keys beginning with "$" are comment fields; one of every JSON value type is included
        # to confirm they are all accepted and ignored
        comment_string = """
            "$arrayComment": [1, 2, 3],
            "$boolComment": true,
            "$numberComment": 2,
            "$objectComment": { "a" : 1 },
            "$stringComment": "Ignore me"
        """
        # The comment block is injected at the top level, in the node body, and in both
        # (otherwise empty) attribute sections
        comment_node_description = f"""{{
            "CommentNode" : {{
                "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a commented node",
                {comment_string},
                "{ogn.NodeTypeKeys.INPUTS}" : {{
                    {comment_string}
                }},
                "{ogn.NodeTypeKeys.OUTPUTS}" : {{
                    {comment_string}
                }}
            }},
            {comment_string}
        }}"""
        comment_node = ogn.NodeInterfaceWrapper(comment_node_description, "test")
        node_interface = self.validate_node(comment_node, "test.CommentNode")
        self.validate_node_description(node_interface, "This is a commented node")
    # ======================================================================
    def test_simple_nodes(self):
        """Test for successful parsing of some simple node descriptions"""
        # A node is valid with nothing more than a description
        empty_node_description = f"""{{
            "EmptyNode" : {{
                "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is an empty node"
            }}
        }}"""
        empty_node = ogn.NodeInterfaceWrapper(empty_node_description, "test")
        node_interface = self.validate_node(empty_node, "test.EmptyNode")
        self.validate_node_description(node_interface, "This is an empty node")
        # ----------------------------------------
        # Empty input/output sections are also legal and yield empty attribute lists
        empty_attributes_description = f"""{{
            "EmptyAttributes" : {{
                "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with no attributes",
                "{ogn.NodeTypeKeys.INPUTS}" : {{}},
                "{ogn.NodeTypeKeys.OUTPUTS}" : {{}}
            }}
        }}"""
        empty_attributes = ogn.NodeInterfaceWrapper(empty_attributes_description, "test")
        node_interface = self.validate_node(empty_attributes, "test.EmptyAttributes")
        self.validate_node_description(node_interface, "This is a node with no attributes")
        self.assertEqual(node_interface.all_input_attributes(), [])
        self.assertEqual(node_interface.all_output_attributes(), [])
    # ======================================================================
    def test_metadata(self):
        """Test for successful parsing of some node type metadata"""
        # Metadata comes from the explicit metadata dictionary plus the uiName and tags shortcuts
        metadata_node_description = f"""{{
            "MetadataNode" : {{
                "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is node with metadata",
                "{ogn.NodeTypeKeys.METADATA}" : {{ "__testKey__" : "__testValue__", "hidden": true }},
                "{ogn.NodeTypeKeys.UI_NAME}": "Metadata Node",
                "{ogn.NodeTypeKeys.TAGS}": ["first", "second"]
            }}
        }}"""
        metadata_node = ogn.NodeInterfaceWrapper(metadata_node_description, "test")
        node_interface = self.validate_node(metadata_node, "test.MetadataNode")
        self.validate_node_description(node_interface, "This is node with metadata")
        self.assertEqual(node_interface.metadata["__testKey__"], "__testValue__")
        # Non-string metadata values are stored in string form: booleans as "True"/"False"...
        self.assertEqual(node_interface.metadata["hidden"], "True")
        self.assertEqual(node_interface.metadata[ogn.MetadataKeys.UI_NAME], "Metadata Node")
        # ...and lists as comma-separated strings
        self.assertEqual(node_interface.metadata[ogn.MetadataKeys.TAGS], "first,second")
# ======================================================================
def test_memory_type(self):
"""Test for successful parsing of some node memory type and cuda pointer value"""
memory_node_description = f"""{{
"MemoryTypeNode" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is node with CUDA CPU pointer types",
"{ogn.NodeTypeKeys.MEMORY_TYPE}": "{ogn.MemoryTypeValues.CUDA}",
"{ogn.NodeTypeKeys.CUDA_POINTERS}": "{ogn.CudaPointerValues.CPU}"
}}
}}"""
metadata_node = ogn.NodeInterfaceWrapper(memory_node_description, "test")
node_interface = self.validate_node(metadata_node, "test.MemoryTypeNode")
self.validate_node_description(node_interface, "This is node with CUDA CPU pointer types")
# ======================================================================
def test_icon_simple(self):
"""Test for successful parsing of the simplified form of the icon description"""
simple_icon_node_description = f"""{{
"SimpleIconNode" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is node with a simplified icon description",
"{ogn.NodeTypeKeys.VERSION}" : 1,
"{ogn.NodeTypeKeys.ICON}": "icons/SimpleIcon.svg"
}}
}}"""
metadata_node = ogn.NodeInterfaceWrapper(simple_icon_node_description, "test")
node_interface = self.validate_node(metadata_node, "test.SimpleIconNode")
self.validate_node_description(node_interface, "This is node with a simplified icon description")
self.assertEqual(node_interface.icon_path, "icons/SimpleIcon.svg")
with self.assertRaises(KeyError):
_ = node_interface.metadata[ogn.MetadataKeys.ICON_COLOR]
with self.assertRaises(KeyError):
_ = node_interface.metadata[ogn.MetadataKeys.ICON_BACKGROUND_COLOR]
with self.assertRaises(KeyError):
_ = node_interface.metadata[ogn.MetadataKeys.ICON_BORDER_COLOR]
    # ======================================================================
    def test_icon_detailed(self):
        """Test for successful parsing of the detailed form of the icon description"""
        # The detailed form is a dictionary with a path plus optional color overrides; colors
        # may be given either as hex strings or as component lists
        detailed_icon_node_description = f"""{{
            "DetailedIconNode" : {{
                "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is node with a detailed icon description",
                "{ogn.NodeTypeKeys.VERSION}" : 1,
                "{ogn.NodeTypeKeys.ICON}": {{
                    "{ogn.IconKeys.PATH}": "icons/DetailedIcon.svg",
                    "{ogn.IconKeys.COLOR}": "#FFAABBCC",
                    "{ogn.IconKeys.BACKGROUND_COLOR}": [0, 1, 2, 133],
                    "{ogn.IconKeys.BORDER_COLOR}": "#DDCCBBAA"
                }}
            }}
        }}"""
        metadata_node = ogn.NodeInterfaceWrapper(detailed_icon_node_description, "test")
        node_interface = self.validate_node(metadata_node, "test.DetailedIconNode")
        self.validate_node_description(node_interface, "This is node with a detailed icon description")
        self.assertEqual(node_interface.icon_path, "icons/DetailedIcon.svg")
        self.assertEqual("#FFAABBCC", node_interface.metadata[ogn.MetadataKeys.ICON_COLOR])
        # Component-list colors are normalized into packed hex strings --
        # here [0, 1, 2, 133] becomes "#85020100"
        self.assertEqual("#85020100", node_interface.metadata[ogn.MetadataKeys.ICON_BACKGROUND_COLOR])
        self.assertEqual("#DDCCBBAA", node_interface.metadata[ogn.MetadataKeys.ICON_BORDER_COLOR])
    # ======================================================================
    def test_tests(self):
        """Test for successful parsing of combinations of values in the 'tests' property"""
        # Shared node prefix; each configuration below is appended as the "tests" property
        tests_node_format = """
        {
            "Add" : {
                "description" : "Add the two inputs to create the output",
                "inputs": {
                    "input1" : {
                        "description": "First input",
                        "type": "float",
                        "default": 0.0
                    },
                    "input2" : {
                        "description": "Second input",
                        "type": "float[]",
                        "default": [0.0]
                    },
                    "x2": {
                        "description": "Double the first input",
                        "type": "bool",
                        "optional": true
                    }
                },
                "outputs": {
                    "output": {
                        "description": "Sum of the two inputs",
                        "type": "float[]"
                    }
                },
        """
        # Configurations exercised, in order:
        #   - a single test with explicit outputs
        #   - a test that also sets the optional input
        #   - two tests in one entry (the adjacent string literals concatenate into two
        #     comma-separated test dictionaries)
        #   - the verbose form where values are grouped into inputs/outputs sub-dictionaries
        test_configurations = [
            '{"inputs:input1": 3.0, "inputs:input2": [1.0, 2.0], "outputs:output": [4.0, 5.0]}',
            '{"inputs:input1": 3.0, "inputs:input2": [1.0, 2.0], "inputs:x2": false, "outputs:output": [4.0, 5.0]}',
            '{"inputs:input1": 3.0, "inputs:input2": [1.0, 2.0], "outputs:output": [4.0, 5.0]},'
            '{"inputs:input1": 3.0, "inputs:input2": [1.0, 2.0], "outputs:output": [4.0, 5.0]}',
            '{"description": "More verbose formatting", "inputs": { "input1": 3.0, "input2": [1.0, 2.0] },'
            '"outputs": {"output": [4.0, 5.0]}}',
        ]
        for test_configuration in test_configurations:
            tests_node_description = tests_node_format + f'"tests": [ {test_configuration}] }} }}'
            test_node = ogn.NodeInterfaceWrapper(tests_node_description, "test")
            _ = self.validate_node(test_node, "test.Add")
    # ======================================================================
    def test_comment_formatting(self):
        """Test for correct operation of the ogn.to_cpp_comments utility"""
        # List of test configuration pairs of [Input, [ExpectedOutputNoIndent, ExpectedOutputIndent4]]
        test_data = [
            ["abc", ["// abc", "    // abc"]],
            ["\nabc\n\n", ["//\n// abc\n//", "    //\n    // abc\n    //"]],
            ["abc\n    def\n    ghi", ["// abc\n//     def\n//     ghi", "    // abc\n    //     def\n    //     ghi"]],
        ]
        for test_input, test_outputs in test_data:
            self.assertEqual(test_outputs[0], ogn.to_cpp_comment(test_input))
            self.assertEqual(test_outputs[1], ogn.to_cpp_comment(test_input, indent_level=1))
        # JSON encodes multiline strings as a list of strings per line. Test that as well.
        # List is pairs of ([InputLines], [ExpectedOutputNoIndent, ExpectedOutputIndent4])
        # The outputs of these tests should match the ones in the list above
        test_list_data = [
            [["abc"], ["// abc", "    // abc"]],
            [["\n", "abc\n", "\n"], ["//\n// abc\n//", "    //\n    // abc\n    //"]],
            [
                ["abc\n", "    def\n", "    ghi\n"],
                ["// abc\n//     def\n//     ghi", "    // abc\n    //     def\n    //     ghi"],
            ],
        ]
        for test_inputs, test_outputs in test_list_data:
            # The per-line list is joined back into a single string before conversion
            test_input = "".join(test_inputs)
            self.assertEqual(test_outputs[0], ogn.to_cpp_comment(test_input))
            self.assertEqual(test_outputs[1], ogn.to_cpp_comment(test_input, indent_level=1))
    # ======================================================================
    def test_immediate(self):
        """Test for code generation directly from a dictionary to a string"""
        # "//" below is a placeholder that gets replaced by this file's directory to make the
        # icon path absolute (see comment further down)
        immediate_node_format = """
        {
            "Add" : {
                "description": "Add the two inputs to create the output",
                "version": 1,
                "icon": "///AddIcon.svg",
                "inputs": {
                    "input1" : {
                        "description": "First input",
                        "type": "float",
                        "default": 0.0
                    },
                    "input2" : {
                        "description": "Second input",
                        "type": "float",
                        "default": 0.0
                    }
                },
                "outputs": {
                    "output": {
                        "description": "Sum of the two inputs",
                        "type": "float"
                    }
                },
                "tests": [
                    { "inputs:input1": 1.0, "inputs:input2": 2.0, "outputs:output": 3.0 }
                ]
            }
        }
        """
        # The icon must be an absolute path when generating directly from code as relative paths are assumed to
        # be relative to the location of the .ogn file, which doesn't exist in this case.
        this_dir = Path(__file__).parent
        # "//" only occurs in the icon value above, so this substitution is safe
        immediate_node_format = immediate_node_format.replace("//", this_dir.as_posix())
        results = ogn.code_generation(immediate_node_format, "OgnTest", "ogn.test", "ogn.test")
        # Every expected generation target must be present in the results dictionary
        self.assertCountEqual(
            ["cpp", "docs", "icon", "python", "template", "tests", "usd", "node"], list(results.keys())
        )
        # It would be too tedious to continually adjust this tests every time any tiny little bit of code generation
        # changes so instead just check for some invariants.
        self.assertTrue(results["cpp"].find("class OgnTestDatabase") > 0)
        self.assertTrue(results["docs"].find(":orphan:") > 0)
        self.assertTrue("AddIcon.svg" in results["icon"])
        self.assertTrue(results["python"].find("class OgnTestDatabase(og.Database):") > 0)
        self.assertTrue(results["template"].find("compute(OgnTestDatabase& db)") > 0)
        self.assertTrue(results["tests"].find("async def test_data_access(self):") > 0)
        self.assertTrue(results["usd"].find('def OmniGraphNode "Template_ogn_test_Add"') > 0)
    # ======================================================================
    def test_scheduling(self):
        """Test for legal combinations of scheduling flags"""
        # Braces are doubled except for the scheduling value, which is substituted per iteration
        scheduling_format = """
        {{
            "ScheduleMe" : {{
                "description": "Schedule this node as it requests",
                "version": 1,
                "scheduling": {}
            }}
        }}
        """
        # legal_configurations() yields (scheduling value, expected parsed hints) pairs; each
        # configuration must round-trip through code generation without differences
        for scheduling_configuration, expected in ogn.SchedulingHints.legal_configurations():
            scheduling_description = scheduling_format.format(scheduling_configuration)
            results = ogn.code_generation(scheduling_description, "OgnTest", "ogn.test", "ogn.test")
            difference = expected.compare(results["node"].scheduling_hints)
            self.assertEqual([], difference, f"Error with configuration {scheduling_configuration}")
    # ==============================================================================================================
    def test_directory_scanning(self):
        """Tests the hardcoded assumptions made about how the directory tree is structured"""
        # Point the scanner at the ogn package itself rather than a real extension --
        # presumably any well-formed package directory satisfies the layout assumptions.
        # NOTE(review): ExtensionContentsV118 is defined elsewhere in this file; confirm its
        # constructor signature (name, module, module file path) before modifying this.
        fake_extension = ExtensionContentsV118("not_an_extension", ogn, ogn.__file__)
        self.assertIsNotNone(fake_extension.config_dir)
        self.assertTrue(fake_extension.categories)
| 26,841 | Python | 51.838583 | 119 | 0.525763 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/deprecated_import.py | """File used only for testing of reporting of deprecated module import"""
import omni.graph.tools as ogt
ogt.DeprecatedImport("Do Not Import")
| 144 | Python | 27.999994 | 73 | 0.777778 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_node_generator_data_types.py | """
Contains support for testing the ../generate_node.py script for processing different data types.
Basic testing is in TestNodeGenerator.py.
The framework is set up so that the tests can be run synchronously through main.py, or
asynchronously through the Kit testing framework.
"""
import json
import unittest
import omni.graph.tools.ogn as ogn
# Detect the testing environment and use the Kit Async testing framework if it is available
try:
import omni.kit.test
TestBaseClass = omni.kit.test.AsyncTestCase
except ImportError:
TestBaseClass = unittest.TestCase
class TestNodeGeneratorDataTypes(TestBaseClass):
"""Unit test class for this script"""
attr_index = 0
# ======================================================================
def create_attribute(self, attribute_type: str, attribute_default):
"""
Create an attribute JSON structure and text with a generic description
:param attribute_type: Attribute type
:param attribute_default: Default value of the attribute
:return: (Attribute Name, Attribute Info as JSON, Attribute as string) output for the attribute definition.
Both versions are returned as the string attribute omits the enclosing curly braces so that
attribute definitions can more easily be embedded in the JSON attribute lists
"""
attribute_name = f"attr{TestNodeGeneratorDataTypes.attr_index}"
TestNodeGeneratorDataTypes.attr_index += 1
attribute_as_json = {
f"{ogn.AttributeKeys.DESCRIPTION}": f"This is attribute {attribute_name}",
f"{ogn.AttributeKeys.TYPE}": attribute_type,
f"{ogn.AttributeKeys.DEFAULT}": attribute_default,
}
attribute_as_string = f'"{attribute_name}" : {json.dumps(attribute_as_json)}'
return attribute_name, attribute_as_json, attribute_as_string
# ======================================================================
def validate_node(self, node_wrapper: ogn.NodeInterfaceWrapper, node_name: str):
"""
Confirms that a named node exists and is a valid node in the interface wrapper.
:param node_wrapper: ogn.NodeInterfaceWrapper class generated from the node description
:param node_name: Name of the node to check
:return: ogn.NodeInterface object for the named node
"""
self.assertEqual(node_wrapper.node_interface.name, node_name, f"Expected node {node_name} not listed")
return node_wrapper.node_interface
# ======================================================================
def validate_node_description(self, node_interface: ogn.NodeInterface, expected_description: str):
"""
Validate that the node interface has parsed a description with the expected name
:param node_interface: ogn.NodeInterface class of the node being checked
:param expected_description: Description string the node is expected to have
"""
self.assertEqual(
node_interface.description, expected_description, f"Description for node {node_interface.name} not correct"
)
    # ======================================================================
    async def test_min_max(self):
        """Test for successful handling of attributes with minimum/maximum values"""
        # Test configurations consist of (attribute type, min value, max value, default value, should succeed?)
        # Note from the rows below: a "-inf" minimum or "inf" maximum acts as a no-op bound and is
        # legal, the reversed forms are not, and "nan" limits are only legal when minimum, maximum,
        # and default are all "nan"
        min_max_configurations = [
            ["bool", "true", "false", "true", False],
            ["double", 0.0, 5.0, 3.0, True],
            ["double", 0.0, 5.0, 6.0, False],
            ["float", 0.0, 5.0, 3.0, True],
            ["float", 0.0, 5.0, 6.0, False],
            ["half", 10.0, 1.0, 5.0, False],
            ["half", 1.0, 10.0, 5.0, True],
            ["int", 1, 10, 5, True],
            ["int", 10, 1, 5, False],
            ["int64", 10, 1, 5, False],
            ["int64", 1, 10, 5, True],
            ["uchar", 1, 10, 5, True],
            ["uchar", 10, 1, 5, False],
            ["uint", 1, 10, 5, True],
            ["uint", 10, 1, 5, False],
            ["uint64", 10, 1, 5, False],
            ["uint64", 1, 10, 5, True],
            ["double", '"inf"', None, 5, False],
            ["double", '"inf"', '"inf"', 5, False],
            ["double", '"inf"', '"-inf"', 5, False],
            ["double", '"inf"', '"nan"', 5, False],
            ["double", '"-inf"', '"inf"', 5, True],
            ["double", '"-inf"', '"-inf"', 5, False],
            ["double", '"-inf"', '"nan"', 5, False],
            ["double", '"-inf"', None, 5, True],
            ["double", '"nan"', None, 5, False],
            ["double", '"nan"', '"inf"', 5, False],
            ["double", '"nan"', '"-inf"', 5, False],
            ["double", '"nan"', '"nan"', 5, False],
            ["double", None, None, 5, True],
            ["double", None, '"inf"', 5, True],
            ["double", None, '"-inf"', 5, False],
            ["double", None, '"nan"', 5, False],
            ["double", '"inf"', '"inf"', '"inf"', True],
            ["double", '"-inf"', '"-inf"', '"-inf"', True],
            ["double", '"nan"', '"nan"', '"nan"', True],
            ["double[2]", '["inf", "inf"]', '["inf", "inf"]', '["inf", "inf"]', True],
            ["double[2]", '["-inf", "-inf"]', '["inf", "inf"]', [5, 6], True],
            ["double[2]", '["-inf", "-inf"]', None, [100, 200], True],
            ["double[2]", '["-inf", "-inf"]', ["inf", 6], [10, 10], False],
        ]
        # Build a one-attribute node per row, omitting any property whose value is None, and
        # check that parsing succeeds or raises ParseError as the row dictates
        for attribute_type, min_value, max_value, default_value, should_succeed in min_max_configurations:
            min_text = f'"minimum": {min_value},' if min_value is not None else ""
            max_text = f'"maximum": {max_value},' if max_value is not None else ""
            min_max_description = f"""{{
                "MinMaxNode" : {{
                    "{ogn.NodeTypeKeys.DESCRIPTION}" : "This node has attributes with min/max values",
                    "{ogn.NodeTypeKeys.INPUTS}" : {{
                        "a" : {{
                            "description": "This is an a",
                            "type": "{attribute_type}",
                            {min_text}{max_text}
                            "default": {default_value}
                        }}
                    }}
                }}
            }}"""
            if should_succeed:
                min_max_node = ogn.NodeInterfaceWrapper(min_max_description, "test")
                _ = self.validate_node(min_max_node, "test.MinMaxNode")
            else:
                with self.assertRaises(ogn.ParseError, msg=f"Parsing node with min/max values {min_max_description}"):
                    ogn.NodeInterfaceWrapper(min_max_description, "test")
        # Repeat the same tests, reinterpreting the type and values as arrays
        # (the type gains a trailing "[]" and the default is wrapped in one more list level)
        for (attribute_type, min_value, max_value, default_value, should_succeed) in min_max_configurations:
            min_text = f'"minimum": {min_value},' if min_value is not None else ""
            max_text = f'"maximum": {max_value},' if max_value is not None else ""
            min_max_description = f"""{{
                "MinMaxNode" : {{
                    "{ogn.NodeTypeKeys.DESCRIPTION}" : "This node has attributes with min/max values",
                    "{ogn.NodeTypeKeys.INPUTS}" : {{
                        "a" : {{
                            "description": "This is an a",
                            "type": "{attribute_type}[]",
                            {min_text}{max_text}
                            "default": [{default_value}]
                        }}
                    }}
                }}
            }}"""
            if should_succeed:
                min_max_node = ogn.NodeInterfaceWrapper(min_max_description, "test")
                _ = self.validate_node(min_max_node, "test.MinMaxNode")
            else:
                with self.assertRaises(ogn.ParseError, msg=f"Parsing node with min/max values {min_max_description}"):
                    ogn.NodeInterfaceWrapper(min_max_description, "test")
    # ======================================================================
    async def test_defaults(self):
        """Test for setting defaults on various types of attributes"""
        # ----------------------------------------
        # Values are [Attribute Type, Default for normal value, Python version of the default]
        defaults = [["bool", "true", True], ["double", 1.4, 1.4], ["float", 1.5, 1.5], ["int", 1, 1]]
        input_name = "input1"
        for attribute_type, default, python_default in defaults:
            array_spec = ["", ', "array": true']  # Test with and without array enabled
            array_info = ["", "array of "]
            array_defaults = [default, [default]]
            array_python_defaults = [python_default, [python_default]]
            # NOTE(review): range(0, 1) only yields [0], so the array variant (index 1) set up
            # above is never exercised -- probably intended to be range(2). Confirm that the
            # '"array": true' spelling is still supported before enabling it.
            for array_type in range(0, 1):
                node_info = f"a legal input default of type {array_info[array_type]}{attribute_type}"
                node_with_defaults_description = f"""{{
                    "NodeWithDefaults" : {{
                        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with {node_info}",
                        "{ogn.NodeTypeKeys.INPUTS}" : {{
                            "{input_name}": {{
                                "{ogn.AttributeKeys.DESCRIPTION}": "This is the defaulted input",
                                "{ogn.AttributeKeys.TYPE}": "{attribute_type}",
                                "{ogn.AttributeKeys.DEFAULT}": {array_defaults[array_type]}{array_spec[array_type]}
                            }}
                        }}
                    }}
                }}"""
                defaulted_node = ogn.NodeInterfaceWrapper(node_with_defaults_description, "test")
                node_interface = self.validate_node(defaulted_node, "test.NodeWithDefaults")
                (input_attribute, _) = node_interface.attribute_by_name(input_name)
                self.assertEqual(input_attribute.default, array_python_defaults[array_type])
        # ----------------------------------------
        # Values are [Attribute Type, Illegal default for normal value, Illegal default for array value]
        illegal_defaults = [
            ["bool", ["true"], "true"],
            ["double", [1.4], 1.4],
            ["float", [1.5], 1.5],
            ["int", [1], 1],
            ["bool", 1, [1]],
            ["double", "true", ["true"]],
            ["float", '"hello"', ['"world"']],
            ["int", 1.6, [1.6]],
            ["half", 65505.0, [-65505.0]],
            ["int", 3000000000, [-3000000000]],
            ["int64", 10000000000000000000000, [-10000000000000000000000]],
            ["uchar", 257, [-1]],
            ["uint", 3000000000, [-1]],
            ["uint64", 10000000000000000000000, [-1]],
            ["string", 2, [1.2]],
            ["token", 2, [1.2]],
        ]
        for attribute_type, illegal_default, illegal_array_default in illegal_defaults:
            array_spec = ["", '"array": true']  # Test with and without array enabled
            array_info = ["", "array of "]
            array_defaults = [illegal_default, illegal_array_default]
            # NOTE(review): as above, range(0, 1) means the array variant is never exercised
            for array_type in range(0, 1):
                node_info = f"an illegal input default of type {array_info[array_type]}{attribute_type}"
                # NOTE(review): the description key below is missing its surrounding quotes and the
                # default line always ends in a comma (array_spec[0] is empty), so the generated
                # JSON is malformed -- the expected ParseError may therefore come from the bad JSON
                # rather than from the illegal default being rejected. Verify before trusting
                # these negative tests.
                illegal_default_type_node = f"""{{
                    "IllegalInputType" : {{
                        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with {node_info}",
                        "{ogn.NodeTypeKeys.INPUTS}" : {{
                            "input1": {{
                                {ogn.AttributeKeys.DESCRIPTION}: "This is the bad input",
                                "{ogn.AttributeKeys.TYPE}": "{attribute_type}",
                                "{ogn.AttributeKeys.DEFAULT}": {array_defaults[array_type]},
                                {array_spec[array_type]}
                            }}
                        }}
                    }}
                }}"""
                with self.assertRaises(ogn.ParseError, msg=f"Parsing interface with {node_info}"):
                    ogn.NodeInterfaceWrapper(illegal_default_type_node, "test")
    # ======================================================================
    async def test_nan_inf(self):
        """Test for successful handling of attributes with sNan/NaN/Inf/-Inf values"""
        # All of the numeric types that support NaN and Inf values
        allow_nan_inf = [
            "double",
            "double[2]",
            "double[3]",
            "double[4]",
            "double[]",
            "double[2][]",
            "double[3][]",
            "double[4][]",
            "float",
            "float[2]",
            "float[3]",
            "float[4]",
            "float[]",
            "float[2][]",
            "float[3][]",
            "float[4][]",
            "half",
            "half[2]",
            "half[3]",
            "half[4]",
            "half[]",
            "half[2][]",
            "half[3][]",
            "half[4][]",
            "matrixd[2]",
            "matrixd[2][]",
            "matrixd[3]",
            "matrixd[3][]",
            "matrixd[4]",
            "matrixd[4][]",
            "frame[4]",
            "frame[4][]",
            "colord[3]",
            "colord[3][]",
            "colord[4]",
            "colord[4][]",
            "colorf[3]",
            "colorf[3][]",
            "colorf[4]",
            "colorf[4][]",
            "colorh[3]",
            "colorh[3][]",
            "colorh[4]",
            "colorh[4][]",
            "normald[3]",
            "normalf[3]",
            "normalh[3]",
            "normald[3][]",
            "normalf[3][]",
            "normalh[3][]",
            "pointd[3]",
            "pointf[3]",
            "pointh[3]",
            "pointd[3][]",
            "pointf[3][]",
            "pointh[3][]",
            "quatd[4]",
            "quatf[4]",
            "quath[4]",
            "quatd[4][]",
            "quatf[4][]",
            "quath[4][]",
            "texcoordd[2]",
            "texcoordd[3]",
            "texcoordf[2]",
            "texcoordf[3]",
            "texcoordh[2]",
            "texcoordh[3]",
            "texcoordd[2][]",
            "texcoordd[3][]",
            "texcoordf[2][]",
            "texcoordf[3][]",
            "texcoordh[2][]",
            "texcoordh[3][]",
            "timecode",
            "timecode[]",
            "transform[4]",
            "transform[4][]",
            "vectord[3]",
            "vectorf[3]",
            "vectorh[3]",
            "vectord[3][]",
            "vectorf[3][]",
            "vectorh[3][]",
            # These are not numeric types but they do allow the NAN/INF strings since they are string types
            "path",
            "string",
            "token",
            "token[]",
        ]
        # Create nodes with each of the allowed attribute types, ensuring that only those listed above that accept
        # NaN/Inf values correctly parse and the others fail. The variations of the legal names have exactly 4 values
        # so that we know they will be exercised when the 4-tuples are tested. Matrix values are just square arrays of
        # the vectors: ["INF", "INF"] = double[2], [["INF", "INF"], ["INF", "INF"]] = matrixd[2]
        legal_pos_inf = ['"INF"', '"Inf"', '"inf"', '"+Inf"']
        legal_neg_inf = ['"INF"', '"Inf"', '"inf"', '"-Inf"']
        legal_nan = ['"NAN"', '"NaN"', '"nan"', '"naN"']
        legal_snan = ['"SNAN"', '"sNaN"', '"snan"', '"SnaN"']
        for base_type_name, attribute_manager_type in ogn.ALL_ATTRIBUTE_TYPES.items():
            # These types cannot be instantiated with defaults
            if base_type_name in ["any", "union", "target", "bundle"]:
                continue
            # Exercise every tuple-count/array-depth combination the type supports
            for tuple_count in attribute_manager_type.tuples_supported():
                for array_depth in attribute_manager_type.array_depths_supported():
                    # Start with a comma-separated component list sized to the tuple count...
                    pos_inf_value = ", ".join(legal_pos_inf[i] for i in range(tuple_count))
                    neg_inf_value = ", ".join(legal_neg_inf[i] for i in range(tuple_count))
                    nan_value = ", ".join(legal_nan[i] for i in range(tuple_count))
                    snan_value = ", ".join(legal_snan[i] for i in range(tuple_count))
                    attribute_type_name = base_type_name
                    # ...wrap it in brackets for tuple types...
                    if tuple_count > 1:
                        attribute_type_name += f"[{tuple_count}]"
                        pos_inf_value = f"[{pos_inf_value}]"
                        neg_inf_value = f"[{neg_inf_value}]"
                        nan_value = f"[{nan_value}]"
                        snan_value = f"[{snan_value}]"
                    # ...square it up for matrix types...
                    if attribute_manager_type.is_matrix_type():
                        pos_inf_value = "[" + ", ".join(f"{pos_inf_value}" for i in range(tuple_count)) + "]"
                        neg_inf_value = "[" + ", ".join(f"{neg_inf_value}" for i in range(tuple_count)) + "]"
                        nan_value = "[" + ", ".join(f"{nan_value}" for i in range(tuple_count)) + "]"
                        snan_value = "[" + ", ".join(f"{snan_value}" for i in range(tuple_count)) + "]"
                    # ...and finally duplicate it into a two-element list for array types
                    if array_depth > 0:
                        attribute_type_name += "[]"
                        pos_inf_value = f"[{pos_inf_value}, {pos_inf_value}]"
                        neg_inf_value = f"[{neg_inf_value}, {neg_inf_value}]"
                        nan_value = f"[{nan_value}, {nan_value}]"
                        snan_value = f"[{snan_value}, {snan_value}]"
                    inf_nan_description = f"""{{
                        "InfNanNode" : {{
                            "{ogn.NodeTypeKeys.DESCRIPTION}" : "This node has attributes with Inf/Nan values",
                            "{ogn.NodeTypeKeys.INPUTS}" : {{
                                "positiveInf" : {{
                                    "description": "This is an attribute whose default is positive infinity",
                                    "type": "{attribute_type_name}",
                                    "default": {pos_inf_value}
                                }},
                                "negativeInf" : {{
                                    "description": "This is an attribute whose default is negative infinity",
                                    "type": "{attribute_type_name}",
                                    "default": {neg_inf_value}
                                }},
                                "nanValue" : {{
                                    "description": "This is an attribute whose default is NaN",
                                    "type": "{attribute_type_name}",
                                    "default": {nan_value}
                                }},
                                "snanValue" : {{
                                    "description": "This is an attribute whose default is a signaling NaN",
                                    "type": "{attribute_type_name}",
                                    "default": {snan_value}
                                }}
                            }}
                        }}
                    }}"""
                    if attribute_type_name in allow_nan_inf:
                        inf_nan_node = ogn.NodeInterfaceWrapper(inf_nan_description, "test")
                        _ = self.validate_node(inf_nan_node, "test.InfNanNode")
                    else:
                        with self.assertRaises(
                            ogn.ParseError, msg=f"Parsing node with Inf/Nan values {inf_nan_description}"
                        ):
                            ogn.NodeInterfaceWrapper(inf_nan_description, "test")
| 19,799 | Python | 48.253731 | 119 | 0.459922 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_api.py | """Testing the stability of the API in this module"""
import omni.graph.tools as ogt
import omni.graph.tools.ogn as ogn
import omni.kit
from omni.graph.tools.tests.internal_utils import _check_module_api_consistency, _check_public_api_contents
# ======================================================================
class _TestOmniGraphToolsApi(omni.kit.test.AsyncTestCase):
    """Checks that the public API surface of omni.graph.tools stays consistent"""

    # Submodule names that are deliberately excluded from the published API
    _UNPUBLISHED = ["bindings", "ogn", "tests", "node_generator"]

    async def test_api(self):
        """Confirm that module contents are consistent with each module's declared public API"""
        _check_module_api_consistency(  # noqa: PLW0212
            ogt.tests,
            ["internal_utils", "deprecated_import"],
            is_test_module=True,
        )
        _check_module_api_consistency(ogn, ogn._HIDDEN)  # noqa: PLW0212
        # Since the ogt module also contains all of the previously exposed objects for backward compatibility, they
        # have to be added to the list of unpublished elements here as they, rightly, do not appear in __all__.
        # There is also the rogue published deprecated function that is being used downstream and so has to be
        # omitted from the "in ogn but not ogt" list.
        all_unpublished = self._UNPUBLISHED + [
            module_object
            for module_object in dir(ogn)
            if not module_object.startswith("_") and module_object not in ["supported_attribute_type_names"]
        ]
        _check_module_api_consistency(ogt, all_unpublished)  # noqa: PLW0212

    async def test_api_features(self):
        """Test that the known public API features continue to exist"""
        # The explicit list of names that must remain importable from omni.graph.tools;
        # only_expected_allowed=False permits additional names beyond this minimum set
        _check_public_api_contents(
            ogt,
            [  # noqa: PLW0212
                "dbg_gc",
                "dbg_ui",
                "dbg",
                "deprecated_constant_object",
                "deprecated_function",
                "DeprecatedClass",
                "DeprecatedDictConstant",
                "DeprecatedImport",
                "DeprecatedStringConstant",
                "DeprecateMessage",
                "DeprecationLevel",
                "destroy_property",
                "function_trace",
                "import_tests_in_directory",
                "IndentedOutput",
                "RenamedClass",
                "shorten_string_lines_to",
                "supported_attribute_type_names",
            ],
            self._UNPUBLISHED,
            only_expected_allowed=False,
        )
| 2,384 | Python | 40.842105 | 115 | 0.565436 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_node_generator_attributes.py | """
Contains support for testing the ../generate_node.py script for attribute manager behaviour.
The framework is set up so that the tests can be run synchronously through main.py, or
asynchronously through the Kit testing framework.
"""
import json
import unittest
import omni.graph.tools.ogn as ogn
# Detect the testing environment and use the Kit Async testing framework if it is available
try:
    import omni.kit.test

    # Inside Kit the tests run through the async test framework so they can cooperate with the event loop
    TestBaseClass = omni.kit.test.AsyncTestCase
except ImportError:
    # Outside of Kit (e.g. when run synchronously through main.py) fall back to plain unittest
    TestBaseClass = unittest.TestCase
class TestNodeGeneratorAttributes(TestBaseClass):
    """Unit test class for this script"""

    # Class-level counter so that every attribute created by create_attribute() gets a unique name
    attr_index = 0

    # ======================================================================
    def create_attribute(self, attribute_type: str, attribute_default):
        """
        Create an attribute JSON structure and text with a generic description
        :param attribute_type: Attribute type
        :param attribute_default: Default value of the attribute
        :return: (Attribute Name, Attribute Info as JSON, Attribute as string) output for the attribute definition.
                 Both versions are returned as the string attribute omits the enclosing curly braces so that
                 attribute definitions can more easily be embedded in the JSON attribute lists
        """
        attribute_name = f"attr{TestNodeGeneratorAttributes.attr_index}"
        TestNodeGeneratorAttributes.attr_index += 1
        attribute_as_json = {
            f"{ogn.AttributeKeys.DESCRIPTION}": f"This is attribute {attribute_name}",
            f"{ogn.AttributeKeys.TYPE}": attribute_type,
            f"{ogn.AttributeKeys.DEFAULT}": attribute_default,
        }
        attribute_as_string = f'"{attribute_name}" : {json.dumps(attribute_as_json)}'
        return attribute_name, attribute_as_json, attribute_as_string

    # ======================================================================
    def validate_node(self, node_wrapper: ogn.NodeInterfaceWrapper, node_name: str):
        """
        Confirms that a named node exists and is a valid node in the interface wrapper.
        :param node_wrapper: ogn.NodeInterfaceWrapper class generated from the node description
        :param node_name: Name of the node to check
        :return: ogn.NodeInterface object for the named node
        """
        self.assertEqual(node_wrapper.node_interface.name, node_name, f"Expected node {node_name} not listed")
        return node_wrapper.node_interface

    # ======================================================================
    def validate_node_description(self, node_interface: ogn.NodeInterface, expected_description: str):
        """
        Validate that the node interface has parsed a description with the expected name
        :param node_interface: ogn.NodeInterface class of the node being checked
        :param expected_description: Description string the node is expected to have
        """
        self.assertEqual(
            node_interface.description, expected_description, f"Description for node {node_interface.name} not correct"
        )

    # ======================================================================
    def test_attribute_types(self):
        """Test for extracting the correct attribute manager from a type description"""
        import omni.graph.tools._impl.node_generator.attributes.management as management_module

        # Reused description template; the "type" entry is filled in per-iteration below
        description = {"type": None, "optional": True, "description": "No description"}
        # KEY: attribute type name, VALUE: name of attribute manager for that type
        test_data = {
            "bool": "BoolAttributeManager",
            "double": "DoubleAttributeManager",
            "float": "FloatAttributeManager",
            "half": "HalfAttributeManager",
            "int": "IntAttributeManager",
            "int64": "Int64AttributeManager",
            "string": "StringAttributeManager",
            "token": "TokenAttributeManager",
            "colord": "ColorAttributeManager",
            "colorf": "ColorAttributeManager",
            "colorh": "ColorAttributeManager",
            "execution": "ExecutionAttributeManager",
            "frame": "FrameAttributeManager",
            "matrixd": "MatrixAttributeManager",
            "normald": "NormalAttributeManager",
            "normalf": "NormalAttributeManager",
            "normalh": "NormalAttributeManager",
            "pointd": "PointAttributeManager",
            "pointf": "PointAttributeManager",
            "pointh": "PointAttributeManager",
            "texcoordd": "TexCoordAttributeManager",
            "texcoordf": "TexCoordAttributeManager",
            "texcoordh": "TexCoordAttributeManager",
            "timecode": "TimeCodeAttributeManager",
            "transform": "FrameAttributeManager",
            "uchar": "UCharAttributeManager",
            "uint": "UIntAttributeManager",
            "uint64": "UInt64AttributeManager",
            "vectord": "VectorAttributeManager",
            "vectorf": "VectorAttributeManager",
            "vectorh": "VectorAttributeManager",
        }
        for attribute_type, manager_name in test_data.items():
            expected_manager = getattr(management_module, manager_name)
            # Check tuple and array combinations
            for tuple_count in expected_manager.tuples_supported():
                for array_depth in expected_manager.array_depths_supported():
                    # Build the full type suffix, e.g. "float[3][]" for a 3-tuple array
                    suffix = "" if tuple_count < 2 else f"[{tuple_count}]"
                    suffix += "" if array_depth == 0 else "[]"
                    description["type"] = f"{attribute_type}{suffix}"
                    manager = ogn.get_attribute_manager(f"{ogn.OUTPUT_NS}:attribute", attribute_data=description)
                    self.assertEqual(manager.__class__.__name__, manager_name)

    # ======================================================================
    def test_split_attributes(self):
        """Test for successful parsing of attribute union groups"""
        # Try each group by itself
        for group_name, literal_types in ogn.ATTRIBUTE_UNION_GROUPS.items():
            type_name, _, _, extra_info = ogn.split_attribute_type_name([group_name])
            self.assertEqual(type_name, "union")
            for literal_type in literal_types:
                self.assertTrue(literal_type in extra_info)
        # Test a combination of groups
        combo_groups = [["integral_scalers", "decimal_scalers"], ["arrays", "integral_arrays"]]
        for combo in combo_groups:
            type_name, _, _, extra_info = ogn.split_attribute_type_name(combo)
            self.assertEqual(type_name, "union")
            combo_expanded = ogn.expand_attribute_union_groups(combo)
            # NOTE(review): the nested generators below are equivalent to iterating combo_expanded directly
            for literal_type in (name for name in (group for group in combo_expanded)):
                self.assertTrue(literal_type in extra_info)

    # ======================================================================
    def test_attributes(self):
        """Test for successful parsing of attributes within nodes"""
        # One generated attribute for each of the three attribute groups (input/output/state)
        input_name, input_json, input_description = self.create_attribute("float", 0.0)
        output_name, output_json, output_description = self.create_attribute("double", 0.0)
        state_name, state_json, state_description = self.create_attribute("double", 0.0)
        node_description = "This is a node with one of each type of attribute"
        one_attribute_node_description = f"""{{
"EmptyAttributes" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "{node_description}",
"{ogn.NodeTypeKeys.INPUTS}" : {{
{input_description}
}},
"{ogn.NodeTypeKeys.OUTPUTS}" : {{
{output_description}
}},
"{ogn.NodeTypeKeys.STATE}" : {{
{state_description}
}}
}}
}}"""
        one_attribute_node = ogn.NodeInterfaceWrapper(one_attribute_node_description, "test")
        node_interface = self.validate_node(one_attribute_node, "test.EmptyAttributes")
        self.validate_node_description(node_interface, node_description)
        self.assertEqual(1, len(node_interface.all_input_attributes()))
        self.assertEqual(1, len(node_interface.all_output_attributes()))
        self.assertEqual(1, len(node_interface.all_state_attributes()))
        # Verify each attribute round-tripped its group, description, type, and default
        (input1, attribute_group) = node_interface.attribute_by_name(input_name)
        self.assertTrue(
            input1 is not None and attribute_group == ogn.INPUT_GROUP, f"Input attributes should contain {input_name}"
        )
        self.assertEqual(
            input1.description, input_json[ogn.AttributeKeys.DESCRIPTION], "Description of input attribute"
        )
        self.assertEqual(input1.type, input_json[ogn.AttributeKeys.TYPE], "Type of input attribute")
        self.assertEqual(input1.default, input_json[ogn.AttributeKeys.DEFAULT], "Default value of input attribute")
        (output1, attribute_group) = node_interface.attribute_by_name(output_name)
        self.assertTrue(
            output1 is not None and attribute_group == ogn.OUTPUT_GROUP,
            f"Output attributes should contain {output_name}",
        )
        self.assertEqual(
            output1.description, output_json[ogn.AttributeKeys.DESCRIPTION], "Description of output attribute"
        )
        self.assertEqual(output1.type, output_json[ogn.AttributeKeys.TYPE], "Type of output attribute")
        self.assertEqual(output1.default, output_json[ogn.AttributeKeys.DEFAULT], "Default value of output attribute")
        (state1, attribute_group) = node_interface.attribute_by_name(state_name)
        self.assertTrue(
            state1 is not None and attribute_group == ogn.STATE_GROUP, f"State attributes should contain {state_name}"
        )
        self.assertEqual(
            state1.description, state_json[ogn.AttributeKeys.DESCRIPTION], "Description of state attribute"
        )
        self.assertEqual(state1.type, state_json[ogn.AttributeKeys.TYPE], "Type of state attribute")
        self.assertEqual(state1.default, state_json[ogn.AttributeKeys.DEFAULT], "Default value of state attribute")

    # ======================================================================
    def test_attribute_deprecation(self):
        """Test for successful parsing of deprecated attributes"""
        # Deprecation messages may be plain strings (with escapes), arrays of strings, or empty
        node_with_deprecated_attrs = f"""{{
"DeprecatedAttrsNode" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with several deprecated attributes.",
"{ogn.NodeTypeKeys.INPUTS}" : {{
"specialCharsMsg": {{
"{ogn.AttributeKeys.DESCRIPTION}":
["Deprecation msg has special chars that need to be properly escaped."],
"{ogn.AttributeKeys.TYPE}": "double",
"{ogn.AttributeKeys.DEPRECATED}":
"This message has a backslash \\\\, newline \\n, return \\r, and quotes \\\" '"
}},
"notDeprecated": {{
"{ogn.AttributeKeys.DESCRIPTION}": ["This attribute is not deprecated."],
"{ogn.AttributeKeys.TYPE}": "double"
}}
}},
"{ogn.NodeTypeKeys.OUTPUTS}" : {{
"arrayMsg": {{
"{ogn.AttributeKeys.DESCRIPTION}": ["Deprecation msg is an array of strings."],
"{ogn.AttributeKeys.TYPE}": "int",
"{ogn.AttributeKeys.DEPRECATED}": [
"This message is",
"spread across multiple",
"array elements."
]
}}
}},
"{ogn.NodeTypeKeys.STATE}" : {{
"emptyMsg": {{
"{ogn.AttributeKeys.DESCRIPTION}": ["Deprecation msg is empty."],
"{ogn.AttributeKeys.TYPE}": "int",
"{ogn.AttributeKeys.DEPRECATED}": ""
}}
}}
}}
}}"""
        node = ogn.NodeInterfaceWrapper(node_with_deprecated_attrs, "test")
        node_interface = self.validate_node(node, "test.DeprecatedAttrsNode")
        (attr, _is_output) = node_interface.attribute_by_name("specialCharsMsg")
        self.assertTrue(attr.is_deprecated, "Deprecated input attribute")
        self.assertEqual(
            attr.deprecation_msg,
            "This message has a backslash \\, newline \n, return \r, and quotes \" '",
            "Deprecation msg with special chars.",
        )
        (attr, _is_output) = node_interface.attribute_by_name("notDeprecated")
        self.assertFalse(attr.is_deprecated, "Non-deprecated input attribute")
        self.assertEqual(attr.deprecation_msg, "", "Deprecation msg should be empty.")
        (attr, _is_output) = node_interface.attribute_by_name("arrayMsg")
        self.assertTrue(attr.is_deprecated, "Deprecated output attribute")
        # Array-form messages are joined with spaces into a single string
        self.assertEqual(
            attr.deprecation_msg,
            "This message is spread across multiple array elements.",
            "Deprecation msg is an array of strings.",
        )
        (attr, _is_output) = node_interface.attribute_by_name("emptyMsg")
        self.assertTrue(attr.is_deprecated, "Deprecated state attribute")
        self.assertEqual(attr.deprecation_msg, "", "Deprecation msg is empty.")

    # ======================================================================
    def test_attribute_memory_type(self):
        """Test for successful parsing of attribute memory type"""
        # The node-level memoryType is inherited by attributes unless overridden per-attribute
        cpu_cuda_node = f"""{{
"CpuCudaNode" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with one CUDA input and one CPU output",
"{ogn.NodeTypeKeys.VERSION}": 1,
"{ogn.NodeTypeKeys.MEMORY_TYPE}": "{ogn.MemoryTypeValues.CUDA}",
"{ogn.NodeTypeKeys.INPUTS}" : {{
"inputCUDA": {{
"description": ["This is a CUDA input"],
"type": "double",
"default": 0.0
}}
}},
"{ogn.NodeTypeKeys.OUTPUTS}" : {{
"outputCPU": {{
"description": ["This is a CPU output"],
"type": "double",
"memoryType": "{ogn.MemoryTypeValues.CPU}"
}},
"outputAny": {{
"description": ["This is a runtime determined output"],
"type": "double",
"memoryType": "{ogn.MemoryTypeValues.ANY}"
}}
}}
}}
}}"""
        # Rebinds the name from the JSON text to the parsed wrapper
        cpu_cuda_node = ogn.NodeInterfaceWrapper(cpu_cuda_node, "test")
        node_interface = self.validate_node(cpu_cuda_node, "test.CpuCudaNode")
        (input1, _) = node_interface.attribute_by_name("inputCUDA")
        self.assertEqual(input1.memory_type, ogn.MemoryTypeValues.CUDA, "Input inheriting CUDA type from node")
        (output1, _) = node_interface.attribute_by_name("outputCPU")
        self.assertEqual(output1.memory_type, ogn.MemoryTypeValues.CPU, "Output overriding CUDA type from node")

    # ======================================================================
    def test_attribute_metadata(self):
        """Test for successful parsing of attribute metadata"""
        node_with_attr_metadata = f"""{{
"NodeWithAttributeMetadata" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with metadata on an attribute",
"{ogn.NodeTypeKeys.VERSION}": 1,
"{ogn.NodeTypeKeys.TOKENS}": ["foo", "bar"],
"{ogn.NodeTypeKeys.INPUTS}" : {{
"inputMeta": {{
"description": ["This is an input with metadata"],
"type": "token",
"default": "foo",
"metadata": {{
"{ogn.MetadataKeys.HIDDEN}": "True",
"keyable": "True",
"{ogn.MetadataKeys.ALLOWED_TOKENS}": ["foo", "B"]
}}
}}
}}
}}
}}"""
        node_with_attr_metadata = ogn.NodeInterfaceWrapper(node_with_attr_metadata, "test")
        node_interface = self.validate_node(node_with_attr_metadata, "test.NodeWithAttributeMetadata")
        (input1, _) = node_interface.attribute_by_name("inputMeta")
        # Parsing adds derived metadata: flattened token list, raw token JSON, description, and quoted default
        expected_metadata = {
            ogn.MetadataKeys.HIDDEN: "True",
            "keyable": "True",
            ogn.MetadataKeys.ALLOWED_TOKENS: "foo,B",
            ogn.MetadataKeys.ALLOWED_TOKENS_RAW: '["foo", "B"]',
            ogn.MetadataKeys.DESCRIPTION: "This is an input with metadata",
            ogn.MetadataKeys.DEFAULT: '"foo"',
        }
        self.assertEqual(input1.metadata, expected_metadata, "Input attribute with metadata")
        # Check that node tokens and allowedTokens are consolidated
        expected_tokens = {"foo": "foo", "bar": "bar", "B": "B"}
        self.assertEqual(node_interface.tokens, expected_tokens)

    # ======================================================================
    def test_tokens_with_special_characters(self):
        """Test for successful parsing of attribute token metadata"""
        # Dictionary-form tokens map safe names onto token values with unsafe characters
        node_with_attr_metadata = f"""{{
"NodeWithSpecialTokenCharacters" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with special characters in the tokens",
"{ogn.NodeTypeKeys.VERSION}": 1,
"{ogn.NodeTypeKeys.TOKENS}": {{"fooLtBar": "foo < bar", "fooNeBar": "foo != bar"}},
"{ogn.NodeTypeKeys.INPUTS}" : {{
"inputTokens": {{
"description": ["This is an input with metadata"],
"type": "token",
"default": "fooEqBar",
"metadata": {{
"{ogn.MetadataKeys.HIDDEN}": "True",
"keyable": "True",
"{ogn.MetadataKeys.ALLOWED_TOKENS}": {{"fooLtBar": "foo < bar", "fooEqBar": "foo == bar"}}
}}
}}
}}
}}
}}"""
        node_with_attr_metadata = ogn.NodeInterfaceWrapper(node_with_attr_metadata, "test")
        node_interface = self.validate_node(node_with_attr_metadata, "test.NodeWithSpecialTokenCharacters")
        (input1, _) = node_interface.attribute_by_name("inputTokens")
        # The default named "fooEqBar" is resolved to its token value in the stored metadata
        expected_metadata = {
            ogn.MetadataKeys.HIDDEN: "True",
            "keyable": "True",
            ogn.MetadataKeys.ALLOWED_TOKENS_RAW: '{"fooLtBar": "foo < bar", "fooEqBar": "foo == bar"}',
            ogn.MetadataKeys.ALLOWED_TOKENS: "foo < bar,foo == bar",
            ogn.MetadataKeys.DESCRIPTION: "This is an input with metadata",
            ogn.MetadataKeys.DEFAULT: '"foo == bar"',
        }
        self.assertEqual(input1.metadata, expected_metadata, "Input attribute with metadata")
        expected_tokens = {"fooLtBar": "foo < bar", "fooNeBar": "foo != bar", "fooEqBar": "foo == bar"}
        self.assertEqual(node_interface.tokens, expected_tokens)

    # ======================================================================
    def test_alternative_token_specification(self):
        """Test for successful use of allowed tokens as first order data with name-based default value"""
        # allowedTokens may appear directly on the attribute rather than nested under "metadata"
        node_with_alt_tokens = f"""{{
"NodeWithAlternativeAllowedTokens" : {{
"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node that sets token defaults by name",
"{ogn.NodeTypeKeys.VERSION}": 1,
"{ogn.NodeTypeKeys.INPUTS}" : {{
"inputTokens": {{
"description": ["This is an input with metadata"],
"type": "token",
"default": "fooLtBar",
"{ogn.MetadataKeys.ALLOWED_TOKENS}": {{"fooLtBar": "foo < bar", "fooEqBar": "foo == bar"}}
}}
}}
}}
}}"""
        node_with_alt_tokens = ogn.NodeInterfaceWrapper(node_with_alt_tokens, "test")
        node_interface = self.validate_node(node_with_alt_tokens, "test.NodeWithAlternativeAllowedTokens")
        (input1, _) = node_interface.attribute_by_name("inputTokens")
        self.assertEqual(input1.metadata[ogn.MetadataKeys.DEFAULT], '"foo < bar"')
| 20,731 | Python | 51.753181 | 119 | 0.556654 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_internal_versions.py | """Testing for the internal utilities, like extension version management, logging, and .ogn file handling
To turn on debugging for a test just add this line at the beginning:
ogi.set_registration_logging("stdout") # TODO:
"""
from pathlib import Path
from tempfile import TemporaryDirectory
import omni.graph.tools._internal as ogi
import omni.kit.test
from omni.graph.tools._impl.node_generator.attributes.attribute_unions import parse_union_definitions
from omni.graph.tools._impl.node_generator.utils import ParseError, rst_table
from .internal_utils import CreateHelper
# ==============================================================================================================
class TestInternalVersions(omni.kit.test.AsyncTestCase):
    """Tests for the extension-version compatibility helpers in omni.graph.tools._internal"""

    async def test_compatibility(self):
        """Low level test of the compatibility functions"""
        bad_version = (0, 0, 0)
        # Run twice to make sure the lru caching behaves
        for _ in range(0, 2):
            self.assertEqual(bad_version, ogi.get_generator_extension_version(ogi.Compatibility.Incompatible))
            current_version = ogi.get_generator_extension_version()
            major_compatible_version = ogi.get_generator_extension_version(ogi.Compatibility.MajorVersionCompatible)
            self.assertEqual(current_version[0], major_compatible_version[0])
            self.assertTrue(current_version > major_compatible_version)
            self.assertEqual(bad_version, ogi.get_target_extension_version(ogi.Compatibility.Incompatible))
            current_version = ogi.get_target_extension_version()
            major_compatible_version = ogi.get_target_extension_version(ogi.Compatibility.MajorVersionCompatible)
            self.assertEqual(current_version[0], major_compatible_version[0])
            self.assertTrue(current_version < major_compatible_version)

    # --------------------------------------------------------------------------------------------------------------
    async def test_generation_versions(self):
        """Test of the class that manages generation versions"""
        base_version = [3, 3, 3]
        minor_version = [3, 3, 4]  # Differs from base only in patch number
        major_version = [3, 4, 5]  # Same major version as base, different minor
        new_version = [4, 0, 0]  # Different major version from base

        def _with_versions(
            generator_version: ogi.ExtensionVersion_t, target_version: ogi.ExtensionVersion_t
        ) -> ogi.GenerationVersions:
            """Shorthand for constructing a GenerationVersions from the two version tuples"""
            new_versions = ogi.GenerationVersions(generator_version=generator_version, target_version=target_version)
            return new_versions

        base = _with_versions(base_version, base_version)
        # Set up the test suite with all 16 possible combinations of version compatibility
        test_data = [
            (base_version, base_version, ogi.Compatibility.FullyCompatible),
            (base_version, minor_version, ogi.Compatibility.FullyCompatible),
            (base_version, major_version, ogi.Compatibility.MajorVersionCompatible),
            (base_version, new_version, ogi.Compatibility.Incompatible),
            (minor_version, base_version, ogi.Compatibility.FullyCompatible),
            (minor_version, minor_version, ogi.Compatibility.FullyCompatible),
            (minor_version, major_version, ogi.Compatibility.MajorVersionCompatible),
            (minor_version, new_version, ogi.Compatibility.Incompatible),
            (major_version, base_version, ogi.Compatibility.MajorVersionCompatible),
            (major_version, minor_version, ogi.Compatibility.MajorVersionCompatible),
            (major_version, major_version, ogi.Compatibility.MajorVersionCompatible),
            (major_version, new_version, ogi.Compatibility.Incompatible),
            (new_version, base_version, ogi.Compatibility.Incompatible),
            (new_version, minor_version, ogi.Compatibility.Incompatible),
            (new_version, major_version, ogi.Compatibility.Incompatible),
            (new_version, new_version, ogi.Compatibility.Incompatible),
        ]
        for (generator_version, target_version, expected_compatibility) in test_data:
            test_version = _with_versions(generator_version, target_version)
            self.assertEqual(expected_compatibility, test_version.compatibility(base))
        # Test extraction of the version numbers from a database file
        with TemporaryDirectory() as test_directory_fd:
            test_directory = Path(test_directory_fd)
            versions = ogi.GenerationVersions(generator_version=[1, 2, 3], target_version=[4, 5, 6])
            creator = CreateHelper("omni.test.internal.versions", test_directory, versions)
            db_path = creator.create_py_database(creator.TEST_CLASS, Path("."))
            db_versions = ogi.GenerationVersions()
            db_versions.set_versions_from_database(db_path)
            self.assertEqual(db_versions, versions, f"Compare {db_versions} and {versions}")
            self.assertEqual(ogi.Compatibility.FullyCompatible, versions.compatibility(db_versions))
        # Test the comparison operators
        minor = ogi.GenerationVersions(generator_version=minor_version, target_version=minor_version)
        major = ogi.GenerationVersions(generator_version=major_version, target_version=major_version)
        newest = ogi.GenerationVersions(generator_version=new_version, target_version=new_version)
        none = ogi.GenerationVersions()  # Empty version, expected to sort before all real versions
        all_versions = [base, minor, major, newest]
        # Every version (including the empty one) must compare equal to itself
        for version in all_versions + [none]:
            self.assertTrue(version == version)  # noqa: PLR0124
            self.assertTrue(version <= version)  # noqa: PLR0124
            self.assertTrue(version >= version)  # noqa: PLR0124
        # Adjacent pairs in the ascending list must satisfy all of the strict and non-strict orderings
        for index, version in enumerate(all_versions[:-1]):
            next_version = all_versions[index + 1]
            self.assertTrue(version < next_version)
            self.assertTrue(next_version >= version)
            self.assertTrue(version <= next_version)
            self.assertTrue(next_version > version)
            self.assertTrue(version != next_version)
            self.assertTrue(next_version != version)
        self.assertTrue(none < base)
        self.assertTrue(base > none)
        self.assertTrue(none <= base)
        self.assertTrue(base >= none)
        self.assertFalse(none > base)
        self.assertFalse(base < none)
        self.assertFalse(none >= base)
        self.assertFalse(base <= none)

    # --------------------------------------------------------------------------------------------------------------
    async def test_logging(self):
        """Test the basic extensions made to the logging function"""
        # Check all of the legal locations for logging
        locations = {
            "1": "StdOutInterceptor",
            "stdout": "StdOutInterceptor",
            "cout": "StdOutInterceptor",
            "stderr": "StdErrInterceptor",
            "cerr": "StdErrInterceptor",
        }
        for location, stream_name in locations.items():
            ogi.set_registration_logging(location)
            self.assertEqual(len(ogi.LOG.handlers), 1)
            self.assertEqual(stream_name, ogi.LOG.handlers[0].stream.__class__.__name__)
        # The last type is a file, which can be tested directly by looking at its contents after logging
        with TemporaryDirectory() as test_directory_fd:
            test_directory = Path(test_directory_fd)
            log_path = test_directory / "Log.txt"
            ogi.set_registration_logging(log_path)
            try:
                ogi.LOG.info("Hello")
                with open(log_path, "r", encoding="utf-8") as log_fd:
                    actual_contents = log_fd.readlines()
                self.assertCountEqual(["INFO: Hello\n"], actual_contents)
            finally:
                # Always restore default logging so other tests are not affected
                ogi.set_registration_logging(None)

    # --------------------------------------------------------------------------------------------------------------
    async def test_ogn_file_names(self):
        """Test the utility that returns the generated file name given a base name"""
        # KEY: generated file type, VALUE: name expected for that type from the base class name
        expected_names = {
            ogi.FileType.OGN: f"{CreateHelper.TEST_CLASS}.ogn",
            ogi.FileType.PYTHON: f"{CreateHelper.TEST_CLASS}.py",
            ogi.FileType.PYTHON_DB: f"{CreateHelper.TEST_CLASS}Database.py",
            ogi.FileType.TEST: f"Test{CreateHelper.TEST_CLASS}.py",
            ogi.FileType.CPP_DB: f"{CreateHelper.TEST_CLASS}Database.h",
            ogi.FileType.DOCS: f"{CreateHelper.TEST_CLASS}.rst",
            ogi.FileType.USD: f"{CreateHelper.TEST_CLASS}Template.usda",
        }
        # Test going from root+type to the full name
        for file_type, expected_name in expected_names.items():
            self.assertEqual(
                expected_name, ogi.get_ogn_file_name(CreateHelper.TEST_CLASS, file_type), f"Assembled {expected_name}"
            )
        # Test going from full name back to type+root
        for file_type, full_name in expected_names.items():
            (root_name, found_type) = ogi.get_ogn_type_and_node(full_name)
            self.assertEqual(found_type, file_type, f"Extracted type from {full_name}")
            self.assertEqual(root_name, CreateHelper.TEST_CLASS, f"Extracted root name from {full_name}")

    # --------------------------------------------------------------------------------------------------------------
    async def test_find_build_directory(self):
        """Test the utility for finding a build directory above a given directory"""
        build_dir = ogi.find_ogn_build_directory(Path(__file__))
        self.assertTrue(build_dir is not None)
        self.assertTrue(build_dir.is_dir())
        self.assertEqual(build_dir.name, "ogn")
        # Starting above the ogn directory there is nothing left to find
        non_dir = ogi.find_ogn_build_directory(build_dir.parent.parent)
        self.assertIsNone(non_dir)

    # --------------------------------------------------------------------------------------------------------------
    async def test_temporary_cache_location(self):
        """Test the temporary redirection of the cache location, used for testing"""
        # The location does not have to exist; only the path computation is being verified
        expected_cache_location = Path("This/Does/Not/Exist")
        with ogi.TemporaryCacheLocation(expected_cache_location):
            self.assertEqual(ogi.cache_location(), expected_cache_location)
            self.assertEqual(
                ogi.full_cache_path(
                    ogi.GenerationVersions(generator_version=[1, 2, 3], target_version=[4, 5, 6]),
                    "my.extension-0.1",
                    "my.extension",
                ),
                expected_cache_location / "ogn_generated" / "1.2.3" / "my.extension-0.1" / "my.extension" / "ogn",
            )
# ==============================================================================================================
class TestUtiltities(omni.kit.test.AsyncTestCase):
    """Tests for some of the miscellaneous functionality that appears in the node generator code"""

    async def test_rst_table(self):
        """Test generation of simple rst tables from text"""
        # First row is the header; the C++ cell is multi-line to exercise multi-line cell handling
        test_table = [
            ["Language", "Hello World"],
            [
                "C++",
                """#include <iostream>
int main()
{
    std::cout << "Hello World!";
    return 0;
}""",
            ],
            ["Python", 'print("Hello World!")'],
        ]
        # Column widths come from the longest cell line; blank rows separate the lines of a multi-line cell
        expected_result = """+----------+----------------------------------+
| Language | Hello World                      |
+==========+==================================+
| C++      | #include <iostream>              |
|          |                                  |
|          | int main()                       |
|          |                                  |
|          | {                                |
|          |                                  |
|          |     std::cout << "Hello World!"; |
|          |                                  |
|          |     return 0;                    |
|          |                                  |
|          | }                                |
+----------+----------------------------------+
| Python   | print("Hello World!")            |
+----------+----------------------------------+
"""
        actual_result = rst_table(test_table)
        self.assertEqual(expected_result, actual_result)
# ==============================================================================================================
class TestAttributeUnionUtilities(omni.kit.test.AsyncTestCase):
    """Tests parsing utilities for attribute unions configurations"""

    # --------------------------------------------------------------------------------------------------------------
    async def test_throws_on_invalid_entries(self):
        """Tests that ParseError is thrown when there are invalid entries"""
        bad_values = [1, (1, 2), {"dict": "missing 'entries' and 'appends' keys"}, ["valid", 1]]
        for bad_value in bad_values:
            with self.assertRaises(ParseError):
                parse_union_definitions({"valid_entry": "valid", "invalid_entry": bad_value})

    # --------------------------------------------------------------------------------------------------------------
    async def test_throws_on_recursive_definition(self):
        """Test that recursive definitions throw an error"""
        recursive_definitions = [
            # self referencing
            {
                "entry_a": "valid_entry",
                "entry_b": "entry_b",
            },
            # recursive via cross-entries
            {
                "entry_a": "entry_b",
                "entry_b": "entry_c",
                "entry_c": "entry_d",
                "entry_d": ["entry_e", "entry_f", "entry_a"],
            },
        ]
        for recursive_definition in recursive_definitions:
            with self.assertRaises(ParseError):
                parse_union_definitions(recursive_definition)

    # --------------------------------------------------------------------------------------------------------------
    async def test_flattens_referenced_entries(self):
        """Tests that referenced entries work as expected"""
        flattened = parse_union_definitions(
            {
                "entry_a": ["a", "b", "c", "d"],
                "entry_b": ["entry_a", "e"],
                "entry_c": ["entry_b", "f"],
            }
        )
        self.assertDictEqual(
            flattened,
            {
                "entry_a": ["a", "b", "c", "d"],
                "entry_b": ["a", "b", "c", "d", "e"],
                "entry_c": ["a", "b", "c", "d", "e", "f"],
            },
        )

    # --------------------------------------------------------------------------------------------------------------
    async def test_append_operator(self):
        """Simple test that the appends operator works as expected"""
        flattened = parse_union_definitions(
            {"entry_a": ["a", "b", "c", "d"], "entry_b": {"entries": ["entry_a", "f"], "append": "_z"}}
        )
        expected = {"entry_a": ["a", "b", "c", "d"], "entry_b": ["a_z", "b_z", "c_z", "d_z", "f_z"]}
        self.assertDictEqual(flattened, expected)
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_internal_generation.py | """Tests the correct generation of node type definitions into the cache"""
from __future__ import annotations
from contextlib import ExitStack
from pathlib import Path
from tempfile import TemporaryDirectory
import omni.graph.tools._internal as ogi
import omni.kit.test
from omni.graph.tools.tests.internal_utils import TemporaryPathAddition
from .internal_utils import CreateHelper
# Helper constants shared by many tests
_CURRENT_VERSIONS = ogi.GenerationVersions(ogi.Compatibility.FullyCompatible)
# Monotonically increasing suffix so that each ModuleContexts gets a unique extension/module name
EXT_INDEX = 0
# ==============================================================================================================
class ModuleContexts:
    """Bundle of temporary directory, import path, and cache redirection contexts used by the generation tests"""

    def __init__(self, stack: ExitStack):
        """Set up a stack of contexts to use for tests running in individual temporary directory"""
        global EXT_INDEX
        # Bump the shared counter so that concurrent/sequential tests never reuse an extension name
        EXT_INDEX += 1
        self.ext_name = f"omni.test.internal.generation{EXT_INDEX}"
        # Put all temporary files in a temporary directory for easy disposal
        self._directory_ctx = stack.enter_context(TemporaryDirectory())  # pylint: disable=consider-using-with
        self.test_directory = Path(self._directory_ctx)
        self.module_root = Path(self.test_directory) / "exts"
        self.module_name = self.ext_name
        self.module_path = self.module_root / self.ext_name / self.module_name.replace(".", "/")
        # Add the import path of the new extension to the system path
        self.path_addition = stack.enter_context(TemporaryPathAddition(self.module_root / self.ext_name))
        # Redirect the usual node cache to the temporary directory
        self.cache_root = stack.enter_context(ogi.TemporaryCacheLocation(self.test_directory / "cache"))
        # Uncomment this to dump debugging information while the tests are running
        # self.log = stack.enter_context(ogi.TemporaryLogLocation("stdout"))
# ==============================================================================================================
class TestInternalGeneration(omni.kit.test.AsyncTestCase):
    def __init__(self, *args, **kwargs):
        """Construct the test case and disable unittest's diff truncation."""
        super().__init__(*args, **kwargs)
        self.maxDiff = None  # Diffs of file path lists can be large so let the full details be seen
    # --------------------------------------------------------------------------------------------------------------
    async def test_out_of_date_node_types(self):
        """Test that a standalone build with no cache recognizes that it needs to build the nodes"""
        incompatible = ogi.GenerationVersions(ogi.Compatibility.Incompatible)
        with ExitStack() as stack:
            ctx = ModuleContexts(stack)
            # Nodes written with incompatible generation versions must always be reported as out of date
            creator = CreateHelper(ctx.ext_name, ctx.module_root, incompatible)
            creator.add_v1_18_node("OgnTestNode")
            creator.add_v1_18_node("OgnOtherNode")
            ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
            self.assertIsNotNone(ext_contents)
            ext_contents.scan_for_nodes()
            self.assertCountEqual(
                ["OgnTestNode", "OgnOtherNode"], list(ext_contents.get_out_of_date_definitions().keys())
            )
# --------------------------------------------------------------------------------------------------------------
async def test_build_standalone(self):
"""Test that the caching system builds new versions of generated files when no build directory exists"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_standalone_node("OgnTestNode")
cache_directory = ogi.full_cache_path(_CURRENT_VERSIONS, ctx.ext_name, ctx.module_name)
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
ext_contents.ensure_files_up_to_date()
definition = ext_contents.node_type_definition("OgnTestNode")
self.assertIsNotNone(definition)
expected_files = [
ctx.module_path / "nodes" / "OgnTestNode.ogn",
ctx.module_path / "nodes" / "OgnTestNode.py",
cache_directory / "__init__.py",
cache_directory / "OgnTestNodeDatabase.py",
cache_directory / "docs" / "OgnTestNode.rst",
cache_directory / "tests" / "__init__.py",
cache_directory / "tests" / "TestOgnTestNode.py",
cache_directory / "tests" / "usd" / "OgnTestNodeTemplate.usda",
]
actual_files = []
for root, _dirs, files in ogi.walk_with_excludes(ctx.test_directory, ["__pycache__"]):
for file in files:
actual_files.append(Path(root) / file)
self.assertCountEqual(expected_files, actual_files, "New files generated")
# --------------------------------------------------------------------------------------------------------------
async def test_no_generation(self):
"""Test that the caching system does not build anything when the built files are up to date"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
ogn_directory = ctx.module_path / "ogn"
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_v1_18_node("OgnTestNode")
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
ext_contents.ensure_files_up_to_date()
definition = ext_contents.node_type_definition("OgnTestNode")
self.assertIsNotNone(definition)
expected_files = [
ogn_directory / "nodes" / "OgnTestNode.ogn",
ogn_directory / "nodes" / "OgnTestNode.py",
ogn_directory / "OgnTestNodeDatabase.py",
ogn_directory / "tests" / "__init__.py",
ogn_directory / "tests" / "TestOgnTestNode.py",
ogn_directory / "tests" / "usd" / "OgnTestNodeTemplate.usda",
]
actual_files = []
for root, _dirs, files in ogi.walk_with_excludes(ctx.test_directory, ["__pycache__"]):
for file in files:
actual_files.append(Path(root) / file)
self.assertCountEqual(expected_files, actual_files, "New files generated")
| 6,648 | Python | 52.192 | 117 | 0.580776 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_node_generator_illegal.py | """
Contains support for testing the ../generate_node.py script for illegal syntax or flag combinations.
The framework is set up so that the tests can be run synchronously through main.py, or
asynchronously through the Kit testing framework.
"""
import json
import os
import unittest
import omni.graph.tools.ogn as ogn
from omni.graph.tools._impl.node_generator.attributes.naming import CPP_KEYWORDS, PYTHON_KEYWORDS, SAFE_CPP_KEYWORDS
# Detect the testing environment and use the Kit Async testing framework if it is available
try:
import omni.kit.test
TestBaseClass = omni.kit.test.AsyncTestCase
except ImportError:
TestBaseClass = unittest.TestCase
# --------------------------------------------------------------------------------------------------------------
class _ExpectedError:
"""
Helper class used to prefix any pending error messages with [Expected Error]
stdoutFailPatterns.exclude (defined in extension.toml) will cause these errors
to be ignored when running tests.
Note that it will prepend only the first error.
Usage:
with _ExpectedError():
function_that_produced_error_output()
"""
def __enter__(self):
print("", flush=True) # preflush any output, otherwise it may be appended to the next statement
print("[Ignore this error/warning] ", end="", flush=True)
def __exit__(self, exit_type, value, traceback):
print("", flush=True) # print a newline, to avoid actual errors being ignored
class TestNodeGeneratorIllegal(TestBaseClass):
"""Unit test class for this script"""
attr_index = 0
# ======================================================================
def create_attribute(self, attribute_type: str, attribute_default):
"""
Create an attribute JSON structure and text with a generic description
:param attribute_type: Attribute type
:param attribute_default: Default value of the attribute
:return: (Attribute Name, Attribute Info as JSON, Attribute as string) output for the attribute definition.
Both versions are returned as the string attribute omits the enclosing curly braces so that
attribute definitions can more easily be embedded in the JSON attribute lists
"""
attribute_name = f"attr{TestNodeGeneratorIllegal.attr_index}"
TestNodeGeneratorIllegal.attr_index += 1
attribute_as_json = {
f"{ogn.AttributeKeys.DESCRIPTION}": f"This is attribute {attribute_name}",
f"{ogn.AttributeKeys.TYPE}": attribute_type,
f"{ogn.AttributeKeys.DEFAULT}": attribute_default,
}
attribute_as_string = f'"{attribute_name}" : {json.dumps(attribute_as_json)}'
return attribute_name, attribute_as_json, attribute_as_string
# ======================================================================
def validate_node(self, node_wrapper: ogn.NodeInterfaceWrapper, node_name: str):
"""
Confirms that a named node exists and is a valid node in the interface wrapper.
:param node_wrapper: ogn.NodeInterfaceWrapper class generated from the node description
:param node_name: Name of the node to check
:return: ogn.NodeInterface object for the named node
"""
self.assertEqual(node_wrapper.node_interface.name, node_name, f"Expected node {node_name} not listed")
return node_wrapper.node_interface
# ======================================================================
def validate_node_description(self, node_interface: ogn.NodeInterface, expected_description: str):
"""
Validate that the node interface has parsed a description with the expected name
:param node_interface: ogn.NodeInterface class of the node being checked
:param expected_description: Description string the node is expected to have
"""
self.assertEqual(
node_interface.description, expected_description, f"Description for node {node_interface.name} not correct"
)
# ======================================================================
def illegal_when_debugging(self, node_description: str, msg: str):
"""Run a parse that is only illegal when in debugging mode, otherwise it just generates warnings"""
if os.getenv("OGN_STRICT_DEBUG"):
with self.assertRaises(ogn.ParseError, msg=f"Parsing interface with {msg}\n{node_description}"):
ogn.NodeInterfaceWrapper(node_description, "test")
else:
ogn.NodeInterfaceWrapper(node_description, "test")
# ======================================================================
    # ======================================================================
    async def test_illegal_arrays(self):
        """Test the set of array flag combinations that are not allowed"""
        # Test data consisting of values for array, optional, and default values.
        # When a value is "None" the property is omitted. Attribute is assumed to be an integer type.
        # Missing default values are permitted so only illegal values are tested.
        # Each row is [array_depth, optional_flag, default_value].
        array_combinations = [
            # Depth-1 arrays with scalar or mistyped defaults
            [1, True, 1],
            [1, False, 1],
            [1, False, ["hello"]],
            [1, None, 1],
            [1, None, ["hello"]],
            # Scalars given array or mistyped defaults
            [0, True, [1]],
            [0, False, [1]],
            [0, False, ["hello"]],
            [0, None, [1]],
            [0, None, ["hello"]],
            # Depth-2 arrays given depth-1 or mistyped defaults
            [2, True, [1]],
            [2, False, [1]],
            [2, False, ["hello"]],
            [2, None, [1]],
            [2, None, ["hello"]],
        ]
        for array_value, optional_value, default_value in array_combinations:
            # Build the attribute type with the requested array depth, e.g. "int[][]" for depth 2
            attribute_type = f"int{'[]' * array_value}"
            if optional_value is None:
                optional_property = ""
            elif optional_value:
                optional_property = '"optional": true,'
            else:
                optional_property = '"optional": false,'
            if default_value is None:
                default_property = ""
            else:
                default_property = f'"default": {default_value},'
            arrays_description = f"""{{
    "Arrays" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with all valid array combinations",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "a": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is an a",
                {optional_property}
                {default_property}
                "{ogn.AttributeKeys.TYPE}": "{attribute_type}"
            }}
        }}
    }}
}}"""
            with self.assertRaises(
                ogn.ParseError, msg=f"Parsing interface with illegal array parameter combinations {arrays_description}"
            ):
                ogn.NodeInterfaceWrapper(arrays_description, "test")
# ======================================================================
    # ======================================================================
    async def test_illegal_minmax(self):
        """Test the set of minimum and maximum value combinations that are not allowed"""
        # Test data consisting of values for attribute type, minimum, maximum, and default value
        # When a value is "None" the property is omitted.
        minmax_combinations = [["int", 1.0, None, 1], ["float", None, "hello", 1.0]]
        # Add range tests for every type supporting min/max, arrays of them, and pairs of them
        for type_with_minmax in [
            "double",
            "float",
            "colord",
            "colorf",
            "colorh",
            "normald",
            "normalf",
            "normalh",
            "pointd",
            "pointf",
            "pointh",
            "texcoordd",
            "texcoordf",
            "texcoordh",
            "vectord",
            "vectorf",
            "vectorh",
            "xform",
        ]:
            minmax_combinations.append([type_with_minmax, 1.0, 3.0, 0.0])  # Default < min
            minmax_combinations.append([type_with_minmax, 1.0, 3.0, 4.0])  # Default > max
            minmax_combinations.append([type_with_minmax, 3.0, 1.0, 2.0])  # max < min
            # The same three violations repeated for array, tuple, and array-of-tuple forms
            minmax_combinations.append([f"{type_with_minmax}[]", 1.0, 3.0, [0.0, 2.0]])
            minmax_combinations.append([f"{type_with_minmax}[]", 1.0, 3.0, [2.0, 4.0]])
            minmax_combinations.append([f"{type_with_minmax}[]", 3.0, 1.0, [2.0, 2.0]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [1.0, 5.0], [3.0, 7.0], [0.0, 6.0]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [1.0, 5.0], [3.0, 7.0], [2.0, 8.0]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [3.0, 7.0], [1.0, 5.0], [2.0, 6.0]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [1.0, 5.0], [3.0, 7.0], [[0.0, 6.0]]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [1.0, 5.0], [3.0, 7.0], [[2.0, 8.0]]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [3.0, 7.0], [1.0, 5.0], [[2.0, 6.0]]])
        # Integer-based types get the same battery of violations with integer values
        for type_with_minmax in ["half", "int", "int64", "uchar", "uint", "uint64"]:
            minmax_combinations.append([type_with_minmax, 1, 3, 0])  # Default < min
            minmax_combinations.append([type_with_minmax, 1, 3, 4])  # Default > max
            minmax_combinations.append([type_with_minmax, 3, 1, 2])  # max < min
            minmax_combinations.append([f"{type_with_minmax}[]", 1, 3, [0, 2]])
            minmax_combinations.append([f"{type_with_minmax}[]", 1, 3, [2, 4]])
            minmax_combinations.append([f"{type_with_minmax}[]", 3, 1, [2, 2]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [1, 5], [3, 7], [0, 6]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [1, 5], [3, 7], [2, 8]])
            minmax_combinations.append([f"{type_with_minmax}[2]", [3, 7], [1, 5], [2, 6]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [1, 5], [3, 7], [[0, 6]]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [1, 5], [3, 7], [[2, 8]]])
            minmax_combinations.append([f"{type_with_minmax}[2][]", [3, 7], [1, 5], [[2, 6]]])
        # Add one test for every type that does not support min/max values
        for type_without_minmax in ["bool", "string", "token"]:
            minmax_combinations.append([type_without_minmax, 1, None, 2])
            minmax_combinations.append([type_without_minmax, None, 3, 2])
        for attribute_type, minimum_value, maximum_value, default_value in minmax_combinations:
            # Omit each property entirely when the test row specifies None for it
            if minimum_value is None:
                minimum_property = ""
            else:
                minimum_property = f'"minimum": {minimum_value},'
            if maximum_value is None:
                maximum_property = ""
            else:
                maximum_property = f'"maximum": {maximum_value},'
            if default_value is None:
                default_property = ""
            else:
                default_property = f'"default": {default_value},'
            minmax_description = f"""{{
    "MinMax" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with all valid min/max combinations",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "a": {{
                "description": "This is an a",
                {minimum_property}
                {maximum_property}
                {default_property}
                "{ogn.AttributeKeys.TYPE}": "{attribute_type}"
            }}
        }}
    }}
}}"""
            with self.assertRaises(
                ogn.ParseError,
                msg=f"Parsing interface with illegal min/max parameter combinations {minmax_description}",
            ):
                ogn.NodeInterfaceWrapper(minmax_description, "test")
# ======================================================================
    # ======================================================================
    async def test_illegal_nodes(self):
        """Test for correctly failed parsing of some illegal node descriptions"""
        # An empty JSON object has no node at all
        empty_json = "{}"
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with no nodes"):
            ogn.NodeInterfaceWrapper(empty_json, "test")
        # ----------------------------------------
        # Not a JSON object at the top level
        illegal_json = f'"{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node"'
        with self.assertRaises(ogn.ParseError, msg="Parsing illegal json"):
            ogn.NodeInterfaceWrapper(illegal_json, "test")
        # ----------------------------------------
        # Top-level keys are node properties rather than a named node
        illegal_node = f'{{ "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node" }}'
        with self.assertRaises(ogn.ParseError, msg="Parsing illegal node"):
            ogn.NodeInterfaceWrapper(illegal_node, "test")
        # ----------------------------------------
        # Node names may not start with a digit
        invalid_node = f'{{ "123Node" : {{ "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is an illegally named node" }} }}'
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal node name"):
            ogn.NodeInterfaceWrapper(invalid_node, "test")
        # ----------------------------------------
        # Each node description file may only define a single node type
        illegal_multiple_nodes = f"""{{
    "LegalFirstNode" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This empty node is legal"
    }},
    "IllegalSecondNode" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This second node is legal"
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal multiple nodes"):
            ogn.NodeInterfaceWrapper(illegal_multiple_nodes, "test")
        # ----------------------------------------
        illegal_memory_type = f"""{{
    "IllegalMemoryType" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal memory type",
        "{ogn.NodeTypeKeys.MEMORY_TYPE}" : "foo"
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal memory type"):
            ogn.NodeInterfaceWrapper(illegal_memory_type, "test")
        # ----------------------------------------
        # Empty descriptions are only hard errors when OGN_STRICT_DEBUG is set
        illegal_description_empty = f"""{{
    "IllegalDescriptionEmpty" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : [""]
    }}
}}"""
        self.illegal_when_debugging(illegal_description_empty, "illegal empty description")
        # ----------------------------------------
        illegal_description_null = f"""{{
    "IllegalDescriptionNull" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : ""
    }}
}}"""
        self.illegal_when_debugging(illegal_description_null, "illegal null description")
        # ----------------------------------------
        illegal_description_empty_list = f"""{{
    "IllegalDescriptionEmptyList" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : []
    }}
}}"""
        self.illegal_when_debugging(illegal_description_empty_list, "illegal empty list description")
        # ----------------------------------------
        # Memory type can also be set per-attribute; "foo" is not a recognized value there either
        illegal_attribute_memory_type = f"""{{
    "IllegalAttributeMemoryType" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal attribute memory type",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "input1": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is the first input",
                "{ogn.AttributeKeys.TYPE}": "float",
                "{ogn.AttributeKeys.DEFAULT}": 1.0,
                "{ogn.AttributeKeys.MEMORY_TYPE}": "foo"
            }}
        }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal attribute memory type"):
            ogn.NodeInterfaceWrapper(illegal_attribute_memory_type, "test")
        # ----------------------------------------
        illegal_metadata = f"""{{
    "illegal_metadata" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal metadata",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.METADATA}" : "must be a dictionary"
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal metadata"):
            ogn.NodeInterfaceWrapper(illegal_metadata, "test")
        # ----------------------------------------
        illegal_ui_name_metadata = f"""{{
    "illegal_ui_name_metadata" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal uiName metadata",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.UI_NAME}" : ["must be a string"]
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal uiName metadata"):
            ogn.NodeInterfaceWrapper(illegal_ui_name_metadata, "test")
        # ----------------------------------------
        illegal_simple_icon = f"""{{
    "illegal_simple_icon" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal simple icon path description",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.ICON}" : ["icon cannot be a list"]
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal simplified icon path"):
            ogn.NodeInterfaceWrapper(illegal_simple_icon, "test")
        # ----------------------------------------
        illegal_detailed_icon = f"""{{
    "illegal_detailed_icon" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal detailed icon path description",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.ICON}" : {{ "illegal": "Illegal keyword" }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal detailed icon information"):
            ogn.NodeInterfaceWrapper(illegal_detailed_icon, "test")
        # ----------------------------------------
        illegal_colors = [
            "#FFGGFF99",
            123,
            "FF112233",
            "#11223344",
            {"red": 45},
            [1, 2, 3, 4, 5],
            [-1, 2, 3, 4],
            [256, 2, 3, 4],
        ]
        # NOTE(review): illegal_color is interpolated into the description only — the "color" value is
        # hardcoded to "#FFGGFF99", so the entries of illegal_colors are never actually exercised.
        # Some entries (e.g. "#11223344") may even be legal colors, so confirm the parser's rules
        # before substituting the loop variable here.
        for illegal_color in illegal_colors:
            illegal_icon_color = f"""{{
    "illegal_icon_color" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" :
            "This is a node with an illegal icon color definition {illegal_color}",
        "{ogn.NodeTypeKeys.VERSION}" : 1,
        "{ogn.NodeTypeKeys.ICON}" : {{ "color": "#FFGGFF99" }}
    }}
}}"""
            with self.assertRaises(
                ogn.ParseError, msg=f"Parsing interface with illegal icon color definition {illegal_color}"
            ):
                ogn.NodeInterfaceWrapper(illegal_icon_color, "test")
        # ----------------------------------------
        illegal_tags_metadata = f"""{{
    "illegal_tags_metadata" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal tags metadata",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.TAGS}" : {{"help": "cannot be a dictionary"}}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal tags metadata"):
            ogn.NodeInterfaceWrapper(illegal_tags_metadata, "test")
        # ----------------------------------------
        # "gpu" is not an accepted spelling for the memory type value
        illegal_memory_type = f"""{{
    "illegal_memory_type" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal memory type",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.MEMORY_TYPE}" : "gpu"
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal memory type"):
            ogn.NodeInterfaceWrapper(illegal_memory_type, "test")
        # ----------------------------------------
        illegal_cuda_pointers = f"""{{
    "illegal_cuda_pointers" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal CUDA pointer type",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.CUDA_POINTERS}" : "gpu"
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal CUDA pointer type"):
            ogn.NodeInterfaceWrapper(illegal_cuda_pointers, "test")
        # ----------------------------------------
        illegal_language = f"""{{
    "IllegalLanguage" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal language",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.LANGUAGE}" : "pascal"
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal language type"):
            ogn.NodeInterfaceWrapper(illegal_language, "test")
        # ----------------------------------------
        # Comment values must be strings; try each illegal value in each of four positions
        illegal_comments = [
            '"!comment": [1,2,3]',
            '"!comment": true',
            '"!comment": 2',
            '"!comment": { "a": 1 }',
            '"!comment": "Ignore me"',
        ]
        for illegal_comment in illegal_comments:
            for comment_location in range(0, 4):
                comment_strings = ["", "", "", ""]
                comment_strings[comment_location] = illegal_comment
                illegal_comment_node_description = f"""{{
    "CommentNode" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a commented node",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        {comment_strings[0]}
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            {comment_strings[1]}
        }},
        "{ogn.NodeTypeKeys.OUTPUTS}" : {{
            "{ogn.AttributeKeys.DESCRIPTION}": "This is a commented attribute",
            {comment_strings[2]}
        }}
    }},
    {comment_strings[3]}
}}"""
                with self.assertRaises(
                    ogn.ParseError, msg=f"Parsing illegal comment {illegal_comment} in position {comment_location}"
                ):
                    ogn.NodeInterfaceWrapper(illegal_comment_node_description, "test")
# ======================================================================
    # ======================================================================
    async def test_illegal_attributes(self):
        """Test for correctly failed parsing of some illegal attribute descriptions"""
        # ----------------------------------------
        # Attribute names may not start with a digit
        illegal_input_name = f"""{{
    "IllegalInputName" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal input name",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "1input1": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is the first input",
                "{ogn.AttributeKeys.TYPE}": "float"
            }}
        }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal input attribute name"):
            ogn.NodeInterfaceWrapper(illegal_input_name, "test")
        # ----------------------------------------
        # Attribute names may not contain punctuation such as parentheses
        illegal_output_name = f"""{{
    "IllegalOutputName" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal output name",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.OUTPUTS}" : {{
            "output1(a)": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is the output",
                "{ogn.AttributeKeys.TYPE}": "float"
            }}
        }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal output attribute name"):
            ogn.NodeInterfaceWrapper(illegal_output_name, "test")
        # ----------------------------------------
        # "YogiBear" is not a recognized attribute type
        illegal_input_type = f"""{{
    "IllegalInputType" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal input type",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "input1": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is the first input",
                "{ogn.AttributeKeys.TYPE}": "YogiBear"
            }}
        }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal input attribute type"):
            ogn.NodeInterfaceWrapper(illegal_input_type, "test")
        # ----------------------------------------
        # Attribute metadata must be a dictionary, not a scalar
        illegal_input_metadata = f"""{{
    "IllegalInputMetadata" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal input metadata",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "input1": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is the first input",
                "{ogn.AttributeKeys.TYPE}": "int",
                "{ogn.AttributeKeys.METADATA}": 3
            }}
        }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with illegal input attribute metadata"):
            ogn.NodeInterfaceWrapper(illegal_input_metadata, "test")
        # ----------------------------------------
        # NOTE(review): this case is named for a missing description but the description IS present;
        # the parse presumably fails on the illegal name "output1(a)" instead — TODO confirm intent.
        illegal_output_description = f"""{{
    "IllegalOutputDescription" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with a missing attribute description",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.OUTPUTS}" : {{
            "output1(a)": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is the first output",
                "{ogn.AttributeKeys.TYPE}": "float"
            }}
        }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with missing output attribute description"):
            ogn.NodeInterfaceWrapper(illegal_output_description, "test")
        # ----------------------------------------
        # NOTE(review): the description key below is unquoted, so this is malformed JSON — the failure
        # may come from the JSON parse rather than the missing default — TODO confirm.
        no_default_description = f"""{{
    "NoDefaultDescription" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with a missing attribute default",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.OUTPUTS}" : {{
            "output1": {{
                {ogn.AttributeKeys.DESCRIPTION}: "This is the output with no default",
                "{ogn.AttributeKeys.TYPE}": "float"
            }}
        }}
    }}
}}"""
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with missing output attribute default"):
            ogn.NodeInterfaceWrapper(no_default_description, "test")
        # ----------------------------------------
        # Reusable attribute body for the duplicate-attribute cases below
        # NOTE(review): the description key here is also unquoted — see the note above.
        attribute_description = f"""
            "input1": {{
                {ogn.AttributeKeys.DESCRIPTION}: "This is the first input",
                "{ogn.AttributeKeys.TYPE}": "int"
            }}
        """
        # Test configurations are a list of [input_attributes, output_attributes], each containing a duplicate
        duplicate_attribute_tests = [
            [[attribute_description, attribute_description], []],
            [[], [attribute_description, attribute_description]],
            [[attribute_description], [attribute_description]],
        ]
        for duplicate_attribute_test in duplicate_attribute_tests:
            inputs = ""
            if duplicate_attribute_test[0]:
                attribute_list = ",".join(duplicate_attribute_test[0])
                inputs = f'"{ogn.NodeTypeKeys.INPUTS}" : {{ {attribute_list} }},'
            outputs = ""
            if duplicate_attribute_test[1]:
                attribute_list = ",".join(duplicate_attribute_test[1])
                # NOTE(review): this uses the INPUTS key for the outputs section (likely copy-paste);
                # switching it to OUTPUTS would change what the third test case exercises — confirm
                # the parser's duplicate-detection rules before fixing.
                outputs = f'"{ogn.NodeTypeKeys.INPUTS}" : {{ {attribute_list} }},'
            duplicated_attributes_description = f"""{{
    "DuplicatedAttributes" : {{
        "{ogn.NodeTypeKeys.VERSION}": 1,
        {inputs}
        {outputs}
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with duplicate attributes"
    }}
}}"""
            with self.assertRaises(
                ogn.ParseError, msg=f"Parsing duplicate attributes {duplicated_attributes_description}"
            ):
                ogn.NodeInterfaceWrapper(duplicated_attributes_description, "test")
        # Test for attributes with existing but empty descriptions
        for attribute_type in [ogn.NodeTypeKeys.INPUTS, ogn.NodeTypeKeys.OUTPUTS]:
            for description_string in ['[""]', '""', "[]"]:
                illegal_empty_description = f"""{{
    "IllegalEmptyDescription" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : ["Node with empty attribute description"],
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{attribute_type}": {{
            "attribute": {{
                "{ogn.AttributeKeys.TYPE}": "float",
                "{ogn.AttributeKeys.DEFAULT}": 0.0,
                "{ogn.AttributeKeys.DESCRIPTION}": {description_string}
            }}
        }}
    }}
}}"""
                self.illegal_when_debugging(illegal_empty_description, "illegal empty attribute description")
# ======================================================================
    # ======================================================================
    async def test_illegal_tests(self):
        """Test for correct rejection of illegal combinations of values in the 'tests' property"""
        # Deliberately unterminated node body; each loop iteration appends a "tests" section plus closers
        tests_node_format = """
        {
            "Add" : {
                "description": "Add the two inputs to create the output",
                "version": 1,
                "inputs": {
                    "input1" : {
                        "description": "Multiplier",
                        "type": "float",
                        "default": 0.0,
                        "minimum": 0.0,
                        "maximum": 1.0
                    },
                    "input2" : {
                        "description": "Second input",
                        "type": "float[]",
                        "default": [0.0]
                    },
                    "x2": {
                        "description": "Double the first input",
                        "type": "bool",
                        "optional": true
                    }
                },
                "outputs": {
                    "output": {
                        "description": "Element-wise product of the two inputs",
                        "type": "float[]",
                        "default": [0.0]
                    }
                },
        """
        test_configurations = [
            # Attribute not recognized
            '{"inputs:nobody": 1.0}',
            # Bad attribute data type
            '{"inputs:input1": true, "inputs:input2": [1.0, 2.0], "outputs:output": [4.0, 5.0]}',
            # Attribute value out of range
            '{"inputs:input1": 2.0, "inputs:input2": [1.0, 2.0], "outputs:output": [4.0, 5.0]}',
            # Bad optional attribute
            '{"inputs:input1": 1.0, "inputs:input2": [1.0, 2.0], "inputs:x2": 3, "outputs:output": [4.0, 5.0]}',
            # Bad optional attribute
            '{"inputs:input1": 1.0, "inputs:input2": [1.0, 2.0], "inputs:foo": 3, "outputs:output": [4.0, 5.0]}',
        ]
        for test_configuration in test_configurations:
            tests_node_description = (
                tests_node_format
                + f"""
                "tests": [ {test_configuration}]
            }}
        }}"""
            )
            with self.assertRaises(ogn.ParseError, msg=f"Parsing illegal test configuration {tests_node_description}"):
                ogn.NodeInterfaceWrapper(tests_node_description, "test")
# ======================================================================
    # ======================================================================
    async def test_illegal_scheduling(self):
        """Test for correct rejection of illegal combinations of scheduling flags"""
        # str.format template: literal JSON braces are doubled; the single {} takes the scheduling value
        scheduling_format = """
        {{
            "ScheduleMe" : {{
                "description": "Schedule this node as it requests",
                "version": 1,
                "scheduling": {}
            }}
        }}
        """
        # SchedulingHints enumerates every known-bad flag combination for us
        for scheduling_configuration in ogn.SchedulingHints.illegal_configurations():
            scheduling_description = scheduling_format.format(scheduling_configuration)
            with self.assertRaises(ogn.ParseError, msg=f"Parsing illegal scheduling flags {scheduling_description}"):
                ogn.NodeInterfaceWrapper(scheduling_description, "test")
# ======================================================================
    # ======================================================================
    async def test_tokens_with_illegal_characters(self):
        """Test that node-level token definitions containing illegal characters are rejected"""
        node_name = "NodeWithSpecialTokenCharacters"
        # Parse the base node once, then mutate its tokens section for each illegal variant
        node_with_illegal_tokens = json.loads(
            f"""{{
    "{node_name}" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with special characters in the tokens",
        "{ogn.NodeTypeKeys.VERSION}": 1
    }}
}}"""
        )
        # Both the list form and the name:value dictionary form of token definitions are covered
        illegal_tokens = [
            ["foo < bar", "foo != bar"],
            {"foo < bar": "fooLtBar"},
        ]
        for token_data in illegal_tokens:
            node_with_illegal_tokens[node_name][ogn.NodeTypeKeys.TOKENS] = token_data
            with self.assertRaises(ogn.ParseError, msg=f"Parsing illegal token names {node_with_illegal_tokens}"):
                ogn.NodeInterfaceWrapper(node_with_illegal_tokens, "test")
# ======================================================================
    # ======================================================================
    async def test_allowed_tokens_with_illegal_characters(self):
        """Test that attribute allowedTokens metadata containing illegal characters is rejected"""
        node_name = "AttributeWithSpecialTokenCharacters"
        # Parse the base node once, then inject each illegal allowedTokens value into the metadata
        attr_with_illegal_tokens = json.loads(
            f"""{{
    "{node_name}" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with special characters in the tokens",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "inputTokens": {{
                "description": ["This is an input with metadata"],
                "type": "token",
                "default": "foo",
                "metadata": {{
                }}
            }}
        }}
    }}
}}"""
        )
        # Both the list form and the name:value dictionary form are covered
        illegal_tokens = [
            ["foo < bar", "foo != bar"],
            {"foo < bar": "fooLtBar"},
        ]
        for token_data in illegal_tokens:
            attr_with_illegal_tokens[node_name][ogn.NodeTypeKeys.INPUTS]["inputTokens"]["metadata"][
                ogn.MetadataKeys.ALLOWED_TOKENS
            ] = token_data
            with self.assertRaises(
                ogn.ParseError, msg=f"Parsing illegal allowedToken names {attr_with_illegal_tokens}"
            ):
                ogn.NodeInterfaceWrapper(attr_with_illegal_tokens, "test")
# ======================================================================
    # ======================================================================
    async def test_illegal_token_default(self):
        """Test for failure when specifying a default token that is not in the allowed list"""
        node_name = "NodeWithSpecialTokenCharacters"
        # Base node with a legal default/allowedTokens pair that gets overwritten per test case
        node_with_illegal_default = json.loads(
            f"""{{
    "{node_name}" : {{
        "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with attributes with an illegal default token",
        "{ogn.NodeTypeKeys.VERSION}": 1,
        "{ogn.NodeTypeKeys.INPUTS}" : {{
            "XXX": {{
                "{ogn.AttributeKeys.DESCRIPTION}": "This is the token input",
                "{ogn.AttributeKeys.TYPE}": "token",
                "{ogn.AttributeKeys.DEFAULT}": "X",
                "{ogn.AttributeKeys.ALLOWED_TOKENS}": "X"
            }}
        }}
    }}
}}"""
        )
        # Each row is [default_value, allowed_tokens]; "foo" never appears in the allowed set
        illegal_combinations = [
            ["foo", {"fooLtBar": "foo < bar", "fooEqBar": "foo == bar"}],
            ["foo", ["fooLtBar", "fooEqBar"]],
            ["foo", "bar"],
        ]
        for default_value, allowed_tokens in illegal_combinations:
            attr_definition = node_with_illegal_default[node_name][ogn.NodeTypeKeys.INPUTS]["XXX"]
            attr_definition[ogn.AttributeKeys.DEFAULT] = default_value
            attr_definition[ogn.AttributeKeys.ALLOWED_TOKENS] = allowed_tokens
            with self.assertRaises(ogn.ParseError, msg=f"Parsing illegal token defaults {node_with_illegal_default}"):
                ogn.NodeInterfaceWrapper(node_with_illegal_default, "test")
# ======================================================================
async def test_keyword_attributes(self):
    """Test for correctly failed parsing of attributes named after keywords"""
    # Python-implemented node: every Python keyword is illegal as an attribute name
    python_keyword_attribute = f"""{{
        "PythonKeywordAttribute" : {{
            "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal input name",
            "{ogn.NodeTypeKeys.VERSION}": 1,
            "{ogn.NodeTypeKeys.LANGUAGE}": "Python",
            "{ogn.NodeTypeKeys.INPUTS}" : {{
                "XXX": {{
                    "{ogn.AttributeKeys.DESCRIPTION}": "This is the first input",
                    "{ogn.AttributeKeys.TYPE}": "float"
                }}
            }}
        }}
    }}"""
    for python_key in PYTHON_KEYWORDS:
        # "XXX" is a placeholder substituted with each keyword in turn
        description = python_keyword_attribute.replace("XXX", python_key)
        with self.assertRaises(ogn.ParseError, msg="Parsing interface with python keyword attribute name"):
            ogn.NodeInterfaceWrapper(description, "test")
    # --------------------------------------------------------------------------------------------------------------
    # C++-implemented node (default language): C++ keywords are illegal except the SAFE_CPP_KEYWORDS subset
    cpp_keyword_attribute = f"""{{
        "CppKeywordAttribute" : {{
            "{ogn.NodeTypeKeys.DESCRIPTION}" : "This is a node with an illegal input name",
            "{ogn.NodeTypeKeys.VERSION}": 1,
            "{ogn.NodeTypeKeys.INPUTS}" : {{
                "XXX": {{
                    "{ogn.AttributeKeys.DESCRIPTION}": "This is the first input",
                    "{ogn.AttributeKeys.TYPE}": "float"
                }}
            }}
        }}
    }}"""
    for cpp_key in CPP_KEYWORDS:
        description = cpp_keyword_attribute.replace("XXX", cpp_key)
        if cpp_key in SAFE_CPP_KEYWORDS:
            # Keywords in SAFE_CPP_KEYWORDS are accepted — presumably they cannot collide in the
            # generated code; see the SAFE_CPP_KEYWORDS definition for the rationale (not visible here)
            self.assertIsNotNone(ogn.NodeInterfaceWrapper(description, "test"))
        else:
            with self.assertRaises(ogn.ParseError, msg="Parsing interface with C++ keyword attribute name"):
                ogn.NodeInterfaceWrapper(description, "test")
| 40,175 | Python | 47.172662 | 120 | 0.495482 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_ogn_types.py | """Test the utility functions found in ogn_types.py.
These are almost unit tests, they just require the Kit environment to work due to the imports.
"""
import omni.graph.tools.ogn as ogn
import omni.kit.test
# This test uses some imports that aren't exposed as part of the public interface
from omni.graph.tools._1_11 import OGN_TO_SDF_BASE_NAME, SDF_BASE_NAME_TO_OGN, SDF_TO_OGN
class TestOmniGraphTypeSupport(omni.kit.test.AsyncTestCase):
    """Tests the basic operation of the utilities found in ogn_types.py"""

    async def test_ogn_to_sdf_conversion(self):
        """Test consistency when converting to and from OGN and SDF type definitions"""
        # The two base-name maps must be mirror images of each other
        self.assertEqual(len(OGN_TO_SDF_BASE_NAME), len(SDF_BASE_NAME_TO_OGN))
        # SDF_TO_OGN is expected to be exactly twice the base map — presumably one entry each for the
        # single and array variants of every base name; TODO confirm against the _1_11 definitions
        self.assertEqual(len(SDF_BASE_NAME_TO_OGN) * 2, len(SDF_TO_OGN))
        # Round-trip every known mapping in both directions
        for sdf_type, ogn_type_name in SDF_TO_OGN.items():
            ogn_converted_name = ogn.sdf_to_ogn(sdf_type)
            self.assertEqual(ogn_type_name, ogn_converted_name, f"Converting SDF {sdf_type} to OGN type name")
            sdf_converted_type = ogn.ogn_to_sdf(ogn_type_name)
            self.assertEqual(sdf_type, sdf_converted_type, f"Converting OGN type name {ogn_type_name} to SDF")
| 1,211 | Python | 49.499998 | 110 | 0.705202 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/internal_utils.py | """Internal test utilities"""
from __future__ import annotations
import json
import sys
from contextlib import suppress
from pathlib import Path
from types import TracebackType
from typing import Any, List, Tuple
import omni.graph.tools._internal as ogi
import omni.graph.tools.ogn as ogn
# ==============================================================================================================
# Type of data to pass to the utilities when building a testing build or cache tree:
# (generation versions, implementation language, list of file types to create)
BuildOgnTreeInfo_t = Tuple[ogi.GenerationVersions, ogn.LanguageTypeValues, List[ogi.FileType]]
# ==============================================================================================================
def _is_test_object(object_to_check: str) -> bool:
return object_to_check != "scan_for_test_modules" and not object_to_check.startswith("test")
# ==============================================================================================================
def _check_module_api_consistency(module: object, ignored_extras: List[str] = None, is_test_module: bool = False):
"""Check the given module to make sure that its visible API matches the one it has published.
Args:
module: Module being tested (already imported)
ignored_extras: List of known differences that can be ignored (e.g. the "tests" submodule)
is_test_module: If True then the module is also allowed to contain the standard test definitions since they
must be public for the automated test registration to work.
Raises:
ValueError if the module API contents are not as expected - message in the exception indicates the discrepancy
"""
visible_objects = [
module_object
for module_object in dir(module)
if not module_object.startswith("_")
and (ignored_extras is None or module_object not in ignored_extras)
and (not is_test_module or _is_test_object(module_object))
]
# For performance, sorting and comparing lists is 4x faster than converting both to sets and then comparing
visible_objects.sort()
with suppress(AttributeError):
all_objects = module.__all__
all_objects.sort()
if visible_objects != all_objects:
# Compute the set of objects that are in only one list to clarify what's reported
visible_not_all = set(visible_objects) - set(all_objects)
all_not_visible = set(all_objects) - set(visible_objects)
msgs = []
if visible_not_all:
msgs.append(f"Visible objects {visible_not_all} not published")
if all_not_visible:
msgs.append(f"Published objects {all_not_visible} not part of the module")
raise ValueError(f"Module {module} error - {', '.join(msgs)}")
# ==============================================================================================================
def _check_public_api_contents(
module: object, published: List[str], unpublished: List[str], only_expected_allowed: bool
):
"""Check the given module to make sure that the expected API objects are still publicly visible.
Args:
module: Module being tested (already imported)
published: Names of the objects that should be publicly visible in the module
unpublished: List of known visible objects that are not published and can be ignored (e.g. "tests" submodule)
only_expected_allowed: If True then it is an error if objects other than the expected ones are publicly visible
Raises:
ValueError if the module API contents are not as expected - message in the exception indicates the discrepancy
"""
with suppress(AttributeError):
all_objects = module.__all__
for expected_object in published:
# Check to make sure the API object is exposed in the module
if expected_object not in all_objects:
raise ValueError(f"Expected API object '{expected_object}' not exposed in {module.__name__}.__all__")
# Check to make sure the API object actually exists in the module
if getattr(module, expected_object, None) is None:
raise ValueError(f"Expected API object '{expected_object}' not a member of {module.__name__}")
for unpublished_object in unpublished:
# Check to make sure the unpublished object is really not published
if unpublished_object in all_objects:
raise ValueError(f"Unexpected API object '{unpublished_object}' exposed in {module.__name__}.__all__")
# If the expected objects are the only ones allowed then confirm that no published or non-underscore object
# exists in that module.
if not only_expected_allowed:
return
for published_object in all_objects:
if published_object not in published:
raise ValueError(f"Published API object '{published_object}' not in expected list '{published}'")
all_visible = published + unpublished
visible_objects = [obj for obj in dir(module) if not obj.startswith("_")]
# Use some magic to figure out if the module being tested is actually one of the test submodules, where
# the actual tests can safely be ignored as they are required to be public for the automated test runner.
if "scan_for_test_modules" in visible_objects:
visible_objects = [module_object for module_object in visible_objects if _is_test_object(module_object)]
for visible_object in visible_objects:
if visible_object not in all_visible:
raise ValueError(f"Visible API object '{visible_object}' not in expected list '{all_visible}'")
# ==============================================================================================================
class CreateHelper:
    """Helper class containing all of the functions that build temporary files for the OGN tree

    Attributes:
        __root: Path to the root of the tree where the generated code will live
        __ext_name: Name of the extension this tree belongs to
        __module_name: Name of the Python module this tree belongs to; usually the same as __ext_name
        __versions: Generation versions for any generated code
        __generated_code: The collection of generated code for nodes added to this tree
    """

    # Default node class name for tests that do not care about a specific name
    TEST_CLASS = "OgnTestNode"

    # --------------------------------------------------------------------------------------------------------------
    @staticmethod
    def safe_create(directory: Path, class_name: str | None, file_type: ogi.FileType | None, content: str) -> Path:
        """Create a new OGN-related file in the given location with the given type and contents.

        If class_name is None then the directory is assumed to be the full path name and file_type is ignored. A bit
        questionable but avoids two versions of this function.

        Raises AttributeError if the file could not be written, returns the new file path otherwise

        Returns the path to the newly created file
        """
        if class_name is None:
            file_path = directory
        else:
            file_path = directory / ogi.get_ogn_file_name(class_name, file_type)
        try:
            # Make sure the parent directory chain exists before writing the file
            file_path.parent.mkdir(parents=True, exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as fd:
                fd.write(content)
            return file_path
        except IOError as error:
            raise AttributeError(f"Failed to write {class_name} file of type {file_type} to {directory}") from error

    # --------------------------------------------------------------------------------------------------------------
    def __init__(self, ext_name: str, root_path: Path, versions: ogi.GenerationVersions):
        """Initialize the helper for one extension rooted at the given path with the given generation versions"""
        self.__root = root_path
        self.__ext_name = ext_name
        self.__module_name = ext_name
        self.__versions = versions
        self.__generated_code = {}

    # --------------------------------------------------------------------------------------------------------------
    def _generate_code(
        self,
        class_name: str,
        exclusions: list[ogi.FileType] = None,
        language: str = "Python",
    ) -> dict[ogi.FileType, str]:
        """Creates an OGN definition and the code it will generate.

        Adds the set of generated code for the class type to the internal dictionary for later use.

        Args:
            class_name: Name of the class in the generated node type
            exclusions: List of file types excluded from generation
            language: Implementation language, usually Python for these tests.
                NOTE(review): this parameter is currently ignored — the .ogn definition below always
                hardcodes LanguageTypeValues.PYTHON; confirm whether that is intended.

        Returns:
            A reference to the saved generated code for this class
        """
        # Results are memoized per class name so repeated calls are cheap
        if class_name in self.__generated_code:
            _ = ogi.LOG.disabled or ogi.LOG.info("Skipping generation of code for %s, it already exists", class_name)
            return self.__generated_code[class_name]
        _ = ogi.LOG.disabled or ogi.LOG.info("Generating code for %s with exclusions %s", class_name, exclusions)
        # Remember the .ogn definitions for use in the generation of sample code
        ogn_code = {
            class_name: {
                ogn.NodeTypeKeys.DESCRIPTION: "None",
                ogn.NodeTypeKeys.VERSION: 1,
                ogn.NodeTypeKeys.LANGUAGE: ogn.LanguageTypeValues.PYTHON,
            }
        }
        if exclusions is not None:
            # Translate the excluded file types into the configuration names the .ogn "exclude" key expects
            ogn_code[class_name].update(
                {
                    ogn.NodeTypeKeys.EXCLUDE: [
                        ogi.GENERATED_FILE_CONFIG_NAMES[exclusion]
                        for exclusion in exclusions
                        if exclusion in ogi.GENERATED_FILE_CONFIG_NAMES
                    ],
                }
            )
        _ = ogi.LOG.disabled or ogi.LOG.info("Creating node for %s", class_name)
        # Start with the two user-driven files that will be matched with the generated code
        generated_code = {
            ogi.FileType.OGN: json.dumps(ogn_code, indent=4),
            ogi.FileType.PYTHON: f"""
class {class_name}:
    @staticmethod
    def compute(db) -> bool:
        return True
""",
        }
        # Run the code generator to get all of the results
        results = ogn.code_generation(
            ogn_code,
            class_name,
            self.__ext_name,
            self.__ext_name,
            generator_version_override=self.__versions[ogi.VersionProperties.GENERATOR],
            target_version_override=self.__versions[ogi.VersionProperties.TARGET],
        )
        # Keep only the non-excluded generated files that the generator actually produced
        for file_type, config_name in ogi.GENERATED_FILE_CONFIG_NAMES.items():
            if file_type not in (exclusions or []) and config_name in results:
                generated_code[file_type] = results[config_name]
        self.__generated_code.update({class_name: generated_code})
        return generated_code

    # --------------------------------------------------------------------------------------------------------------
    def create_ogn(
        self, class_name: str, language: str, relative_path: Path, exclusions: list[ogi.FileType] = None
    ) -> Path:
        """Write the .ogn definition file for the named class under relative_path; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create OGN file for %s", class_name)
        my_code = self._generate_code(class_name, exclusions, language)
        return CreateHelper.safe_create(
            self.__root.joinpath(relative_path),
            class_name,
            ogi.FileType.OGN,
            my_code[ogi.FileType.OGN],
        )

    # --------------------------------------------------------------------------------------------------------------
    def create_cpp(self, class_name: str, file_path: Path) -> Path:
        """Write the C++ node implementation file for the named class; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create CPP file for %s", class_name)
        my_code = self._generate_code(class_name)
        return CreateHelper.safe_create(
            self.__root.joinpath(file_path),
            class_name,
            ogi.FileType.CPP,
            my_code[ogi.FileType.CPP],
        )

    # --------------------------------------------------------------------------------------------------------------
    def create_cpp_database(self, class_name: str, file_path: Path) -> Path:
        """Write the generated C++ database header for the named class; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create CPP_DB file for %s", class_name)
        my_code = self._generate_code(class_name)
        return CreateHelper.safe_create(
            self.__root.joinpath(file_path),
            class_name,
            ogi.FileType.CPP_DB,
            my_code[ogi.FileType.CPP_DB],
        )

    # --------------------------------------------------------------------------------------------------------------
    def create_py_database(self, class_name: str, relative_path: Path) -> Path:
        """Write the generated Python database file for the named class; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create PY_DB file for %s", class_name)
        my_code = self._generate_code(class_name)
        return CreateHelper.safe_create(
            self.__root.joinpath(relative_path),
            class_name,
            ogi.FileType.PYTHON_DB,
            my_code[ogi.FileType.PYTHON_DB],
        )

    # --------------------------------------------------------------------------------------------------------------
    def create_py(self, class_name: str, relative_path: Path) -> Path:
        """Write the Python node implementation file for the named class; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create PY file for %s", class_name)
        my_code = self._generate_code(class_name)
        return CreateHelper.safe_create(
            self.__root.joinpath(relative_path),
            class_name,
            ogi.FileType.PYTHON,
            my_code[ogi.FileType.PYTHON],
        )

    # --------------------------------------------------------------------------------------------------------------
    def create_docs(self, class_name: str, relative_path: Path) -> Path:
        """Write the generated documentation file for the named class; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create DOCS file for %s", class_name)
        my_code = self._generate_code(class_name)
        return CreateHelper.safe_create(
            self.__root.joinpath(relative_path),
            class_name,
            ogi.FileType.DOCS,
            my_code[ogi.FileType.DOCS],
        )

    # --------------------------------------------------------------------------------------------------------------
    def create_tests(self, class_name: str, relative_path: Path) -> Path:
        """Write the generated test script for the named class; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create TESTS file for %s", class_name)
        my_code = self._generate_code(class_name)
        return CreateHelper.safe_create(
            self.__root.joinpath(relative_path),
            class_name,
            ogi.FileType.TEST,
            my_code[ogi.FileType.TEST],
        )

    # --------------------------------------------------------------------------------------------------------------
    def create_usd(self, class_name: str, relative_path: Path) -> Path:
        """Write the generated USD template file for the named class; returns the new file path"""
        _ = ogi.LOG.disabled or ogi.LOG.info("...Create USD file for %s", class_name)
        my_code = self._generate_code(class_name)
        return CreateHelper.safe_create(
            self.__root.joinpath(relative_path),
            class_name,
            ogi.FileType.USD,
            my_code[ogi.FileType.USD],
        )

    # --------------------------------------------------------------------------------------------------------------
    def _add_init_to_generated_tree(self, relative_module_root: Path) -> Path:
        """Adds the __init__.py file to the generated tree that is used for importing the node definitions

        Args:
            relative_module_root: Root directory of the node type's Python module relative to the main root

        Returns:
            Path to the created file
        """
        init_path = self.__root / relative_module_root / "__init__.py"
        with open(init_path, "w", encoding="utf-8") as init_fd:
            init_fd.write("# Placeholder for database imports\n")
            # Import every generated *Database.py sitting beside the __init__.py
            # NOTE(review): the glob runs on the relative path, i.e. relative to the current working
            # directory rather than self.__root — confirm callers always pass a usable path here.
            for database_file in relative_module_root.glob("*Database.py"):
                class_name = database_file.stem
                init_fd.write(f"from .{class_name} import {class_name}\n")
        return init_path

    # --------------------------------------------------------------------------------------------------------------
    def _add_test_init_file(self, ogn_root: Path) -> list[Path]:
        """Adds the __init__.py file to the generated tree that is used for importing the node definitions

        Args:
            ogn_root: Root directory of the node type's generated OGN definitions

        Returns:
            List containing the created file, or empty list if the file already existed
        """
        created_files = []
        test_init_file = ogn_root / "tests" / "__init__.py"
        # Only write the file once; multiple nodes share the same tests/__init__.py
        if not test_init_file.is_file():
            created_files.append(
                CreateHelper.safe_create(
                    test_init_file,
                    None,
                    None,
                    """
import omni.graph.tools._internal as ogi
ogi.import_tests_in_directory(__file__, __name__)
""",
                )
            )
        return created_files

    # --------------------------------------------------------------------------------------------------------------
    def _add_node_type_to_tree(
        self,
        relative_root: Path,
        node_type_name: str,
        language: ogn.LanguageTypeValues,
        versions: ogi.GenerationVersions,
        tree_description: list[tuple[ogi.FileType, Path]],
        exclusions: list[ogi.FileType] = None,
    ) -> list[Path]:
        """Adds a node type file with the given configuration to the tree

        Args:
            relative_root: Top level directory of the simulated Python module build tree relative to the main root
            node_type_name: Name of the node type being constructed, also used as the class name
            language: Implementation language the .ogn file specifies
            versions: Target and generator versions for the constructed node type files
            tree_description: List of file type and relative path for all files to be constructed
            exclusions: List of file types that will not be constructed (and are added to the .ogn "exclusions" list)

        Returns:
            List of the paths to the files that were constructed
        """
        node_paths = []
        for file_type, relative_to_module in tree_description:
            if exclusions is not None and file_type in exclusions:
                continue
            relative_path = relative_root / relative_to_module
            try:
                # Dispatch to the creator matching the requested file type
                if file_type == ogi.FileType.OGN:
                    node_paths.append(self.create_ogn(node_type_name, language, relative_path, exclusions))
                elif file_type == ogi.FileType.CPP_DB:
                    node_paths.append(self.create_cpp_database(node_type_name, relative_path))
                elif file_type == ogi.FileType.PYTHON_DB:
                    node_paths.append(self.create_py_database(node_type_name, relative_path))
                elif file_type == ogi.FileType.PYTHON:
                    node_paths.append(self.create_py(node_type_name, relative_path))
                elif file_type == ogi.FileType.CPP:
                    node_paths.append(self.create_cpp(node_type_name, relative_path))
                elif file_type == ogi.FileType.DOCS:
                    node_paths.append(self.create_docs(node_type_name, relative_path))
                elif file_type == ogi.FileType.TEST:
                    node_paths.append(self.create_tests(node_type_name, relative_path))
                elif file_type == ogi.FileType.USD:
                    node_paths.append(self.create_usd(node_type_name, relative_path))
                else:
                    raise ValueError(f"Unrecognized type: {file_type} building {node_type_name}")
            except IOError as error:
                raise IOError(f"Failed to create file type {file_type} on node {node_type_name}") from error
        _ = ogi.LOG.disabled or ogi.LOG.info("Built Tree with %s", node_paths)
        return node_paths

    # --------------------------------------------------------------------------------------------------------------
    def add_standalone_node(
        self,
        node_type_name: str,
        exclusions: list[ogi.FileType] = None,
    ) -> list[Path]:
        """Adds a set of node type files with the given configuration, without any generated files, to the root tree

        The structure of these directories looks like this:
            .../omni.my.extension/
                omni/
                    my/
                        extension/
                            nodes/
                                OgnMyNode.py
                                OgnMyNode.ogn

        Args:
            node_type_name: Name of the node type being constructed, also used as the class name
            exclusions: List of file types that will not be constructed (and are added to the .ogn "exclusions" list)

        Returns:
            List of the paths to the files that were constructed
        """
        relative_module_root = Path(self.__ext_name) / self.__module_name.replace(".", "/")
        tree = [
            (ogi.FileType.OGN, "nodes"),
            (ogi.FileType.PYTHON, "nodes"),
        ]
        return self._add_node_type_to_tree(
            relative_module_root, node_type_name, ogn.LanguageTypeValues.PYTHON, self.__versions, tree, exclusions
        )

    # --------------------------------------------------------------------------------------------------------------
    def add_extension_module(
        self,
        ext_path: Path,
        ext_name: str,
        module_path: Path,
        import_ogn: bool,
        import_ogn_tests: bool,
    ):
        """Create the support files required for a local extension.

        The files will not be deleted by this class, their lifespan must be managed by the caller, e.g. through using
        a TempDirectory for their location.

        Files created will include:
            ext_path/
                config/
                    extension.toml
                my/
                    python/
                        module/
                            __init__.py

        Args:
            ext_path: Directory to contain the new extension
            ext_name: Name of the extension to create (e.g. 'omni.my.extension')
            import_ogn: Add a python module spec for the .ogn submodule in the extension.toml
            import_ogn_tests: Add a python module spec for the .ogn.tests submodule in the extension.toml

        Raises:
            AttributeError if there was a problem creating the extension configuration
        """
        _ = ogi.LOG.disabled or ogi.LOG.info(
            "Creating extension %s in %s using module %s", ext_name, ext_path, module_path
        )
        install_path: Path = Path(ext_path) / ext_name
        config_dir: Path = install_path / "config"
        created_files: list[Path] = []
        # Add any extras requested by the arguments
        # NOTE(review): the generated name lines below appear to be missing the closing double-quote
        # after ".ogn"/".ogn.tests", which would make the emitted TOML invalid — confirm intended.
        extras = ""
        if import_ogn:
            extras += f"""[[python.module]]
name = "{ext_name}.ogn
"""
        if import_ogn_tests:
            extras += f"""[[python.module]]
name = "{ext_name}.ogn.tests
"""
        # Create a minimal extension.toml file that will set up the test extension for OmniGraph Python nodes
        created_files.append(
            CreateHelper.safe_create(
                config_dir / "extension.toml",
                None,
                None,
                f"""
[package]
version = "0.1.0"
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["Ogn*Database.py"]
[dependencies]
"omni.graph" = {{}}
# Main python module this extension provides, it will be publicly available as "import {ext_name}".
[[python.module]]
name = "{ext_name}"
{extras}""",
            )
        )
        # Create a minimal __init__.py file for the extension
        created_files.append(
            CreateHelper.safe_create(
                module_path / "__init__.py",
                None,
                None,
                f"""
import omni.ext


class _PublicExtension(omni.ext.IExt):
    def on_startup(self, ext_id):
        print("[{module_path.as_posix()}] {ext_name} startup", flush=True)

    def on_shutdown(self):
        print("[{module_path.as_posix()}] {ext_name} shutdown", flush=True)
""",
            )
        )
        return created_files

    # --------------------------------------------------------------------------------------------------------------
    def add_v1_18_node(
        self,
        node_type_name: str,
        exclusions: list[ogi.FileType] = None,
    ) -> list[Path]:
        """Adds a standard Python node, as it appears in omni.graph.tools V1.18, to the given root tree.

        Note that the ordering is such that it behaves as though the generation happened after the node creation, so
        the "generated" files are all newer than the implementation files.

        The structure of these directories looks like this:
            .../omni.my.extension/
                omni/
                    my/
                        extension/
                            ogn/
                                OgnMyNodeDatabase.py
                                nodes/
                                    OgnMyNode.py
                                    OgnMyNode.ogn
                                tests/
                                    TestOgnMyNode.py
                                    usd/
                                        OgnMyNodeTemplate.usd

        Args:
            node_type_name: Name of the node type being constructed, also used as the class name
            exclusions: List of file types that will not be constructed (and are added to the .ogn "exclusions" list)

        Returns:
            List of the paths to the files that were constructed
        """
        _ = ogi.LOG.disabled or ogi.LOG.info(
            "Adding V1.18 node %s in extension %s at %s", node_type_name, self.__ext_name, self.__root
        )
        created_files = []
        relative_module_root = Path(self.__ext_name) / self.__module_name.replace(".", "/")
        tree = [
            (ogi.FileType.OGN, "ogn/nodes"),
            (ogi.FileType.PYTHON, "ogn/nodes"),
            (ogi.FileType.PYTHON_DB, "ogn"),
        ]
        if exclusions is None or ogi.FileType.TEST not in exclusions:
            tree.append((ogi.FileType.TEST, "ogn/tests"))
            created_files += self._add_test_init_file(self.__root / relative_module_root / "ogn")
        if exclusions is None or ogi.FileType.USD not in exclusions:
            tree.append((ogi.FileType.USD, "ogn/tests/usd"))
        created_files += self._add_node_type_to_tree(
            relative_module_root, node_type_name, ogn.LanguageTypeValues.PYTHON, self.__versions, tree, exclusions
        )
        return created_files

    # --------------------------------------------------------------------------------------------------------------
    def add_v1_19_node(
        self,
        node_type_name: str,
        exclusions: list[ogi.FileType] = None,
    ) -> list[Path]:
        """Adds a standard Python node, as it appears in omni.graph.tools V1.19 and higher, to the given root tree.

        Note that the ordering is such that it behaves as though the generation happened after the node creation, so
        the "generated" files are all newer than the implementation files. The docs file is omitted to give the caller
        something to test both for presence and omission.
        NOTE(review): the code below *does* add the DOCS file unless excluded, and puts the user files at the module
        root rather than a nodes/ subdirectory as drawn — the docstring and implementation disagree; confirm which
        is current.

        The structure of these directories looks like this:
            .../omni.my.extension/
                ogn/
                    generated/
                        __init__.py
                        OgnMyNodeDatabase.py
                        tests/
                            TestOgnMyNode.py
                            usd/
                                OgnMyNodeTemplate.usd
                omni/
                    my/
                        extension/
                            nodes/
                                OgnMyNode.py
                                OgnMyNode.ogn

        Args:
            node_type_name: Name of the node type being constructed, also used as the class name
            exclusions: List of file types that will not be constructed (and are added to the .ogn "exclusions" list)

        Returns:
            List of the paths to the files that were constructed
        """
        relative_module_root = Path(self.__ext_name) / self.__module_name.replace(".", "/")
        relative_generated_root = Path(self.__ext_name) / "ogn" / "generated"
        user_tree = [
            (ogi.FileType.OGN, "."),
            (ogi.FileType.PYTHON, "."),
        ]
        generated_tree = [
            (ogi.FileType.PYTHON_DB, "ogn"),
        ]
        if exclusions is None or ogi.FileType.TEST not in exclusions:
            generated_tree.append((ogi.FileType.TEST, "tests"))
        if exclusions is None or ogi.FileType.USD not in exclusions:
            generated_tree.append((ogi.FileType.USD, "tests/usd"))
        if exclusions is None or ogi.FileType.DOCS not in exclusions:
            generated_tree.append((ogi.FileType.DOCS, "docs"))
        files_added = self._add_node_type_to_tree(
            relative_module_root, node_type_name, ogn.LanguageTypeValues.PYTHON, self.__versions, user_tree, exclusions
        )
        files_added += self._add_node_type_to_tree(
            relative_generated_root,
            node_type_name,
            ogn.LanguageTypeValues.PYTHON,
            self.__versions,
            generated_tree,
            exclusions,
        )
        files_added.append(self._add_init_to_generated_tree(self.__root / relative_generated_root))
        return files_added

    # --------------------------------------------------------------------------------------------------------------
    def add_cache_for_node(
        self,
        node_type_name: str,
        exclusions: list[ogi.FileType] = None,
    ) -> list[Path]:
        """Adds a set of generated node type files with the given configuration as they would appear in the cache

        The structure of these directories looks like this:
            .../cache/ogn_generated/
                1.19.0/
                    omni.my.extension-0.1.0/
                        ogn/
                            __init__.py
                            OgnMyNodeDatabase.py
                            tests/
                                TestOgnMyNode.py
                                usd/
                                    OgnMyNodeTemplate.usd

        Args:
            node_type_name: Name of the node type being constructed, also used as the class name
            exclusions: List of file types that will not be constructed (and are added to the .ogn "exclusions" list)

        Returns:
            List of the paths to the files that were constructed
        """
        # Append the default extension ID version to the name to get a consistent path
        self.__root = ogi.full_cache_path(self.__versions, self.__ext_name + "-0.1.0", self.__module_name)
        # NOTE(review): old_root is captured *after* __root was reassigned above, so the finally block
        # restores the cache path, not the original root — the original root is lost. Confirm whether
        # the capture should happen before the reassignment.
        old_root = self.__root
        tree = [(ogi.FileType.PYTHON_DB, ".")]
        files_added = []
        try:
            if exclusions is None or ogi.FileType.TEST not in exclusions:
                tree.append((ogi.FileType.TEST, "tests"))
                files_added += self._add_test_init_file(self.__root)
            if exclusions is None or ogi.FileType.DOCS not in exclusions:
                tree.append((ogi.FileType.DOCS, "docs"))
            # NOTE(review): this condition tests FileType.DOCS but appends the USD file — looks like a
            # copy-paste slip; confirm it should test ogi.FileType.USD instead.
            if exclusions is None or ogi.FileType.DOCS not in exclusions:
                tree.append((ogi.FileType.USD, "tests/usd"))
            files_added += self._add_node_type_to_tree(
                Path("."), node_type_name, ogn.LanguageTypeValues.PYTHON, self.__versions, tree, exclusions
            )
            files_added.append(self._add_init_to_generated_tree(Path(".")))
        finally:
            self.__root = old_root
        return files_added
# ==============================================================================================================
class TemporaryPathAddition:
    """Context manager to temporarily add extra locations to the sys import path.

    with TemporaryPathAddition(new_path_location):
        do_import_from_new_path()
    """

    def __init__(self, new_location: Path | str):
        # sys.path entries are plain strings, so normalize whatever the caller passed
        self.__location = str(new_location)

    def __enter__(self):
        sys.path.append(self.__location)
        return self.__location

    def __exit__(self, exit_type: Any, value: Any, traceback: TracebackType):
        try:
            sys.path.remove(self.__location)
        except ValueError:
            pass  # entry already gone from sys.path; nothing to undo
        self.__location = None
| 32,847 | Python | 45.005602 | 120 | 0.537309 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_internal_extension.py | """Testing for the internal utilities, like extension version management, logging, and .ogn file handling"""
from contextlib import ExitStack
from pathlib import Path
from tempfile import TemporaryDirectory
import omni.graph.tools._internal as ogi
import omni.kit.test
from .internal_utils import CreateHelper, TemporaryPathAddition
# Helper constants shared by many tests
# Monotonically increasing suffix so each test gets a uniquely named extension
EXT_INDEX = 0
# Default node class name shared by the tests
_CLASS_NAME = "OgnTestNode"
# Generation versions representing fully-compatible generated code
_CURRENT_VERSIONS = ogi.GenerationVersions(ogi.Compatibility.FullyCompatible)
# ==============================================================================================================
class ModuleContexts:
    """Bundle of per-test context managers: a unique extension name, a temp directory, a redirected
    OGN cache, and a temporary sys.path addition, all registered on the caller's ExitStack."""

    def __init__(self, stack: ExitStack):
        """Set up a stack of contexts to use for tests running in individual temporary directory"""
        global EXT_INDEX
        # Bump the shared counter so every instance gets a collision-free extension name
        EXT_INDEX += 1
        self.ext_name = f"omni.test.internal.extension{EXT_INDEX}"
        # Default extension version is 0.1.0 so create an ID with that
        self.ext_id = f"{self.ext_name}-0.1.0"
        # Put all temporary files in a temporary directory for easy disposal
        self.test_directory = Path(stack.enter_context(TemporaryDirectory()))  # pylint: disable=consider-using-with
        self.module_root = Path(self.test_directory) / "exts"
        self.module_name = self.ext_name
        self.module_path = self.module_root / self.ext_name / self.module_name.replace(".", "/")
        # Redirect the usual node cache to the temporary directory
        self.cache_root = stack.enter_context(ogi.TemporaryCacheLocation(self.test_directory / "cache"))
        # Add the import path of the new extension to the system path
        self.path_addition = stack.enter_context(TemporaryPathAddition(self.module_root / self.ext_name))
        # Uncomment this to dump debugging information while the tests are running
        # self.log = stack.enter_context(ogi.TemporaryLogLocation("stdout"))
# ==============================================================================================================
class TestInternalExtension(omni.kit.test.AsyncTestCase):
"""Tests concerned with exercising the information provided by the code generation utilities. For tests related
to the node type registration see omni.graph.core.tests.test_register_python_ogn.py
"""
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.maxDiff = None  # Diffs of file path lists can be large so let the full details be seen
# --------------------------------------------------------------------------------------------------------------
async def test_build_tree_constructor(self):
    """Test the CreateHelper utility class"""
    with ExitStack() as stack:
        ctx = ModuleContexts(stack)
        ogn_directory = ctx.module_path / "ogn"
        creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
        # Create a build tree with one up-to-date version and one out-of-date version to cover both cases
        creator.add_standalone_node("OgnTestNode")
        creator.add_v1_18_node("OgnBuiltNode")
        # Every file both layouts are expected to produce; paths here are already absolute
        expected_files = [
            ctx.module_path / "nodes" / "OgnTestNode.ogn",
            ctx.module_path / "nodes" / "OgnTestNode.py",
            ogn_directory / "nodes" / "OgnBuiltNode.ogn",
            ogn_directory / "nodes" / "OgnBuiltNode.py",
            ogn_directory / "OgnBuiltNodeDatabase.py",
            ogn_directory / "tests" / "__init__.py",
            ogn_directory / "tests" / "TestOgnBuiltNode.py",
            ogn_directory / "tests" / "usd" / "OgnBuiltNodeTemplate.usda",
        ]
        for expected_file in expected_files:
            # NOTE(review): expected_file is already absolute, so joining with module_root is a no-op
            # (an absolute right-hand operand replaces the left) — harmless but presumably unintended
            expected_path = ctx.module_root / expected_file
            self.assertTrue(expected_path.is_file(), f"Checking existence of {expected_path}")
            self.assertTrue(expected_path.stat().st_size > 0, f"Checking size of {expected_path}")
# --------------------------------------------------------------------------------------------------------------
async def test_walk_with_excludes(self):
"""Test the utility that imitates os.walk with directory exclusions"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_standalone_node("OgnTestNode")
creator.add_v1_18_node("OgnBuiltNode")
# Exclude the tests directory and make sure the two files in there didn't get found
files_found = []
for _, _, file_names in ogi.walk_with_excludes(ctx.test_directory, {"tests"}):
files_found += file_names
self.assertCountEqual(
files_found,
[
"OgnTestNode.ogn",
"OgnTestNode.py",
"OgnBuiltNode.ogn",
"OgnBuiltNode.py",
"OgnBuiltNodeDatabase.py",
],
)
# Walk with no exclusions to make sure all files are found
files_found = []
for _, _, file_names in ogi.walk_with_excludes(ctx.test_directory, {}):
files_found += file_names
self.assertCountEqual(
files_found,
[
"OgnTestNode.ogn",
"OgnTestNode.py",
"OgnBuiltNode.ogn",
"OgnBuiltNode.py",
"OgnBuiltNodeDatabase.py",
"TestOgnBuiltNode.py",
"__init__.py",
"OgnBuiltNodeTemplate.usda",
],
)
# --------------------------------------------------------------------------------------------------------------
async def test_scan_extension(self):
"""Test the process of scanning an extension for existing .ogn, .py, and Database.py files"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_v1_18_node(_CLASS_NAME)
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
user_types_expected = [ogi.FileType.OGN, ogi.FileType.PYTHON]
generated_types_expected = [ogi.FileType.PYTHON_DB, ogi.FileType.TEST, ogi.FileType.USD]
file_names = {file_type: ogi.get_ogn_file_name(_CLASS_NAME, file_type) for file_type in ogi.FileType}
definition = ext_contents.node_type_definition(_CLASS_NAME)
self.assertEqual(definition.name, _CLASS_NAME)
self.assertEqual(len(user_types_expected), len(definition.user_files))
self.assertTrue(
all(
info[0] is not None
for info in [definition.user_files[file_type] for file_type in user_types_expected]
)
)
self.assertCountEqual(list(definition.generated_files.keys()), generated_types_expected)
ogn_directory = ctx.module_path / "ogn"
expected_files = {
ogi.FileType.OGN: ogn_directory / "nodes" / file_names[ogi.FileType.OGN],
ogi.FileType.PYTHON: ogn_directory / "nodes" / file_names[ogi.FileType.PYTHON],
ogi.FileType.PYTHON_DB: ogn_directory / file_names[ogi.FileType.PYTHON_DB],
ogi.FileType.TEST: ogn_directory / "tests" / file_names[ogi.FileType.TEST],
ogi.FileType.USD: ogn_directory / "tests" / "usd" / file_names[ogi.FileType.USD],
}
# All of the file types should be in the expected locations
for expected_type, expected_path in expected_files.items():
self.assertEqual(definition.file_path(expected_type), expected_path)
# The files types, in type order, should also be in time order initially
self.assertTrue(definition.file_mtime(ogi.FileType.OGN) <= definition.file_mtime(ogi.FileType.PYTHON))
self.assertTrue(definition.file_mtime(ogi.FileType.PYTHON) <= definition.file_mtime(ogi.FileType.PYTHON_DB))
self.assertTrue(definition.file_mtime(ogi.FileType.PYTHON_DB) <= definition.file_mtime(ogi.FileType.TEST))
self.assertTrue(definition.file_mtime(ogi.FileType.TEST) <= definition.file_mtime(ogi.FileType.USD))
# The files types should have all received the artificially generated version number
self.assertEqual(
definition.generated_versions,
_CURRENT_VERSIONS,
f"Comparing generated version {definition.generated_versions} to"
f" current version {_CURRENT_VERSIONS}",
)
# The node type definitions are up to date by design
self.assertFalse(definition.is_out_of_date(_CURRENT_VERSIONS))
# Touch the .ogn file to make the database out of date.
self.assertTrue(definition.user_files.touch(ogi.FileType.OGN))
self.assertTrue(definition.is_out_of_date(_CURRENT_VERSIONS))
# Check the functionality that walks the available list of generated file types
available_types = list(definition.iter_valid_generated_filetypes())
self.assertCountEqual(available_types, generated_types_expected + [ogi.FileType.DOCS])
# --------------------------------------------------------------------------------------------------------------
async def test_scan_empty(self):
"""Test the utility that scans a directory looking for .ogn files when the directory has none, though it
does have some nodes that look like node files due to their naming.
"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNone(ext_contents)
# --------------------------------------------------------------------------------------------------------------
async def test_scan_standalone(self):
"""Test the utility that scans a directory looking for .ogn files when the directory is structured as a
standalone extension (i.e. no built files)
"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_standalone_node(_CLASS_NAME)
ogn_directory = ctx.module_path / "ogn"
node_directory = ctx.module_path / "nodes"
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
self.assertEqual(ext_contents.node_type_count, 1)
definition = ext_contents.node_type_definition(_CLASS_NAME)
self.assertIsNotNone(definition)
self.assertEqual(definition.name, _CLASS_NAME)
self.assertEqual(2, len(definition.user_files))
self.assertEqual(0, len(definition.generated_files))
self.assertEqual(definition.user_files[ogi.FileType.OGN][0], node_directory / f"{_CLASS_NAME}.ogn")
self.assertEqual(definition.user_files[ogi.FileType.PYTHON][0], node_directory / f"{_CLASS_NAME}.py")
self.assertTrue(definition.is_out_of_date(_CURRENT_VERSIONS))
expected_files = {
ogi.FileType.OGN: node_directory / f"{_CLASS_NAME}.ogn",
ogi.FileType.PYTHON: node_directory / f"{_CLASS_NAME}.py",
ogi.FileType.PYTHON_DB: ogn_directory / f"{_CLASS_NAME}Database.py",
ogi.FileType.TEST: ogn_directory / "tests" / f"Test{_CLASS_NAME}.py",
ogi.FileType.USD: ogn_directory / "tests" / "usd" / f"{_CLASS_NAME}Template.usda",
ogi.FileType.DOCS: ogn_directory / "docs" / f"{_CLASS_NAME}.rst",
}
for file_type, expected_file in expected_files.items():
if file_type in [ogi.FileType.OGN, ogi.FileType.PYTHON]:
self.assertEqual(definition.file_path(file_type), expected_file)
else:
self.assertEqual(definition.file_path(file_type), None)
# --------------------------------------------------------------------------------------------------------------
async def test_exclude_types(self):
"""Test that files excluded by a node do not appear in its outdated list when missing"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
ogn_directory = ctx.module_path / "ogn"
exclusions = [ogi.FileType.USD, ogi.FileType.DOCS]
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_v1_18_node(_CLASS_NAME, exclusions)
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
self.assertEqual(ext_contents.node_type_count, 1)
definition = ext_contents.node_type_definition(_CLASS_NAME)
self.assertIsNotNone(definition)
self.assertEqual(definition.name, _CLASS_NAME)
self.assertEqual(definition.file_path(ogi.FileType.OGN), ogn_directory / "nodes" / "OgnTestNode.ogn")
self.assertEqual(definition.file_path(ogi.FileType.PYTHON), ogn_directory / "nodes" / "OgnTestNode.py")
self.assertEqual(
{file_type: gen_def[0] for file_type, gen_def in definition.generated_files.items()},
{
ogi.FileType.TEST: ogn_directory / "tests" / "TestOgnTestNode.py",
ogi.FileType.PYTHON_DB: ogn_directory / "OgnTestNodeDatabase.py",
},
)
# The up-to-date database should have been found
self.assertEqual(
definition.generated_versions,
ogi.GenerationVersions(ogi.Compatibility.FullyCompatible),
)
self.assertFalse(definition.is_out_of_date(_CURRENT_VERSIONS))
# --------------------------------------------------------------------------------------------------------------
async def test_out_of_date_versions(self):
"""Test that versions with good timestamps but old versions are flagged for regeneration"""
incompatible = ogi.GenerationVersions(ogi.Compatibility.Incompatible)
with ExitStack() as stack:
ctx = ModuleContexts(stack)
creator = CreateHelper(ctx.ext_name, ctx.module_root, incompatible)
creator.add_v1_18_node(_CLASS_NAME)
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
self.assertEqual(ext_contents.node_type_count, 1)
definition = ext_contents.node_type_definition(_CLASS_NAME)
self.assertIsNotNone(definition)
# The node was generated to be deliberately incompatible with the current versions
self.assertEqual(definition.generated_versions, incompatible)
self.assertTrue(definition.is_out_of_date(_CURRENT_VERSIONS))
# --------------------------------------------------------------------------------------------------------------
async def test_versions_in_build_and_cache(self):
"""Test that the newer version is selected when they exist in both the build and the cache
The structure it generates here is a local directory with the cache/ and ogn/ directories together:
ogn/
OgnSampleNodeDatabase.py
nodes/
OgnSampleNode.py
OgnSampleNode.ogn
tests/
TestOgnSampleNode.py
usd/
OgnSampleNodeTemplate.usda
cache/
ogn_generated/
XX.YY.ZZ/
omni.test.internal.extension/
OgnSampleNodeDatabase.py
docs/
OgnSampleNode.rst
tests/
TestOgnSampleNode.py
usd/
OgnSampleNodeTemplate.usda
"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
major_compatible_versions = ogi.GenerationVersions(ogi.Compatibility.MajorVersionCompatible)
cache_directory = ogi.full_cache_path(_CURRENT_VERSIONS, ctx.ext_id, ctx.module_name)
creator_major = CreateHelper(ctx.ext_name, ctx.module_root, major_compatible_versions)
creator_major.add_v1_18_node(_CLASS_NAME)
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_cache_for_node(_CLASS_NAME)
ext_contents = ogi.extension_contents_factory(ctx.ext_id, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
self.assertEqual(1, ext_contents.node_type_count)
definition = ext_contents.node_type_definition(_CLASS_NAME)
self.assertIsNotNone(definition)
# Different versions should have been found for the cache and the build generated files
self.assertEqual(
definition.generated_versions,
major_compatible_versions,
f"Generated versions {definition.generated_versions} versus major {major_compatible_versions}",
)
self.assertEqual(
definition.cached_versions,
_CURRENT_VERSIONS,
f"Cached versions {definition.cached_versions} versus current {_CURRENT_VERSIONS}",
)
# The cached file is the one that is currently up to date so it should be the one returned when
# asking for the database file.
cached_db_file = cache_directory / ogi.get_ogn_file_name(_CLASS_NAME, ogi.FileType.PYTHON_DB)
self.assertEqual(cached_db_file, definition.database_to_use())
# Test that cached files are flagged for rebuilding when they have the correct version but are
# older than the .ogn from which they are generated.
self.assertTrue(definition.user_files.touch(ogi.FileType.OGN))
self.assertTrue(definition.is_out_of_date(_CURRENT_VERSIONS))
self.assertEqual(None, definition.database_to_use())
# --------------------------------------------------------------------------------------------------------------
async def test_cached_version_old(self):
"""Test that cached files with incompatible versions are ignored"""
with ExitStack() as stack:
ctx = ModuleContexts(stack)
incompatible_versions = ogi.GenerationVersions(ogi.Compatibility.Incompatible)
creator = CreateHelper(ctx.ext_name, ctx.module_root, _CURRENT_VERSIONS)
creator.add_v1_18_node(_CLASS_NAME)
creator_incompatible = CreateHelper(ctx.ext_name, ctx.module_root, incompatible_versions)
creator_incompatible.add_cache_for_node(_CLASS_NAME)
ext_contents = ogi.extension_contents_factory(ctx.ext_name, ctx.ext_name, ctx.module_root / ctx.ext_name)
self.assertIsNotNone(ext_contents)
ext_contents.scan_for_nodes()
self.assertEqual(1, ext_contents.node_type_count)
definition = ext_contents.node_type_definition(_CLASS_NAME)
self.assertIsNotNone(definition)
# Different versions should have been found for the cache and the build generated files
self.assertEqual(
definition.generated_versions,
_CURRENT_VERSIONS,
f"Generated versions {definition.generated_versions} versus current {_CURRENT_VERSIONS}",
)
# The generated file is the one that is currently up to date so it should be the one returned when
# asking for the database file.
generated_db_file = ctx.module_path / "ogn" / ogi.get_ogn_file_name(_CLASS_NAME, ogi.FileType.PYTHON_DB)
self.assertEqual(generated_db_file, definition.database_to_use())
| 20,866 | Python | 53.483029 | 120 | 0.57898 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/tests/test_deprecation.py | """Tests that exercise the various types of deprecation"""
import re
import omni.graph.tools as ogt
import omni.kit
# ==============================================================================================================
# Deprecated constructs for testing
# Message attached to the deprecated class; the tests check that it shows up in the logged deprecations
CLASS_DEPRECATED = "Use og.NewerClass"
@ogt.DeprecatedClass(CLASS_DEPRECATED)
class OlderClass:
    # Intentionally empty - only instantiation matters for triggering the deprecation
    pass
# Message attached to the deprecated function
FUNCTION_DEPRECATED = "Use og.newer_function()"
@ogt.deprecated_function(FUNCTION_DEPRECATED)
def older_function():
    # Intentionally empty - only the call matters for triggering the deprecation
    pass
# Replacement class that OldNameForClass is redirected to via the rename mechanism
class NewNameForClass:
    pass
RENAME_MESSAGE = "OldNameForClass is now NewNameForClass"
OldNameForClass = ogt.RenamedClass(NewNameForClass, "OldNameForClass", RENAME_MESSAGE)
# Deprecated constants of various flavors - a string, a dictionary, and an arbitrary object
OLD_STRING = ogt.DeprecatedStringConstant("OLD_STRING", "GARBAGE", "Wipe your memory of it")
OLD_DICTIONARY = ogt.DeprecatedDictConstant("OLD_DICTIONARY", {}, "Wipe your memory of it")
OLD_OBJECT = ogt.deprecated_constant_object(older_function, "Wipe your memory of it")
# ==============================================================================================================
class TestDeprecation(omni.kit.test.AsyncTestCase):
    """Exercises each deprecation mechanism and verifies the expected messages are logged."""
    # --------------------------------------------------------------------------------------------------------------
    def setUp(self):
        """Messages have to be cleared before the test to avoid spurious success"""
        ogt.DeprecateMessage.clear_messages()
        # Remember the error-escalation setting so tearDown can restore it
        self.__were_deprecations_errors = ogt.DeprecateMessage.deprecations_are_errors()
        ogt.DeprecateMessage.set_deprecations_are_errors(False)
    def tearDown(self):
        """Messages have to be cleared after the test to prevent contamination of the message logs"""
        ogt.DeprecateMessage.clear_messages()
        ogt.DeprecateMessage.set_deprecations_are_errors(self.__were_deprecations_errors)
    # --------------------------------------------------------------------------------------------------------------
    def __check_deprecation_messages(self, pattern: str, expected_count: int):
        """Assert that the pattern appears in the logged deprecation messages the given number of times"""
        actual_count = sum(
            match is not None
            for match in [
                re.search(pattern, message) for message in ogt.DeprecateMessage._MESSAGES_LOGGED  # noqa: PLW0212
            ]
        )
        self.assertEqual(
            actual_count, expected_count, f"Expected {expected_count} messages matching {pattern} - got {actual_count}"
        )
    # --------------------------------------------------------------------------------------------------------------
    async def test_class_deprecation(self):
        """Test deprecation of a class"""
        with ogt.DeprecateMessage.NoLogging():
            _ = OlderClass()
        self.__check_deprecation_messages(CLASS_DEPRECATED, 1)
    # --------------------------------------------------------------------------------------------------------------
    async def test_function_deprecation(self):
        """Test deprecation of a single function"""
        with ogt.DeprecateMessage.NoLogging():
            older_function()
        expected = f"older_function() is deprecated: {FUNCTION_DEPRECATED}"
        self.assertTrue(
            expected in ogt.DeprecateMessage.messages_logged(),
            f"'{expected}' not found in {ogt.DeprecateMessage.messages_logged()}",
        )
    # --------------------------------------------------------------------------------------------------------------
    async def test_rename_deprecation(self):
        """Test deprecation of a class by giving it a new name"""
        with ogt.DeprecateMessage.NoLogging():
            _ = OldNameForClass()
        self.__check_deprecation_messages(RENAME_MESSAGE, 1)
    # --------------------------------------------------------------------------------------------------------------
    async def test_string_constant_deprecation(self):
        """Test deprecation of a string constant that is being removed"""
        # The deprecation message only happens on conversion to string as that's where it's most relevant.
        with ogt.DeprecateMessage.NoLogging():
            new_constant = str(OLD_STRING)
        expected = "OLD_STRING is deprecated: Wipe your memory of it"
        self.assertEqual(new_constant, "GARBAGE")
        self.assertTrue(
            expected in ogt.DeprecateMessage.messages_logged(),
            f"'{expected}' not found in {ogt.DeprecateMessage.messages_logged()}",
        )
    # --------------------------------------------------------------------------------------------------------------
    async def test_dict_constant_deprecation(self):
        """Test deprecation of a dictionary constant that is being removed"""
        # Conversion through dict() triggers the deprecation message
        expected = "OLD_DICTIONARY is deprecated: Wipe your memory of it"
        self.assertEqual(dict(OLD_DICTIONARY), {})
        self.assertTrue(
            expected in ogt.DeprecateMessage.messages_logged(),
            f"'{expected}' not found in {ogt.DeprecateMessage.messages_logged()}",
        )
    # --------------------------------------------------------------------------------------------------------------
    async def test_object_constant_deprecation(self):
        """Test deprecation of an object constant that is being removed"""
        # The message embeds the object's repr, so match with a regex rather than exact text
        expected = re.compile(".*older_function.*is deprecated. Wipe your memory of it")
        self.assertEqual(OLD_OBJECT.__name__, "older_function")
        found_message = False
        messages = ogt.DeprecateMessage.messages_logged()
        for message in messages:
            if expected.match(message):
                found_message = True
        self.assertTrue(found_message, f"'{expected}' not found in {messages}")
    # --------------------------------------------------------------------------------------------------------------
    async def test_deprecation_message(self):
        """Test issuing a deprecation message"""
        deprecation_message = "This is deprecated"
        with ogt.DeprecateMessage.NoLogging():
            ogt.DeprecateMessage.deprecated(deprecation_message)
        self.assertTrue(deprecation_message in ogt.DeprecateMessage.messages_logged())
    # --------------------------------------------------------------------------------------------------------------
    async def test_deprecated_import(self):
        """Test deprecation of a module import"""
        with ogt.DeprecateMessage.NoLogging():
            import omni.graph.tools.tests.deprecated_import as _do_not_export  # noqa: F401,PLW0621
        expected = "omni.graph.tools.tests.deprecated_import is deprecated: Do Not Import"
        self.assertTrue(
            expected in ogt.DeprecateMessage.messages_logged(),
            f"'{expected}' not found in {ogt.DeprecateMessage.messages_logged()}",
        )
    # --------------------------------------------------------------------------------------------------------------
    async def test_deprecation_error(self):
        """Test escalation of deprecations from warning to error"""
        old_setting = ogt.DeprecateMessage.deprecations_are_errors()
        try:
            ogt.DeprecateMessage.set_deprecations_are_errors(True)
            with self.assertRaises(ogt.DeprecationError, msg="Use of hard deprecated class not raising an error"):
                _ = OlderClass()
        finally:
            # Always restore the global escalation flag, even if the assertion fails
            ogt.DeprecateMessage.set_deprecations_are_errors(old_setting)
| 7,444 | Python | 47.344156 | 119 | 0.536674 |
omniverse-code/kit/exts/omni.graph.tools/docs/ogn_code_samples_cpp.cpp | // This file contains snippets of example C++ code to be imported by the OGN documentation.
// It's not actual running code itself, merely sections of code that illustrate a point
// It uses the reStructuredText code import with begin/end markings to cherry-pick relevant portions.
// The sections of code being referenced are demarqued by "begin-XX" and "end-XX".
// begin-minimal
#include <OgnNoOpDatabase.h>
// Documentation example node that performs no computation; it demonstrates logging and direct ABI access.
// Fix: the class was declared "class OgnNoOp:" - the stray Python-style colon is ill-formed C++
// (a ':' after the class name must introduce a base-specifier list).
class OgnNoOp
{
public:
    // Returns true on success; false (with a logged error) when the ABI interfaces are unavailable
    static bool compute(OgnNoOpDatabase& db)
    {
        // This logs a warning to the console, once
        db.logWarning("This node does nothing");
        // These methods provide direct access to the ABI objects, should you need to drop down to direct ABI calls
        // for any reason. (You should let us know if this is necessary as we've tried to minimize the cases in which
        // it becomes necessary.)
        const auto& contextObj = db.abi_context();
        const auto& nodeObj = db.abi_node();
        if (! contextObj.iAttributeData || ! nodeObj.iNode)
        {
            // This logs an error to the console and should only be used for compute failure
            db.logError("Could not retrieve the ABI interfaces");
            return false;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-minimal
// begin-node-metadata
#include <OgnNodeMetadataDatabase.h>
#include <alloca.h>
// Documentation example node that reads its node type metadata, both by key and by full iteration.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnNodeMetadata
{
public:
    static bool compute(OgnNodeMetadataDatabase& db)
    {
        auto nodeTypeObj = db.abi_node().getNodeTypeObj(db.abi_node());
        // Specifically defined metadata can be accessed by name
        std::cout << "The author of this node is " << nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, "author") << std::endl;
        // Some metadata is automatically added; you can see it by iterating over all of the existing metadata
        size_t metadataCount = nodeTypeObj.iNodeType->getMetadataCount(nodeTypeObj);
        // NOTE(review): alloca() comes from <alloca.h>, which is not available on all platforms
        // (e.g. MSVC uses <malloc.h>/_alloca); a std::vector would be the portable choice - confirm
        // before reusing this snippet outside Linux.
        char const** metadataKeyBuffer = reinterpret_cast<char const**>(alloca(sizeof(char*) * metadataCount));
        char const** metadataValueBuffer = reinterpret_cast<char const**>(alloca(sizeof(char*) * metadataCount));
        size_t found = nodeTypeObj.iNodeType->getAllMetadata(nodeTypeObj, metadataKeyBuffer, metadataValueBuffer, metadataCount);
        for (size_t i = 0; i < found; ++i)
        {
            std::cout << "Metadata for " << metadataKeyBuffer[i] << " = " << metadataValueBuffer[i] << std::endl;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-node-metadata
// begin-node-icon
#include <OgnNodeWithIconDatabase.h>
// Documentation example node that reads the icon path and color overrides from its node type metadata.
// Fixes: removed the stray ':' after the class name (ill-formed C++) and factored the three
// identical if/else reporting branches into a single lambda - the printed output is unchanged.
class OgnNodeWithIcon
{
public:
    static bool compute(OgnNodeWithIconDatabase& db)
    {
        auto nodeTypeObj = db.abi_node().getNodeTypeObj(db.abi_node());
        // The icon properties are just special cases of metadata. The metadata name is made available with the node type
        auto path = nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataIconPath);
        auto color = nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataIconColor);
        auto backgroundColor = nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataIconBackgroundColor);
        auto borderColor = nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataIconBorderColor);
        if (path)
        {
            std::cout << "Icon found at " << path << std::endl;
            // Every override is reported the same way: print the value if present, otherwise the default note
            auto report = [](char const* name, char const* value)
            {
                if (value)
                {
                    std::cout << "..." << name << " override is " << value << std::endl;
                }
                else
                {
                    std::cout << "...using default " << name << std::endl;
                }
            };
            report("color", color);
            report("backgroundColor", backgroundColor);
            report("borderColor", borderColor);
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-node-icon
// begin-node-scheduling
#include <OgnNodeSchedulingHintsDatabase.h>
// Documentation example node that inspects its own scheduling hints through the ABI.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnNodeSchedulingHints
{
public:
    static bool compute(OgnNodeSchedulingHintsDatabase& db)
    {
        auto nodeTypeObj = db.abi_node().getNodeTypeObj(db.abi_node());
        // Ordinarily you would not need to access this scheduling information as it is mainly for OmniGraph's use,
        // however it is available through the ABI so you can access it at runtime if you wish.
        // NOTE(review): other snippets in this file call through the interface pointer
        // (nodeTypeObj.iNodeType->...); confirm whether these two calls should also go through
        // the respective interface members.
        auto schedulingHintsObj = nodeTypeObj.getSchedulingHints(nodeTypeObj);
        std::string safety;
        switch (schedulingHintsObj.getThreadSafety(schedulingHintsObj))
        {
        case eThreadSafety::eSafe:
            safety = "Safe";
            break;
        case eThreadSafety::eUnsafe:
            safety = "Unsafe";
            break;
        case eThreadSafety::eUnknown:
        default:
            safety = "Unknown";
            break;
        }
        std::cout << "Is this node threadsafe? " << safety << std::endl;
        return true;
    }
};
REGISTER_OGN_NODE()
// end-node-scheduling
// begin-node-singleton
#include <OgnNodeSingletonDatabase.h>
// Documentation example node that checks its "singleton" node type metadata.
// Fixes: removed the stray ':' after the class name (ill-formed C++) and corrected the inverted
// test - the singleton metadata value is "1" for singleton node types, so the original
// (singletonValue[0] != '1') printed "I am a singleton" for exactly the non-singleton case.
class OgnNodeSingleton
{
public:
    static bool compute(OgnNodeSingletonDatabase& db)
    {
        auto nodeTypeObj = db.abi_node().getNodeTypeObj(db.abi_node());
        // The singleton value is just a special case of metadata. The metadata name is made available with the node type
        auto singletonValue = nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataSingleton);
        if (singletonValue && singletonValue[0] == '1')
        {
            std::cout << "I am a singleton" << std::endl;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-node-singleton
// begin-node-categories
#include <OgnNodeCategoriesDatabase.h>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <string>
// Documentation example node that reads and splits its comma-separated "categories" metadata.
// Fixes: removed the stray ':' after the class name (ill-formed C++), and stopped passing the
// const char* returned by getMetadata() directly to std::strtok - strtok requires a mutable
// char* (compile error) and writes NUL terminators into the buffer it scans, which would
// corrupt the engine-owned metadata storage. The value is copied into a local buffer first.
class OgnNodeCategories
{
public:
    static bool compute(OgnNodeCategoriesDatabase& db)
    {
        auto nodeTypeObj = db.abi_node().getNodeTypeObj(db.abi_node());
        // The categories value is just a special case of metadata. The metadata name is made available with the node type
        auto categoriesValue = nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataCategories);
        if (categoriesValue)
        {
            std::cout << "I have these categories:";
            // Mutable copy for strtok to chop up; the original metadata string is left untouched
            std::string categoriesBuffer{ categoriesValue };
            char const* delimiters = ",";
            char* token = std::strtok(&categoriesBuffer[0], delimiters);
            while (token)
            {
                std::cout << ' ' << std::quoted(token);
                token = std::strtok(nullptr, delimiters);
            }
            std::cout << std::endl;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-node-categories
// begin-node-tags
#include <OgnNodeTagsDatabase.h>
// Documentation example node that prints its "tags" node type metadata.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnNodeTags
{
public:
    static bool compute(OgnNodeTagsDatabase& db)
    {
        auto nodeTypeObj = db.abi_node().getNodeTypeObj(db.abi_node());
        // The tags value is just a special case of metadata. The metadata name is made available with the node type
        std::cout << "Tagged as " << nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataTags) << std::endl;
        return true;
    }
};
REGISTER_OGN_NODE()
// end-node-tags
// begin-tokens
#include <OgnTokensDatabase.h>
// Documentation example node showing how .ogn-declared tokens surface as database members.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnTokens
{
public:
    static bool compute(OgnTokensDatabase& db)
    {
        // Tokens are members of the database, by name. When the dictionary-style definition is used in the .ogn
        // file the names are the dictionary keys. These members were created, of type omni::graph::core::NameToken:
        //     db.tokens.red
        //     db.tokens.green
        //     db.tokens.blue
        // As tokens are just IDs in order to print their values you have to use the utility method "tokenToString"
        // on the database to convert them to strings
        std::cout << "The name for red is " << db.tokenToString(db.tokens.red) << std::endl;
        std::cout << "The name for green is " << db.tokenToString(db.tokens.green) << std::endl;
        std::cout << "The name for blue is " << db.tokenToString(db.tokens.blue) << std::endl;
        // This confirms that tokens are uniquely assigned and the same for all types.
        auto redToken = db.stringToToken("red");
        if (redToken != db.tokens.red)
        {
            std::cout << "ERROR: red token is not consistent" << std::endl;
            return false;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-tokens
// begin-node-uiName
#include <OgnNodeUiNameDatabase.h>
// Documentation example node that prints its "uiName" node type metadata.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnNodeUiName
{
public:
    static bool compute(OgnNodeUiNameDatabase& db)
    {
        // The uiName value is just a special case of metadata. The special metadata name is available from the ABI
        auto nodeTypeObj = db.abi_node().getNodeTypeObj(db.abi_node());
        std::cout << "Call me " << nodeTypeObj.iNodeType->getMetadata(nodeTypeObj, kOgnMetadataUiName) << std::endl;
        return true;
    }
};
REGISTER_OGN_NODE()
// end-node-uiName
// begin-simple
#include <OgnTokenStringLengthDatabase.h>
// Documentation example node computing the string length of a token input attribute.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnTokenStringLength
{
public:
    static bool compute(OgnTokenStringLengthDatabase& db)
    {
        // Access pattern is "db", the database, "inputs", the attribute's access type, and "token", the name the
        // attribute was given in the .ogn file
        // Inputs should always be const-references, outputs and state access types can be just plain references.
        const auto& tokenToMeasure = db.inputs.token();
        // The database has typedefs for all attributes, which can be used if you prefer explicit types
        db.outputs.length_t& stringLength = db.outputs.length();
        // Use the utility to convert the token to a string first
        std::string tokenString{ db.tokenToString(tokenToMeasure) };
        // Simple assignment to the output attribute's accessor is all you need to do to set the value, as it is
        // pointing directly to that data.
        stringLength = tokenString.length();
        return true;
    }
};
REGISTER_OGN_NODE()
// end-simple
// begin-tuple
#include <OgnVectorMultiplyDatabase.h>
// Documentation example node building a matrix output row by row from tuple-typed inputs.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnVectorMultiply
{
public:
    static bool compute(OgnVectorMultiplyDatabase& db)
    {
        const auto& vector1 = db.inputs.vector1();
        const auto& vector2 = db.inputs.vector2();
        auto& product = db.outputs.product();
        // Use the functionality in the GfVec classes to perform the multiplication.
        // You can use your own library functions by casting to appropriate types first.
        // Row i of the product is vector1 scaled by component i of vector2 (an outer-product layout).
        for (int row = 0; row < 4; ++row)
        {
            product.SetRow(row, vector1 * vector2[row]);
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-tuple
// begin-role
#include <OgnPointsToVectorDatabase.h>
// Being explicit about the namespaced types you are using is a good balance between namespace pollution and long names
using omni::graph::core::AttributeRole;
// Documentation example node validating attribute roles and computing the vector between two points.
// Fixes: removed the stray ':' after the class name (ill-formed C++) and corrected the vector
// computation - the original subtracted point2 from itself (point2() - point2()), which always
// produces the zero vector; the vector from point1 to point2 is point2() - point1().
class OgnPointsToVector
{
public:
    static bool compute(OgnPointsToVectorDatabase& db)
    {
        // Validate the roles for the computation. They aren't used in the actual math, though they could be used
        // to do something useful (e.g. cast data with a "color" role to a class that handles color spaces)
        if (db.inputs.point1.role() != AttributeRole::ePosition)
        {
            db.logError("The attribute point1 does not have the point role");
            return false;
        }
        if (db.inputs.point2.role() != AttributeRole::ePosition)
        {
            db.logError("The attribute point2 does not have the point role");
            return false;
        }
        if (db.outputs.vector.role() != AttributeRole::eVector)
        {
            db.logError("The attribute vector does not have the vector role");
            return false;
        }
        // The GfVec3f type supports simple subtraction for vector calculation
        db.outputs.vector() = db.inputs.point2() - db.inputs.point1();
        return true;
    }
};
REGISTER_OGN_NODE()
// end-role
// begin-array
#include <OgnPartialSumsDatabase.h>
#include <numeric>
// Documentation example node computing the partial sums of an input array attribute.
// Fix: removed the stray ':' after the class name, which is ill-formed C++.
class OgnPartialSums
{
public:
    static bool compute(OgnPartialSumsDatabase& db)
    {
        const auto& inputArray = db.inputs.array();
        auto& outputArray = db.outputs.partialSums();
        // This is a critical step, setting the size of the output array. Without this the array has no memory in
        // which to write.
        //
        // For convenience the size() and resize() methods are available at the database level and the wrapper level,
        // with the latter method only available for writable attributes. This makes initializing an output with the
        // same size as an input a one-liner in either of these two forms:
        //     db.outputs.partialSum.resize( db.inputs.array.size() );
        outputArray.resize(inputArray.size());
        // The wrapper to arrays behaves like a std::span<>, where the external memory they manage comes from Fabric.
        // The wrapper handles the synchronization, and is set up to behave mostly like
        // a normal std::array.
        //
        //     for (const auto& inputValue : inputArray)
        //     inputArray[index]
        //     inputArray.at(index)
        //     inputArray.empty()
        //     rawData = inputArray.data()
        // Since the standard range-based for-loop and general iteration is supported you can make use of the wide
        // variety of STL algorithms as well.
        //
        std::partial_sum(inputArray.begin(), inputArray.end(), outputArray.begin());
        return true;
    }
};
REGISTER_OGN_NODE()
// end-array
// begin-tuple-array
#include <OgnCrossProductDatabase.h>
#include <algorithm>
// Computes the element-wise cross product of two equal-length tuple arrays.
class OgnCrossProduct
{
public:
    static bool compute(OgnCrossProductDatabase& db)
    {
        // It usually keeps your code cleaner if you put your attribute wrappers into local variables, avoiding
        // the constant use of the "db.inputs" or "db.outputs" namespaces.
        const auto& a = db.inputs.a();
        const auto& b = db.inputs.b();
        auto& crossProduct = db.outputs.crossProduct();
        // This node chooses to make mismatched array lengths an error. You could also make it a warning, or just
        // simply calculate the result for the minimum number of available values.
        if (a.size() != b.size())
        {
            db.logError("Input array lengths do not match - '%zu' vs. '%zu'", a.size(), b.size());
            return false;
        }
        // As with simple arrays, the size of the output tuple-array must be set first to allocate Fabric memory
        crossProduct.resize(a.size());
        // Edge case is easily handled
        if (a.size() == 0)
        {
            return true;
        }
        // Simple cross product - your math library may have a built-in one you can use.
        // The lambda is named "computeCross" so that it does not redeclare the "crossProduct" output wrapper above.
        auto computeCross = [](const GfVec3d& u, const GfVec3d& v) -> GfVec3d {
            GfVec3d result;
            result[0] = u[1] * v[2] - u[2] * v[1];
            result[1] = -(u[0] * v[2] - u[2] * v[0]);
            result[2] = u[0] * v[1] - u[1] * v[0];
            return result;
        };
        // STL support makes walking the parallel arrays and computing a breeze
        std::transform(a.begin(), a.end(), b.begin(), crossProduct.begin(), computeCross);
        return true;
    }
};
REGISTER_OGN_NODE()
// end-tuple-array
// begin-string
#include <OgnReverseStringDatabase.h>
#include <algorithm>
#include <string>
// Reverses the characters of the input string attribute into the output string attribute.
class OgnReverseString
{
public:
    static bool compute(OgnReverseStringDatabase& db)
    {
        // The attribute wrapper for string types provides a conversion to a std::string with pre-allocated memory.
        // Constructing a new string based on that gives you a copy that can be modified locally and assigned after.
        std::string result(db.inputs.original());
        // Other functions provided by the string wrapper include:
        //    size() - how many characters in the string?
        //    empty() - is the string empty?
        //    data() - the raw char* pointer - note that it is not necessarily null-terminated
        //    comparison operators for sorting
        //    iterators
        // For writable strings (output and state) there are also modification functions:
        //    resize() - required before writing, unless you use an assigment operator
        //    clear() - empty out the string
        //    assignment operators for char const*, std::string, and string wrappers
        // The local copy can be reversed in place.
        std::reverse(std::begin(result), std::end(result));
        // The assignment operator handles resizing the string as well
        db.outputs.result() = result;
        // Since in this case the length of the output string is known up front the extra copy can be avoided by
        // making use of the iterator feature.
        //     auto& resultString = db.outputs.result();
        //     resultString.resize(db.inputs.original.size());
        //     std::reverse(std::begin(resultString), std::end(resultString));
        return true;
    }
};
REGISTER_OGN_NODE()
// end-string
// begin-any
#include <OgnAddDatabase.h>
// Adds two "any"-typed inputs, supporting only float and double resolved types.
class OgnAdd
{
public:
    static bool compute(OgnAddDatabase& db)
    {
        // The extra work in handling extended types such as "any" or "union" is in checking the resolved types to
        // first ensure they are handled and second select appropriate code paths if they differ from one type to
        // another.
        // Rather than clutter up the C++ code with a lot of type checking to follow the matrix of code paths
        // required to handle all types this example will just perform addition on floats or doubles and report
        // an error if any other type is encountered. (In practice this type of node is better implemented in Python
        // where such flexible types are easily handled.)
        const auto& a = db.inputs.a();
        const auto& b = db.inputs.b();
        auto& sum = db.outputs.sum();
        // Unlike the normal type, a resolved type must be retrieved at runtime. Extra functionality is provided on
        // the extended attribute type wrappers for this purpose.
        //
        //   resolved()  - Does the attribute currently have a resolved type?
        //   type()      - omni::graph::core::Type information describing the resolved type
        //   typeName()  - String identifying the type()
        //   get<X>()    - Templated function to attempt conversion to the resolved type X
        //
        if (! a.resolved() || ! b.resolved())
        {
            db.logWarning("Cannot compute with an input type unresolved");
            return false;
        }
        // For now the output type must be resolved by the graph as well, to ensure consistency.
        // In future the type UNKNOWN may be acceptable for outputs, and the compute method can provide the
        // type resolution itself.
        if (! sum.resolved())
        {
            db.logWarning("Cannot compute with the output type unresolved");
            return false;
        }
        // There are two ways of accessing the resolved type data. The first is explicit, where you check the type
        // and then explicitly access the data, knowing it will correctly resolve. In this case the add will be
        // really fussy and not even allow float+double, so the resolved types need to match exactly.
        if ((a.type() != b.type()) || (a.type() != sum.type()))
        {
            db.logError("Can only add identical types - float or double. Got '%s'+'%s'='%s'",
                a.typeName().c_str(), b.typeName().c_str(), sum.typeName().c_str());
            return false;
        }
        // The second way is the Pythonic "ask forgiveness, not permission" method, where the get<X>() method is used
        // to attempt to cast to a specific type and it moves on if it fails. This is possible because the get<X>()
        // methods do not return direct references to the Fabric data, they return a wrapper around the data that
        // provides the same types of access that the normal attribute type wrappers provide, plus a cast operator to
        // a bool so that you can quickly check if a cast to a resolved type succeeded.
        //
        // Note the use of the single "=" assignment to keep the code cleaner.
        // Also note the return type is just "auto", not "auto&" or "const auto&". That is because the wrapper must be
        // created at runtime so it cannot be cached. The wrapper types are simple so copying is fast.
        //
        if (auto aValue = a.get<float>())
        {
            // We've guaranteed above that the resolved types are the same so their retrieval can be assumed to succeed
            auto sumValue = sum.get<float>();
            auto bValue = b.get<float>();
            // The tricky part here is that the actual value still has to be retrieved from the wrapper, and that is
            // done by dereferencing it.
            *sumValue = *aValue + *bValue;
            // The type of "sum" is a wrapper class, the type of "*sum" (or "sum->") is a float&
        }
        else if (auto aValue = a.get<double>())
        {
            // You can shorten the operation if you're careful about the placement of the dereferences
            *(sum.get<double>()) = *aValue + *(b.get<double>());
        }
        else
        {
            // If you want a complete node then you'd continue this cascading type check, even including the
            // array and tuple types, such as get<double[]>(), get<double[3]>() and get<double[][3]>(). The types
            // supported by the get<X>() method are limited to raw data types, not explicit attribute types.
            // For example if you wanted a colord[3] attribute you'd have to do a two-step check for both underlying
            // type and attribute role like this:
            //    if ((auto colorValue = a.get<double[3]>()) && (a.role() == AttributeRole::eColor))
            db.logError("Input type '%s' not currently supported by the Add node", a.typeName().c_str());
            return false;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-any
// begin-union
#include <OgnMultiplyNumbersDatabase.h>
// Multiplies two union-typed inputs whose resolved type is restricted to float or double.
class OgnMultiplyNumbers
{
public:
    static bool compute(OgnMultiplyNumbersDatabase& db)
    {
        // Full details on handling extended types can be seen in the example for the "any" type. This example
        // shows only the necessary parts to handle the two types accepted for this union type (float and double).
        // The underlying code is all the same, the main difference is in the fact that the graph only allows
        // resolving to types explicitly mentioned in the union, rather than any type at all.
        const auto& a = db.inputs.a();
        const auto& b = db.inputs.b();
        auto& product = db.outputs.product();
        bool handledType{ false };
        if (auto aValue = a.get<float>())
        {
            if (auto bValue = b.get<float>())
            {
                if (auto productValue = product.get<float>())
                {
                    handledType = true;
                    *productValue = *aValue * *bValue;
                }
            }
        }
        else if (auto aValue = a.get<double>())
        {
            if (auto bValue = b.get<double>())
            {
                if (auto productValue = product.get<double>())
                {
                    handledType = true;
                    *productValue = *aValue * *bValue;
                }
            }
        }
        if (! handledType)
        {
            db.logError("Types were not resolved ('%s'*'%s'='%s')",
                a.typeName().c_str(), b.typeName().c_str(), product.typeName().c_str());
            return false;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-union
// begin-compute-helpers
#include <OgnAddDatabase.h>
#include <omni/graph/core/ogn/ComputeHelpers.h>
#include <carb/logging/Log.h>
namespace omni {
namespace graph {
namespace nodes {
// unnamed namespace to avoid multiple declaration when linking
namespace {
// Scalar overload: broadcast simple addition across (possibly array-valued) inputs of type T.
template<typename T>
bool tryComputeAssumingType(OgnAddDatabase& db)
{
    // Inline the addition functor directly into the broadcasting helper call.
    return ogn::compute::tryComputeWithArrayBroadcasting<T>(
        db.inputs.a(), db.inputs.b(), db.outputs.sum(),
        [](auto const& lhs, auto const& rhs, auto& out) { out = lhs + rhs; });
}
// Tuple overload: the broadcasting helper applies the functor per tuple component, so the
// functor body is the same component-wise addition as in the scalar overload. (The previous
// version wrote "result[i] = a[i] + b[i]" with an undefined index "i", which does not compile.)
template<typename T, size_t N>
bool tryComputeAssumingType(OgnAddDatabase& db)
{
    auto functor = [](auto const& a, auto const& b, auto& result)
    {
        result = a + b;
    };
    return ogn::compute::tryComputeWithTupleBroadcasting<T, N>(db.inputs.a(), db.inputs.b(), db.outputs.sum(), functor);
}
} // namespace
class OgnAdd
{
public:
static bool compute(OgnAddDatabase& db)
{
try
{
// All possible types excluding ogn::string and bool
// scalers
auto& aType = db.inputs.a().type();
switch (aType.baseType)
{
case BaseDataType::eDouble:
switch (aType.componentCount)
{
case 1: return tryComputeAssumingType<double>(db);
case 2: return tryComputeAssumingType<double, 2>(db);
case 3: return tryComputeAssumingType<double, 3>(db);
case 4: return tryComputeAssumingType<double, 4>(db);
case 9: return tryComputeAssumingType<double, 9>(db);
case 16: return tryComputeAssumingType<double, 16>(db);
}
case BaseDataType::eFloat:
switch (aType.componentCount)
{
case 1: return tryComputeAssumingType<float>(db);
case 2: return tryComputeAssumingType<float, 2>(db);
case 3: return tryComputeAssumingType<float, 3>(db);
case 4: return tryComputeAssumingType<float, 4>(db);
}
case BaseDataType::eHalf:
switch (aType.componentCount)
{
case 1: return tryComputeAssumingType<pxr::GfHalf>(db);
case 2: return tryComputeAssumingType<pxr::GfHalf, 2>(db);
case 3: return tryComputeAssumingType<pxr::GfHalf, 3>(db);
case 4: return tryComputeAssumingType<pxr::GfHalf, 4>(db);
}
case BaseDataType::eInt:
switch (aType.componentCount)
{
case 1: return tryComputeAssumingType<int32_t>(db);
case 2: return tryComputeAssumingType<int32_t, 2>(db);
case 3: return tryComputeAssumingType<int32_t, 3>(db);
case 4: return tryComputeAssumingType<int32_t, 4>(db);
}
case BaseDataType::eInt64:
return tryComputeAssumingType<int64_t>(db);
case BaseDataType::eUChar:
return tryComputeAssumingType<unsigned char>(db);
case BaseDataType::eUInt:
return tryComputeAssumingType<uint32_t>(db);
case BaseDataType::eUInt64:
return tryComputeAssumingType<uint64_t>(db);
}
}
catch (ogn::compute::InputError &error)
{
db.logWarning(error.what());
}
return false;
}
static void onConnectionTypeResolve(const NodeObj& node){
auto a = node.iNode->getAttributeByToken(node, inputs::a.token())
auto b = node.iNode->getAttributeByToken(node, inputs::b.token())
auto sum = node.iNode->getAttributeByToken(node, outputs::sum.token())
auto aType = a.iAttribute->getResolvedType(a);
auto bType = b.iAttribute->getResolvedType(b);
// Require inputs to be resolved before determining sum's type
if (aType.baseType != BaseDataType::eUnknown && bType.baseType != BaseDataType::eUnknown)
{
std::array<AttributeObj, 3> attrs { a, b, sum };
// a, b, and sum should all have the same tuple count
std::array<uint8_t, 3> tupleCounts {
aType.componentCount,
bType.componentCount,
std::max(aType.componentCount, bType.componentCount)
};
std::array<uint8_t, 3> arrayDepths {
aType.arrayDepth,
bType.arrayDepth,
// Allow for a mix of singular and array inputs. If any input is an array, the output must be an array
std::max(aType.arrayDepth, bType.arrayDepth)
};
std::array<AttributeRole, 3> rolesBuf {
aType.role,
bType.role,
// Copy the attribute role from the resolved type to the output type
AttributeRole::eUnknown
};
node.iNode->resolvePartiallyCoupledAttributes(node, attrs.data(), tupleCounts.data(),
arrayDepths.data(), rolesBuf.data(), attrs.size());
}
}
};
REGISTER_OGN_NODE()
} // namespace nodes
} // namespace graph
} // namespace omni
// end-compute-helpers
// begin-bundle
#include <OgnMergeBundlesDatabase.h>
// Merges the members of two input bundles into a single output bundle.
// NOTE(review): class renamed from OgnMergeBundle to OgnMergeBundles to match the
// OgnMergeBundlesDatabase/file naming used everywhere else in this sample - confirm against the .ogn file.
class OgnMergeBundles
{
public:
    static bool compute(OgnMergeBundlesDatabase& db)
    {
        const auto& bundleA = db.inputs.bundleA();
        const auto& bundleB = db.inputs.bundleB();
        auto& mergedBundle = db.outputs.bundle();
        // Bundle assignment means "assign all of the members of the RHS bundle to the LHS bundle". It doesn't
        // do a deep copy of the bundle members.
        mergedBundle = bundleA;
        // Bundle insertion adds the contents of a bundle to an existing bundle. The bundles may not have members
        // with the same names
        mergedBundle.insertBundle( bundleB );
        return true;
    }
};
REGISTER_OGN_NODE()
// end-bundle
// begin-bundle-data
#include <OgnCalculateBrightnessDatabase.h>
// Computes a brightness value from a color bundle containing either (r, g, b) or (c, m, y, k) members.
class OgnCalculateBrightness
{
public:
    // The actual algorithm to run using a well-defined conversion
    static float brightnessFromRGB(float r, float g, float b)
    {
        return (r * (299.f) + (g * 587.f) + (b * 114.f)) / 256.f;
    }
    static bool compute(OgnCalculateBrightnessDatabase& db)
    {
        // Retrieve the bundle accessor
        const auto& color = db.inputs.color();
        // Using the bundle accessor, try to retrieve the RGB color members. In this case the types have to be
        // float, though in a more general purpose node you might also allow for double, half, and int types.
        const auto r = color.attributeByName(db.tokens.r).get<float>();
        const auto g = color.attributeByName(db.tokens.g).get<float>();
        const auto b = color.attributeByName(db.tokens.b).get<float>();
        // Validity of a member is a boolean
        if (r && g && b)
        {
            // The get<> accessors return wrappers; dereference them to read the underlying float values
            db.outputs.brightness() = brightnessFromRGB(*r, *g, *b);
            return true;
        }
        // Having failed to extract RGB members, do the same check for CMYK members
        const auto c = color.attributeByName(db.tokens.c).get<float>();
        const auto m = color.attributeByName(db.tokens.m).get<float>();
        const auto y = color.attributeByName(db.tokens.y).get<float>();
        const auto k = color.attributeByName(db.tokens.k).get<float>();
        if (c && m && y && k)
        {
            db.outputs.brightness() = brightnessFromRGB(
                (1.f - *c/100.f) * (1.f - *k/100.f),
                (1.f - *m/100.f) * (1.f - *k/100.f),
                (1.f - *y/100.f) * (1.f - *k/100.f) );
            return true;
        }
        // You could be more verbose about the reason for the problem as there are a few different scenarios:
        //   - some but not all of r,g,b or c,m,y,k were in the bundle
        //   - none of the color components were in the bundle
        //   - some or all of the color components were found but were of the wrong data type
        db.logError("Neither the groups (r, g, b) nor (c, m, y, k) are in the color bundle. Cannot compute brightness");
        return false;
    }
};
REGISTER_OGN_NODE()
// end-bundle-data
// begin-memory-type
#include <OgnMemoryTypeDatabase.h>
// Routes points data to GPU or CPU memory depending on whether its size crosses a threshold.
class OgnMemoryType
{
public:
    static bool compute(OgnMemoryTypeDatabase& db)
    {
        // The operation specifies moving the points data onto the GPU for further computation if the size of
        // the input data reaches a threshold where that will make the computation more efficient.
        // (This particular node just moves data; in practice you would perform an expensive calculation on it.)
        if (db.inputs.points.size() > db.inputs.sizeThreshold())
        {
            // The gpu() methods force the data onto the GPU. They may or may not perform CPU->GPU copies under the
            // covers. Fabric handles all of those details so that you don't have to.
            db.outputs.points.gpu() = db.inputs.points.gpu();
        }
        else
        {
            // The cpu() methods force the data onto the CPU. They may or may not perform GPU->CPU copies under the
            // covers. Fabric handles all of those details so that you don't have to.
            db.outputs.points.cpu() = db.inputs.points.cpu();
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-memory-type
// begin-cuda-pointers
#include <OgnCudaPointersDatabase.h>
// Forward declaration of the CUDA-side entry point; a declaration requires an explicit return type.
extern "C" void callCudaFunction(inputs::cudaPoints_t, outputs::cudaPoints_t);
// Passes CPU-side pointers to GPU array data through to a CUDA function.
class OgnCudaPointers
{
public:
    static bool compute(OgnCudaPointersDatabase& db)
    {
        // When the *cudaPointers* keyword is set to *cpu* this wrapped array will contain a CPU pointer that
        // references the GPU array data. If not, it would have contained a GPU pointer that references the GPU
        // array data and not been able to be dereferenced on the CPU side.
        callCudaFunction(db.inputs.cudaPoints(), db.outputs.cudaPoints());
        return true;
    }
};
REGISTER_OGN_NODE()
// end-cuda-pointers
// begin-attribute-metadata
#include <OgnStarWarsCharactersDatabase.h>
#include <alloca.h>
#include <iostream>
// Demonstrates reading attribute metadata through the ABI interfaces.
class OgnStarWarsCharacters
{
public:
    static bool compute(OgnStarWarsCharactersDatabase& db)
    {
        // ABI objects are plain structs; their functionality is reached through the interface
        // pointers they carry (iNode for NodeObj, iAttribute for AttributeObj).
        auto nodeObj = db.abi_node();
        auto anakinObj = nodeObj.iNode->getAttribute(nodeObj, inputs::anakin.token());
        // Specifically defined metadata can be accessed by name
        std::cout << "Anakin's secret is " << anakinObj.iAttribute->getMetadata(anakinObj, "secret") << std::endl;
        // Some metadata is automatically added; you can see it by iterating over all of the existing metadata
        size_t metadataCount = anakinObj.iAttribute->getMetadataCount(anakinObj);
        char const** metadataKeyBuffer = reinterpret_cast<char const**>(alloca(sizeof(char*) * metadataCount));
        char const** metadataValueBuffer = reinterpret_cast<char const**>(alloca(sizeof(char*) * metadataCount));
        size_t found = anakinObj.iAttribute->getAllMetadata(anakinObj, metadataKeyBuffer, metadataValueBuffer, metadataCount);
        for (size_t i=0; i<found; ++i)
        {
            std::cout << "Metadata for " << metadataKeyBuffer[i] << " = " << metadataValueBuffer[i] << std::endl;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-attribute-metadata
// begin-optional
#include <OgnShoesDatabase.h>
#include <string>
#include <stdlib.h>
// Builds a shoe description, demonstrating use of an optional (unvalidated) input attribute.
class OgnShoes
{
public:
    static bool compute(OgnShoesDatabase& db)
    {
        // Fixed lookup table of shoe type names; static const so it is constructed only once
        static const std::string _shoeTypes[] = { "Runners", "Slippers", "Oxfords" };
        auto shoeIndex = rand() % 3;
        std::string shoeTypeName{ _shoeTypes[shoeIndex] };
        // If the shoe is a type that has laces then append the lace type name
        if (shoeIndex != 1)
        {
            // As this is an optional value it may or may not be valid at this point.
            const auto& shoelaceStyle = db.inputs.shoelaceStyle();
            // This happens automatically with required attributes. With optional ones it has to be done when used.
            if (db.inputs.shoelaceStyle.isValid())
            {
                shoeTypeName += " with ";
                shoeTypeName += db.tokenToString(shoelaceStyle);
                shoeTypeName += " laces";
            }
        }
        db.outputs.shoeType() = shoeTypeName;
        return true;
    }
};
REGISTER_OGN_NODE()
// end-optional
// begin-attribute-uiName
#include <OgnAttributeUiNameDatabase.h>
#include <iostream>
// Demonstrates reading the uiName metadata of an attribute through the ABI.
class OgnAttributeUiName
{
public:
    static bool compute(OgnAttributeUiNameDatabase& db)
    {
        // The uiName value is just a special case of metadata.
        // Note the use of the namespace-enabled "inputs" value that provides access to an attribute's static name.
        auto attributeObj = db.abi_node().iNode->getAttribute(db.abi_node(), inputs::x.token());
        std::cout << "Call me " << attributeObj.iAttribute->getMetadata(attributeObj, kOgnMetadataUiName) << std::endl;
        return true;
    }
};
REGISTER_OGN_NODE()
// end-attribute-uiName
// begin-attribute-uiType
#include <OgnAttributeUiTypeDatabase.h>
#include <iostream>
// Demonstrates reading the uiType metadata of an attribute through the ABI.
class OgnAttributeUiType
{
public:
    static bool compute(OgnAttributeUiTypeDatabase& db)
    {
        // The uiType value is just a special case of metadata.
        auto attributeObj = db.abi_node().iNode->getAttribute(db.abi_node(), inputs::x.token());
        std::cout << "The property panel ui type is " << attributeObj.iAttribute->getMetadata(attributeObj, kOgnMetadataUiType) << std::endl;
        return true;
    }
};
REGISTER_OGN_NODE()
// end-attribute-uiType
// begin-unvalidated
#include <OgnABTestDatabase.h>
#include <string>
#include <stdlib.h>
// Copies one of two unvalidated inputs to the output, checking at compute time that the
// selected input's resolved type matches the output's resolved type.
class OgnABTest
{
public:
    static bool compute(OgnABTestDatabase& db)
    {
        auto choice = db.outputs.choice();
        auto outType = choice.type();
        // Check to see which input is selected and verify that its data type matches the output resolved type
        if (db.inputs.selectA())
        {
            const auto inputA = db.inputs.a();
            if (! inputA.isValid() || (inputA.type() != outType))
            {
                db.logError("Mismatched types at input a - '%s' versus '%s'", inputA.type().getOgnTypeName(), outType.getOgnTypeName());
                return false;
            }
            choice = inputA;
        }
        else
        {
            const auto inputB = db.inputs.b();
            if (! inputB.isValid() || (inputB.type() != outType))
            {
                db.logError("Mismatched types at input b - '%s' versus '%s'", inputB.type().getOgnTypeName(), outType.getOgnTypeName());
                return false;
            }
            choice = inputB;
        }
        return true;
    }
};
REGISTER_OGN_NODE()
// end-unvalidated
// begin-state-node
#include <OgnCounterDatabase.h>
#include <iostream>
// Demonstrates per-node internal state by counting how often the node has been evaluated.
class OgnCounter
{
    // Simple state information that counts how many times this node has been evaluated
    int m_evaluationCount{ 0 };
public:
    static bool compute(OgnCounterDatabase& db)
    {
        // The state information is on the node so it is the template parameter
        auto& state = db.internalState<OgnCounter>();
        // This prints the message and updates the state information
        std::cout << "This node has been evaluated " << state.m_evaluationCount++ << " times" << std::endl;
        return true;
    }
};
REGISTER_OGN_NODE()
// end-state-node
// begin-versioned-node
#include <OgnMultiplyDatabase.h>
#include <carb/logging/Log.h>
// Demonstrates a node version upgrade that adds a new attribute when migrating from version 1 to 2.
class OgnMultiply
{
public:
    static bool compute(OgnMultiplyDatabase& db)
    {
        db.outputs.result() = db.inputs.a() * db.inputs.b() + db.inputs.offset();
        return true;
    }
    // Simply declaring the function is enough to register it as an override to the normal ABI function
    static bool updateNodeVersion(const GraphContextObj&, const NodeObj& nodeObj, int oldVersion, int newVersion)
    {
        if ((oldVersion == 1) && (newVersion == 2))
        {
            // Version upgrade manually adds the new attribute to the node.
            constexpr float zero = 0.0f;
            nodeObj.iNode->createAttribute(nodeObj, "inputs:offset", Type(BaseDataType::eFloat), &zero, nullptr, kAttributePortType_Input, kExtendedAttributeType_Regular, nullptr);
            return true;
        }
        // Always good practice to flag unknown version changes so that they are not forgotten.
        // There is no database instance available in this ABI function, so log through Carbonite directly.
        CARB_LOG_ERROR("Do not know how to upgrade Multiply from version %d to %d", oldVersion, newVersion);
        return false;
    }
};
REGISTER_OGN_NODE()
// end-versioned-node
| 41,156 | C++ | 37.71778 | 180 | 0.615001 |
omniverse-code/kit/exts/omni.graph.tools/docs/ogn_code_samples_python.rst | .. _ogn_code_samples_py:
OGN Code Samples - Python
=========================
This files contains a collection of examples for using the .ogn generated code from Python. There is no particular flow
to these examples, they are used as reference data for the :ref:`ogn_user_guide`.
In the examples below this import will be assumed when describing names from the OmniGraph API, in the spirit of
common usage for packages such as *numpy* or *pandas*:
.. code-block:: py
import omni.graph.core as og
.. contents::
.. _ogn_python_generated_database:
Python Generated Database
-------------------------
When the .ogn files are processed and the implementation language is set to *python* it generates a database file
through which all of the attribute data can be accessed. It also generates some utility functions that are useful in
the context of a compute function. For the file **OgnMyNode.ogn** the database class will be named **OgnMyNodeDatabase**
and can be imported directly from the generated `ogn` module inside your Python module.
.. code-block:: py
from omni.examples.ogn.OgnMyNodeDatabase import OgnMyNodeDatabase as database
Usually you will not need to import the file though as the compute method is passed an instance to it. The contents
of that database file will include these functions:
.. code-block:: py
db.log_error("Explanation of error") # Log an error in the compute
db.log_warning("Explanation of warning") # Log a warning in the compute
db.log_warn("Explanation of warning") # An alias for log_warning
db.inputs # Object containing accessors for all input attribute data
db.outputs # Object containing accessors for all output attribute data
db.state # Object containing accessors for all state attribute data
database.per_node_internal_state(node) # Class method to get the internal state data attached to a specific node
The attribute members of `db.inputs`, `db.outputs`, and `db.state` are all properties. The input setter can only be
used during node initialization.
.. _ogn_minimal_node_py:
Minimal Python Node Implementation
----------------------------------
Every Python node must contain a node class definition with an implementation of the ``compute`` method that takes the
database as a parameter and returns a boolean indicating if the compute succeeded. To enforce more stringent type
checking on compute calls, import the database definition for the declaration.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-minimal
:end-before: end-minimal
.. note::
For simplicity, the import will be omitted from subsequent examples.
:ref:`[C++ Version]<ogn_minimal_node_cpp>`
.. _ogn_metadata_node_py:
Python Node Type Metadata Access
--------------------------------
When node types have metadata added to them they can be accessed through the Python bindings to the node ABI.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-node-metadata
:end-before: end-node-metadata
:ref:`[C++ Version]<ogn_metadata_node_cpp>`
.. _ogn_node_with_icon_py:
Python Node Icon Location Access
--------------------------------
Specifying the icon location and color information creates consistently named pieces of metadata that the UI can use to
present a more customized visual appearance.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-node-icon
:end-before: end-node-icon
:ref:`[C++ Version]<ogn_node_with_icon_cpp>`
.. _ogn_scheduling_node_python:
Python Node Type Scheduling Hints
---------------------------------
Specifying scheduling hints makes it easier for the OmniGraph scheduler to optimize the scheduling of node evaluation.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-node-scheduling
:end-before: end-node-scheduling
:ref:`[C++ Version]<ogn_scheduling_node_cpp>`
.. _ogn_singleton_node_py:
Python Singleton Node Types
---------------------------
Specifying that a node type is a singleton creates a consistently named piece of metadata that can be checked to see
if multiple instances of that node type will be allowed in a graph or its child graphs. Attempting to create more than
one of such node types in the same graph or any of its child graphs will result in an error.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-node-singleton
:end-before: end-node-singleton
:ref:`[C++ Version]<ogn_singleton_node_cpp>`
.. _ogn_tags_node_py:
Python Node Type Tags
---------------------
Specifying the node tags creates a consistently named piece of metadata that the UI can use to present a more
friendly grouping of the node types to the user.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-node-tags
:end-before: end-node-tags
This example introduces a simple helper data structure ``og.MetadataKeys``, which contains strings set to the key
values of special internal metadata. These are metadata elements managed by the code generator. Using these names
ensures consistent access.
.. code-block:: py
    og.MetadataKeys.ALLOWED_TOKENS # On attributes of type token, a comma-separated list of potential legal values for the UI
og.MetadataKeys.CATEGORIES # On node types, contains a comma-separated list of categories to which the node type belongs
og.MetadataKeys.DESCRIPTION # On attributes and node types, contains their description from the .ogn file
og.MetadataKeys.EXTENSION # On node types, contains the extension that owns this node type
og.MetadataKeys.HIDDEN # On attributes and node types, indicating to the UI that they should not be shown
og.MetadataKeys.ICON_PATH # On node types, contains the file path to the node's icon representation in the editor
    og.MetadataKeys.ICON_BACKGROUND_COLOR # On node types, overrides the background color of the node's icon
og.MetadataKeys.ICON_BORDER_COLOR # On node types, overrides the border color of the node's icon
og.MetadataKeys.SINGLETON # On node types its presence indicates that only one of the node type may be created in a graph
og.MetadataKeys.TAGS # On node types, a comma-separated list of tags for the type
og.MetadataKeys.UI_NAME # On attributes and node types, user-friendly name specified in the .ogn file
:ref:`[C++ Version]<ogn_tags_node_cpp>`
.. _ogn_tokens_node_py:
Python Token Access
-------------------
Python properties are used for convenience in accessing the predefined token values. As tokens are represented
directly as strings in Python there is no need to support translation between strings and tokens as there is in C++.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-tokens
:end-before: end-tokens
:ref:`[C++ Version]<ogn_tokens_node_cpp>`
.. _ogn_uiName_node_py:
Python Node Type UI Name Access
-------------------------------
Specifying the node UI name creates a consistently named piece of metadata that the UI can use to present a more
friendly name of the node type to the user.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-node-uiName
:end-before: end-node-uiName
:ref:`[C++ Version]<ogn_uiName_node_cpp>`
.. _ogn_simple_node_py:
Simple Python Attribute Data Type
---------------------------------
Accessors are created on the generated database class that return Python accessor objects that wrap the underlying
attribute data, which lives in Fabric. As Python does not have the same flexibility with numeric data types there
is some conversion performed. i.e. a Python number is always 64 bits so it must truncate when dealing with smaller
attributes, such as **int** or **uchar**.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-simple
:end-before: end-simple
:ref:`[C++ Version]<ogn_simple_node_cpp>`
.. _ogn_tuple_node_py:
Tuple Python Attribute Data Type
--------------------------------
Tuples, arrays, and combinations of these all use the ``numpy`` array types as return values as opposed to a plain
Python list such as ``List[float, float, float]``. This plays a big part in efficiency as the ``numpy`` arrays can
point directly to the Fabric data to minimize data copying.
Values are returned through the same kind of accessor as for simple data types, only differing in the returned data
types.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-tuple
:end-before: end-tuple
:ref:`[C++ Version]<ogn_tuple_node_cpp>`
.. _ogn_role_node_py:
Role Python Attribute Data Type
-------------------------------
Roles are stored in a parallel structure to the attributes as properties. For example ``db.inputs.color`` will have a
corresponding property ``db.role.inputs.color``. For convenience, the legal role names are provided as constants in
the database class. The list of role names corresponds to the role values in the omni.graph.core.AttributeRole enum:
- ROLE_COLOR
- ROLE_EXECUTION
- ROLE_FRAME
- ROLE_NORMAL
- ROLE_POINT
- ROLE_QUATERNION
- ROLE_TEXCOORD
- ROLE_TIMECODE
- ROLE_TRANSFORM
- ROLE_VECTOR
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-role
:end-before: end-role
:ref:`[C++ Version]<ogn_role_node_cpp>`
.. _ogn_array_node_py:
Array Python Attribute Data Type
--------------------------------
As with tuple values, all array values in Python are represented as ``numpy.array`` types.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-array
:end-before: end-array
:ref:`[C++ Version]<ogn_array_node_cpp>`
.. _ogn_tuple_array_node_py:
Tuple-Array Python Attribute Data Type
--------------------------------------
As with simple tuple values and array values the tuple-array values are also represented as ``numpy.array`` types.
The numpy objects returned use the Fabric memory as their storage so they can be modified directly when computing
outputs. As with regular arrays, you must first set the size required so that the right amount of memory can be
allocated by Fabric.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-tuple-array
:end-before: end-tuple-array
:ref:`[C++ Version]<ogn_tuple_array_node_cpp>`
.. _ogn_string_node_py:
String Python Attribute Data Type
---------------------------------
String attributes are a bit unusual in Python. In Fabric they are implemented as arrays of characters but they are
exposed in Python as plain old ``str`` types. The best approach is to manipulate local copies of the string and then
assign it to the result when you are finished.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-string
:end-before: end-string
.. important::
Although strings are implemented in Fabric as arrays the fact that strings are immutable in Python means you
don't want to use the array method of resizing (i.e. setting the `db.outputs.stringAttribute_size` property).
You can allocate it, but string elements cannot be assigned so there is no way to set the individual values.
:ref:`[C++ Version]<ogn_string_node_cpp>`
.. _ogn_any_node_py:
Extended Python Attribute Data Type - Any
-----------------------------------------
Extended attribute types have extra information that identifies the type they were resolved to at runtime. The access
to this information is achieved by wrapping the attribute value in the same way as :ref:`ogn_bundle_node_py`.
The Python property for the attribute returns an accessor rather than the value itself. This accessor has the
properties **".value"**, **".name"**, and **".type"** so that the type resolution information can be accessed directly.
In addition, variations of the **".value"** method specific to each memory space are provided as the properties
**".cpu_value"** and **".gpu_value"**.
For example, the value for the input named **a** can be found at ``db.inputs.a.value``, and its resolved type is at
``db.inputs.a.type``.
.. literalinclude:: ogn_code_samples_python.py
:language: python
    :start-after: begin-any
    :end-before: end-any
:ref:`[C++ Version]<ogn_any_node_cpp>`
The extended data types must all be resolved before calling into the compute method. The generated code
handles that for you, executing the equivalent of these calls for extended inputs **a** and **b**, and extended
output **sum**, preventing the call to ``compute()`` if any of the types are unresolved.
.. code-block:: python
if db.inputs.a.type.base_type == og.BaseDataType.UNKNOWN:
return False
if db.inputs.b.type.base_type == og.BaseDataType.UNKNOWN:
return False
if db.outputs.sum.type.base_type == og.BaseDataType.UNKNOWN:
return False
.. _ogn_union_node_py:
Extended Python Attribute Data Type - Union
-------------------------------------------
The generated interface for union types is exactly the same as for **any** types. There is just a tacit agreement that
the resolved types will always be one of the ones listed in the union type description.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-union
:end-before: end-union
:ref:`[C++ Version]<ogn_union_node_cpp>`
.. _ogn_bundle_node_py:
Bundle Python Attribute Data Type
---------------------------------
Bundle attribute information is accessed the same way as information for any other attribute type. As an aggregate,
the bundle can be treated as a container for attributes, without any data itself.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-bundle
:end-before: end-bundle
:ref:`[C++ Version]<ogn_bundle_node_cpp>`
.. _ogn_bundle_data_py:
When you want to get at the actual data, you use the bundle API to extract the runtime attribute accessors from the
bundle for those attributes you wish to process.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-bundle-data
:end-before: end-bundle-data
.. tip::
Although you access them in completely different ways the attributes that are bundle members use the same accessors
as the extended attribute types. See further information in :ref:`ogn_any_node_cpp`
This documentation for bundle access is pulled directly from the code. It removes the extra complication in the
accessors required to provide proper typing information for bundle members and shows the appropriate calls in the
bundle attribute API.
.. literalinclude:: ../../../../source/extensions/omni.graph/python/_impl/bundles.py
    :language: python
:start-after: begin-bundle-interface-description
:end-before: end-bundle-interface-description
:ref:`[C++ Version]<ogn_bundle_data_cpp>`
.. _ogn_attribute_memory_type_py:
Python Attribute Memory Location
--------------------------------
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-memory-type
:end-before: end-memory-type
:ref:`[C++ Version]<ogn_attribute_memory_type_cpp>`
.. _ogn_node_categories_py:
Node Type Categories
--------------------
Categories are added as metadata to the node and can be accessed through the standard metadata interface.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-node-categories
:end-before: end-node-categories
:ref:`[C++ Version]<ogn_node_categories_cpp>`
.. _ogn_node_cudaPointers_py:
Python Attribute CPU Pointers to GPU Data
-----------------------------------------
.. note::
Although this value takes effect at the attribute level the keyword is only valid at the node level. All
attributes in a node will use the same type of CUDA array pointer referencing.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-cuda-pointers
:end-before: end-cuda-pointers
:ref:`[C++ Version]<ogn_node_cudaPointers_cpp>`
.. _ogn_metadata_attribute_py:
Python Attribute Metadata Access
--------------------------------
When attributes have metadata added to them they can be accessed through the ABI attribute interface.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-attribute-metadata
:end-before: end-attribute-metadata
:ref:`[C++ Version]<ogn_metadata_attribute_cpp>`
.. _ogn_optional_py:
Optional Python Attributes
--------------------------
Since Python values are extracted through the C++ ABI bindings they don't have a direct validity check so the validity
of optional attributes must be checked indirectly. If a Python attribute value returns the special **None** value then
the attribute is not valid. It may also raise a *TypeError* or *ValueError* exception, indicating there was a mismatch
between the data available and the type expected.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-optional
:end-before: end-optional
:ref:`[C++ Version]<ogn_optional_cpp>`
.. _ogn_uiName_attribute_py:
Python Attribute UI Name Access
-------------------------------
Specifying the attribute **uiName** creates a consistently named piece of metadata that the UI can use to present a more
friendly version of the attribute name to the user. It can be accessed through the regular metadata ABI, with some
constants provided for easier access.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-attribute-uiName
:end-before: end-attribute-uiName
:ref:`[C++ Version]<ogn_uiName_attribute_cpp>`
.. _ogn_uiType_attribute_py:
Python Attribute UI Type Access
-------------------------------
Specifying the attribute **uiType** tells the property panel that this attribute should be shown with custom widgets.
- For path, string, and token attributes, a ui type of "filePath" will show file browser widgets
- For 3- and 4-component numeric tuples, a ui type of "color" will show the color picker widget
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-attribute-uiType
:end-before: end-attribute-uiType
:ref:`[C++ Version]<ogn_uiType_attribute_cpp>`
.. _ogn_unvalidated_py:
Unvalidated Python Attributes
-----------------------------
For most attributes the generated code will check to see if the attribute is valid before it calls the `compute()`
function. unvalidated attributes will not have this check made. If you end up using their value then you must make the
call to the `is_valid()` method yourself first and react appropriately if invalid values are found. Further, for
attributes with extended types you must verify that they have successfully resolved to a legal type.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-unvalidated
:end-before: end-unvalidated
:ref:`[C++ Version]<ogn_unvalidated_cpp>`
.. _ogn_dynamic_attributes_py:
Dynamic Python Attributes
-------------------------
In addition to attributes statically defined through a .ogn file, you can also dynamically add attributes to a single
node by using the ABI call ``og.Node.create_attribute(...)``. When you do so, the Python database interface will
automatically pick up these new attributes and provide access to their data in exactly the same way as it does for
regular attributes. (i.e. ``db.inputs.X`` for the value, ``db.attributes.input.X`` for the underlying `og.Attribute`,
``db.roles.inputs.X`` for the attribute role, etc.)
The way you test for such an attribute's existence inside a ``compute()`` method is to capture the `AttributeError`
exception.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-dynamic
:end-before: end-dynamic
.. note::
There is no C++ equivalent to this feature. Dynamic attributes will be available on the Python accessors to the
C++ node but the C++ code can only access the attribute data by using the low level ABI.
.. _ogn_state_node_py:
Python Nodes With Internal State
--------------------------------
Unlike C++ classes it is not as easy to determine if a Python class contains data members that should be interpreted as
state information. Instead, the Python node class will look for a method called `internal_state()`, which should return
an object containing state information to be attached to a node. Once the internal state has been constructed it is not
modified by OmniGraph until the node is released; it is entirely up to the node how and when to modify the data.
That information will be in turn made accessible through the database class using the property `db.internal_state`.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-state-node
:end-before: end-state-node
:ref:`[C++ Version]<ogn_state_node_cpp>`
.. _ogn_versioned_node_py:
Python Nodes With Version Upgrades
----------------------------------
To provide code to upgrade a node from a previous version to the current version you must override the ABI function
`update_node_version()`. The current context and node to be upgraded are passed in, as well as the old version at which
the node was created and the new version to which it should be upgraded. Passing both values allows you to upgrade
nodes at multiple versions in the same code.
This example shows how a new attribute is added using the *og.Node* ABI interface.
.. literalinclude:: ogn_code_samples_python.py
:language: python
:start-after: begin-versioned-node
:end-before: end-versioned-node
:ref:`[C++ Version]<ogn_versioned_node_cpp>`
| 21,784 | reStructuredText | 38.111311 | 144 | 0.721126 |
omniverse-code/kit/exts/omni.graph.tools/docs/ogn_code_samples_python.py | # flake8: noqa
# This file contains snippets of example Python code to be imported by the OGN documentation.
# It's not actual running code itself, merely sections of code that illustrate a point
# It uses the reStructuredText code import with begin/end markings to cherry-pick relevant portions.
# The sections of code being referenced are demarqued by "begin-XX" and "end-XX".
# begin-minimal
# This line isn't strictly necessary. It's only useful for more stringent type information of the compute parameter.
# Note how the extra submodule "ogn" is appended to the extension's module to find the database file.
from ogn.examples.ogn.OgnNoOpDatabase import OgnNoOpDatabase
class OgnNoOp:
    @staticmethod
    def compute(db: "OgnNoOpDatabase") -> bool:
        """Do nothing except demonstrate the minimal compute entry point.

        The node type description from the .ogn file is available via help()
        on the database class; this docstring carries implementation notes.
        """
        # Emit a console warning so the example visibly does *something*.
        db.log_warning("This node does nothing")
        # Normally the database has already validated the node; the explicit
        # check here only illustrates node access and error logging.
        node = db.node
        if node is not None and node.isValid():
            return True
        # log_error reports a compute failure to the console.
        db.log_error("The node being computed is not valid")
        return False
# end-minimal
# begin-node-metadata
class OgnNodeMetadata:
    @staticmethod
    def compute(db) -> bool:
        """Illustrate reading node-type metadata through the database."""
        # Explicitly-declared metadata is looked up by name.
        author = db.get_metadata("author")
        print(f"The author of this node is {author}")
        # Automatically-added metadata is discovered by iterating everything;
        # the Python binding presents the C++ ABI data as (name, value) pairs.
        for name, value in db.node.get_all_metadata():
            print(f"Metadata for {name} is {value}")
        return True
# end-node-metadata
# begin-node-icon
import omni.graph.tools.ogn as ogn
class OgnNodeWithIcon:
    @staticmethod
    def compute(db) -> bool:
        """Show how icon styling information is read from node metadata.

        The icon path and color overrides are ordinary metadata entries whose
        hardcoded keys live in the ogn.MetadataKeys namespace.
        """
        path = db.get_metadata(ogn.MetadataKeys.ICON_PATH)
        # Fetch every optional override up front; each may be absent (None).
        overrides = [
            ("color", db.get_metadata(ogn.MetadataKeys.ICON_COLOR)),
            ("backgroundColor", db.get_metadata(ogn.MetadataKeys.ICON_BACKGROUND_COLOR)),
            ("borderColor", db.get_metadata(ogn.MetadataKeys.ICON_BORDER_COLOR)),
        ]
        if path is not None:
            print(f"Icon found at {path}")
            for name, value in overrides:
                print(f"...{name} override is {value}" if value is not None else f"...using default {name}")
        return True
# end-node-icon
# begin-node-scheduling
class OgnNodeSchedulingHints:
    @staticmethod
    def compute(db) -> bool:
        """Read back the scheduling hints declared for this node type.

        The hints exist mainly for OmniGraph's scheduler, but the ABI exposes
        them so they can also be inspected at runtime.
        """
        hints = db.abi_node.get_node_type().get_scheduling_hints()
        print(f"Is this node threadsafe? {hints.get_thread_safety()}")
        return True
# end-node-scheduling
# begin-node-singleton
import omni.graph.tools.ogn as ogn
class OgnNodeSingleton:
    @staticmethod
    def compute(db) -> bool:
        """Check the singleton flag, which is stored as ordinary metadata."""
        flag = db.get_metadata(ogn.MetadataKeys.SINGLETON)
        # A non-empty value starting with "1" marks the node type as
        # single-instance-per-graph.
        if flag and flag.startswith("1"):
            print("I am a singleton")
        return True
# end-node-singleton
# begin-node-categories
import omni.graph.tools.ogn as ogn
class OgnNodeCategories:
    @staticmethod
    def compute(db) -> bool:
        """Read the node's categories, stored as comma-separated metadata."""
        raw = db.get_metadata(ogn.MetadataKeys.CATEGORIES)
        if not raw:
            return True
        print(f"These are my categories {raw.split(',')}")
        return True
# end-node-categories
# begin-node-tags
import omni.graph.tools.ogn as ogn
class OgnNodeTags:
    @staticmethod
    def compute(db) -> bool:
        """Print the tag list, which is just a special case of metadata."""
        tags = db.get_metadata(ogn.MetadataKeys.TAGS)
        print(f"My tags are {tags}")
        return True
# end-node-tags
# begin-tokens
class OgnNodeTokens:
    @staticmethod
    def compute(db) -> bool:
        """Show predefined token access; tokens are plain strings in Python."""
        tokens = db.tokens
        for color in ("red", "green", "blue"):
            print(f"The name for {color} is {getattr(tokens, color)}")
        return True
# end-tokens
# begin-node-uiName
import omni.graph.core as og

class OgnNodeUiName:
    @staticmethod
    def compute(db) -> bool:
        """Print the user-friendly name of this node type.

        The uiName value is just a special case of metadata; the hardcoded key
        lives in the og.MetadataKeys namespace.
        """
        # Bug fix: this snippet imports omni.graph.core as "og", so the
        # metadata key must be referenced through og, not the unimported ogn.
        print("Call me ", db.get_metadata(og.MetadataKeys.UI_NAME))
        return True
# end-node-uiName
# begin-simple
class OgnTokenStringLength:
    @staticmethod
    def compute(db) -> bool:
        """Write the length of the input token to the output attribute.

        Access pattern: "db" (the database), the access type ("inputs" /
        "outputs"), then the attribute name from the .ogn file. Assigning to
        the output accessor writes directly into the attribute data, and
        attribute names may not follow PEP8 since they are shared with C++.
        """
        db.outputs.length = len(db.inputs.token)
        return True
# end-simple
# begin-tuple
class OgnVectorMultiply:
    @staticmethod
    def compute(db) -> bool:
        """Store the (4, 4) outer product of two 4-component input vectors.

        The inputs arrive as numpy views of shape (4,); reshaping one into a
        column and the other into a row makes the matrix multiply produce the
        outer product with no explicit loop.
        """
        column = db.inputs.vector1.reshape(4, 1)
        row = db.inputs.vector2.reshape(1, 4)
        db.outputs.product = column @ row
        return True
# end-tuple
# begin-role
class OgnPointsToVector:
    @staticmethod
    def compute(db) -> bool:
        """Compute the vector from point1 to point2, validating attribute roles.

        In Python the value wrapper is only a property, so each attribute's
        role is read from the parallel db.role structure.
        """
        # Both inputs must carry the point role.
        if db.role.inputs.point1 != db.ROLE_POINT:
            db.log_error(f"Cannot convert role {db.role.inputs.point1} to {db.ROLE_POINT}")
            return False
        if db.role.inputs.point2 != db.ROLE_POINT:
            db.log_error(f"Cannot convert role {db.role.inputs.point2} to {db.ROLE_POINT}")
            return False
        # Bug fix: the output is a vector, so its role is checked against
        # ROLE_VECTOR (not ROLE_POINT), and the message reports the output's
        # actual role (db.role.inputs.vector does not exist).
        if db.role.outputs.vector != db.ROLE_VECTOR:
            db.log_error(f"Cannot convert role {db.role.outputs.vector} to {db.ROLE_VECTOR}")
            return False
        # The difference of two points is a vector; numpy does the
        # element-wise arithmetic in one call.
        db.outputs.vector = db.inputs.point2 - db.inputs.point1
        return True
# end-role
# begin-array
import numpy as np
class OgnPartialSums:
    @staticmethod
    def compute(db) -> bool:
        """Fill the output array with the running (partial) sums of the input.

        Array outputs have no memory until their size is set: assigning to the
        companion "<name>_size" property allocates the Fabric storage that the
        numpy wrapper then points at. Because that wrapper aliases Fabric
        memory directly you cannot use numpy calls that resize the array, but
        any in-place operation (here cumsum with out=) writes the result with
        no extra copy.

        Alternative, less efficient approaches that also work:
        - assign a complete list or ndarray to db.outputs.partialSums, which
          sizes and copies in a single step, or
        - set the size first and slice-assign:
          db.outputs.partialSums[:] = np.cumsum(db.inputs.array)[:]
        """
        # Allocate the output storage before writing anything into it.
        db.outputs.partialSums_size = db.inputs.array_size
        # Handle the empty-input edge case explicitly so the node never
        # disrupts evaluation.
        if db.outputs.partialSums_size != 0:
            # cumsum writes straight into the Fabric-backed output buffer.
            db.inputs.array.cumsum(out=db.outputs.partialSums)
        return True
# end-array
# begin-tuple-array
class OgnCrossProducts:
    @staticmethod
    def compute(db) -> bool:
        """Compute the element-wise cross product of two point arrays.

        Mismatched input lengths are treated as a hard error; an empty input
        is a successful no-op.
        """
        # Local variables keep the body free of repeated "db.inputs" noise.
        a = db.inputs.a
        b = db.inputs.b
        # This node chooses to make mismatched array lengths an error. It
        # could also warn, or compute only the overlapping prefix.
        if db.inputs.a_size != db.inputs.b_size:
            db.log_error(f"Input array lengths do not match - '{db.inputs.a_size}' vs. '{db.inputs.b_size}'")
            return False
        # As with simple arrays, the output size must be set first so Fabric
        # allocates the memory backing the numpy wrapper.
        db.outputs.crossProduct_size = db.inputs.a_size
        # Bug fix: an empty input is a valid (empty) result, not a failure.
        if db.inputs.a_size == 0:
            return True
        # Bug fix: write into the Fabric-backed output with slice assignment.
        # The original rebound a local ("crossProduct = np.cross(a, b)"),
        # which discarded the result; the output wrapper is also fetched only
        # after resizing so it aliases the newly allocated buffer.
        db.outputs.crossProduct[:] = np.cross(a, b)
        return True
# end-tuple-array
# begin-string
class OgnReverseString:
    @staticmethod
    def compute(db) -> bool:
        """Write the reverse of the input string to the output attribute.

        String attributes surface as plain Python str objects; assigning to
        the output property both allocates the Fabric storage and copies the
        characters.
        """
        db.outputs.result = "".join(reversed(db.inputs.original))
        return True
# end-string
import numpy as np
# begin-any
import omni.graph.core as og
class OgnAdd:
    @staticmethod
    def compute(db) -> bool:
        """Add two extended-type ("any") inputs and store the sum.

        Extended attributes are exposed through wrappers carrying .value (and
        .type) accessors. Rather than branching on every resolved type
        combination, the numpy add performs whatever conversion is needed, as
        does the assignment to the output wrapper. If either step cannot
        handle the resolved types the raised exception is reported as a
        compute failure.
        """
        try:
            total = np.add(db.inputs.a.value, db.inputs.b.value)
            db.outputs.sum.value = total
        except Exception as error:
            db.log_error(f"Addition could not be performed: {error}")
            return False
        return True
# end-any
# begin-union
class OgnMultiplyNumbers:
    @staticmethod
    def compute(db) -> bool:
        """Multiply two union-typed numeric inputs (float or double).

        See the "any" example for full extended-type handling details; a
        union only differs in that the graph restricts resolution to the
        listed types. Unresolved attributes have no accessible data, so the
        exception path doubles as the resolved-type check.
        """
        try:
            # Bug fix: numpy has no np.mult (it always raised, so the node
            # always failed); the element-wise product ufunc is np.multiply.
            db.outputs.product = np.multiply(db.inputs.a, db.inputs.b)
        except Exception as error:
            db.log_error(f"Multiplication could not be performed: {error}")
            return False
        return True
# end-union
# begin-bundle
class OgnMergeBundles:
    """Sample node that merges two input bundles into the output bundle."""

    @staticmethod
    def compute(db) -> bool:
        """Assign bundleA's contents to the output, then insert bundleB's members."""
        bundleA = db.inputs.bundleA
        bundleB = db.inputs.bundleB
        mergedBundle = db.outputs.bundle
        # Bundle assignment means "assign all of the members of the RHS bundle to the LHS bundle". It doesn't
        # do a deep copy of the bundle members.
        # NOTE(review): as written this is a plain Python rebinding of the local
        # name "mergedBundle", not a property assignment on db.outputs — confirm
        # whether "db.outputs.bundle = bundleA" was intended to trigger the
        # bundle accessor's copy semantics.
        mergedBundle = bundleA
        # Bundle insertion adds the contents of a bundle to an existing bundle. The bundles may not have members
        # with the same names
        mergedBundle.insert_bundle(bundleB)
        return True
# end-bundle
# begin-bundle-data
import omni.graph.core as og
FLOAT_TYPE = og.Type(og.BaseDataType.FLOAT)
class OgnCalculateBrightness:
    """Sample node computing brightness from an RGB or CMYK color bundle."""

    @staticmethod
    def brightness_from_rgb(r: float, g: float, b: float) -> float:
        """The actual algorithm to run using a well-defined conversion.

        BUG FIX: this was an instance method taking "self" but was invoked
        unbound through the class with only three arguments, shifting every
        parameter by one and raising TypeError; a staticmethod matches the
        existing call sites.
        """
        return (r * (299.0) + (g * 587.0) + (b * 114.0)) / 256.0

    @staticmethod
    def compute(db) -> bool:
        """Compute outputs.brightness from the r/g/b or c/m/y/k members of inputs.color."""
        # Retrieve the bundle accessor
        color = db.inputs.color
        # Using the bundle accessor, try to retrieve the RGB color members. In this case the types have to be
        # float, though in a more general purpose node you might also allow for double, half, and int types.
        r = color.attribute_by_name(db.tokens.r)
        g = color.attribute_by_name(db.tokens.g)
        b = color.attribute_by_name(db.tokens.b)
        # Validity of a member is confirmed by checking its resolved type
        if r.type == FLOAT_TYPE and g.type == FLOAT_TYPE and b.type == FLOAT_TYPE:
            db.outputs.brightness.value = OgnCalculateBrightness.brightness_from_rgb(r.value, g.value, b.value)
            return True
        # Having failed to extract RGB members, do the same check for CMYK members
        c = color.attribute_by_name(db.tokens.c)
        m = color.attribute_by_name(db.tokens.m)
        y = color.attribute_by_name(db.tokens.y)
        k = color.attribute_by_name(db.tokens.k)
        if c.type == FLOAT_TYPE and m.type == FLOAT_TYPE and y.type == FLOAT_TYPE and k.type == FLOAT_TYPE:
            # BUG FIX: the arithmetic must use the members' .value data, as the
            # RGB branch above does, not the attribute accessors themselves.
            db.outputs.brightness.value = OgnCalculateBrightness.brightness_from_rgb(
                (1.0 - c.value / 100.0) * (1.0 - k.value / 100.0),
                (1.0 - m.value / 100.0) * (1.0 - k.value / 100.0),
                (1.0 - y.value / 100.0) * (1.0 - k.value / 100.0),
            )
            return True
        # You could be more verbose about the reason for the problem as there are a few different scenarios:
        # - some but not all of r,g,b or c,m,y,k were in the bundle
        # - none of the color components were in the bundle
        # - some or all of the color components were found but were of the wrong data type
        # BUG FIX: the database error API is log_error, matching every other sample
        db.log_error("Neither the groups (r, g, b) nor (c, m, y, k) are in the color bundle. Cannot compute brightness")
        return False
# end-bundle-data
# begin-memory-type
class OgnMemoryType:
    """Sample node selecting CPU or GPU storage for its points at runtime."""

    @staticmethod
    def compute(db) -> bool:
        """Move inputs.points to outputs.points, via the GPU for large inputs.

        This node only moves data for illustration; a real node would perform
        an expensive calculation that benefits from being on the GPU once the
        input size crosses the threshold.
        """
        if db.inputs.points.size <= db.inputs.sizeThreshold:
            # Small input: the cpu property forces the data onto the CPU. Any
            # GPU->CPU copies required happen inside Fabric, not in node code.
            db.outputs.points.cpu = db.inputs.points.cpu
        else:
            # Large input: the gpu property forces the data onto the GPU. Any
            # CPU->GPU copies required happen inside Fabric, not in node code.
            db.outputs.points.gpu = db.inputs.points.gpu
        return True
# end-memory-type
# begin-cuda-pointers
class OgnCudaPointers:
    """Sample node handing CUDA array attributes to an external CUDA function."""

    @staticmethod
    def compute(db) -> bool:
        """Forward the input and output CUDA point arrays to the CUDA implementation."""
        input_points = db.inputs.cudaPoints
        output_points = db.outputs.cudaPoints
        # With the *cudaPointers* keyword set to *cpu* these wrapped arrays hold
        # CPU pointers that reference the GPU array data, so they can be
        # dereferenced on the CPU side before being handed off. Otherwise they
        # would hold GPU pointers that cannot be dereferenced here.
        callCudaFunction(input_points, output_points)
        return True
# end-cuda-pointers
# begin-dynamic
class OgnDynamicDuo:
    """Sample node whose result depends on an optional dynamic attribute."""

    @staticmethod
    def compute(db) -> bool:
        """Set outputs.batman according to the presence of the dynamic input "robin"."""
        try:
            # Reading the attribute doubles as an existence test: a dynamic
            # input that was never added raises AttributeError.
            robin_present = db.inputs.robin
        except AttributeError:
            db.outputs.batman = "Solo"
        else:
            db.outputs.batman = "Duo" if robin_present else "Unknown"
        return True
# end-dynamic
# begin-optional
import random
from contextlib import suppress
class OgnShoes:
    """Sample node that outputs a randomly chosen shoe description."""

    # Index 1 (Slippers) is the only style without laces
    SHOE_TYPES = ["Runners", "Slippers", "Oxfords"]

    @staticmethod
    def compute(db) -> bool:
        """Pick a random shoe type, appending the optional lace style when laced."""
        shoe_index = random.randint(0, 2)
        shoe_type_name = OgnShoes.SHOE_TYPES[shoe_index]
        # If the shoe is a type that has laces then append the lace type name
        if shoe_index != 1:
            # As this is an optional value it may or may not be valid at this point.
            # This check happens automatically with required attributes. With optional ones it has to be done when used.
            # BUG FIX: the validity check previously spelled the attribute
            # "shoeLaceStyle" while the data access below uses "shoelaceStyle";
            # both now use the same name.
            if db.attributes.inputs.shoelaceStyle.isValid():
                # The attribute may be valid but the data retrieval may still fail. In Python this is flagged in one of
                # two ways - raising an exception, or returning None. Both indicate the possibility of invalid data.
                # In this node we've chosen to silently ignore expected but invalid shoelace style values. We could
                # equally have logged an error or a warning.
                with suppress(ValueError, TypeError):
                    shoelace_style = db.inputs.shoelaceStyle
                    if shoelace_style is not None:
                        shoe_type_name += f" with {shoelace_style} laces"
        db.outputs.shoeType = shoe_type_name
        return True
# end-optional
# begin-attribute-uiName
import omni.graph.tools.ogn as ogn
class OgnAttributeUiName:
    """Sample node reading an attribute's user-facing name from its metadata."""

    @staticmethod
    def compute(db) -> bool:
        """Print the uiName metadata of the inputs.x attribute."""
        # uiName is stored as ordinary metadata under a reserved key
        ui_name = db.get_metadata(ogn.MetadataKeys.UI_NAME, db.attributes.inputs.x)
        print(f"Call me {ui_name}")
        return True
# end-attribute-uiName
# begin-attribute-uiType
import omni.graph.tools.ogn as ogn
class OgnAttributeUiType:
    """Sample node reading the property panel widget override from metadata."""

    @staticmethod
    def compute(db) -> bool:
        """Print the node's uiType metadata, falling back to '(default)'."""
        # uiType is stored as ordinary metadata under a reserved key
        ui_type = db.get_metadata(ogn.MetadataKeys.UI_TYPE, '(default)')
        print(f"The property panel ui type is {ui_type}")
        return True
# end-attribute-uiType
# begin-unvalidated
import omni.graph.core as og
class OgnABTest:
    """Sample node forwarding one of two unvalidated inputs to the output."""

    @staticmethod
    def compute(db) -> bool:
        """Copy input a or b, chosen by inputs.selectA, into outputs.choice.

        Because the inputs are unvalidated, their resolved types must be
        checked manually against the output's resolved type before use.
        """
        choice = db.outputs.choice
        out_type = choice.type
        # Pick the selected input and remember which one it was for reporting
        if db.inputs.selectA:
            selected, label = db.inputs.a, "a"
        else:
            selected, label = db.inputs.b, "b"
        if not selected.is_valid() or selected.type != out_type:
            db.log_error(
                f"Mismatched types at input {label} - '{selected.type.get_ogn_type_name()}' versus '{out_type.get_ogn_type_name()}'"
            )
            return False
        choice.value = selected.value
        return True
# end-unvalidated
# begin-attribute-metadata
import omni.graph.tools.ogn as ogn
class OgnStarWarsCharacters:
    """Sample node demonstrating access to attribute and node type metadata."""

    @staticmethod
    def compute(db) -> bool:
        """Print custom, automatic, and reserved metadata values for this node."""
        anakin = db.attributes.inputs.anakin
        # Metadata declared in the .ogn file is looked up by its key name
        print(f"Anakin's secret is {db.get_metadata('secret', anakin)}")
        # Automatically added metadata shows up when iterating over everything
        for key, value in anakin.get_all_metadata():
            print(f"Metadata for {key} is {value}")
        # Reserved keys can be queried through the node type's metadata...
        print(f"Node UI Name is {db.get_metadata(ogn.MetadataKeys.UI_NAME)}")
        # ...or through a specific attribute
        print(f"Attribute UI Name is {db.get_metadata(ogn.MetadataKeys.UI_NAME, anakin)}")
        return True
# end-attribute-metadata
# begin-state-node
class OgnStateNode:
    """Sample node that counts how many times it has been evaluated."""

    class State:
        """Container object holding the node's state information"""

        def __init__(self):
            # Number of completed evaluations of the owning node instance
            self.counter = 0

    @staticmethod
    def internal_state():
        """Returns an object that will contain per-node state information"""
        return OgnStateNode.State()

    @staticmethod
    def compute(db) -> bool:
        """Report, then bump, this node instance's evaluation counter."""
        state = db.internal_state
        print(f"This node has been evaluated {state.counter} times")
        state.counter += 1
        return True
# end-state-node
# begin-versioned-node
import carb
import omni.graph.core as og
class OgnMultiply:
    """Sample versioned node computing a * b + offset."""

    @staticmethod
    def compute(db) -> bool:
        """Write inputs.a * inputs.b + inputs.offset into outputs.result."""
        product = db.inputs.a * db.inputs.b
        db.outputs.result = product + db.inputs.offset
        return True

    @staticmethod
    def update_node_version(context, node, old_version, new_version):
        """Upgrade a node's data between versions; returns True on success."""
        if old_version == 1 and new_version == 3:
            # Version 3 introduced the offset term as a new float input
            node.create_attribute("inputs:offset", og.Type(og.BaseDataType.FLOAT))
            return True
        # Always good practice to flag unknown version changes so that they are not forgotten
        carb.log_error(f"Do not know how to upgrade Multiply from version {old_version} to {new_version}")
        return False
# end-versioned-node
| 24,580 | Python | 34.937134 | 129 | 0.655167 |
omniverse-code/kit/exts/omni.graph.tools/docs/ogn_user_guide.rst | .. _ogn_user_guide:
OGN User Guide
==============
Now that you are ready to write an OmniGraph node the first thing you must do is create a node definition. The .ogn
format (short for **O** mni **G** raph **N** ode) is a JSON file that describes the node and its attributes.
Links to relevant sections of the :ref:`ogn_reference_guide` are included throughout, where you can find the
detailed syntax and semantics of all of the .ogn file elements.
OmniGraph nodes are best written by creating a .ogn file with a text editor, with the core algorithm
written in a companion C++ or Python file. There is also the :ref:`omnigraph_node_description_editor`, a work in
progress that will give you a user interface assist in populating your node description.
This document walks through the basics for writing nodes, accessing attribute data, and explains how the nodes fit into
the general ecosystem of the OmniGraph. To get a walkthrough of the node writing process by way of examples that build
on each other, from the simplest to most complex node go to the :ref:`ogn_tutorial_nodes`. This document will
reference relevant tutorials when appropriate, but is intended to be more of a one-stop shop for all features of
OmniGraph nodes.
In the interests of clarity the code samples are kept in a separate document for :ref:`C++<ogn_code_samples_cpp>` and
:ref:`Python<ogn_code_samples_py>` and referred to from here, rather than having everything embedded. If you are
reading this from a web browser you probably want to open a new tab for those links when you visit them.
.. contents::
..
.. note::
For the purpose of these examples the extension **ogn.examples** will be assumed, and names will follow the
established naming conventions.
.. warning::
The code referenced is for illustrative purposes only and some necessary elements may have been elided for
clarity. It may not work as-is.
Generated Files
+++++++++++++++
Before you can write any nodes you must first teach your extension how to build them. These instructions
are tailored for building using premake inside Omniverse Kit, with more generic information being provided to
adapt them to any build environment.
The core of the OmniGraph nodes is the .ogn file. Before actually writing a node you must enable processing of these
files in the build of your extension. If your extension doesn't already support it you can follow the steps in
:ref:`ogn_build_conversion` to add it.
What the build process adds is a step that runs the :ref:`OGN Generator Script<ogn_generation_script>` on your .ogn
file to optionally generate several files you will need for building, testing, running, and documenting your node.
Once you have your .ogn file created, with your build .ogn-enabled as described above, you can run the build with
just that file in place. If it all works you should see the following files added to the build directory. (PLATFORM
can be *windows-x86_64* or *linux-x86_64*, and VARIANT can be *debug* or *release*, depending on what you are
building.)
- *_build/ogn/include/OgnMyNodeDatabase.h*
- *_build/PLATFORM/VARIANT/exts/ogn.examples/docs/OgnMyNode.rst*
- *_build/PLATFORM/VARIANT/exts/ogn.examples/ogn/examples/ogn/OgnMyNode.py*
- *_build/PLATFORM/VARIANT/exts/ogn.examples/ogn/examples/tests/TestOgnMyNode.py*
- *_build/PLATFORM/VARIANT/exts/ogn.examples/ogn/examples/tests/data/OgnMyDatabaseTemplate.usda*
If these are not created, go back and check your build logs to confirm that your build is set up correctly and your
.ogn file was processed correctly.
.. note::
If your node is written in Python then the file *_build/ogn/include/OgnMyNodeDatabase.h* is unused and will not
be generated.
.. tip::
If you have an existing node you wish to convert to .ogn format then you can follow along with the detailed
example of a node's conversion found in :ref:`ogn_node_conversion`.
The Split OmniGraph Extension
+++++++++++++++++++++++++++++
Most extensions are implemented atomically, with all code supporting the feature in a single extension. The OmniGraph
core, however, was split into two. `omni.graph.core` is the basic support for nodes and their evaluation, and
`omni.graph` is the added support for Python bindings and scripts. You almost always want your extension to have a
dependency on `omni.graph`. The main reason for just using `omni.graph.core` is if you have a headless evaluation
engine that has no scripting or UI, just raw calculations, and all of your nodes are written in C++.
The Compute
+++++++++++
The primary function of a node is to use a set of attribute values as input to its algorithm, which generates a
set of output values. In its purest form the node compute operation will be purely functional; reading only received
input attributes and writing its defined output attributes. In this form the node is capable of taking advantage of
the maximum performance provided by threading and distributed computing.
However, we recognize that not all interesting calculations can be expressed in that way, and many times should not,
so OmniGraph is set up to handle more complex configurations such as self-contained subgraphs, internal structures,
and persistent state data, as well as combining all types of nodes into arbitrarily complex graphs.
As the node writer, what happens within the compute function is entirely up to you. The examples here are one possible
approach to these algorithms.
.. important::
It is important to note here that you should consider your node to be an island unto itself. It may live on a
different thread, CPU, GPU, or even physical computer than other nodes in the graph. To guarantee correct
functioning in all situations you should never inject or extract data to or from locations outside of your node. It
should behave as a standalone evaluation engine. This includes other nodes, user interfaces, USD data, and
anything else that is not part of the node's input or output attributes. Should your node require access to such
data then you must provide OmniGraph with the :ref:`ogn_keyword_node_scheduling` information.
Mandatory Node Properties
+++++++++++++++++++++++++
There are properties on the node that are required for every legal file. The node must have a name,
a :ref:`ogn_keyword_node_description`, and a :ref:`ogn_keyword_node_version`. Minimal node definition
which includes only those elements.
.. code-block:: json
{
"NoOp" : {
"description": "Minimal node that does nothing",
"version": 1
}
}
.. note::
As described in :ref:`omnigraph_naming_conventions` the actual unique name of this node will include the extension, and
will be ``ogn.examples.NoOp``.
These examples also illustrate some convenience functions added to the database that facilitate the reporting of
warnings or errors encountered during a node's operation. A warning might be something incidental like a deformer
running on an empty set of points. An error is for something serious like a divide-by-zero error in a calculation.
Using this reporting methods makes debugging node operations much easier. Generally speaking a warning will still
return true as the compute is successful, just not useful, whereas an error will return false indicating that the
compute could not be performed.
+---------------------------------------+-----------------------------------------+
| :ref:`C++ Code<ogn_minimal_node_cpp>` | :ref:`Python Code<ogn_minimal_node_py>` |
+---------------------------------------+-----------------------------------------+
Relevant tutorial - :ref:`ogn_tutorial_empty`.
Although it's not mandatory in every file, the keyword :ref:`ogn_keyword_node_language` is required when
you intend to implement your node in Python. For the above, and all subsequent examples, using the Python node
implementation requires this one extra line in your .ogn file. (C++ is the default so it isn't necessary for nodes
written in C++.)
.. code-block:: json
:emphasize-lines: 5
{
"NoOp" : {
"description": "Minimal node that does nothing in Python",
"version": 1,
"language": "python"
}
}
Secondary Node Properties
+++++++++++++++++++++++++
Some other node properties have simple defaults and need not always be specified in the file. These include
:ref:`ogn_keyword_node_exclude`,
:ref:`ogn_keyword_node_memoryType`,
:ref:`ogn_keyword_node_categories`,
:ref:`ogn_keyword_node_cudaPointers`,
:ref:`ogn_keyword_node_metadata`,
:ref:`ogn_keyword_node_scheduling`,
:ref:`ogn_keyword_node_tags`,
:ref:`ogn_keyword_node_tokens`, and
:ref:`ogn_keyword_node_uiName`.
Providing Scheduling Hints
--------------------------
The scheduler will try to schedule execution of the nodes in as efficient a manner as possible while still maintaining
safe evaluation constraints (e.g. by not scheduling two nodes in parallel that are not threadsafe).
Although it's not (yet) mandatory it is a good idea to provide a value for the :ref:`ogn_keyword_node_scheduling`
keyword so that the scheduler has as much information as possible on how to efficiently schedule your nodes. The
ideal node has *"scheduling": "threadsafe"*, meaning it is safe to schedule that node in parallel with any other
nodes.
Excluding Generated Files
-------------------------
If for some reason you want to prevent any of the normally generated files from being created you can do so within the
.ogn file with the :ref:`ogn_keyword_node_exclude` keyword. For example you might be in a C++-only environment and want
to prevent the Python test scripts and database access file from being created.
.. code-block:: json
:emphasize-lines: 5
{
"NoOp" : {
"description": "Minimal node that does nothing without Python support",
"version": 1,
"exclude": ["python", "tests"]
}
}
In addition to the five generated file types listed above the reference guide shows that you can also exclude
something called **"template"**. This file, if generated, would be a blank implementation of your node, in the
language you've selected. It's not normally generated by the build, though it is useful for manual generation when
you first start implementing a node. The :ref:`omnigraph_node_description_editor` uses this option to give you a
blank node implementation to start with. Adding it to the exclusion list will prevent that.
Relevant tutorial - :ref:`ogn_tutorial_abi`.
.. _ogn_using_gpu_data:
Using GPU Data
--------------
Part of the benefit of using the .ogn format is that it's purely descriptive so it can handle nodes implemented in
different languages and nodes that run on the CPU, the GPU, or both.
The keyword :ref:`ogn_keyword_node_memoryType` is used to specify where the attribute data on a node should live.
By default all of the node data lives on the CPU, however you can use this keyword to tell
:ref:`omnigraph_concept_fabric` that the data instead lives on the GPU, in particular in CUDA format.
.. code-block:: json
:emphasize-lines: 5
{
"NoOp" : {
"description": "Minimal node that does nothing on the GPU",
"version": 1,
"memoryType": "cuda"
}
}
Until you have attributes, though, this keyword has no effect. It is only the attribute's data that lives on
:ref:`omnigraph_concept_fabric`. See :ref:`ogn_overriding_memory_location` for details on how it affects the code
that access the attribute data.
Relevant tutorials - :ref:`ogn_tutorial_cudaData` and :ref:`ogn_tutorial_cpuGpuData`.
By default the memory references of CUDA array data will be GPU-pointer-to-GPU-pointer, for convenience in facilitating
the use of arrays of arrays in an efficient manner. For single arrays, though, this may not be desirable and you might
wish to just use a CPU-pointer-to-GPU-pointer so that it can be dereferenced on the CPU side. To do so you can add
the *cudaPointers* keyword with your memory definition.
.. code-block:: json
:emphasize-lines: 6
{
"NoOp" : {
"description": "Minimal node that does nothing on the GPU",
"version": 1,
"memoryType": "cuda",
"cudaPointers": "cpu"
}
}
Adding Metadata To A Node Type
------------------------------
Node types can have a metadata dictionary associated with them that can be added through the
:ref:`ogn_keyword_node_metadata` keyword.
.. code-block:: json
:emphasize-lines: 5-7
{
"NodeMetadata" : {
"description": "Minimal node that has some metadata",
"version": 1,
"metadata": {
"author": "Bertram P. Knowedrighter"
}
}
}
.. note::
This is not the same as USD metadata. It is only accessible through the OmniGraph node type.
.. tip::
Although all metadata is stored as a string:string mapping in OmniGraph, you can specify a list of strings
in the .ogn file. It will be changed into a single CSV formatted comma-separated string. For example the list
["red", "green", "blue"] results in a single piece of metadata with the value "red,green,blue". The CSV escape
mechanism is used for strings with embedded commas, so the list ["red,green", "blue"] results in the similar but
different metadata "'red,green',blue". Any CSV parser can be used to safely extract the list of values. If your
metadata does not contain commas then a simple tokenizer will also work.
+----------------------------------------+------------------------------------------+
| :ref:`C++ Code<ogn_metadata_node_cpp>` | :ref:`Python Code<ogn_metadata_node_py>` |
+----------------------------------------+------------------------------------------+
Adding Categories To A Node Type
--------------------------------
Node types can have a categories associated with them that can be added through the
:ref:`ogn_keyword_node_categories` keyword. These serve as a common method of grouping similar node types together,
mostly to make the UI easier to navigate.
.. code-block:: json
:emphasize-lines: 5
{
"NodeCategories" : {
"description": "Minimal math array conversion node",
"version": 1,
"categories": ["math:array", "math:conversion"]
}
}
For a more detailed example see the :ref:`omnigraph_node_categories` "how-to".
+------------------------------------------+--------------------------------------------+
| :ref:`C++ Code<ogn_node_categories_cpp>` | :ref:`Python Code<ogn_node_categories_py>` |
+------------------------------------------+--------------------------------------------+
Alternative Icon Location
-------------------------
If the node file *OgnMyNode.ogn* has a file in the same directory named *OgnMyNode.svg* then that file will
automatically be promoted to be the node's icon. If you wish to arrange your icons in a different way then you can
specify a different location for the icon file using the :ref:`ogn_keyword_node_icon` keyword.
The icon path will be relative to the directory in which the *.ogn* file lives so be sure to set your path
accordingly. (A common location might be the *icons/* subdirectory.)
.. code-block:: json
:emphasize-lines: 5
{
"NodeWithOtherIcon" : {
"description": "Minimal node that uses a different icon",
"version": 1,
"icon": "icons/CompanyLogo.svg"
}
}
.. note::
This file will be installed into the build area in your extension directory, under the subdirectory *ogn/icons/*
so you don't have to install it into the build separately.
When the icon is installed you can get at it by using the extension manager's ability to introspect its own path.
Sometimes you might also wish to change the coloring of the icon. By default all of the colors are the same. Using this
extended syntax for the icon specification lets you override the shape, border, and background color of the icon using
either a **#AABBGGRR** hexadecimal format or a **[R, G, B, A]** decimal format.
.. code-block:: json
:emphasize-lines: 5-10
{
"NodeWithOtherColoredIcon" : {
"description": "Minimal node that uses a different colored icon",
"version": 1,
"icon": {
"path": "icons/CompanyLogo.svg",
"color": "#FF223344",
"backgroundColor": [255, 0, 0, 0],
"borderColor": [255, 128, 0, 128]
}
}
}
+-----------------------------------------+-------------------------------------------+
| :ref:`C++ Code<ogn_node_with_icon_cpp>` | :ref:`Python Code<ogn_node_with_icon_py>` |
+-----------------------------------------+-------------------------------------------+
.. tip::
Although the node type icon information is set through the generated code, it is encoded in metadata and as such
can be modified at runtime if you wish to further customize your look.
Singleton Node Types
--------------------
For some types of nodes it is undesirable to have more than one of them per graph, including any child graphs. To add
this restriction a node can be marked as a "singleton" using the :ref:`ogn_keyword_node_singleton` keyword. It is a
shortcut to defining specially named metadata whose presence will prevent more than one node of that type being
instantiated.
.. code-block:: json
:emphasize-lines: 5
{
"SingletonNode" : {
"description": "Minimal node that can only be instantiated once per graph",
"version": 1,
"singleton": true
}
}
.. note::
Node types with this flag set are not true singletons in the programming sense. You can instantiate more than one
of them. The restriction is that they have to be in different graphs.
+-----------------------------------------+---------------------------------------------+
| :ref:`C++ Code<ogn_singleton_node_cpp>` | :ref:`Python Code<ogn_singleton_node_py>` |
+-----------------------------------------+---------------------------------------------+
Node Tags
---------
Nodes can often be grouped in collections orthogonal to their extension owners or names - e.g. you might want the nodes
*Add*, *Multiply*, and *Divide* to appear in a math collection, even though they may have been implemented in three
unrelated extensions. This information appears in the internal metadata value ``tags``.
Since it is so common, a more succinct method of specifying it is available with the :ref:`ogn_keyword_node_tags`
keyword. It is a shortcut to defining that specially named metadata. Also, if it is specified as a list the tags
string will contain the list of names separated by a comma, so these two definitions generate identical code:
.. code-block:: json
:emphasize-lines: 5-7
{
"NodeTagged" : {
"description": "Minimal node with keyword tags",
"version": 1,
"metadata": {
"tags": "cool,new,improved"
}
}
}
.. code-block:: json
:emphasize-lines: 5
{
"NodeTagged" : {
"description": "Minimal node with keyword tags",
"version": 1,
"tags": ["cool", "new", "improved"]
}
}
+------------------------------------+----------------------------------------+
| :ref:`C++ Code<ogn_tags_node_cpp>` | :ref:`Python Code<ogn_tags_node_py>` |
+------------------------------------+----------------------------------------+
Relevant tutorial - :ref:`ogn_tutorial_tupleData`.
String Tokens
-------------
A token is a unique ID that corresponds to an arbitrary string. A lot of the ABI makes use of tokens where the
choices of the string values are limited, e.g. the attribute types, so that fast comparisons can be made. Using tokens
requires accessing a token translation ABI, leading to a lot of duplicated boilerplate code to perform the common
operation of translating a string into a token, and vice-versa. In addition, the translation process could be slow,
so in order to experience the benefits of using a token it should only be done once where possible.
To make this easier, the :ref:`ogn_keyword_node_tokens` keyword is provided in the .ogn file to predefine a set of
tokens that the node will be using. For example if you are going to look up a fixed set of color names at runtime
you can define the color names as tokens.
.. code-block:: json
:emphasize-lines: 5
{
"Tokens" : {
"description": "Minimal node that has some tokens",
"version": 1,
"tokens": ["red", "green", "blue"]
}
}
When you use the alternative token representation you still access the tokens by the simplified name. So this
definition, although the actual token values are different, uses the same code to access those values.
.. code-block:: json
:emphasize-lines: 5
{
"Tokens" : {
"description": "Minimal node that has some tokens",
"version": 1,
"tokens": {"red": "Candy Apple Red", "green": "English Racing Green", "blue": "Sky Blue"}
}
}
As an added simplification, a simple interface to convert between tokens and strings is added to the database code
for nodes in C++. It isn't necessary in Python since Python represents tokens directly as strings.
+--------------------------------------+----------------------------------------+
| :ref:`C++ Code<ogn_tokens_node_cpp>` | :ref:`Python Code<ogn_tokens_node_py>` |
+--------------------------------------+----------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_tokens`, :ref:`ogn_tutorial_bundle_manipulation`,
:ref:`ogn_tutorial_extended_types`, and :ref:`ogn_tutorial_simpleData`.
.. caution::
Although the simplified token access is implemented in Python, ultimately Python string comparisons are all
done as strings, not as token IDs, due to the nature of Python so that code is for convenience, not efficiency.
Providing A User-Friendly Node Type Name
----------------------------------------
While the unique node type name is useful for keeping things well organized it may not be the type of name you would want
to see, e.g. in a dropdown interface when selecting the node type. A specially named metadata value has been reserved
for that purpose, to give a consistent method of specifying a more user-friendly name for the node type.
Since it is so common, a more succinct method of specifying it is available with the :ref:`ogn_keyword_node_uiName`
keyword. It is a shortcut to defining that specially named metadata, so these two definitions generate identical code:
.. code-block:: json
:emphasize-lines: 5-7
{
"NodeUiName" : {
"description": "Minimal node with a UI name",
"version": 1,
"metadata": {
"uiName": "Node With A UI Name"
}
}
}
.. code-block:: json
:emphasize-lines: 5
{
"NodeUiName" : {
"description": "Minimal node with a UI name",
"version": 1,
"uiName": "Node With A UI Name"
}
}
+--------------------------------------+----------------------------------------+
| :ref:`C++ Code<ogn_uiName_node_cpp>` | :ref:`Python Code<ogn_uiName_node_py>` |
+--------------------------------------+----------------------------------------+
Almost every tutorial in :ref:`ogn_tutorial_nodes` make use of this special piece of metadata.
Attribute Definitions
---------------------
Attributes define the data flowing in and out of the node during evaluation. They are divided into three different
locations with different restrictions on each location.
.. code-block:: json
:emphasize-lines: 5-13
{
"NodeWithEmptyAttributes" : {
"description": "Minimal node with empty attribute lists",
"version": 1,
"inputs": {
"NAME": { "ATTRIBUTE_PROPERTY": "PROPERTY_VALUE" }
},
"outputs": {
"NAME": { "ATTRIBUTE_PROPERTY": "PROPERTY_VALUE" }
},
"state": {
"NAME": { "ATTRIBUTE_PROPERTY": "PROPERTY_VALUE" }
}
}
}
Each attribute section contains the name of an attribute in that location. See :ref:`omnigraph_naming_conventions` for a
description of allowable names. The properties in the attribute definitions are described below in the sections on
:ref:`ogn_mandatory_attribute_properties` and :ref:`ogn_secondary_attribute_properties`.
**inputs** are treated as read-only during a compute, and within the database interface to the attribute data. The
input values can only be set through a command, or the ABI. This is intentional, and should not be overridden as it
could cause graph evaluation to become incorrect or unstable.
**outputs** are values the node is to generate during a compute. From one evaluation to another they are not guaranteed
to be valid, or even exist, so it is the node's responsibility to define them and set their values during the compute.
(Optimizations to this process exist, but are beyond the scope of this document.)
**state** attributes persist from one evaluation to the next and are readable and writable. It is the node's
responsibility to ensure that they initialize correctly, either by explicit initialization in the node or through
use of a recognizable default value that indicates an uninitialized state.
Other than the access restrictions described above the attributes are all described in the same way so any
of the keywords descriptions shown for one attribute location type can be used for any of them.
Automatic Test Definitions
--------------------------
It is always a good idea to have test cases for your node to ensure it is and continues to be operating correctly.
The .ogn file helps with this process by generating some simple test scenarios automatically, along with a script
that will exercise them within the test environment.
.. code-block:: json
:emphasize-lines: 5
{
"NodeWithEmptyAttributes" : {
"description": "Minimal node with empty attribute lists",
"version": 1,
"tests": [
{ "TEST_PROPERTY": "TEST_VALUE" }
]
}
}
This subsection will contain a list of such test definitions. More detail on the **TEST_PROPERTY** values is
available in the discussion on :ref:`ogn_defining_automatic_tests`.
.. _ogn_mandatory_attribute_properties:
Mandatory Attribute Properties
++++++++++++++++++++++++++++++
All attributes in any location subsection have certain minimally required properties. The attribute must have a name,
a :ref:`ogn_keyword_attribute_description`, and a :ref:`ogn_keyword_attribute_type`. This is a minimal node definition
with one simple integer value attribute.
.. code-block:: json
{
"Ignore" : {
"description": "Ignore an integer value",
"version": 1,
"inputs": {
"x": {
"description": "Value to be ignored",
"type": "int"
}
}
}
}
The value of the **"type"** property can create very different interfaces to the underlying data. Although the
syntax in the file is the same for every type (with one exception, explained below) the generated access methods are
tuned to be natural for the type of underlying data. See the document on :ref:`ogn_attribute_types` for full details
on the accepted attribute types and how they correspond to C++, Python, JSON, and USD types.
The data types can be divided into categories, explained separately here though there can be any arbitrary amount
of type mixing.
.. note::
The attribute type **"execution"** can also be specified. These attributes do not carry any data, they merely
exist to form connections to trigger node sequences to evaluate based on external conditions. This behavior can
only be seen at the graph level, not at the individual node level.
Simple Data Attribute Types
---------------------------
These denote individual values with a fixed size such as float, int, etc. In Fabric they are stored directly, using
the size of the type to determine how much space to allocate.
This example will illustrate how to access simple data of type float and token. A full set of compatible types and
how they are accessed can be found in :ref:`ogn_attribute_types`.
.. code-block:: json
{
"TokenStringLength" : {
"description": "Compute the length of a tokenized string, in characters",
"version": 1,
"inputs": {
"token": {
"description": "Value whose length is to be calculated",
"type": "token"
}
},
"outputs": {
"length": {
"description": "Number of characters in the input token's string",
"type": "int64"
}
}
}
}
+------------------------------------------+--------------------------------------------+
| :ref:`C++ Code<ogn_simple_node_cpp>` | :ref:`Python Code<ogn_simple_node_py>` |
+------------------------------------------+--------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_simpleData`, :ref:`ogn_tutorial_simpleDataPy`
.. note::
Tokens are simple data types as they have a fixed size in Fabric, however strings do not. Using them is
a special case described in :ref:`ogn_string_attribute_type`.
Tuple Data Attribute Types
--------------------------
These denote fixed numbers of simple values, such as double[4], vectord[3], etc. Each tuple value can be treated
as a single entity, but also provide access to individual tuple elements. In Fabric they are stored directly, using
the size of the simple type and the tuple count to determine how much space to allocate.
.. code-block:: json
{
"VectorMultiply" : {
"description": "Multiple two mathematical vectors to create a matrix",
"version": 1,
"inputs": {
"vector1": {
"description": "First vector to multiply",
"type": "double[4]"
},
"vector2": {
"description": "Second vector to multiply",
"type": "double[4]"
},
"outputs": {
"product": {
"description": "Matrix equal to the product of the two input vectors",
"type": "matrixd[4]"
}
}
}
}
+------------------------------------------+--------------------------------------------+
| :ref:`C++ Code<ogn_tuple_node_cpp>` | :ref:`Python Code<ogn_tuple_node_py>` |
+------------------------------------------+--------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_simpleData`,
:ref:`ogn_tutorial_abi`,
:ref:`ogn_tutorial_cudaData`,
:ref:`ogn_tutorial_cpuGpuData`,
:ref:`ogn_tutorial_simpleDataPy`,
:ref:`ogn_tutorial_abi_py`,
:ref:`ogn_tutorial_state_py`,
:ref:`ogn_tutorial_defaults`, and
:ref:`ogn_tutorial_state_attributes_py`.
Role Data Attribute Types
-------------------------
Roles are specially named types that assign special meanings to certain tuple attribute types. See the details of
what types are available in :ref:`ogn_attribute_roles`.
.. code-block:: json
{
"PointsToVector" : {
"description": "Calculate the vector between two points",
"version": 1,
"inputs": {
"point1": {
"description": "Starting point of the vector",
"type": "pointf[4]"
},
"point2": {
"description": "Ending point of the vector",
"type": "pointf[4]"
}
},
"outputs": {
"vector": {
"description": "Vector from the starting point to the ending point",
"type": "vectorf[4]"
}
}
}
}
+------------------------------------+--------------------------------------+
| :ref:`C++ Code<ogn_role_node_cpp>` | :ref:`Python Code<ogn_role_node_py>` |
+------------------------------------+--------------------------------------+
Relevant tutorial - :ref:`ogn_tutorial_roleData`.
Array Data Attribute Types
--------------------------
These denote variable numbers of simple values, such as double[], bool[], etc. Although the number of elements they
contain is flexible they do not dynamically resize as a ``std::vector`` might, the node writer is responsible for
explicitly setting the size of outputs and the size of inputs is fixed when the compute is called. In Fabric they
are stored in two parts - the array element count, indicating how many of the simple values are contained within the
array, and as a flat piece of memory equal in size to the element count times the size of the simple value.
.. code-block:: json
{
"PartialSum" : {
"description": [
"Calculate the partial sums of an array. Element i of the output array",
"is equal to the sum of elements 0 through i of the input array"
],
"version": 1,
"inputs": {
"array": {
"description": "Array whose partial sum is to be computed",
"type": "float[]"
}
},
"outputs": {
"partialSums": {
"description": "Partial sums of the input array",
"type": "float[]"
}
}
}
}
.. important::
There is no guarantee in Fabric that the array data and the array size information are stored together, or even
in the same memory space. The generated code takes care of this for you, but if you decide to access any of the
data directly through the ABI you should be aware of this.
+------------------------------------------+--------------------------------------------+
| :ref:`C++ Code<ogn_array_node_cpp>` | :ref:`Python Code<ogn_array_node_py>` |
+------------------------------------------+--------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_arrayData`,
:ref:`ogn_tutorial_cudaData`,
:ref:`ogn_tutorial_cpuGpuData`,
:ref:`ogn_tutorial_complexData_py`,
:ref:`ogn_tutorial_defaults`, and
:ref:`ogn_tutorial_tokens`.
Tuple-Array Data Attribute Types
--------------------------------
These denote variable numbers of a fixed number of simple values, such as pointd[3][], int[2][], etc. In principle
they are accessed the same as regular arrays, with the added capability of accessing the individual tuple values on
the array elements. In Fabric they are stored in two parts - the array element count, indicating how many of the
tuple values are contained within the array, and as a flat piece of memory equal in size to the element count times the
tuple count times the size of the simple value. The tuple elements appear contiguously in the data so for example the
memory layout of a **float[3][]** named `t` implemented with a struct containing x, y, z, would look like this:
+--------+--------+--------+--------+--------+--------+--------+------+
| t[0].x | t[0].y | t[0].z | t[1].x | t[1].y | t[1].z | t[2].x | etc. |
+--------+--------+--------+--------+--------+--------+--------+------+
.. code-block:: json
{
"CrossProducts" : {
"description": "Calculate the cross products of an array of vectors",
"version": 1,
"inputs": {
"a": {
"description": "First set of vectors in the cross product",
"type": "vectord[3][]",
"uiName": "First Vectors"
            },
            "b": {
"description": "Second set of vectors in the cross product",
"type": "vectord[3][]",
"uiName": "Second Vectors"
}
},
"outputs": {
"crossProduct": {
"description": "Cross products of the elements in the two input arrays",
"type": "vectord[3][]"
}
}
}
}
+-------------------------------------------+---------------------------------------------+
| :ref:`C++ Code<ogn_tuple_array_node_cpp>` | :ref:`Python Code<ogn_tuple_array_node_py>` |
+-------------------------------------------+---------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_tupleArrays`,
:ref:`ogn_tutorial_cudaData`,
:ref:`ogn_tutorial_cpuGpuData`, and
:ref:`ogn_tutorial_complexData_py`.
.. _ogn_string_attribute_type:
String Attribute Type
---------------------
String data is slightly different from the others. Although it is conceptually simple data, being a single string
value, it is treated as an array in Fabric due to its size allocation requirements. Effort has been made to make
the data accessed from string attributes to appear as much like a normal string as possible, however there is a
restriction on modifications that can be made to them as they have to be resized in Fabric whenever they change
size locally. For that reason, when modifying output strings it is usually best to do all string operations on a local
copy of the string and then assign it to the output once.
.. code-block:: json
{
"ReverseString" : {
"description": "Output the string in reverse order",
"version": 1,
"inputs": {
"original": {
"description": "The string to be reversed",
"type": "string"
}
},
"outputs": {
"reversed": {
"description": "Reversed string",
"type": "string"
}
}
}
}
.. caution::
At this time there is no support for string arrays. Use tokens instead for that purpose.
+--------------------------------------+----------------------------------------+
| :ref:`C++ Code<ogn_string_node_cpp>` | :ref:`Python Code<ogn_string_node_py>` |
+--------------------------------------+----------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_simpleData` and :ref:`ogn_tutorial_simpleDataPy`.
.. _ogn_extended_attribute_types:
Extended Attribute Type - Any
-----------------------------
Sometimes you may want to create a node that can accept a wide variety of data types without the burden of
implementing a different attribute for every acceptable type. For this case the **any** type was introduced.
When an attribute has this type it means "allow connections to any type and resolve the type at runtime".
Practically speaking this type resolution can occur in a number of ways. The main way it resolves now is to create
a connection from an **any** type to a concrete type, such as **float**. Once the connection is made the **any**
attribute type will be resolved and then behave as a **float**.
The implication of this flexibility is that the data types of the **any** attributes cannot be assumed at build time,
only at run time. To handle this flexibility, an extra wrapper layer is added to such attributes to handle
identification of the resolved type and retrieval of the attribute data as that specific data type.
.. code-block:: json
{
"Add" : {
"description": "Compute the sum of two arbitrary values",
"version": 1,
"inputs": {
"a": {
"description": "First value to be added",
"type": "any"
},
"b": {
"description": "Second value to be added",
"type": "any"
}
},
"outputs": {
"sum": {
"description": "Sum of the two inputs",
"type": "any"
}
}
}
}
.. caution::
At this time the extended attribute types are not allowed to resolve to :ref:`ogn_bundle_attribute_types`.
+-----------------------------------+-------------------------------------+
| :ref:`C++ Code<ogn_any_node_cpp>` | :ref:`Python Code<ogn_any_node_py>` |
+-----------------------------------+-------------------------------------+
Relevant tutorial - :ref:`ogn_tutorial_extended_types`.
Extended Attribute Type - Union
-------------------------------
The **union** type is similar to the **any** type in that its actual data type is only decided at runtime. It has the
added restriction of only being able to accept a specific subset of data types, unlike the **any** type that can
literally be any of the primary attribute types.
The way this is specified in the .ogn file is, instead of using the type name **"union"**, you specify the list of
allowable attribute types. Here's an example that can accept either double or float values, but nothing else.
.. code-block:: json
{
"MultiplyNumbers" : {
"description": "Compute the product of two float or double values",
"version": 1,
"inputs": {
"a": {
"description": "First value to be added",
"type": ["double", "float"]
},
"b": {
"description": "Second value to be added",
"type": ["double", "float"]
}
},
"outputs": {
"product": {
"description": "Product of the two inputs",
"type": ["double", "float"]
}
}
}
}
Other than this restriction, which the graph will attempt to enforce, the **union** attributes behave exactly the
same way as the **any** attributes.
+-------------------------------------+---------------------------------------+
| :ref:`C++ Code<ogn_union_node_cpp>` | :ref:`Python Code<ogn_union_node_py>` |
+-------------------------------------+---------------------------------------+
Relevant tutorial - :ref:`ogn_tutorial_extended_types`.
.. _ogn_bundle_attribute_types:
Bundle Attribute Types
----------------------
A bundle doesn't describe an attribute with a specific type of data itself, it is a container for a runtime-curated
set of attributes that do not have definitions in the .ogn file.
.. code-block:: json
{
"MergeBundles" : {
"description": [
"Merge the contents of two bundles together.",
"It is an error to have attributes of the same name in both bundles."
],
"version": 1,
"inputs": {
"bundleA": {
"description": "First bundle to be merged",
"type": "bundle",
},
"bundleB": {
"description": "Second bundle to be merged",
"type": "bundle",
}
},
"outputs": {
"bundle": {
"description": "Result of merging the two bundles",
"type": "bundle",
}
}
}
}
+--------------------------------------+----------------------------------------+
| :ref:`C++ Code<ogn_bundle_node_cpp>` | :ref:`Python Code<ogn_bundle_node_py>` |
+--------------------------------------+----------------------------------------+
.. code-block:: json
"CalculateBrightness": {
"version": 1,
"description": "Calculate the brightness value for colors in various formats",
"tokens": ["r", "g", "b", "c", "m", "y", "k"],
"inputs": {
"color": {
"type": "bundle",
"description": [
"Color value, in a variety of color spaces. The bundle members can either be floats",
"named 'r', 'g', 'b', and 'a', or floats named 'c', 'm', 'y', and 'k'."
]
}
},
"outputs": {
"brightness": {
"type": "float",
"description": "The calculated brightness value"
}
}
}
+--------------------------------------+----------------------------------------+
| :ref:`C++ Code<ogn_bundle_data_cpp>` | :ref:`Python Code<ogn_bundle_data_py>` |
+--------------------------------------+----------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_bundle_manipulation`,
:ref:`ogn_tutorial_bundle_data`, and
:ref:`ogn_tutorial_bundle_add_attributes`.
.. _ogn_secondary_attribute_properties:
Secondary Attribute Properties
++++++++++++++++++++++++++++++
Some other attribute properties have simple defaults and need not always be specified in the file. These include
:ref:`ogn_keyword_attribute_default`,
:ref:`maximum <ogn_keyword_attribute_range>`,
:ref:`ogn_keyword_attribute_memoryType`,
:ref:`ogn_keyword_attribute_metadata`,
:ref:`minimum <ogn_keyword_attribute_range>`,
:ref:`ogn_keyword_attribute_optional`,
:ref:`ogn_keyword_attribute_unvalidated`, and
:ref:`ogn_keyword_attribute_uiName`.
Setting A Default
-----------------
If you don't set an explicit default then the attributes will go to their "natural" default. This is False for a
boolean, zeroes for numeric values including tuples, an empty array for all array types, an empty string for string
and token types, and the identity matrix for matrix, frame, and transform types. Attributes whose types are resolved
at runtime (any, union, and bundle) have no defaults and start in an unresolved state instead.
Sometimes you need a different default though, like setting a scale value to (1.0, 1.0, 1.0), or a token to be used
as an enum to one of the enum values. To do that you simply use the :ref:`ogn_keyword_attribute_default` keyword in
the attribute definition. When it is created it will automatically assume the specified default value.
.. code-block:: json
:emphasize-lines: 5
{
"HairColors": {
"version": 1,
"description": "Collect hair colors for various characters",
"inputs": {
"sabine": {
"type": "token",
"description": "Color of Sabine's hair",
"default": "red"
}
}
}
}
As there is no direct way to access the default values on an attribute yet, no example is necessary.
Relevant tutorials - :ref:`ogn_tutorial_defaults`, :ref:`ogn_tutorial_simpleData` and :ref:`ogn_tutorial_tupleData`.
.. _ogn_overriding_memory_location:
Overriding Memory Location
--------------------------
As described in the :ref:`ogn_using_gpu_data` section, attribute memory can be allocated on the CPU or on the GPU. If
all attributes are in the same location then the node :ref:`ogn_keyword_node_memoryType` keyword specifies where all
of the attribute memory resides. If some attributes are to reside in a different location then those attributes can
override the memory location with their :ref:`ogn_keyword_attribute_memoryType` keyword.
.. code-block:: json
:emphasize-lines: 5,10,21
{
"GpuSwap" : {
"description": "Node that optionally moves data from the GPU to the CPU",
"version": 1,
"memoryType": "any",
"inputs": {
"sizeThreshold": {
"type": "int",
"description": "The number of points at which the computation should be moved to the GPU",
"memoryType": "cpu"
},
"points": {
"type": "pointf[3][]",
"description": "Data to move"
}
},
"outputs": {
"points": {
"type": "pointf[3][]",
"description": "Migrated data, values unchanged"
}
}
}
}
In this description the `inputs:sizeThreshold` data will live on the CPU due to the override, the `inputs:points` data
and the `outputs:points` data will be decided at runtime.
+------------------------------------------------+--------------------------------------------------+
| :ref:`C++ Code<ogn_attribute_memory_type_cpp>` | :ref:`Python Code<ogn_attribute_memory_type_py>` |
+------------------------------------------------+--------------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_cudaData` and :ref:`ogn_tutorial_cpuGpuData`.
Attribute Metadata
------------------
Attributes have a metadata dictionary associated with them in the same way that node types do. Some values are
automatically generated. Others can be added manually through the :ref:`ogn_keyword_attribute_metadata` keyword.
.. code-block:: json
:emphasize-lines: 6-8
{
"StarWarsCharacters": {
"version": 1,
"description": "Database of character information",
"inputs" : {
"anakin": {
"description": "Jedi Knight",
"type": "token",
"metadata": {
"secret": "He is actually Darth Vader"
}
}
}
}
}
.. note::
This is not the same as USD metadata. It is only accessible through the OmniGraph attribute type.
One special metadata item with the keyword ``allowedTokens`` can be attached to attributes of type ``token``.
    It will automatically be added to the USD Attribute's metadata. Like regular tokens, if the token string
    contains any special characters it must be specified as a dictionary whose key is a legal code variable
    name and whose value is the actual string.
+---------------------------------------------+-----------------------------------------------+
| :ref:`C++ Code<ogn_metadata_attribute_cpp>` | :ref:`Python Code<ogn_metadata_attribute_py>` |
+---------------------------------------------+-----------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_abi_py`
Suggested Minimum/Maximum Range
-------------------------------
Numeric values can specify a suggested legal range using the keywords :ref:`minimum <ogn_keyword_attribute_range>` and
:ref:`maximum <ogn_keyword_attribute_range>`. These are not used at runtime at the moment, only within the .ogn file to
verify legality of the default values, or values specified in tests.
Minimum and maximum values can be specified on simple values, tuples (as tuples), arrays (as simple values applied to all array
elements), and tuple-arrays (as tuple values applied to all array elements).
.. code-block:: json
:emphasize-lines: 8-9,14-15,20-21,26-27
"MinMax": {
"version": 1,
"description": "Attribute test exercising the minimum and maximum values to verify defaults",
"inputs": {
"simple": {
"description": "Numeric value in [0.0, 1.0]",
"type": "double",
"minimum": 0.0,
"maximum": 1.0
},
"tuple": {
"description": "Tuple[2] value whose first value is in [-1.0, 1.0] with second value in [0.0, 255.0]",
"type": "double[2]",
"minimum": [-1.0, 0.0],
"maximum": [1.0, 255.0]
},
"array": {
"description": "Array value where every element is in [0, 255]",
"type": "uint",
"minimum": 0,
"maximum": 255
},
"tupleArray": {
"description": "Array of tuple[2] values whose first value is in [5, 10] and second value is at least 12",
"type": "uchar[2][]",
"minimum": [5, 12],
"maximum": [10, 255]
}
}
}
Relevant tutorials - :ref:`ogn_node_conversion`.
Optional Attributes
-------------------
Usually an attribute value must exist and be legal in order for a node's compute to run. This helps the graph avoid
executing nodes that cannot compute their outputs due to missing or illegal inputs. Sometimes a node is capable of
computing an output without certain inputs being present. Those inputs can use the :ref:`ogn_keyword_attribute_optional`
keyword to indicate to OmniGraph that it's okay to compute without it.
.. code-block:: json
:emphasize-lines: 5
{
"Shoes": {
"version": 1,
"description": "Create a random shoe type",
"inputs": {
"shoelaceStyle": {
"type": "token",
"description": "If the shoe type needs shoelaces this will contain the style of shoelace to use",
"optional": true
}
},
"outputs": {
"shoeType": {
"type": "string",
"description": "Name of the randomly generated shoe"
}
}
}
}
It is up to the node to confirm that such optional attributes have legal values before they use them.
+-----------------------------------+-------------------------------------+
| :ref:`C++ Code<ogn_optional_cpp>` | :ref:`Python Code<ogn_optional_py>` |
+-----------------------------------+-------------------------------------+
Unvalidated Attributes For Compute
----------------------------------
Above you can see how attributes may optionally not be required to exist depending on your node function. There is also
a slightly weaker requirement whereby the attributes will exist but they need not have valid values in order for
``compute()`` to be called. Those attributes can use the :ref:`ogn_keyword_attribute_unvalidated`
keyword to indicate to OmniGraph that it's okay to compute without verifying it.
The most common use of this is to handle the case of attributes whose values will only be used under certain
circumstances, especially :ref:`ogn_extended_attribute_types`.
.. code-block:: json
:emphasize-lines: 13,18
{
"ABTest": {
"version": 1,
"description": "Choose one of two inputs based on some input criteria",
"inputs": {
"selectA": {
"type": "bool",
"description": "If true then pass through input a, else pass through input b"
},
"a": {
"type": "any",
"description": "First choice for the a/b test",
"unvalidated": true
},
"b": {
"type": "any",
"description": "Second choice for the a/b test",
"unvalidated": true
}
},
"outputs": {
"choice": {
"type": "any",
"description": "Result from the a/b test choice"
}
}
}
}
It is up to the node to confirm that such attributes have legal values before they use them. Notice here that the output
will be validated. In particular, it will have its resolved type validated before calling ``compute()``. After that the
node will have to confirm that the selected input, a or b, has a type that is compatible with that resolved type.
+--------------------------------------+----------------------------------------+
| :ref:`C++ Code<ogn_unvalidated_cpp>` | :ref:`Python Code<ogn_unvalidated_py>` |
+--------------------------------------+----------------------------------------+
Providing A User-Friendly Attribute Name
----------------------------------------
While the unique attribute name is useful for keeping things well organized it may not be the type of name you would want
to see, e.g. in a dropdown interface when selecting the attribute. A specially named metadata value has been reserved
for that purpose, to give a consistent method of specifying a more user-friendly name for the attribute.
Since it is so common, a more succinct method of specifying it is available with the :ref:`ogn_keyword_attribute_uiName`
keyword. It is a shortcut to defining that specially named metadata, so these two definitions generate identical code:
.. code-block:: json
:emphasize-lines: 6-8
{
"AttributeUiName": {
"version": 1,
"description": "No-op node showing how to use the uiName metadata on an attribute",
"inputs" : {
"x": {
"description": "X marks the spot",
"type": "pointf[3]",
"metadata": {
"uiName": "Treasure Location"
}
}
}
}
}
.. code-block:: json
:emphasize-lines: 6
{
"AttributeUiName": {
"version": 1,
"description": "No-op node showing how to use the uiName metadata on an attribute",
"inputs" : {
"x": {
"description": "X marks the spot",
"type": "pointf[3]",
"uiName": "Treasure Location"
}
}
}
}
+-------------------------------------------+---------------------------------------------+
| :ref:`C++ Code<ogn_uiName_attribute_cpp>` | :ref:`Python Code<ogn_uiName_attribute_py>` |
+-------------------------------------------+---------------------------------------------+
Almost every tutorial in :ref:`ogn_tutorial_nodes` makes use of this special piece of metadata.
.. _ogn_defining_automatic_tests:
Defining Automatic Tests
++++++++++++++++++++++++
It is good practice to always write tests that exercise your node's functionality. Nodes that are purely
functional, that is their outputs can be calculated using only their inputs, can have simple tests written that
set certain input values and compare the outputs against expected results.
To make this process easier the **"tests"** section of the .ogn file was created. It generates a Python test
script in the Kit testing framework style from a set of input, output, and state values on the node.
The algorithm is simple. For each test in the list it sets input and state attributes to the values given in the test
description, using default values for any unspecified attributes, runs the compute on the node, then gathers the
computed outputs and compares them against the expected ones in the test description, ignoring any that did not
appear there.
There are two ways of specifying test data. They are both equivalent so you can choose the one that makes your
particular test data the most readable. The first is to have each test specify a dictionary of
*ATTRIBUTE* : *VALUE*. This is a simple node that negates an input value. The tests run a number of example values
to ensure the correct results are obtained. Four tests are run, each independent of each other.
.. code-block:: json
:emphasize-lines: 17-22
{
"NegateValue": {
"version": 1,
"description": "Testable node that negates an input value",
"inputs" : {
"value": {
"description": "Value to negate",
"type": "float"
}
},
"outputs": {
"result": {
"description": "Negated value of the input",
"type": "float"
}
},
"tests": [
{ "inputs:value": 5.0, "outputs:result": -5.0 },
{ "inputs:value": 0.0, "outputs:result": 0.0 },
{ "inputs:value": -5.0, "outputs:result": 5.0 },
{ "outputs:result": 0.0 }
]
}
}
Note how the last test relies on using the default input value, which for floats is 0.0 unless otherwise specified.
The tests illustrate a decent coverage of the different possible types of inputs.
The other way of specifying tests is to use the same type of hierarchical dictionary structure as the attribute
definitions. The attribute names are thus shorter. This .ogn file generates exactly the same test code as the one
above, with the addition of test descriptions to add more information at runtime.
.. code-block:: json
:emphasize-lines: 17-51
{
"NegateValue": {
"version": 1,
"description": "Testable node that negates an input value",
"inputs" : {
"value": {
"description": "Value to negate",
"type": "float"
}
},
"outputs": {
"result": {
"description": "Negated value of the input",
"type": "float"
}
},
"tests": [
{
"description": "Negate a positive number",
"inputs": {
"value": 5.0
},
"outputs": {
"result": -5.0
}
},
{
"description": "Negate zero",
"inputs": {
"value": 0.0
},
"outputs": {
"result": 0.0
}
},
{
"description": "Negate a negative number",
"inputs": {
"value": -5.0
},
"outputs": {
"result": 5.0
}
},
{
"description": "Negate the default value",
"outputs": {
"result": 0.0
}
}
]
}
}
For this type of simple node you'd probably use the first, abbreviated, version of the test description. The second
type is more suited to nodes with many inputs and outputs.
In addition, if you require more than one node to properly set up your test you can use this format to add in a special
section defining the state of the graph before the tests start. For example if you want to test two nodes chained
together you could do this:
.. code-block:: json
:emphasize-lines: 24-42
{
"AddTwoValues": {
"version": 1,
"description": "Testable node that adds two input values",
"inputs" : {
"a": {
"description": "First value to add",
"type": "float"
},
"b": {
"description": "Second value to add",
"type": "float"
}
},
"outputs": {
"result": {
"description": "Sum of the two inputs",
"type": "float"
}
},
"tests": [
{
"description": "Sum a constant and a connected value",
"setup": {
"nodes": [
["TestNode", "omni.examples.AddTwoValues"],
["InputNode", "omni.examples.AddTwoValues"]
],
"prims": [
["InputPrim", {"value": ["float", 5.0]}]
],
"connections": [
["InputPrim", "value", "TestNode", "inputs:a"]
]
},
"outputs": {
"result": 5.0
}
},
{
"inputs:b": 7.0, "outputs:result": 12.0
}
]
}
}
When there is more than one test the setup that happened in the previous test will still be applied. It will be as
though the tests are run on live data in sequence. To reset the setup configuration put a new one in your test,
including simply *{}* if you wish to start with an empty scene.
There is no C++ or Python code that accesses the test information directly, it is only used to generate the test script.
In addition to your defined tests, extra tests are added to verify the template USD file, if it was generated, and
the import of the Python database module, if it was generated. The test script itself will be installed into a
subdirectory of your Python import directory, e.g. ``ogn.examples/ogn/examples/ogn/tests/TestNegateValue.py``
Relevant tutorials -
:ref:`ogn_tutorial_simpleDataPy`,
:ref:`ogn_tutorial_complexData_py`,
:ref:`ogn_tutorial_abi_py`,
:ref:`ogn_tutorial_state_py`,
:ref:`ogn_tutorial_defaults`,
:ref:`ogn_tutorial_state_attributes_py`,
:ref:`ogn_tutorial_state`,
:ref:`ogn_tutorial_simpleData`,
:ref:`ogn_tutorial_tokens`,
:ref:`ogn_tutorial_abi`,
:ref:`ogn_tutorial_tupleData`,
:ref:`ogn_tutorial_arrayData`,
:ref:`ogn_tutorial_tupleArrays`,
:ref:`ogn_tutorial_roleData`,
:ref:`ogn_tutorial_cudaData`,
:ref:`ogn_tutorial_cpuGpuData`, and
:ref:`ogn_tutorial_cpu_gpu_extended`.
Internal State
++++++++++++++
In addition to having state attributes you may also need to maintain state information that is not representable as a
set of attributes; e.g. binary data, arbitrary C++ structures, etc. Per-node internal state is the mechanism that
accommodates this need.
The approach is slightly different in C++ and Python but the intent is the same. The internal state data is a
node-managed piece of data that persists on the node from one evaluation to the next (though not across file load and
save).
There is nothing to do in the .ogn file to indicate that internal state of this kind is being used. The ABI function
``hasState()`` will return true when it is being used, or when state attributes exist on the node.
.. code-block:: json
{
"Counter" : {
"description": "Count the number of times the node executes",
"version": 1
}
}
+------------------------------------------+--------------------------------------------+
| :ref:`C++ Code<ogn_state_node_cpp>` | :ref:`Python Code<ogn_state_node_py>` |
+------------------------------------------+--------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_state` and :ref:`ogn_tutorial_state_py`.
Versioning
++++++++++
Over time your node type will evolve and you will want to change things within it. When you do that you want all of the
old versions of that node type to continue working, and update themselves to the newer version automatically. The
ABI allows for this by providing a callback to the node that happens whenever a node is created with a version number
lower than the current version number. (Recall the version number is encoded in the .ogn property **"version"** and in the USD
file as the property **custom int node:typeVersion**.)
The callback provides the version it was attempting to create and the version to which it should be upgraded and lets
the node decide what to do about it. The exact details depend greatly on what changes were made from one version to
the next. This particular node is in version 2, where the second version has added the attribute *offset* because
the node function has changed from ``result = a * b`` to ``result = a * b + offset``.
.. code-block:: json
:emphasize-lines: 3,14-17
{
"Multiply": {
"version": 2,
"description": "Node that multiplies two values and adds an offset",
"inputs" : {
"a": {
"description": "First value",
"type": "float"
},
"b": {
"description": "Second value",
"type": "float"
},
"offset": {
"description": "Offset value",
"type": "float"
}
},
"outputs": {
"result": {
"description": "a times b plus offset",
"type": "float"
}
}
}
}
+------------------------------------------+--------------------------------------------+
| :ref:`C++ Code<ogn_versioned_node_cpp>` | :ref:`Python Code<ogn_versioned_node_py>` |
+------------------------------------------+--------------------------------------------+
Relevant tutorials - :ref:`ogn_tutorial_abi` and :ref:`ogn_tutorial_abi_py`.
Other References
++++++++++++++++
- :ref:`Naming and File Conventions<omnigraph_naming_conventions>`
- :ref:`Setting Up Your Build<ogn_extension_tutorial>`
- :ref:`OGN Reference Guide<ogn_reference_guide>`
- :ref:`Feature-Based Tutorials<ogn_tutorial_nodes>`
- :ref:`Attribute Type Details<ogn_attribute_types>`
- :ref:`OmniGraph Python API<omnigraph_python_api>`
| 69,544 | reStructuredText | 40.469887 | 123 | 0.582509 |
omniverse-code/kit/exts/omni.graph.tools/docs/ogn_generation_script.rst | .. _ogn_generation_script:
Generating OmniGraph Nodes
==========================
The OmniGraph nodes consist of code that is automatically generated from a .ogn file and one or more methods
defined by the node class. These interfaces consist of C++ code, Python code, a USDA file, and Python test scripts.
For full details of what is generated automatically see the :ref:`ogn_user_guide`.
.. contents::
.. toctree::
ogn_user_guide
ogn_reference_guide
attribute_types
Running The Script
------------------
The script to run to perform the conversion is *generate_node.py*. It is run with the
same version of Python included with the build in *tools/packman/python.bat* or *tools/packman/python.sh*.
The script `generate_node.py` reads in a node description file in order to automatically generate documentation,
tests, template files, and a simplified interface the node can use to implement its algorithm.
.. note::
Although not required for using the .ogn format, if you are interested in what kind of code is generated from the
descriptions see :ref:`ogn_node_architects_guide`.
Run *generate_node.py --help* to see all of the arguments that can be passed to the script, reproduced here. Each
flag has both a short and long form that are equivalent.
.. code-block:: text
usage: generate_node.py [-h] [-cd DIR] [-c [DIR]] [-d [DIR]]
[-e EXTENSION_NAME] [-i [DIR]]
[-in [INTERMEDIATE_DIRECTORY]]
[-m [PYTHON_IMPORT_MODULE]] [-n [FILE.ogn]] [-p [DIR]]
[-s SETTING_NAME] [-t [DIR]] [-td FILE.json]
[-tp [DIR]] [-u] [-usd [DIR]] [-uw [DIR]] [-v]
Parse a node interface description file and generate code or documentation
optional arguments:
-h, --help show this help message and exit
-cd DIR, --configDirectory DIR
the directory containing the code generator configuration files (default is current)
-c [DIR], --cpp [DIR]
generate the C++ interface class into the specified directory (default is current)
-d [DIR], --docs [DIR]
generate the node documentation into the specified directory (default is current)
-e EXTENSION_NAME, --extension EXTENSION_NAME
name of the extension requesting the generation
-i [DIR], --icons [DIR]
directory into which to install the icon, if one is found
-in [INTERMEDIATE_DIRECTORY], --intermediate [INTERMEDIATE_DIRECTORY]
directory into which temporary build information is stored
-m [PYTHON_IMPORT_MODULE], --module [PYTHON_IMPORT_MODULE]
Python module where the Python node files live
-n [FILE.ogn], --nodeFile [FILE.ogn]
file containing the node description (use stdin if file name is omitted)
-p [DIR], --python [DIR]
generate the Python interface class into the specified directory (default is current)
-s SETTING_NAME, --settings SETTING_NAME
define one or more build-specific settings that can be used to change the generated code at runtime
-t [DIR], --tests [DIR]
generate a file containing basic operational tests for this node
-td FILE.json, --typeDefinitions FILE.json
file name containing the mapping to use from OGN type names to generated code types
-tp [DIR], --template [DIR]
generate an annotated template for the C++ node class into the specified directory (default is current)
-u, --unitTests run the unit tests on this file
-usd [DIR], --usdPath [DIR]
generate a file containing a USD template for nodes of this type
-uw [DIR], --unwritable [DIR]
mark the generated directory as unwritable at runtime
-v, --verbose output the steps the script is performing as it performs them
Available attribute types:
any
bool, bool[]
bundle
colord[3], colord[4], colord[3][], colord[4][]
colorf[3], colorf[4], colorf[3][], colorf[4][]
colorh[3], colorh[4], colorh[3][], colorh[4][]
double, double[2], double[3], double[4], double[], double[2][], double[3][], double[4][]
execution
float, float[2], float[3], float[4], float[], float[2][], float[3][], float[4][]
frame[4], frame[4][]
half, half[2], half[3], half[4], half[], half[2][], half[3][], half[4][]
int, int[2], int[3], int[4], int[], int[2][], int[3][], int[4][]
int64, int64[]
matrixd[2], matrixd[3], matrixd[4], matrixd[2][], matrixd[3][], matrixd[4][]
normald[3], normald[3][]
normalf[3], normalf[3][]
normalh[3], normalh[3][]
objectId, objectId[]
path
pointd[3], pointd[3][]
pointf[3], pointf[3][]
pointh[3], pointh[3][]
quatd[4], quatd[4][]
quatf[4], quatf[4][]
quath[4], quath[4][]
string
texcoordd[2], texcoordd[3], texcoordd[2][], texcoordd[3][]
texcoordf[2], texcoordf[3], texcoordf[2][], texcoordf[3][]
texcoordh[2], texcoordh[3], texcoordh[2][], texcoordh[3][]
timecode, timecode[]
token, token[]
transform[4], transform[4][]
uchar, uchar[]
uint, uint[]
uint64, uint64[]
vectord[3], vectord[3][]
vectorf[3], vectorf[3][]
vectorh[3], vectorh[3][]
["A", "B", "C"... = Any one of the listed types]
The main argument of interest is *--nodeFile MyFile.ogn*. That is how you specify the file for which interfaces are
to be generated. Another one of interest is *--verbose* which, when included, will dump debugging information
describing the operations being performed. Several other options describe which of the available outputs will be
generated.
.. note::
There is also an environment variable controlling build flags. If you set **OGN_DEBUG** then the .ogn
generator will use its *--verbose* option to dump information about the parsing of the file and generation
of the code - worth remembering if you are running into code generation errors.
The usual method of running the script is through the build process, described in :ref:`ogn_tutorial_nodes`.
Other uses of the script are the following:
.. code-block:: bash
# Generate the node using an alternate code path controlled by the value of CODE_SETTING
python generate_node.py --nodeFile MYNODE.ogn --settings CODE_SETTING
# Validate a .ogn file but do not produce any output
python generate_node.py --schema --nodeFile MYNODE.ogn
# Generate all of the interfaces
python generate_node.py --cpp INCLUDE_DIR --python PYTHON_DIR --schema --namespace omni.MY.FEATURE --nodeFile MYNODE.ogn
.. note::
Normally the generated directory is set up to regenerate its files on demand when the .ogn or .py implementation
files change to facilitate hot reloading. When the nodes are installed from an extension through a distribution
this is disabled by using the `--unwritable` flag to tag the generated directory to prevent that. This speeds up
the extension loading and avoids potential write permission problems with installed directories.
.. note::
The script is written to run on Python 3.6 or later. Earlier 3.x versions may work but are untested. It will always
be guaranteed to run on the same version of Python as Kit.
| 7,821 | reStructuredText | 49.141025 | 131 | 0.619742 |
omniverse-code/kit/exts/omni.graph.tools/docs/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.17.2] - 2023-02-14
### Fixed
- Made test file pattern account for node files not beginning with Ogn
## [1.17.1] - 2023-02-08
### Changed
- Modified lookup of target extension version to account for the modified path names
## [1.17.0] - 2022-10-03
### Fixed
- Changed code emission to avoid defining interface that will not be used
## [1.16.3] - 2022-09-28
### Added
- Better documentation for categories
### Removed
- Testing for obsolete transform data type
## [1.16.2] - 2022-08-30
### Fixed
- Linting errors
## [1.16.1] - 2022-08-16
### Changed
- Refactored checking for legal extension name as Python import
## [1.16.0] - 2022-08-25
### Added
- Support for new setting that turns deprecations into errors
- Tests for deprecation code
- Access to deprecation message logs to use for testing
## [1.15.2] - 2022-08-09
### Fixed
- Applied formatting to all of the Python files
## [1.15.1] - 2022-08-05
### Fixed
- All of the lint errors reported on the Python files in this extension
## [1.15.0] - 2022-08-01
### Changed
- Change `m_primHandle` to `m_bundleHandle` in `BundleAttributeManager`
## [1.14.1] - 2022-07-25
### Added
- Added ALLOW_MULTI_INPUTS metadata key
## [1.14.0] - 2022-07-13
### Added
- Added UI_TYPE metadata key, added class method to MetadataKeys to get all the metadata key strings
## [1.13.0] - 2022-07-08
### Added
- Support for 'deprecation' attribute keyword in .ogn files.
## [1.12.0] - 2022-07-07
### Changed
- Refactored import of non-public API to emit a deprecation warning
- Moved node_generator/ into the _impl section
### Added
- Support for fully defined Python API at the omni.graph.tools level
- Support for fully defined Python API at the omni.graph.tools.ogn level
- Support for public API consistency test
## [1.11.0] - 2022-06-28
### Changed
- Merged to USD and import generated tests and added enhanced generated code coverage
## [1.10.0] - 2022-06-23
### Removed
- Support for the deprecated Autograph functionality, now in omni.graph.core.autonode
## [1.9.1] - 2022-06-17
### Fixed
- Corrected bad API documentation formatting
### Added
- Documentation links for Python API
## [1.9.0] - 2022-06-13
### Added
- Check to see if values are already set before initializing defaults in Python nodes
## [1.8.1] - 2022-06-08
### Fixed
- Add stdint include when constructing node database files.
## [1.8.0] - 2022-06-07
### Added
- Support for generator settings to alter the generated code
- Generator setting for Python output optimization
- Build flag to modify generator settings
- Generator script support for generator settings being passed around
## [1.7.0] - 2022-05-27
### Added
- Ability for the controller to take an attribute description as the first parameter instead of only attributes
## [1.6.2] - 2022-05-17
### Fixed
- Improved node description formatting by using newlines to indicate paragraph breaks
## [1.6.1] - 2022-05-11
### Added
- Category for UI nodes
## [1.6.0] - 2022-05-10
### Added
- Ability to use @deprecated_function with property getters and setters.
## [1.5.4] - 2022-05-06
### Fixed
- Fixed the emission of the CPU to GPU pointer information for output bundles
## [1.5.3] - 2022-04-29
### Fixed
- Fixed incorrect line highlighting in user guide
## [1.5.2] - 2022-04-25
### Fixed
- Stopped generating bundle handle extraction in situations where the handle will not be used
## [1.5.1] - 2022-04-05
### Fixed
- Removed regeneration warning until such time as the regeneration actually happens
## [1.5.0] - 2022-03-24
### Fixed
- Fixed generated contents of tests/__init__.py to be constant
- Refactored generation to use standard import pattern
### Added
- Ability to create generated directories on the fly rather than insisting they already exist
## [1.4.0] - 2022-03-14
### Added
- *ensure_nodes_in_toml.py* to add the **[[omnigraph]]** section to the extension.toml file
## [1.3.2] - 2022-03-14
### Added
- examples category
## [1.3.1] - 2022-03-09
### Added
- Added literalOnly to the list of metadata keys
- Added some explanation for the literalOnly metadata key to the OGN reference guide
## [1.3.0] - 2022-03-08
### Changed
- Changed the naming of the generated tests to be shorter for easier use in TestRunner
- Changed the generated USD to use the schema prims
- Changed the generated test scripts to use the schema prims
- Removed unused USD metadata from generated code
## [1.2.4] - 2022-03-08
### Changed
- Modified C++ generated code node registration to match *omni.graph.core 2.23.3*
## [1.2.3] - 2022-02-15
### Added
- script node category
## [1.2.1] - 2022-02-10
### Added
- Unset the useSchemaPrims setting for tests until they are working
## [1.2.0] - 2022-02-09
### Changed
- Moved autograph to omni.graph.core, retained imports for backward compatibility
## [1.1.1] - 2021-08-30
### Breaking Changes
- Type names have changed. `Float32` is now `Float` and `Int32` is now `Int`.
### Improvements and Bugfixes
- *BUGFIX* Fixed type initializers in autograph
- *PERF IMPROVEMENT* Functions using Autofunc now use code generated at read time instead of runtime lookups.
- *UI IMPROVEMENT* Nodes no longer have extra connections with the node name.
## [1.1.0] - 2021-08-19
### Adds Autograph
Added Autograph tools and types
## [1.0.0] - 2021-03-01
### Initial Version
- Started changelog with initial released version of the OmniGraph core
| 5,628 | Markdown | 27.573604 | 111 | 0.714819 |
omniverse-code/kit/exts/omni.graph.tools/docs/attribute_types.rst | .. _ogn_attribute_types:
Attribute Data Types
====================
The attribute data type is the most important part of the attribute. It describes the type of data the attribute
references, and the type of generated interface the node writer will use to access that data.
Attribute data at its core consists of a short list of data types, called `Base Data Types`_. These types encapsulate
a single value, such as a float or integer.
.. warning::
Not all attribute types may be supported by the code generator. For a list of currently supported types
use the command ``generate_node.py --help``.
.. note::
The information here is for the default type definitions. You can override the type definitions using a
configuration file whose format is show in `Type Definition Overrides`_.
.. important::
When extracting bundle members in C++ you'll be passing in a template type to get the value. That is not the type
shown here, these are the types you'll get as a return value. (e.g. pass in _OgnToken_ to get a return value of
_NameToken_, or _float[]_ to get a return value of _ogn::array<float>_.)
Base Data Types
---------------
This table shows the conversion of the **Type Name**, which is how the attribute type appears in the .ogn file
*type* value of the attribute, to the various data types of the other locations the attribute might be referenced:
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| Type Name | USD | C++ | CUDA | Python | JSON | Description |
+===========+========+==============+==============+========+=========+==================================================+
| bool | bool | bool | bool* | bool | bool | True/False value |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| double | double | double | double* | float | float | 64 bit floating point |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| float | float | float | float* | float | float | 32 bit floating point |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| half | half | pxr::GfHalf | __half* | float | float | 16 bit floating point |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| int | int | int32_t | int* | int | integer | 32-bit signed integer |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| int64 | int64 | int64_t | longlong* | int | integer | 64-bit signed integer |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| path | path | ogn::string | ogn::string | str | string | Path to another node or attribute |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| string | string | ogn::string | ogn::string | str | string | Standard string |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| token | token | NameToken | NameToken* | str | string | Interned string with fast comparison and hashing |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| uchar | uchar | uchar_t | uchar_t* | int | integer | 8-bit unsigned integer |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| uint | uint | uint32_t | uint32_t* | int | integer | 32-bit unsigned integer |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
| uint64 | uint64 | uint64_t | uint64_t* | int | integer | 64-bit unsigned integer |
+-----------+--------+--------------+--------------+--------+---------+--------------------------------------------------+
.. note::
For C++ types on input attributes a `const` is prepended to the simple types.
Here are samples of base data type values in the various languages:
**USD Type**
++++++++++++
The type of data as it would appear in a .usd file
.. code-block:: usd
custom int inputs:myInt = 1
custom float inputs:myFloat = 1
**C++ Type**
++++++++++++
The value type a C++ node implementation uses to access the attribute's data
.. code-block:: c++
static bool compute(OgnMyNodeDatabase& db)
{
const int& iValue = db.inputs.myInt();
const float& fValue = db.inputs.myFloat();
}
**CUDA Type**
+++++++++++++
The value type a C++ node implementation uses to pass the attribute's data to CUDA code. Note the use of attribute
type definitions to make the function declarations more consistent.
.. code-block:: c++
extern "C" void runCUDAcompute(inputs::myInt_t*, inputs::myFloat_t*);
static bool compute(OgnMyNodeDatabase& db)
{
const int* iValue = db.inputs.myInt();
const float* fValue = db.inputs.myFloat();
runCUDAcompute( iValue, fValue );
}
.. code-block:: c++
extern "C" void runCUDAcompute(inputs::myInt_t* intValue, inputs::myFloat_t* fValue)
{
}
**Python Type Hint**
++++++++++++++++++++
The value used by the Python typing system to provide a hint about the expected data type
.. code-block:: python
@property
def myInt(self) -> int:
return attributeValues.myInt
**JSON Type**
+++++++++++++
The value type that the .ogn file expects from test or default data from the attribute
.. code-block:: json
{
"myNode" : {
"description" : ["This is my node with one integer and one float input"],
"version" : 1,
"inputs" : {
"myInt" : {
"description" : ["This is an integer attribute"],
"type" : "int",
"default" : 0
},
"myFloat" : {
"description" : ["This is a float attribute"],
"type" : "float",
"default" : 0.0
}
}
}
}
Array Data Types
----------------
An array type is a list of another data type with indeterminate length, analogous to a ``std::vector`` in C++ or a
``list`` type in Python.
Any of the base data types can be made into array types by appending square brackets (`[]`) to the type name. For
example an array of integers would have type `int[]` and an array of floats would have type `float[]`.
The JSON schema type is "array" with the type of the array's "items" being the base type, although in the file it will
just look like ``[VALUE, VALUE, VALUE]``.
Python uses the *numpy* library to return both tuple and array data types.
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| Type Name | USD | C++ | CUDA | Python | JSON |
+===========+==========+=========================+===================+==============================+===========+
| bool[] | bool[] | ogn::array<bool> | bool*,size_t | numpy.ndarray[numpy.bool] | bool[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| double[] | double[] | ogn::array<double> | double*,size_t | numpy.ndarray[numpy.float64] | float[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| float[] | float[] | ogn::array<float> | float*,size_t | numpy.ndarray[numpy.float64] | float[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| half[] | half[] | ogn::array<pxr::GfHalf> | __half*,size_t | numpy.ndarray[numpy.float64] | float[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| int[] | int[] | ogn::array<int32_t> | int*,size_t | numpy.ndarray[numpy.int32] | integer[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| int64[] | int64[] | ogn::array<int64_t> | longlong*,size_t | numpy.ndarray[numpy.int32] | integer[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| token[] | token[] | ogn::array<NameToken> | NameToken*,size_t | numpy.ndarray[numpy.str] | string[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| uchar[] | uchar[] | ogn::array<uchar_t> | uchar_t*,size_t | numpy.ndarray[numpy.int32] | integer[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| uint[] | uint[] | ogn::array<uint32_t> | uint32_t*,size_t | numpy.ndarray[numpy.int32] | integer[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
| uint64[] | uint64[] | ogn::array<uint64_t> | uint64_t*,size_t | numpy.ndarray[numpy.int32] | integer[] |
+-----------+----------+-------------------------+-------------------+------------------------------+-----------+
.. note::
For C++ types on input attributes the array type is `ogn::const_array`.
Here are samples of array data type values in the various languages:
**USD Array Type**
++++++++++++++++++
.. code-block:: usd
custom int[] inputs:myIntArray = [1, 2, 3]
custom float[] inputs:myFloatArray = [1.0, 2.0, 3.0]
**C++ Array Type**
++++++++++++++++++
.. code-block:: c++
static bool compute(OgnMyNodeDatabase& db)
{
const ogn::const_array<int32_t>& iValue = db.inputs.myIntArray();
const auto& fValue = db.inputs.myFloatArray();
}
**CUDA Array Type**
+++++++++++++++++++
.. code-block:: c++
extern "C" void runCUDAcompute(inputs::myIntArray_t*, size_t, inputs::myFloatArray_t*, size_t);
static bool compute(OgnMyNodeDatabase& db)
{
const int* iValue = db.inputs.myIntArray();
auto iSize = db.inputs.myIntArray.size();
const auto fValue = db.inputs.myFloatArray();
auto fSize = db.inputs.myFloatArray.size();
runCUDAcompute( iValue, iSize, fValue, fSize );
}
.. code-block:: c++
extern "C" void runCUDAcompute(inputs::myIntArray_t* iArray, size_t iSize, inputs::myFloat_t* fArray, size_t fSize)
{
// In here it is true that the number of elements in iArray = iSize
}
**Python Array Type Hint**
++++++++++++++++++++++++++
.. code-block:: python
import numpy as np
@property
def myIntArray(self) -> np.ndarray[np.int32]:
return attributeValues.myIntArray
**JSON Array Type**
+++++++++++++++++++
.. code-block:: json
{
"myNode" : {
"description" : ["This is my node with one integer array and one float array input"],
"version" : 1,
"inputs" : {
"myIntArray" : {
"description" : ["This is an integer array attribute"],
"type" : "int[]",
"default" : [1, 2, 3]
},
"myFloatArray" : {
"description" : ["This is a float array attribute"],
"type" : "float[]",
"default" : [1.0, 2.0, 3.0]
}
}
}
}
Tuple Data Types
----------------
A tuple type is a list of another data type with fixed length, analogous to a ``std::array`` in C++ or a
``tuple`` type in Python. Not every type can be a tuple, and the tuple count is restricted to a small subset of those
supported by USD. They are denoted with by appending square brackets containing the tuple count to the
type name. For example a tuple of two integers would have type `int[2]` and a tuple of three floats would have type
`float[3]`.
Since tuple types are implemented in C++ as raw data there is no differentiation between the types returned by input
versus output attributes, just a *const* clause.
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| Type Name | USD | C++ | CUDA | Python | JSON |
+===========+===============================+==============+==========+==================================+==============================+
| double[2] | (double,double) | pxr::GfVec2d | double2* | numpy.ndarray[numpy.float64](2,) | [float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| double[3] | (double,double,double) | pxr::GfVec3d | double3* | numpy.ndarray[numpy.float64](3,) | [float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| double[4] | (double,double,double,double) | pxr::GfVec4d | double4* | numpy.ndarray[numpy.float64](4,) | [float, float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| float[2] | (float,float) | pxr::GfVec2f | float2* | numpy.ndarray[numpy.float](2,) | [float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| float[3] | (float,float,float) | pxr::GfVec3f | float3* | numpy.ndarray[numpy.float](3,) | [float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| float[4] | (float,float,float,float) | pxr::GfVec4f | float4* | numpy.ndarray[numpy.float](4,) | [float, float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| half[2] | (half,half) | pxr::GfVec2h | __half2* | numpy.ndarray[numpy.float16](2,) | [float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| half[3] | (half,half,half) | pxr::GfVec3h | __half3* | numpy.ndarray[numpy.float16](3,) | [float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| half[4] | (half,half,half,half) | pxr::GfVec4h | __half4* | numpy.ndarray[numpy.float16](4,) | [float, float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| int[2] | (int,int) | pxr::GfVec2i | int2* | numpy.ndarray[numpy.int32](2,) | [float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| int[3] | (int,int,int) | pxr::GfVec3i | int3* | numpy.ndarray[numpy.int32](3,) | [float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
| int[4] | (int,int,int,int) | pxr::GfVec4i | int4* | numpy.ndarray[numpy.int32](4,) | [float, float, float, float] |
+-----------+-------------------------------+--------------+----------+----------------------------------+------------------------------+
.. note::
Owing to this implementation of a wrapper around raw data all of these types can also be safely
cast to other types that have an equivalent memory layout. For example:
``MyFloat3& f3 = reinterpret_cast<MyFloat3&>(db.inputs.myFloat3Attribute());``
Here's an example of how the class, typedef, USD, and CUDA types relate:
.. code-block:: c++
const GfVec3f& fRaw = db.inputs.myFloat3();
const ogn::float3& fOgn = reinterpret_cast<const ogn::float3&>(fRaw);
const carb::Float3& fCarb = reinterpret_cast<const carb::Float3&>(fOgn);
vectorOperation( fCarb.x, fCarb.y, fCarb.z );
callCUDAcode( fRaw );
.. code-block:: c++
extern "C" void callCUDAcode(float3 myFloat3) {...}
Here are samples of tuple data type values in the various languages:
**USD Tuple Type**
++++++++++++++++++
.. code-block:: usd
custom int2 inputs:myIntTuple = (1, 2)
custom float3 inputs:myFloatTuple = (1.0, 2.0, 3.0)
**C++ Tuple Type**
++++++++++++++++++
.. code-block:: c++
static bool compute(OgnMyNodeDatabase& db)
{
const GfVec2i& iValue = db.inputs.myIntTuple();
const GfVec3f& fValue = db.inputs.myFloatTuple();
}
**CUDA Tuple Type**
+++++++++++++++++++
.. code-block:: c++
// Note how the signatures are not identical between the declaration here and the
// definition in the CUDA file. This is possible because the data types have identical
// memory layouts, in this case equivalent to int[2] and float[3].
extern "C" runCUDAcompute(pxr::GfVec2i* iTuple, pxr::GfVec3f* fTuple);
static bool compute(OgnMyNodeDatabase& db)
{
runCUDAcompute( db.inputs.myIntTuple(), db.inputs.myFloatTuple() );
}
.. code-block:: c++
extern "C" void runCUDAcompute(int2* iTuple, float3* fTuple)
{
// In here it is true that the number of elements in iArray = iSize
}
**Python Tuple Type Hint**
++++++++++++++++++++++++++
.. code-block:: python
import numpy as np
@property
def myIntTuple(self) -> np.ndarray[np.int]:
return attributeValues.myIntTuple
@property
def myFloatTuple(self) -> np.ndarray[np.float]:
return attributeValues.myFloatTuple
**JSON Tuple Type**
+++++++++++++++++++
.. code-block:: json
{
"myNode" : {
"description" : ["This is my node with one integer tuple and one float tuple input"],
"version" : 1,
"inputs" : {
"myIntTuple" : {
"description" : ["This is an integer tuple attribute"],
"type" : "int[2]",
"default" : [1, 2]
},
"myFloatTuple" : {
"description" : ["This is a float tuple attribute"],
"type" : "float[3]",
"default" : [1.0, 2.0, 3.0]
}
}
}
}
Arrays of Tuple Data Types
--------------------------
Like base data types, there can also be arrays of tuples by appending '[]' to the data type. For now the only
ones supported are the above special types, supported natively in USD. Once the USD conversions are sorted out,
all tuple types can be arrays using these rules.
The type names will have the tuple specification followed by the array specification, e.g. *float[3][]* for an
array of three-floats. This will also extend to arrays of arrays in the future by appending another '[]'.
JSON makes no distinction between arrays and tuples so it will be a multi-dimensional list.
USD uses parentheses **()** for tuples and square brackets **[]** for arrays so both are used to specify the
data values. The types are specified according to the USD Type column in the table above with square brackets appended.
Both the Python and C++ tuple and array types nest for arrays of tuple types.
+-------------+--------------------------------------+--------------------------+--------------------------------------------------+----------------------------+-----------+
| Type Name | C++ | CUDA | Python | JSON | Direction |
+=============+======================================+==========================+==================================================+============================+===========+
| TYPE[N][] | ogn::array<TUPLE_TYPE> | TUPLE_TYPE*,size_t | numpy.ndarray[numpy.TUPLE_TYPE](N,TUPLE_COUNT,) | array of array of JSONTYPE | Output |
+-------------+--------------------------------------+--------------------------+--------------------------------------------------+----------------------------+-----------+
Here are samples of arrays of tuple data type values in the various languages:
**USD Tuple Array Type**
++++++++++++++++++++++++
.. code-block:: usd
custom int2[] inputs:myIntTuple = [(1, 2), (3, 4), (5, 6)]
custom float3[] inputs:myFloatTuple = [(1.0, 2.0, 3.0)]
**C++ Tuple Array Type**
++++++++++++++++++++++++
.. code-block:: c++
static bool compute(OgnMyNodeDatabase& db)
{
const ogn::const_array<GfVec2i>& iValue = db.inputs.myIntTupleArray();
const ogn::const_array<GfVec3f> &fValue = db.inputs.myFloatTupleArray();
// or const auto& fValue = db.inputs.myFloatTupleArray();
}
**CUDA Tuple Array Type**
+++++++++++++++++++++++++
.. code-block:: c++
extern "C" runCUDAcompute(inputs::myIntTupleArray_t* iTuple, size_t iSize,
inputs::myFloatTupleArray_t* fTuple, size_t fSize);
static bool compute(OgnMyNodeDatabase& db)
{
runCUDAcompute( db.inputs.myIntTupleArray(), db.inputs.myIntTupleArray.size(),
db.inputs.myFloatTupleArray(), db.inputs.myFloatTupleArray.size() );
}
.. code-block:: c++
extern "C" void runCUDAcompute(int2** iTuple, size_t iSize, float3** fTuple, size_t fSize)
{
}
**Python Tuple Array Type Hint**
++++++++++++++++++++++++++++++++
.. code-block:: python
import numpy as np
@property
def myIntTupleArray(self) -> np.ndarray:
return attributeValues.myIntTupleArray
@property
def myFloatTupleArray(self) -> np.ndarray:
return attributeValues.myFloatTupleArray
**JSON Tuple Array Type**
+++++++++++++++++++++++++
.. code-block:: json
{
"myNode" : {
"description" : ["This is my node with one integer tuple array and one float tuple array input"],
"version" : 1,
"inputs" : {
"myIntTuple" : {
"description" : ["This is an integer tuple array attribute"],
"type" : "int[2][]",
"default" : [[1, 2], [3, 4], [5, 6]]
},
"myFloatTuple" : {
"description" : ["This is a float tuple array attribute"],
"type" : "float[3][]",
"default" : []
}
}
}
}
.. _ogn_attribute_roles:
Attribute Types With Roles
--------------------------
Some attributes have specific interpretations that are useful for determining how to use them at runtime. These
roles are encoded into the names for simplicity.
.. note::
The fundamental data in the attributes when an AttributeRole is set are unchanged. Adding the role just allows
the interpretation of that data as a first class object of a non-trivial type. The "C++ Type" column in the
table below shows how the underlying data is represented.
For simplicity of specification, the type of base data is encoded in the type name, e.g. *colord* for colors
using double values and *colorf* for colors using float values.
+--------------+------------+------------+----------+-----------------------------------------------------------+
| Type Name | USD | C++ | CUDA | Description |
+==============+============+============+==========+===========================================================+
| colord[3] | color3d | GfVec3d | double3 | Color value with 3 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| colorf[3] | color3f | GfVec3f | float3 | Color value with 3 members of type float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| colorh[3] | color3h | GfVec3h | __half3 | Color value with 3 members of type 16 bit float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| colord[4] | color4d | GfVec4d | double4 | Color value with 4 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| colorf[4] | color4f | GfVec4f | float4 | Color value with 4 members of type float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| colorh[4] | color4h | GfVec4h | __half4 | Color value with 4 members of type 16 bit float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| normald[3] | normal3d | GfVec3d | double3 | Normal vector with 3 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| normalf[3] | normal3f | GfVec3f | float3 | Normal vector with 3 members of type float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| normalh[3] | normal3h | GfVec3h | __half3 | Normal vector with 3 members of type 16 bit float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| pointd[3]    | point3d    | GfVec3d    | double3  | Cartesian point value with 3 members of type double       |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| pointf[3]    | point3f    | GfVec3f    | float3   | Cartesian point value with 3 members of type float        |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| pointh[3]    | point3h    | GfVec3h    | __half3  | Cartesian point value with 3 members of type 16 bit float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| quatd[4]     | quatd      | GfQuatd    | double4  | Quaternion with 4 members of type double as IJKR          |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| quatf[4]     | quatf      | GfQuatf    | float4   | Quaternion with 4 members of type float as IJKR           |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| quath[4]     | quath      | GfQuath    | __half4  | Quaternion with 4 members of type 16 bit float as IJKR    |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| texcoordd[2] | texCoord2d | GfVec2d | double2 | Texture coordinate with 2 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| texcoordf[2] | texCoord2f | GfVec2f | float2 | Texture coordinate with 2 members of type float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| texcoordh[2] | texCoord2h | GfVec2h | __half2 | Texture coordinate with 2 members of type 16 bit float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| texcoordd[3] | texCoord3d | GfVec3d | double3 | Texture coordinate with 3 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| texcoordf[3] | texCoord3f | GfVec3f | float3 | Texture coordinate with 3 members of type float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| texcoordh[3] | texCoord3h | GfVec3h | __half3 | Texture coordinate with 3 members of type 16 bit float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| timecode | timecode | double | double | Double value representing a timecode |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| vectord[3] | vector3d | GfVec3d | double3 | Vector with 3 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| vectorf[3] | vector3f | GfVec3f | float3 | Vector with 3 members of type float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| vectorh[3] | vector3h | GfVec3h | __half3 | Vector with 3 members of type 16 bit float |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| matrixd[2] | matrix2d | GfMatrix2d | Matrix2d | Transform matrix with 4 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| matrixd[3] | matrix3d | GfMatrix3d | Matrix3d | Transform matrix with 9 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
| matrixd[4] | matrix4d | GfMatrix4d | Matrix4d | Transform matrix with 16 members of type double |
+--------------+------------+------------+----------+-----------------------------------------------------------+
Python and JSON do not have special types for role-based attributes, although that may change for Python once its
interface is fully defined.
The roles are all tuple types so the Python equivalents will all be of the form **Tuple[TYPE, TYPE...]**, and JSON data
will be of the form **[TYPE, TYPE, TYPE]**. The types corresponding to the *Equivalent* column base types are seen above
in `Base Data Types`_.
The color role will serve for our example types here:
**USD Color Role Attribute**
+++++++++++++++++++++++++++++
.. code-block:: usd
custom color3d inputs:myColorRole = (1.0, 0.5, 1.0)
**C++ Color Role Attribute**
+++++++++++++++++++++++++++++
.. code-block:: c++
static bool compute(OgnMyNodeDatabase& db)
{
const GfVec3d& colorValue = db.inputs.myColorRole();
// or const auto& colorValue = db.inputs.myColorRole();
}
**CUDA Color Role Type**
++++++++++++++++++++++++
.. code-block:: c++
extern "C" runCUDAcompute(pxr::GfVec3d* color);
static bool compute(OgnMyNodeDatabase& db)
{
runCUDAcompute( db.inputs.myColorRole() );
}
.. code-block:: c++
extern "C" void runCUDAcompute(double3* color)
{
}
**Python Color Role Attribute Hint**
+++++++++++++++++++++++++++++++++++++
.. code-block:: python
import numpy as np
@property
def myColorRole(self) -> np.ndarray:
return attributeValues.myColorRole
**JSON Color Role Attribute**
++++++++++++++++++++++++++++++
.. code-block:: json
{
"myNode" : {
"description" : ["This is my node with one color role input"],
"version" : 1,
"inputs" : {
"myColorRole" : {
"description" : ["This is color role attribute"],
"type" : "colord[3]",
"default" : [0.0, 0.5, 1.0]
}
}
}
}
Bundle Type Attributes
----------------------
There is a special type of attribute whose type is *bundle*. This attribute represents a set of attributes whose
contents can only be known at runtime. It can still be in a tuple, array, or both. In itself it has no data in Fabric.
Its purpose is to be a container to a description of other attributes of any of the above types, or even
other bundles.
**USD Bundled Attribute**
++++++++++++++++++++++++++
.. code-block:: usd
custom rel inputs:inBundle (
doc="""The input bundle is a relationship, which could come from a prim or another bundle attribute""")
def Output "outputs_outBundle" (
doc="""Output bundles are represented as empty prims, with any namespace colons replaced by underscores""")
{
}
**C++ Bundled Attribute**
++++++++++++++++++++++++++
.. code-block:: c++
static bool compute(OgnMyNodeDatabase& db)
{
// The simplest method of breaking open a bundle is to get an attribute by name
const auto& inBundle = db.inputs.inBundle();
auto myFloat3Attribute = inBundle.attributeByName(db.stringToToken("myFloat3"));
if (auto asFloat3Array = myFloat3Attribute.get<float[][3]>())
{
handleFloat3Array(asFloat3Array); // The data is of type float[][3]
}
// The bundle has iteration capabilities
for (auto& bundledAttribute : inBundle)
{
// Use the type information to find the actual data type and then cast it
if ((bundledAttribute.type().baseType == BaseDataType::eInt)
&& (bundledAttribute.type().componentCount == 1)
&& (bundledAttribute.type().arrayDepth == 0))
{
CARB_ASSERT( nullptr != bundledAttribute.get<int>() );
}
}
}
See the tutorials on :ref:`ogn_tutorial_bundle_manipulation` and :ref:`ogn_tutorial_bundle_data` for more
details on manipulating the bundle and its attributes.
**CUDA Bundled Attribute**
+++++++++++++++++++++++++++
.. code-block:: c++
extern "C" runCUDAcompute(float3* value, size_t iSize);
static bool compute(OgnMyNodeDatabase& db)
{
const auto& myBundle = db.inputs.myBundle();
auto myFloat3Attribute = myBundle.attributeByName(db.stringToToken("myFloat3"));
if (auto asFloat3Array = myFloat3Attribute.get<float[][3]>())
{
runCUDAcompute(asFloat3Array.data(), asFloat3Array.size());
}
}
.. code-block:: c++
extern "C" void runCUDAcompute(float3** value, size_t iSize)
{
}
**Python Bundled Attribute Hint**
++++++++++++++++++++++++++++++++++
.. code-block:: python
from typing import Union
from omni.graph.core.types import AttributeTypes, Bundle, BundledAttribute
@property
def myBundle(self) -> Bundle:
return attributeValues.myBundle
attribute_count = myNode.myBundle.attribute_count()
for bundled_attribute in myNode.myBundle.attributes():
if bundled_attribute.type.base_type == AttributeTypes.INT:
deal_with_integers(bundled_attribute.value)
See the tutorials on :ref:`ogn_tutorial_bundle_manipulation` and :ref:`ogn_tutorial_bundle_data` for more
details on manipulating the bundle and its attributes.
**JSON Bundled Attribute**
++++++++++++++++++++++++++
.. code-block:: json
{
"myNode" : {
"description" : ["This is my node with one bundled input"],
"version" : 1,
"inputs" : {
"myBundle" : {
"description" : ["This is input bundle attribute"],
"type" : "bundle"
}
}
}
}
It's worth noting here that as a bundle does not represent actual data these attributes are not allowed to have a
default value.
If a bundle attribute is defined to live on the GPU, either at all times or as a decision at runtime, this is
equivalent to stating that any attributes that exist inside the bundle will be living on the GPU using the same
criteria.
Extended Attribute Types
------------------------
Some attribute types are only determined at runtime by the data they receive. These types include the "any" type, which
is a single attribute that can be any of the above types, and the "union" type, which specifies a subset of the above
types it can take on. (The astute will notice that "union" is a subset of "any".)
Extended attribute types allow a single node to handle several different attribute-type configurations. For example a
generic 'Cos' node may be able to compute the cosine of any decimal type.
**USD Extended Attribute**
++++++++++++++++++++++++++
.. code-block:: usd
custom token inputs:floatOrInt
custom token inputs:floatArrayOrIntArray
custom token inputs:anyType
**C++ Extended Attribute**
++++++++++++++++++++++++++
.. code-block:: c++
static bool compute(OgnMyNodeDatabase& db)
{
// Casting can be used to find the actual data type the attribute contains
const auto& floatOrInt = db.inputs.floatOrInt();
bool isFloat = (nullptr != floatOrInt.get<float>());
bool isInt = (nullptr != floatOrInt.get<int>());
// Different types are cast in the same way as bundle attributes
const auto& floatOrIntArray = db.inputs.floatOrIntArray();
bool isFloatArray = (nullptr != floatOrIntArray.get<float[]>());
bool isIntArray = (nullptr != floatOrIntArray.get<int[]>());
const auto& anyType = db.inputs.anyType();
std::cout << "Any type is " << anyType.type() << std::endl;
// Like bundled attributes, use the type information to find the actual data type and then cast it
if ((anyType.type().baseType == BaseDataType::eInt)
&& (anyType.type().componentCount == 1)
&& (anyType.type().arrayDepth == 0))
{
CARB_ASSERT( nullptr != anyType.get<int>() );
}
}
**CUDA Extended Attribute**
+++++++++++++++++++++++++++
.. code-block:: c++
extern "C" runCUDAcomputeFloat(float3* value, size_t iSize);
extern "C" runCUDAcomputeInt(int3* value, size_t iSize);
static bool compute(OgnMyNodeDatabase& db)
{
const auto& float3OrInt3Array = db.inputs.float3OrInt3Array();
if (auto asFloat3Array = float3OrInt3Array.get<float[][3]>())
{
runCUDAcomputeFloat(asFloat3Array.data(), asFloat3Array.size());
}
else if (auto asInt3Array = float3OrInt3Array.get<int[][3]>())
{
runCUDAcomputeInt(asInt3Array.data(), asInt3Array.size());
}
}
.. code-block:: c++
extern "C" void runCUDAcomputeFloat(float3** value, size_t iSize)
{
}
extern "C" void runCUDAcomputeInt(int3** value, size_t iSize)
{
}
**Python Extended Attribute Hint**
++++++++++++++++++++++++++++++++++
.. code-block:: python
from typing import List, Union
@property
def myIntOrFloatArray(self) -> List[Union[int, float]]:
return attributeValues.myIntOrFloatArray
**JSON Extended Attribute**
+++++++++++++++++++++++++++
.. code-block:: json
{
"myNode" : {
"description" : "This is my node with some extended inputs",
"version" : 1,
"inputs" : {
"anyType" : {
"description" : "This attribute accepts any type of data, determined at runtime",
"type" : "any"
},
"someNumber": {
"description": "This attribute accepts either float, double, or half values",
"type": ["float", "double", "half"]
},
"someNumberArray": {
"description": ["This attribute accepts an array of float, double, or half values.",
"All values in the array must be of the same type, like a regular array attribute."],
"type": ["float[]", "double[]", "half[]"]
}
}
}
}
**Extended Attribute Union Groups**
+++++++++++++++++++++++++++++++++++
As described above, union extended types are specified by providing a list of types in the OGN definition. These lists can become
quite long if a node can handle a large subset of the possible types. For convenience there are special type names that can be
used inside the JSON list to denote groups of types. For example:
.. code-block:: json
{
"myNode" : {
"description" : "This is my node using union group types",
"version" : 1,
"inputs" : {
"decimal" : {
"description" : "This attribute accepts double, float and half",
"type" : ["decimal_scalers"]
}
}
}
}
**List of Attribute Union Groups**
++++++++++++++++++++++++++++++++++
+-------------------------+------------------------------------------------------------------------------------------+
| Group Type Name | Type Members |
+=========================+==========================================================================================+
| integral_scalers | uchar, int, uint, uint64, int64, timecode |
+-------------------------+------------------------------------------------------------------------------------------+
| integral_tuples | int[2], int[3], int[4] |
+-------------------------+------------------------------------------------------------------------------------------+
| integral_array_elements | integral_scalers, integral_tuples |
+-------------------------+------------------------------------------------------------------------------------------+
| integral_arrays | arrays of integral_array_elements |
+-------------------------+------------------------------------------------------------------------------------------+
| integrals | integral_array_elements, integral_arrays |
+-------------------------+------------------------------------------------------------------------------------------+
| matrices | matrixd[3], matrixd[4], transform[4], frame[4] |
+-------------------------+------------------------------------------------------------------------------------------+
| decimal_scalers | double, float, half |
+-------------------------+------------------------------------------------------------------------------------------+
| decimal_tuples | double[2], double[3], double[4], float[2], float[3], float[4], half[2], half[3], half[4] |
| | |
| | colord[3], colord[4], colorf[3], colorf[4], colorh[3], colorh[4] |
| | |
| | normald[3], normalf[3], normalh[3] |
| | |
| | pointd[3], pointf[3], pointh[3] |
| | |
| | texcoordd[2], texcoordd[3], texcoordf[2], texcoordf[3], texcoordh[2], texcoordh[3] |
| | |
| | quatd[4], quatf[4], quath[4] |
| | |
| | vectord[3], vectorf[3], vectorh[3] |
+-------------------------+------------------------------------------------------------------------------------------+
| decimal_array_elements | decimal_scalers, decimal_tuples |
+-------------------------+------------------------------------------------------------------------------------------+
| decimal_arrays | arrays of decimal_array_elements |
+-------------------------+------------------------------------------------------------------------------------------+
| decimals | decimal_array_elements, decimal_arrays |
+-------------------------+------------------------------------------------------------------------------------------+
| numeric_scalers | integral_scalers, decimal_scalers |
+-------------------------+------------------------------------------------------------------------------------------+
| numeric_tuples | integral_tuples, decimal_tuples |
+-------------------------+------------------------------------------------------------------------------------------+
| numeric_array_elements | numeric_scalers, numeric_tuples, matrices |
+-------------------------+------------------------------------------------------------------------------------------+
| numeric_arrays | arrays of numeric_array_elements |
+-------------------------+------------------------------------------------------------------------------------------+
| numerics | numeric_array_elements, numeric_arrays |
+-------------------------+------------------------------------------------------------------------------------------+
| array_elements | numeric_array_elements, token |
+-------------------------+------------------------------------------------------------------------------------------+
| arrays | numeric_arrays, token[] |
+-------------------------+------------------------------------------------------------------------------------------+
**Extended Attribute Resolution**
+++++++++++++++++++++++++++++++++
Extended attributes are useful to improve the usability of nodes with different types. However the node author has an
extra responsibility to resolve the extended type attributes when possible in order to resolve possible ambiguity
in the graph. If graph connections are unresolved at execution, the node's computation will be skipped.
There are various helpful Python APIs for type resolution, including :py:func:`omni.graph.core.resolve_base_coupled` and
:py:func:`omni.graph.core.resolve_fully_coupled` which allow you to match unresolved inputs to resolved inputs.
.. code-block:: python
@staticmethod
def on_connection_type_resolve(node) -> None:
aattr = node.get_attribute("inputs:a")
resultattr = node.get_attribute("outputs:result")
og.resolve_fully_coupled([aattr, resultattr])
You can also define your own semantics for custom type resolution. The following node takes two decimals, a and b,
and returns their product. If one input is at a lower "significance" than the other, the less significant will be "promoted"
to prevent loss of precision. For example, if inputs are `float` and `double`, the output will be a `double`.
See :class:`omni.graph.core.Type` for more information about creating custom types.
.. code-block:: python
@staticmethod
def on_connection_type_resolve(node) -> None:
atype = node.get_attribute("inputs:a").get_resolved_type()
btype = node.get_attribute("inputs:b").get_resolved_type()
productattr = node.get_attribute("outputs:product")
producttype = productattr.get_resolved_type()
# we can only infer the output given both inputs are resolved and they are the same.
if (atype.base_type != og.BaseDataType.UNKNOWN and btype.base_type != og.BaseDataType.UNKNOWN
and producttype.base_type == og.BaseDataType.UNKNOWN):
if atype.base_type == btype.base_type:
base_type = atype.base_type
else:
decimals = [og.BaseDataType.HALF, og.BaseDataType.FLOAT, og.BaseDataType.DOUBLE]
try:
a_ix = decimals.index(atype.base_type)
except ValueError:
a_ix = -1
try:
b_ix = decimals.index(btype.base_type)
except ValueError:
b_ix = -1
if a_ix >= 0 or b_ix >= 0:
base_type = atype.base_type if a_ix > b_ix else btype.base_type
else:
base_type = og.BaseDataType.DOUBLE
productattr.set_resolved_type(og.Type(base_type, max(atype.tuple_count, btype.tuple_count),
max(atype.array_depth, btype.array_depth)))
See :ref:`ogn_tutorial_extended_types` for more examples on how to perform attribute resolution in C++ and Python.
.. _ogn_type_definition_overrides:
Type Definition Overrides
-------------------------
The generated types provide a default implementation you can use out of the box. Sometimes you might have your own
favorite library for type manipulation so you can provide a type definition configuration file that modifies the
return types used by the generated code.
There are four ways you can implement type overrides.
1. Use the `typeDefinitions` flag on the `generate_node.py` script to point to the file containing the configuration.
2. Use the `"typeDefinitions": "ConfigurationFile.json"` keyword in the .ogn file to point a single node to a configuration.
3. Use the `"typeDefinitions": {TypeConfigurationDictionary}` keyword in the .ogn file to implement simple targeted overrides in a single node.
4. Add the name of the type definitions file to your premake5.lua file in `get_ogn_project_information("omni/test", "ConfigurationFile.json")` to modify the types for every node in your extension.
The format used for the type definition information is the same for all methods. Here is a sample, with an embedded
explanation on how it is formatted.
.. literalinclude:: ../../../../source/extensions/omni.graph.tools/ogn_config/TypeConfigurationPod.json
:language: json
| 52,242 | reStructuredText | 49.089166 | 193 | 0.439397 |
omniverse-code/kit/exts/omni.graph.tools/docs/node_architects_guide.rst | .. _ogn_node_architects_guide:
OmniGraph Node Architects Guide
===============================
This outlines the code that will be generated behind the scenes by the node generator from a .ogn file.
This includes the API presented to the node that is outlined in :ref:`ogn_user_guide`.
As the details of the generated code are expected to change rapidly this guide does not go into specific details,
it only provides the broad strokes, expecting the developer to go to the either the generated code or the code
generation scripts for the current implementation details.
Helper Templates
----------------
A lot of the generated C++ code relies on templates defined in the helper files ``omni/graph/core/Ogn*.h``,
which contains code that assists in the registration and running of the node, classes to wrap the internal data used
by the node for evaluation, and general type conversion assistance.
None of the code in there is compiled so there is no ABI compatibility problem. This is a critical feature of these
wrappers.
See the files themselves for full documentation on what they handle.
C ABI Interface
---------------
The key piece of generated code is the one that links the external C ABI for the compute node with the
class implementing a particular node's evaluation algorithm. This lets the node writer focus on their business
logic rather than boilerplate ABI conformity details.
The actual ABI implementation for the *INodeType* interface is handled by the templated helper class
*OmniGraphNode_ABI* from ``OgnHelpers.h``. It implements all of the functions required by the ABI, then uses the
"tag-dispatching" technique to selectively call either the default implementation, or the one provided by the
node writer.
In turn, that class has a derived class of *RegisterOgnNode* (defined in ``OgnHelpers.h``).
It adds handling of the automatic registration and deregistration of node types.
Instantiation of the ABI helper class for a given node type is handled by the *REGISTER_OGN_NODE()* macro, required
at the end of every node implementation file.
Attribute Information Caching
-----------------------------
ABI access requires attribute information be looked up by name, which can cause a lot of inefficiency. To prevent
this, a templated helper class *IAttributeOgnInfo* is created for every attribute. It contains the attribute's
information, such as its name, its unique handle token, and default value (if any). This is information that is the
same for the attribute in every instance of the node so there is only one static instance of it.
Fabric Access
-------------
This is the core of the benefit provided by the .ogn generated interface. Every node has a *OgnMyNodeDatabase* class
generated for it, which contains accessors to all of the Fabric data in an intuitive form. The base class
*OmniGraphDatabase*, from ``OgnHelpers.h``, provides the common functionality for that access, including token
conversion, and access to the context and node objects provided by the ABI.
The attribute data access is accomplished with two or three pieces of data for every attribute.
1. A raw pointer to the data for that attribute residing in the Fabric
2. (optional, for arrays only) A raw pointer to the element count for the Fabric array data
3. A wrapper class, which will return a reference object from its *operator()* function
The data accessors are put inside structs named *inputs* and *outputs* so that data can be accessed by the actual
attribute's name, e.g. *db.inputs.myInputAttribute()*.
Accessing Data
++++++++++++++
The general approach to accessing attribute data on a node is to wrap the Fabric data in a class that is suited
to handle the movement of data between Fabric and the node in a way that is more natural to the node writer.
For consistency of access, the accessors all override the call operator (``operator()``) to return a wrapper class
that provides access to the data in a natural form. For example, simple POD data will return a direct reference to
the underlying Fabric data (e.g. a ``float&``) for manipulation.
Inputs typically provide const access only. The declaration of the return values can always be simplified by
using ``const auto&`` for inputs and ``auto&`` for outputs and state attributes.
More complex types will return wrappers tailored towards their own access. Where it gets tricky is with variable
sized attributes, such as arrays or bundles. We cannot rely on standard data types to manage them as Fabric is
the sole arbiter of memory management.
Such classes are returned as wrappers that operate as though they are standard types but are actually intermediates
for translating those operations into Fabric manipulation.
For example instead of retrieving arrays as raw ``float*`` or ``std::vector<float>`` they will be retrieved as the
wrapper class ``ogn::array`` or ``ogn::const_array``. This allows all the same manipulations as a ``std::vector<float>``
including iteration for use in the STL algorithm library, through Fabric interfaces.
If you are familiar with the concept of ``std::span`` an array can be thought of in the same way, where the raw data
is managed by Fabric.
Data Initialization
+++++++++++++++++++
All of this data must be initialized before the *compute()* method can be called. This is done in the constructor
of the generated database class, which is constructed by the ABI wrapper to the node's compute method.
Here's a pseudocode view of the calls that would result from a node compute request:
.. code-block:: text
OmniGraphNode_ABI<MyNode, MyNodeDatabase>::compute(graphContext, node);
OgnMyNodeDatabase db(graphContext, node);
getAttributesR for all input attributes
getAttributesW for all output attributes
getDataR for all input attributes
getElementCount for all input array attributes
getDataW for all output attributes
getElementCount for all output array attributes
return db.validate() ? OgnMyNode::compute(db) : false;
Input Validation
----------------
The node will have specified certain conditions required in order for the compute function to be valid. In the
simplest form it will require that a set of input and output attributes exist on the node. Rather than forcing every
node writer to check these conditions before they run their algorithm this is handled by a generated *validate()*
function.
This function checks the underlying Fabric data to make sure it conforms to the conditions required by the node.
In future versions this validation will perform more sophisticated tasks, such as confirming that two sets of input
arrays have the same size, or that attribute values are within the legal ranges the node can recognize.
Python Support
--------------
Python support comes in two forms - support for C++ nodes, and ABI definition for Python node implementations.
For any node written in C++ there is a Python accessor class created that contains properties for accessing the
attribute data on the node. The evaluation context is passed to the accessor class so it can only be created when
one is available.
The accessor is geared for getting and setting values on the attributes of a node.
.. code-block:: python
import omni.graph.core as og
from my.extension.ogn import OgnMyNodeDatabase
my_node = og.get_graph_by_path("/World/PushGraph").get_node("/World/PushGraph/MyNode")
my_db = OgnMyNodeDatabase(og.ContextHelper(), my_node)
print(f"The current value of my float attribute is {my_db.inputs.myFloat}")
my_db.inputs.myFloat = 5.0
.. important::
For Python node implementations the registration process is performed automatically when you load your extension by
looking for the ``ogn/`` subdirectory of your Python import path. The generated Python class performs all of the
underlying management tasks such as registering the ABI methods, providing forwarding for any methods you might have
overridden, creating attribute accessors, and initializing attributes and metadata.
In addition, all of your nodes will be automatically deregistered when the extension is disabled.
| 8,183 | reStructuredText | 51.461538 | 120 | 0.766834 |
omniverse-code/kit/exts/omni.graph.tools/docs/README.md | # OmniGraph Tools [omni.graph.tools]
This is where the functionality of the Omniverse Graph node generator lives. These are the scripts responsible for parsing .ogn files.
| 173 | Markdown | 42.499989 | 134 | 0.803468 |
omniverse-code/kit/exts/omni.graph.tools/docs/index.rst | OmniGraph Tools And .ogn Node Generator
#######################################
.. tabularcolumns:: |L|R|
.. csv-table::
:width: 100%
**Extension**: omni.graph.tools,**Documentation Generated**: |today|
The OmniGraph Tools extension contains general-purpose scripts for dealing with OmniGraph, mostly through the .ogn
file format.
If you are interested in writing nodes the best places to start are the :ref:`ogn_user_guide` or the
:ref:`ogn_reference_guide`.
.. toctree::
:maxdepth: 2
:caption: Contents
node_architects_guide.rst
ogn_user_guide.rst
ogn_reference_guide.rst
attribute_types.rst
ogn_code_samples_cpp.rst
ogn_code_samples_python.rst
ogn_generation_script.rst
CHANGELOG.md
| 733 | reStructuredText | 23.466666 | 114 | 0.683492 |
omniverse-code/kit/exts/omni.graph.tools/docs/ogn_code_samples_cpp.rst | .. _ogn_code_samples_cpp:
OGN Code Samples - C++
======================
This files contains a collection of examples for using the .ogn generated code from C++. There is no particular flow
to these examples, they are used as reference data for the :ref:`ogn_user_guide`.
.. contents::
.. _ogn_minimal_node_cpp:
Minimal C++ Node Implementation
-------------------------------
Every C++ node must contain an include of the file containing the generated database definition, and an implementation
of the ``compute`` method that takes the database as a parameter and returns a boolean indicating if the compute
succeeded.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-minimal
:end-before: end-minimal
:ref:`[Python Version]<ogn_minimal_node_py>`
.. _ogn_metadata_node_cpp:
Node Type Metadata Access
-------------------------
When node types have any metadata added to them they can be accessed through the ABI node type interface.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-node-metadata
:end-before: end-node-metadata
:ref:`[Python Version]<ogn_metadata_node_py>`
.. _ogn_node_with_icon_cpp:
Node Icon Location Access
-------------------------
Specifying the icon location and color information creates consistently named pieces of metadata that the UI can use to
present a more customized visual appearance.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-node-icon
:end-before: end-node-icon
:ref:`[Python Version]<ogn_node_with_icon_py>`
.. _ogn_scheduling_node_cpp:
Node Type Scheduling Hints
--------------------------
Specifying scheduling hints makes it easier for the OmniGraph scheduler to optimize the scheduling of node evaluation.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-node-scheduling
:end-before: end-node-scheduling
:ref:`[Python Version]<ogn_scheduling_node_python>`
.. _ogn_singleton_node_cpp:
C++ Singleton Node Types
------------------------
Specifying that a node type is a singleton creates a consistently named piece of metadata that can be checked to see
if multiple instances of that node type will be allowed in a graph or its child graphs. Attempting to create more than
one of such node types in the same graph or any of its child graphs will result in an error.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-node-singleton
:end-before: end-node-singleton
:ref:`[Python Version]<ogn_singleton_node_py>`
.. _ogn_tags_node_cpp:
Node Type Tags
--------------
Specifying the node tags creates a consistently named piece of metadata that the UI can use to present a more
friendly grouping of the node types to the user.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-node-tags
:end-before: end-node-tags
This example introduces some helper constants which contain strings set to the key values of special internal metadata.
These are metadata elements managed by the code generator. Using these name accessors ensures consistent access.
.. code-block:: text
kOgnMetadataAllowedTokens # On attributes of type token, a CSV formatted comma-separated list of potential legal values for the UI
kOgnMetadataCategories # On node types, contains a comma-separated list of categories to which the node type belongs
kOgnMetadataDescription # On attributes and node types, contains their description from the .ogn file
kOgnMetadataExtension # On node types, contains the extension that owns this node type
kOgnMetadataHidden # On attributes and node types, indicating to the UI that they should not be shown
kOgnMetadataIconPath # On node types, contains the file path to the node's icon representation in the editor
kOgnMetadataIconBackgroundColor # On node types, overrides the background color of the node's icon
kOgnMetadataIconBorderColor # On node types, overrides the border color of the node's icon
kOgnMetadataSingleton # On node types its presence indicates that only one of the node type may be created in a graph
kOgnMetadataTags # On node types, a comma-separated list of tags for the type
kOgnMetadataUiName # On attributes and node types, user-friendly name specified in the .ogn file
kOgnMetadataUiType # On certain attribute types, customize how the attribute is displayed on the property panel
:ref:`[Python Version]<ogn_tags_node_py>`
.. _ogn_tokens_node_cpp:
Token Access
------------
There are two accelerators for dealing with tokens. The first is the tokens that are predefined in the generated
code. The second include a couple of methods on the database that facilitate translation between strings and
tokens.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-tokens
:end-before: end-tokens
:ref:`[Python Version]<ogn_tokens_node_py>`
.. _ogn_uiName_node_cpp:
Node Type UI Name Access
------------------------
Specifying the node UI name creates a consistently named piece of metadata that the UI can use to present a more
friendly name of the node type to the user.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-node-uiName
:end-before: end-node-uiName
:ref:`[Python Version]<ogn_uiName_node_py>`
.. _ogn_simple_node_cpp:
Simple Attribute Data Type
--------------------------
Accessors are created on the generated database class that return a reference to the underlying attribute data, which
lives in Fabric. You can use ``auto&`` and ``const auto&`` type declarations to provide local names to clarify your
code, or you can specify the exact types that are referenced, so long as they are compatible. e.g. if you have a local
typedef ``Byte`` that is a ``uint8_t`` then you can use that type to get a reference to the data. This will be more
useful with more complex data types later.
.. tip::
For convenience in debugging the actual typedefs for the data are included in the database. They can be found
in the same location as the attributes, but with a *_t* suffix. For example the attribute ``db.outputs.length``
will have a corresponding typedef of ``db.outputs.length_t``, which resolves to ``int64_t``.
References to Fabric are actually done through lightweight accessor classes, which provide extra functionality
such as the ability to check if an attribute value is valid or not - ``compute()`` will not be called if any of
the required attributes are invalid. For example with the simple data types the accessor implements
``operator()`` to directly return a reference to the Fabric memory, as seen in the example.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-simple
:end-before: end-simple
.. tip::
As these accessor types may be subject to change it's best to avoid exposing their types (i.e. the data type of
``db.inputs.token``). If you must pass around attribute data, use the direct references returned from ``operator()``.
:ref:`[Python Version]<ogn_simple_node_py>`
.. _ogn_tuple_node_cpp:
Tuple Attribute Data Type
-------------------------
The wrappers for tuple-types are largely the same as for simple types. The main difference is the underlying data type
that they are wrapping. By default, the ``pxr::GfVec`` types are the types returned from ``operator()`` on tuple types.
Their memory layout is the same as their equivalent POD types, e.g. ``pxr::GfVec3f`` -> ``float[3]`` so you can cast
them to any other equivalent types from your favourite math library.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-tuple
:end-before: end-tuple
:ref:`[Python Version]<ogn_tuple_node_py>`
.. _ogn_role_node_cpp:
Role Attribute Data Type
------------------------
The wrappers for role-types are identical to their underlying tuple types. The only distinction between the types is
the value that will be returned from the wrapper's ``role()`` method.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-role
:end-before: end-role
:ref:`[Python Version]<ogn_role_node_py>`
.. _ogn_array_node_cpp:
Array Attribute Data Type
-------------------------
Array attributes are stored in a wrapper that mirrors the functionality of ``std::span``. The memory being managed
belongs to Fabric, with the wrapper providing the functionality to seamlessly manage the memory when the array size
changes (on writable attributes only).
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-array
:end-before: end-array
:ref:`[Python Version]<ogn_array_node_py>`
.. _ogn_tuple_array_node_cpp:
Tuple-Array Attribute Data Type
-------------------------------
The wrappers for tuple-array types are largely the same as for simple array types. The main difference is the
underlying data type that they are wrapping. See the :ref:`ogn_tuple_node_cpp` section for details on accessing
tuple data.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-tuple-array
:end-before: end-tuple-array
:ref:`[Python Version]<ogn_tuple_array_node_py>`
.. _ogn_string_node_cpp:
String Attribute Data Type
--------------------------
As the normal operations applied to strings involve size changes, and Fabric requires new allocations every time an
array's size changes, it is best to use a local variable for string manipulations rather than modifying the string
directly. Doing so also gives you the ability to access the wealth of string manipulation library functions.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-string
:end-before: end-string
:ref:`[Python Version]<ogn_string_node_py>`
.. _ogn_any_node_cpp:
Extended Attribute Data Type - Any
----------------------------------
Extended attributes, of type "any" and union, have data types that are resolved at runtime. This requires extra
information that identifies the resolved types and provides methods for extracting the actual data from the wrapper
around the extended attribute.
In C++ this is accomplished with prudent use of templated methods and operator overloads. The wrapper around the
extended attribute types uses this to provide access to a layered wrapper for the resolved data type, which in turn
provides wrapped access to the underlying Fabric functionality.
This is the documentation for the interface of the extended attribute wrapper class, taken directly from the
implementation file:
.. literalinclude:: ../../../../include/omni/graph/core/ogn/RuntimeAttribute.h
:language: cpp
:start-after: begin-extended-attribute-interface-description
:end-before: end-extended-attribute-interface-description
The first extended type, **any**, is allowed to resolve to any other attribute type.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-union
:end-before: end-union
:ref:`[Python Version]<ogn_any_node_py>`
.. _ogn_union_node_cpp:
Extended Attribute Data Type - Union
------------------------------------
The access pattern for the union attribute types is exactly the same as for the **any** type. There is just a tacit
agreement that the resolved types will always be one of the ones listed in the union type description.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-union
:end-before: end-union
:ref:`[Python Version]<ogn_union_node_py>`
.. _ogn_bundle_node_cpp:
Extended Attribute Type Resolution using `ogn::compute`
-------------------------------------------------------
As you might see, resolving attributes manually in C++ results in a lot of repeated code that is difficult to read.
The `ogn::compute` API, defined in `<omni/graph/core/ogn/ComputeHelpers.h>`, aims to make this easier for developers.
.. literalinclude:: ../../../../include/omni/graph/core/ogn/ComputeHelpers.h
:language: cpp
:start-after: begin-compute-helpers-interface-description
:end-before: end-compute-helpers-interface-description
For a complete code sample, see the implementation of the `OgnAdd` node:
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-compute-helpers
:end-before: end-compute-helpers
Bundle Attribute Data Type
--------------------------
Bundle attribute information is accessed the same way as information for any other attribute type. As an aggregate,
the bundle can be treated as a container for attributes, without any data itself.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-bundle
:end-before: end-bundle
:ref:`[Python Version]<ogn_bundle_node_py>`
.. _ogn_bundle_data_cpp:
When you want to get at the actual data, you use the bundle API to extract the runtime attribute accessors from the
bundle for those attributes you wish to process.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-bundle-data
:end-before: end-bundle-data
.. tip::
Although you access them in completely different ways the attributes that are bundle members use the same accessors
as the extended attribute types. See further information in :ref:`ogn_any_node_cpp`
This documentation for bundle access is pulled directly from the code. It removes the extra complication in the
accessors required to provide proper typing information for bundle members and shows the appropriate calls in the
bundle attribute API.
.. literalinclude:: ../../../../include/omni/graph/core/ogn/Bundle.h
:language: cpp
:start-after: begin-bundle-interface-description
:end-before: end-bundle-interface-description
:ref:`[Python Version]<ogn_bundle_data_py>`
.. _ogn_attribute_memory_type_cpp:
Attribute Memory Location
-------------------------
In C++ nodes the GPU calculations will typically be done by CUDA code. The important thing to remember for this code
is that the CPU, where the node ``compute()`` method lives, cannot dereference pointers in the GPU memory space.
For this reason, all of the APIs that provide access to attribute data values return pointers to the actual data
when it lives on the GPU.
For example, if an attribute has a float value
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-memory-type
:end-before: end-memory-type
:ref:`[Python Version]<ogn_attribute_memory_type_py>`
.. _ogn_node_cudaPointers_cpp:
Attribute CPU Pointers to GPU Data
----------------------------------
.. note::
Although this value takes effect at the attribute level the keyword is only valid at the node level. All
attributes in a node will use the same type of CUDA array pointer referencing.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-cuda-pointers
:end-before: end-cuda-pointers
:ref:`[Python Version]<ogn_node_cudaPointers_py>`
.. _ogn_node_categories_cpp:
Node Type Categories
--------------------
Categories are added as metadata to the node and can be accessed through the standard metadata interface.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-node-categories
:end-before: end-node-categories
:ref:`[Python Version]<ogn_node_categories_py>`
.. _ogn_metadata_attribute_cpp:
Attribute Metadata Access
-------------------------
When attributes have metadata added to them they can be accessed through the ABI attribute interface. This works
basically the same as with metadata on node types, just with different accessors.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-attribute-metadata
:end-before: end-attribute-metadata
:ref:`[Python Version]<ogn_metadata_attribute_py>`
.. _ogn_optional_cpp:
Required vs. Optional Attributes
--------------------------------
For most attributes the generated code will check to see if the attribute is valid before it calls the `compute()`
function. Optional attributes will not have this check made. If you end up using their value then you must make the
call to the `isValid()` method yourself first and react appropriately if invalid values are found. Further, the
existence of these attributes within the compute method is not guaranteed.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-optional
:end-before: end-optional
:ref:`[Python Version]<ogn_optional_py>`
.. caution::
Optional attributes may not even appear on the node. This puts the onus on the node writer to completely validate
the attributes, and provide their own ABI-based access to the attribute data. Use of such attributes should be rare.
.. _ogn_uiName_attribute_cpp:
Attribute UI Name Access
------------------------
Specifying the attribute **uiName** creates a consistently named piece of metadata that the UI can use to present a more
friendly version of the attribute name to the user. It can be accessed through the regular metadata ABI, with some
constants provided for easier access.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-attribute-uiName
:end-before: end-attribute-uiName
Another access point is shown here. As the attribute patterns defined in the .ogn are unchanging the generated code
provides access to the name of the attribute through a data member so that you don't have to replicate the string.
The access is through local namespaces **inputs::**, **outputs::**, and **state::**, mirroring the database structure.
.. code-block:: cpp
inputs::x.token() // The name of the input attribute "x" as a token
inputs::x.name() // The name of the input attribute "x" as a static string
:ref:`[Python Version]<ogn_uiName_attribute_py>`
.. _ogn_uiType_attribute_cpp:
Attribute Type UI Type Access
-----------------------------
Specifying the attribute **uiType** tells the property panel that this attribute should be shown with custom widgets.
- For path, string, and token attributes, a ui type of "filePath" will show file browser widgets
- For 3- and 4-component numeric tuples, a ui type of "color" will show the color picker widget
in the property panel.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-attribute-uiType
:end-before: end-attribute-uiType
:ref:`[Python Version]<ogn_uiType_attribute_py>`
.. _ogn_unvalidated_cpp:
Unvalidated Attributes
----------------------
For most attributes the generated code will check to see if the attribute is valid before it calls the `compute()`
function. Unvalidated attributes will not have this check made. If you end up using their value then you must make the
call to the `isValid()` method yourself first and react appropriately if invalid values are found. Further, for
attributes with extended types you must verify that they have successfully resolved to a legal type.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-unvalidated
:end-before: end-unvalidated
:ref:`[Python Version]<ogn_unvalidated_py>`
.. _ogn_state_node_cpp:
Nodes With Internal State
-------------------------
The easiest way to add internal state information is by adding data members to your node class. The presence of any
member variables automatically marks the node as having internal state information, allocating and destroying it
when the node itself is initialized and released. Once the internal state has been constructed it is not modified by
OmniGraph until the node is released, it is entirely up to the node how and when to modify the data.
The data can be easily retrieved with the templated method *internalState<>* on the database class.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-state-node
:end-before: end-state-node
.. note::
As access to internal state data is templated you can also store your state data in some external structure,
however when you do so you must also be sure to override the `setHasState()` ABI method to always return true.
Unless absolutely necessary it's always easier to make the state a member variable of your node.
:ref:`[Python Version]<ogn_state_node_py>`
.. _ogn_versioned_node_cpp:
Nodes With Version Upgrades
---------------------------
To provide code to upgrade a node from a previous version to the current version you must override the ABI function
`updateNodeVersion()`. The current context and node to be upgraded are passed in, as well as the old version at which
the node was created and the new version to which it should be upgraded. Passing both values allows you to upgrade
nodes at multiple versions in the same code.
This example shows how a new attribute is added using the *INode* ABI interface.
.. literalinclude:: ogn_code_samples_cpp.cpp
:language: cpp
:start-after: begin-versioned-node
:end-before: end-versioned-node
:ref:`[Python Version]<ogn_versioned_node_py>`
| 21,114 | reStructuredText | 38.615385 | 139 | 0.729469 |
omniverse-code/kit/exts/omni.graph.tools/docs/ogn_reference_guide.rst | .. _ogn_reference_guide:
OGN Reference Guide
===================
This is a detailed guide to the syntax of the .ogn file. All of the keywords supported are described in detail, and a
simplified JSON schema file is provided for reference.
Each of the described elements contains an example of its use in a .ogn file to illustrate the syntax.
For a more detailed guide on how each of the elements are used see the :ref:`ogn_user_guide`.
.. contents::
..
..
==================================== ====
Node Level Keywords Attribute Level Keywords
==================================== ====
:ref:`ogn_keyword_node_description` :ref:`ogn_keyword_attribute_description`
:ref:`ogn_keyword_node_exclude` :ref:`ogn_keyword_attribute_default`
:ref:`ogn_keyword_node_icon` :ref:`ogn_keyword_attribute_deprecated`
:ref:`ogn_keyword_node_memoryType` :ref:`ogn_keyword_attribute_memoryType`
:ref:`ogn_keyword_node_cudaPointers` :ref:`ogn_keyword_attribute_metadata`
:ref:`ogn_keyword_node_metadata` :ref:`maximum <ogn_keyword_attribute_range>`
:ref:`ogn_keyword_node_singleton` :ref:`minimum <ogn_keyword_attribute_range>`
:ref:`ogn_keyword_node_tags` :ref:`ogn_keyword_attribute_optional`
:ref:`ogn_keyword_node_tokens` :ref:`ogn_keyword_attribute_type`
:ref:`ogn_keyword_node_uiName` :ref:`ogn_keyword_attribute_uiName`
:ref:`ogn_keyword_node_version` :ref:`ogn_keyword_attribute_uiType`
:ref:`ogn_keyword_node_language` :ref:`ogn_keyword_attribute_unvalidated`
:ref:`ogn_keyword_node_scheduling`
:ref:`ogn_keyword_node_categories`
==================================== ====
------------
Basic Structure
---------------
See the :ref:`omnigraph_naming_conventions` for guidance on how to name your files, nodes, and attributes.
.. code-block:: json
{
"NodeName": {
"NODE_PROPERTY": "NODE_PROPERTY_VALUE",
"inputs": "ATTRIBUTE_DICTIONARY",
"outputs": "ATTRIBUTE_DICTIONARY",
"state": "ATTRIBUTE_DICTIONARY",
"tests": "TEST_DATA"
}
}
The :ref:`NODE_PROPERTY<ogn_node_property_keywords>` values are keywords recognized at the node level. The values
in ``NODE_PROPERTY_VALUE`` will vary based on the specific keyword to which they pertain.
The :ref:`ATTRIBUTE_DICTIONARY<ogn_attribute_dictionaries>` sections contain all of the information required to
define the subset of attributes, each containing a set of :ref:`ogn_attribute_property_keywords` that describe the
attribute.
Lastly the :ref:`TEST_DATA<ogn_test_data>` section contains information required to construct one or more Python
tests that exercise the basic node operation.
Comments
--------
JSON files do not have a syntax for adding comments, however in order to allow for adding descriptions or
disabled values to a .ogn file the leading character "$" will treat the key in any key/value pair as a
comment. So while ``"description":"Hello"`` will be treated as a value to be added to the node definition,
``"$description":"Hello"`` will be ignored and not parsed.
Comments can appear pretty much anywhere in your file. They are used extensively in the :ref:`ogn_tutorial_nodes` to
describe the file contents.
.. literalinclude:: ogn_example.json
:language: json
:lines: 1-4
:emphasize-lines: 3
.. _ogn_node_property_keywords:
Node Property Keywords
----------------------
These are the elements that can appear in the `NODE_PROPERTY` section. The values they describe pertain to the node
type as a whole.
.. _ogn_keyword_node_description:
.. rubric:: description
The *description* key value is required on all nodes and will be used in the generated documentation of the node.
You can embed reStructuredText code in the string to be rendered in the final node documentation, though it will
appear as-is in internal documentation such as Python docstrings.
The value can be a string or a list of strings. If it is a list, they will be concatenated as appropriate in the
locations they are used. (Linefeeds preserved in Python docstrings, turned into a single space for text documentation,
prepended with comment directives in code...)
.. tip::
This mandatory string should inform users exactly what function the node performs, as concisely as possible.
.. literalinclude:: ogn_example.json
:language: json
:lines: 1-8
:emphasize-lines: 4-7
.. _ogn_keyword_node_version:
.. rubric:: version
The integer value *version* defines the version number of the current node definition. It is up to the node writer
how to manage the encoding of version levels in the integer value. (For example a node might encode a major version
of 3, a minor version of 6, and a patch version of 12 in two digit groups as the integer 30612, or it might simply
use monotonic increasing values for versions 1, 2, 3...)
.. tip::
This mandatory value can be anything but by convention should start at 1.
.. literalinclude:: ogn_example.json
:language: json
:lines: 4-9
:emphasize-lines: 5
.. _ogn_keyword_node_exclude:
.. rubric:: exclude
Some node types will not be interested in all generated files, e.g. if the node is a Python node it will not need the
C++ interface. Any of the generated files can be skipped by including it in a list of strings whose key is *exclude*.
Here is a node which excludes all generated output, something you might do if you are developing the description of a
new node and just want the node syntax to validate without generating code.
Legal values to include in the exclusion list are **"c++"**, **"docs"**, **"icon"**, **"python"**, **"template"**,
**"tests"**, or **"usd"**, in any combination.
.. note::
C++ is automatically excluded when the implementation language is Python, however when the implementation language
is C++ there will still be a Python interface class generated for convenience. It will have less functionality
than for nodes implemented in Python and is mainly intended to provide an easy interface to the node from Python
scripts.
.. literalinclude:: ogn_example.json
:language: json
:lines: 8-10
:emphasize-lines: 2
.. _ogn_keyword_node_icon:
.. rubric:: icon
A string value that represents the path, relative to the .ogn file, of the icon file that represents the node. This
icon should be a square SVG file. If not specified then it will default to the file with the same name as the *.ogn*
file with the *.svg* extension (e.g. *OgnMyNode.ogn* looks for the file *OgnMyNode.svg*). When no icon file exists the
UI can choose a default for it. The icon will be installed into the extension's generated *ogn/* directory
.. literalinclude:: ogn_example.json
:language: json
:lines: 85-87
:dedent: 4
:emphasize-lines: 2
The extended syntax for the icon description adds the ability to specify custom coloring. Instead of just a string path,
the icon is represented by a dictionary of icon properties. Allowed values are **"path"**, the icon location as with
the simple syntax, **"color"**, a color representation for the draw part of the icon's shape, **"backgroundColor"**,
a color representation for the part of the icon not containing its shape, and **"borderColor"**, a color
representation for the outline of the icon.
Colors are represented in one of two ways - as hexadecimal in the form **#AABBGGRR**, or as a decimal list of
**[R, G, B, A]**, both using the value range [0, 255].
.. literalinclude:: ogn_example.json
:language: json
:lines: 90-97
:dedent: 4
:emphasize-lines: 2-7
.. note::
Unspecified colors will use the defaults. An unspecified path will look for the icon in the default location.
.. _ogn_keyword_node_language:
.. rubric:: language
A string value that represents the language of implementation. The default when not specified is **"c++"**. The
other legal value is **"python"**. This value indicates the language in which the node compute algorithm is
written.
.. literalinclude:: ogn_example.json
:language: json
:lines: 9-11
:emphasize-lines: 2
.. _ogn_keyword_node_memoryType:
.. rubric:: memoryType
Nodes can be written to work on the CPU, GPU via CUDA, or both. For each case the data access has to take this into
account so that the data comes from the correct memory store. The valid values for the *memoryType* property are
*cpu*, *cuda*, and *any*. The first two mean that by default all attribute values on the node are exclusively in either
CPU or CUDA-specific GPU memory. *any* means that the node could run on either CPU or GPU, where the decision of which
to use happens at runtime. The default value is *cpu*.
.. literalinclude:: ogn_example.json
:language: json
:lines: 10-12
:emphasize-lines: 2
.. _ogn_keyword_node_categories:
.. rubric:: categories
Categories provide a way to group similar node types, mostly so that they can be managed easier in the UI.
.. literalinclude:: ogn_example.json
:language: json
:lines: 305-306
:emphasize-lines: 2
For a more detailed example see the :ref:`omnigraph_node_categories` "how-to".
.. _ogn_keyword_node_cudaPointers:
.. rubric:: cudaPointers
Usually when the memory type is set to *cuda* or *any* the CUDA memory pointers for array types are returned as a
GPU pointer to GPU data, so when passing the data to CUDA code you have to pass pointers-to-pointers, since the
CPU code cannot dereference them. Sometimes it is more efficient to just pass the GPU pointer directly though,
pointed at by a CPU pointer. (It's still a pointer to allow for null values.) You can do this by specifying *"cpu"*
as your *cudaPointers* property.
.. literalinclude:: ogn_example.json
:language: json
:lines: 230-234
:emphasize-lines: 4
.. note::
You can also specify *"cuda"* for this value, although as it is the default this has no effect.
.. _ogn_keyword_node_metadata:
.. rubric:: metadata
Node types can have key/value style metadata attached to them by adding a dictionary of them using the *metadata*
property. The key and value are any arbitrary string, though it's a good idea to avoid keywords starting
with underscore (**_**) as they may have special meaning to the graph. Lists of strings can also be used as metadata
values, though they will be transformed into a single comma-separated string.
A simple example of useful metadata is a human readable format for your node type name. UI code can then read the
consistently named metadata to provide a better name in any interface requiring node type selection. In the example
the keyword *author* is used.
.. literalinclude:: ogn_example.json
:language: json
:lines: 11-15
:emphasize-lines: 2-4
.. tip::
There are several hardcoded metadata values, described in this guide. The keywords under which these are stored
are available as constants for consistency, and can be found in Python in the og.MetadataKeys object and in C++
in the file *omni/graph/core/ogn/Database.h*.
.. _ogn_keyword_node_scheduling:
.. rubric:: scheduling
A string or list of string values that represent information for the scheduler on how nodes of this type may be safely
scheduled. The string values are fixed, and say specific things about the kind of data the node access when
computing.
.. literalinclude:: ogn_example.json
:language: json
:lines: 182-184
:emphasize-lines: 2
.. literalinclude:: ogn_example.json
:language: json
:lines: 187-189
:emphasize-lines: 2
The strings accepted as values in the .ogn file are described below (extracted directly from the code)
.. literalinclude:: ../../../../source/extensions/omni.graph.tools/python/_impl/node_generator/parse_scheduling.py
:language: python
:start-after: begin-scheduling-hints
:end-before: end-scheduling-hints
.. _ogn_keyword_node_singleton:
.. rubric:: singleton
**singleton** is metadata with special meaning to the node type, so as a shortcut it can also be specified as its own
keyword at the node level. The meaning is the same; associate a piece of metadata with the node type. This piece of
metadata indicates the quality of the node type of only being able to instantiate a single node of that type in a graph
or its child graphs. The value is specified as a boolean, though it is stored as the string "1". (If the boolean is
false then nothing is stored, as that is the default.)
.. literalinclude:: ogn_example.json
:language: json
:lines: 111-113
:emphasize-lines: 2
:dedent: 4
.. _ogn_keyword_node_tags:
.. rubric:: tags
**tags** is a very common piece of metadata, so as a shortcut it can also be specified as its own keyword at the
node level. The meaning is the same; associate a piece of metadata with the node type. This piece of
metadata can be used by the UI to better organize sets of nodes into common groups.
.. literalinclude:: ogn_example.json
:language: json
:lines: 15-17
:emphasize-lines: 2
.. tip::
Tags can be either a single string, a comma-separated string, or a list of strings. They will all be represented
as a comma-separated string in the metadata.
.. _ogn_keyword_node_tokens:
.. rubric:: tokens
Token types are more efficient than string types for comparison, and are fairly common. For that reason the .ogn
file provides this shortcut to predefine some tokens for use in your node implementation code.
The simplest method of adding tokens is to add a single token string.
.. literalinclude:: ogn_example.json
:language: json
:lines: 12-16
:emphasize-lines: 4
If you have multiple tokens then you can instead specify a list:
.. literalinclude:: ogn_example.json
:language: json
:lines: 71-75
:emphasize-lines: 4
:dedent: 4
The lookup is the same:
Lastly, if the token value contains illegal names for C++ or Python variables you can specify tokens in a dictionary,
where the key is the name through which it will be accessed and the value is the actual token string:
.. literalinclude:: ogn_example.json
:language: json
:lines: 78-82
:emphasize-lines: 4
:dedent: 4
See the :ref:`ogn_user_guide` for information on how to access the different sets of token in your code.
.. _ogn_keyword_node_uiName:
.. rubric:: uiName
**uiName** is a very common piece of metadata, so as a shortcut it can also be specified as its own keyword at the
node level. The meaning is the same; associate a piece of metadata with the node type. This piece of
metadata can be used by the UI to present a more human-readable name for the node type.
.. literalinclude:: ogn_example.json
:language: json
:lines: 16-17
:emphasize-lines: 2
.. tip::
Unlike the actual name, the uiName has no formatting or uniqueness requirements. Choose a name that will make
its function obvious to a user selecting it from a list.
.. _ogn_attribute_dictionaries:
Attribute Dictionaries
----------------------
Each of the three attribute sections, denoted by the keywords `inputs`, `outputs`, and `state`, contain a list of
attributes of each respective location and their properties.
:**inputs**:
Attributes that are read-only within the node's compute function. These form the collection of data used to run
the node's computation algorithm.
:**outputs**:
Attributes whose values are generated as part of the computation algorithm. Until the node computes their values
they will be undefined. This data is passed on to other nodes in the graph, or made available for inspection.
:**state**:
Attributes that persist between one evaluation and the next. They are both readable and writable. The primary
difference between **state** attributes and **output** attributes is that when you set the value on a **state**
attribute that value is guaranteed to be there the next time the node computes. Its data is entirely owned by
the node.
.. literalinclude:: ogn_example.json
:language: json
:lines: 19-25
:emphasize-lines: 2,4,6
:dedent: 4
.. note::
If there are no attributes of a specific location then that section can simply be omitted.
.. _ogn_attribute_property_keywords:
Attribute Property Keywords
---------------------------
The top level keyword of the attribute is always the unique name. It is always namespaced within the section it
resides and only need be unique within that section. For example, the attribute ``mesh`` can appear in both the
``inputs`` and ``outputs`` sections, where it will be named ``inputs:mesh`` and ``outputs:mesh`` respectively.
Attribute Properties
++++++++++++++++++++
Like the outer node level, each of the attributes has a set of mandatory and optional attributes.
.. _ogn_keyword_attribute_description:
.. rubric:: description
As with the node, the *description* field is a multi-line description of the attribute, optionally with reStructuredText
formatting. The description should contain enough information for the user to know how that attribute will be used
(as an input), computed (as an output), or updated (as state).
.. tip::
This mandatory string should inform users exactly what data the attribute contains, as concisely as possible.
.. literalinclude:: ogn_example.json
:language: json
:lines: 27-30
:emphasize-lines: 3
.. _ogn_keyword_attribute_type:
.. rubric:: type
The *type* property is one of several hard-coded values that specify what type of data the attribute contains. As we
ramp up not all type combinations are supported; run `generate_node.py --help` to see the currently supported list
of attribute types. For a full list of supported types and the data types they generate see :ref:`ogn_attribute_types`.
.. tip::
This field is mandatory, and will help determine what type of interface is generated for the node.
.. literalinclude:: ogn_example.json
:language: json
:lines: 28-31
:emphasize-lines: 3
.. _ogn_keyword_attribute_default:
.. rubric:: default
The *default* property on inputs contains the value of the attribute that will be used when the user has not explicitly
set a value or provided an incoming connection to it. For outputs the default value is optional and will only be used
when the node compute method cannot be run.
The value type of the *default* property will be the JSON version of the type of data, shown in
:ref:`ogn_attribute_types`.
.. literalinclude:: ogn_example.json
:language: json
:lines: 30-32
:emphasize-lines: 2
.. tip::
Although input attributes should all have a default, concrete data types need not have a default set if the intent
is for them to have their natural default. It will be assigned to them automatically. e.g. 0 for "int",
[0.0, 0.0, 0.0] for "float[3]", false for "bool", and "[]" for any array types.
.. warning::
Some attribute types, such as "any" and "bundle", have no well-defined data types and cannot have a default set.
.. _ogn_keyword_attribute_deprecated:
.. rubric:: deprecated
The *deprecated* property is used to indicate that the attribute is being phased out and should no longer be used.
The value of the property is a string or array of strings providing users with information on
how they should change their graphs to accommodate the eventual removal of the attribute.
.. literalinclude:: ogn_example.json
:language: json
:lines: 272-289
:emphasize-lines: 7,14-17
.. _ogn_keyword_attribute_optional:
.. rubric:: optional
The *optional* property is used to tell the node whether the attribute's value needs to be present in order for the
compute function to run. If it is set to `true` then the value is not checked before calling compute. The default
value `false` will not call the compute function if the attribute does not have a valid value.
.. literalinclude:: ogn_example.json
:language: json
:lines: 31-33
:emphasize-lines: 2
.. _ogn_keyword_attribute_memoryType:
.. rubric:: memoryType
By default every attribute in a node will use the *memoryType* defined
:ref:`at the node level<ogn_keyword_node_memoryType>`. It's possible for attributes
to override that choice by adding that same keyword in the attribute properties.
Here's an example of an attribute that overrides the node level memory type to force the attribute onto
the CPU. You might do this to keep cheap POD values on the CPU while the expensive data arrays go directly to the GPU.
.. literalinclude:: ogn_example.json
:language: json
:lines: 32-34
:emphasize-lines: 2
.. _ogn_keyword_attribute_range:
.. rubric:: minimum/maximum
When specified, these properties represent the minimum and maximum allowable value for the attribute. For arrays the
values are applicable to every array element. For tuples the values will themselves be tuples with the same size.
.. literalinclude:: ogn_example.json
:language: json
:lines: 33-36
:emphasize-lines: 2-3
.. note::
These properties are only valid for the numeric attribute types, including tuples and arrays.
At present they are not applied at runtime, only for validating test and default values within the .ogn file,
however in the future they may be saved so it is always a good idea to specify values here when applicable.
.. _ogn_keyword_attribute_metadata:
.. rubric:: metadata
Attributes can also have key/value style metadata attached to them by adding a dictionary of them using the
*metadata* property. The key and value are any arbitrary string, though it's a good idea to avoid keywords starting
with underscore (**_**) as they may have special meaning to the graph. Lists of strings can also be used as metadata
values, though they will be transformed into a single comma-separated string.
.. literalinclude:: ogn_example.json
:language: json
:lines: 35-39
:emphasize-lines: 2-4
There are a number of attribute metadata keys with special meanings:
============= ====
allowedTokens Used only for attributes of type *token* and contains a list of the values that token is allowed to take.
.. literalinclude:: ogn_example.json
:language: json
:lines: 192-200
:emphasize-lines: 5-7
Sometimes you may wish to have special characters in the list of allowed tokens. The generated code uses the token
name for easy access to its values so in these cases you will have to also supply a corresponding safe name for the
token value through which the generated code will access it.
.. literalinclude:: ogn_example.json
:language: json
:lines: 203-215
:emphasize-lines: 5-11
In both cases you would access the token values through the database members ``db.tokens.lt``,
``db.tokens.gt``, and ``db.tokens.ne``.
hidden This is a hint to the application that the attribute should be hidden from the user.
.. literalinclude:: ogn_example.json
:language: json
:lines: 248-257
:emphasize-lines: 6-8
Less commonly used attributes are often hidden to declutter the UI and the application may provide a
mechanism to allow the user to display them on request. For example, in the Create application hidden
attributes are not displayed in its Graph windows but do appear in its Property window from where their
hidden state can be toggled off and on.
internal Marks an attribute which is for internal use only. An application would not normally display the
attribute to users or allow them to interact with it through its UI.
.. literalinclude:: ogn_example.json
:language: json
:lines: 260-269
:emphasize-lines: 6-8
literalOnly Indicates that the value of the attribute can only be set to a literal. In other words, if an attribute
has **literalOnly** set to 1, then it cannot be connected to other attributes, so the only way to modify
the value of the attribute is to set its value to a literal. A typical use case is the input attributes
of event source nodes. Since the action evaluator does not evaluate nodes upstream of an event source
node, the input attributes of event source nodes should not be allowed to connect to upstream nodes, so
they should be declared as **literalOnly**.
.. literalinclude:: ogn_example.json
:language: json
:lines: 237-245
:emphasize-lines: 5-7
outputOnly Used with an input attribute which can be the source of output connections but should not be the
target of input connections. Typically an application will allow input attributes to take their value
from an incoming connection or to be set by the user through the UI if they don't have an incoming
connection. The application may also disallow outbound connections. Setting **outputOnly** to 1 is a
hint to the application that it should continue to allow the user to set the attribute's value through
its UI but disallow incoming connections and enable outgoing connections. A typical use for this is with
a "constant" node which allows the user to enter a constant value which can then be passed on to other
nodes via output connections.
.. literalinclude:: ogn_example.json
:language: json
:lines: 218-227
:emphasize-lines: 6-8
============= ====
.. _ogn_keyword_attribute_uiName:
.. rubric:: uiName
**uiName** is a very common piece of metadata, so as a shortcut it can also be specified as its own keyword at the
attribute level. The meaning is the same; associate a piece of metadata with the attribute. This piece of
metadata can be used by the UI to present a more human-readable name for the attribute.
.. literalinclude:: ogn_example.json
:language: json
:lines: 37-40
:emphasize-lines: 3
.. tip::
Unlike the actual name, the uiName has no formatting or uniqueness requirements. Choose a name that will make
its function obvious to a user selecting it from a list. The UI may or may not include the namespace when
displaying it so if that distinction is critical, include it in the uiName.
.. _ogn_keyword_attribute_uiType:
.. rubric:: uiType
**uiType** is used to provide a hint to the property panel as to how the property should be displayed. When "filePath"
is specified, string and token fields will create file browser widgets in the property panel. When "color" is
specified, 3- and 4-component tuples will use a color picker widget in the property panel.
.. literalinclude:: ogn_example.json
:language: json
:lines: 298-300
:emphasize-lines: 1
.. _ogn_keyword_attribute_unvalidated:
.. rubric:: unvalidated
**unvalidated** is similar to the **optional** keyword, in that it is used to tag attributes that may not take part
in a ``compute()``. The difference is that these attributes will always exist, they just may not have valid data
when the compute is invoked. For such attributes the onus is on the node writer to check validity of such attributes
if they do end up being used for the compute.
.. literalinclude:: ogn_example.json
:language: json
:lines: 164-179
:emphasize-lines: 9,14
:dedent: 4
.. _ogn_test_data:
Test Definitions
----------------
The node generator is also capable of automatically generating some unit tests on the operation of the node's
algorithm through the **tests** property. This property contains a list of dictionaries, where each entry in the
list is a dictionary of `attribute name : attribute value`.
The test runs by setting all of the input attributes to their corresponding values in the dictionary, executing the
node's compute algorithm, and then comparing the computed values of the outputs against their corresponding values in
the dictionary.
.. note::
When input attributes do not have a value in the tests their default is used. When output attributes do not have a
value in the test they are not checked against the computed result.
There are two methods of specifying test data. They are equivalent so you can use the one that makes the most sense
for your particular test cases. They can coexist if you have different types of test data.
The first method is using a single dictionary to specify any non-default attribute values.
.. literalinclude:: ogn_example.json
:language: json
:lines: 44-47
:emphasize-lines: 2-3
This example shows test cases to exercise a simple node that adds two integers together. The first test says *if the
node has inputs 1 and 2 the output should be 3* and the second one says *if the node has an input of 5 and a default
valued input the output should be 5* (the defaults have been set to 0).
For a more complex text you can specify the data involved in the test by location instead of all in a single
dictionary. Here's a similar example for an 8 dimensional polynomial solver.
.. literalinclude:: ogn_example.json
:language: json
:lines: 46-61
:emphasize-lines: 2-15
CPU/GPU Data Switch
+++++++++++++++++++
There is one special piece of test configuration that applies when you have output data that switches at runtime
between CPU and GPU (usually through some sort of input information, such as a boolean switch, or a limit on data
size). This information tells the test generator that an output should be expected on the GPU instead of the default
CPU location.
.. literalinclude:: ogn_example.json
:language: json
:lines: 61-67
:emphasize-lines: 6
This example illustrates testing of an output on GPU, where the GPU location is selected at runtime based on the
attribute *inputs:isGpu*.
Extended Type Test Data
+++++++++++++++++++++++
If your node has extended-type attributes, you will have to specify which type you want to use for your test. This is
necessary to distinguish between types which aren't supported by JSON. For example double, float and half.
.. literalinclude:: ogn_example.json
:language: json
:lines: 68-72
:emphasize-lines: 3-4
This example has one input "multiplier" which is a specific decimal type, input "element" and output "tuple" which
are extended-types. "element" and "tuple" are related by the logic of the node in that their base types must match, so
the test is specifying the same type for those attributes.
For tests that deal with extended data types you must also specify the data type with the value to be set so that it
can properly resolve. (The other way of resolving data types is by connection. These simple tests only involve a single
node so resolution can only be done by setting values directly.)
.. literalinclude:: ogn_example.json
:language: json
:lines: 105-108
:emphasize-lines: 2-3
State Test Data
+++++++++++++++
In the special case of tests that need to exercise state data extra syntax is added to differentiate between values
that should be set on state attributes before the test starts and values that are checked on them after the test is
completed. The special suffix *_set* is appended to the state namespace to signify that a value is to be initialized
before a test runs. You may optionally add the suffix *_get* to the state namespace to clarify which values are to be
checked after the test runs but that is the default so it is not necessary.
.. literalinclude:: ogn_example.json
:language: json
:lines: 116-135
:emphasize-lines: 2-19
Test Graph Setup
++++++++++++++++
For more complex situations you may need more than just a single node to test code paths properly. For these situations
there is a pre-test setup section you can add, in the form of the :ref:`Controller.edit<ogn_omnigraph_controller>`
function parameters. Only the creation directives are accepted, not the destructive ones such as *disconnections*.
These creation directives are all executed by the test script before it starts to run, providing you with a known
starting graph configuration consisting of any nodes, prims, and connections needed to run the test.
.. literalinclude:: ogn_example.json
:language: json
:lines: 138-162
:emphasize-lines: 9-23
.. note::
If you define the graph in this way then the first node in your *"nodes"* directives must refer to the node
being tested. If there is no graph specified then a single node of the type being tested will be the sole
contents of the stage when running the test.
Simplified Schema
=================
This schema outlines the relationships between all of the values in the .ogn file. It is simplified in that it does
not include schema information that validates data types in *default*, *minimum*, *maximum*, and test section fields.
(It's possible to include such information in a schema, it just makes it very difficult to follow.)
.. literalinclude:: ogn_schema.json
:linenos:
:language: json
| 33,306 | reStructuredText | 42.199741 | 131 | 0.720801 |
omniverse-code/kit/exts/omni.graph.tools/docs/Overview.md | # OmniGraph Tools And .ogn Node Generator
```{csv-table}
**Extension**: omni.graph.tools,**Documentation Generated**: {sub-ref}`today`
```
The OmniGraph Tools extension contains general purpose scripts for dealing with OmniGraph, mostly through the .ogn file format.
If you are interested in writing nodes the best places to start are the {ref}`ogn_user_guide` or the {ref}`ogn_reference_guide`.
- {doc}`node_architects_guide`
- {doc}`ogn_user_guide`
- {doc}`ogn_reference_guide`
- {doc}`attribute_types`
- {doc}`ogn_code_samples_cpp`
- {doc}`ogn_code_samples_python`
- {doc}`ogn_generation_script`
| 604 | Markdown | 30.842104 | 128 | 0.730132 |
omniverse-code/kit/exts/omni.kit.manipulator.viewport/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "104.0.7"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarly for displaying extension info in UI
title = "Viewport Manipulator Manager"
description="Viewport Manipulator Manager"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Scene"
# Keywords for the extension
keywords = ["kit", "manipulator", "viewport"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
[dependencies]
"omni.ui.scene" = {}
"omni.kit.window.viewport" = { optional = true }
# Main python module this extension provides, it will be publicly available as "import omni.kit.manipulator.viewport".
[[python.module]]
name = "omni.kit.manipulator.viewport"
[[test]]
dependencies = [
"omni.kit.renderer.core",
"omni.kit.window.viewport"
]
waiver = "Part of the extension features (single viewport 1.0 scene overlay) is tested by omni.kit.manipulator.prim."
| 1,659 | TOML | 32.199999 | 118 | 0.743822 |
omniverse-code/kit/exts/omni.kit.manipulator.viewport/omni/kit/manipulator/viewport/viewport_manipulator.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import inspect
from typing import Type
import carb
class ViewportManipulator:
    """Generic wrapper around a Manipulator class tracked across viewports.

    A ViewportManipulator holds all "instances" of a Manipulator that exist in
    each viewport. Setting an attribute on the ViewportManipulator object
    forwards the value to every instance that exposes that attribute.

    Do NOT construct ViewportManipulator directly; use
    ``ManipulatorFactory.create_manipulator(...)`` instead. The factory
    automatically tracks viewports and creates or destroys the per-viewport
    Manipulator instances held by this wrapper.
    """

    def __init__(self, manipulator_class: Type, **kwargs):
        """Record the wrapped class and mirror its constructor defaults.

        Args:
            manipulator_class: The Manipulator class to wrap.
            **kwargs: Overrides for the constructor's keyword defaults.
        """
        self._manipulator_class = manipulator_class
        # Collect the keyword parameters (and their defaults) accepted by the
        # wrapped class's constructor, then overlay caller-supplied values.
        self._properties = {}
        signature = inspect.signature(manipulator_class.__init__)
        for arg, val in signature.parameters.items():
            if val.default is not inspect.Parameter.empty:
                self._properties[arg] = val.default
        self._properties.update(kwargs)
        # Mirror every property onto this wrapper. __setattr__ skips the
        # instance-forwarding step here because self._instances does not exist
        # yet; it is created last on purpose.
        for k, v in self._properties.items():
            setattr(self, k, v)
        self._instances = []

    def add_instance(self, instance):
        """Register a per-viewport instance and sync current property values onto it."""
        # Iterate keys only; current values are read from self, not the stored
        # defaults, so instances pick up any post-construction changes.
        for k in self._properties:
            if hasattr(instance, k):
                setattr(instance, k, getattr(self, k))
            else:
                carb.log_verbose(f"{k} is not an attribute of {type(instance)}")
        self._instances.append(instance)

    def get_all_instances(self):
        """Return the (live, mutable) list of per-viewport manipulator instances."""
        return self._instances

    def clear_all_instances(self):
        """Drop all tracked per-viewport instances."""
        self._instances.clear()

    @property
    def manipulator_class(self):
        """The wrapped Manipulator class instantiated for each viewport."""
        return self._manipulator_class

    def __setattr__(self, name, value):
        # Set the attribute on the wrapper itself, then forward it to every
        # tracked instance that exposes the same attribute.
        super().__setattr__(name, value)
        if hasattr(self, "_instances"):
            for instance in self._instances:
                if hasattr(instance, name):
                    setattr(instance, name, value)
                else:
                    carb.log_verbose(f"{name} is not an attribute of {type(instance)}")
| 2,491 | Python | 33.611111 | 122 | 0.662385 |
omniverse-code/kit/exts/omni.kit.manipulator.viewport/omni/kit/manipulator/viewport/manipulator_factory.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import List, Type
import carb.events
import omni.kit.app
import omni.ui as ui
from omni.ui import scene as sc
from .viewport_manipulator import ViewportManipulator
# Mirror manipulators onto legacy Viewport 1.0 windows. Both flags are forced to
# False in ManipulatorFactory.startup() when omni.kit.viewport_legacy is missing.
DRAW_ON_VP1 = True
# Track multiple VP1 viewport instances (new ones are detected in _on_update).
SUPPORT_MULTI_VP1 = True
class ManipulatorPool:
    """Object pool for manipulator instances of a single class.

    Released manipulators keep their omni.ui.scene items alive, so a later
    ``create()`` can hand them back instead of rebuilding them in the scene view.
    """

    def __init__(self, manipulator_class, scene_view: sc.SceneView):
        self._manipulator_class = manipulator_class
        self._scene_view = scene_view
        self._pool = []

    def create(self):
        """Return a pooled manipulator, or build a fresh one inside the scene."""
        if not self._pool:
            # Nothing to reuse: construct a new manipulator under the
            # SceneView's scene so its UI items belong to that view.
            with self._scene_view.scene:
                return self._manipulator_class()
        return self._pool.pop()

    def release(self, manipulator):
        """Disable *manipulator*, detach its model, and return it to the pool."""
        manipulator.enabled = False
        manipulator.model = None
        self._pool.append(manipulator)
class _ViewportWindowObject:
    """Bundle of the overlay ui.Window, its SceneView, and the draw-event
    subscription created for one legacy viewport instance. Dropping the object
    tears all three down."""

    def __init__(self, ui_window, ui_scene_view, draw_sub):
        self.window = ui_window
        self.scene_view = ui_scene_view
        self.draw_sub = draw_sub

    def __del__(self):
        # Release the subscription first so no draw callback fires while the
        # scene view and window are being destroyed.
        self.draw_sub = None
        self.scene_view.destroy()
        self.window.destroy()
class ManipulatorFactory:
    """Singleton factory that creates/destroys ViewportManipulator objects and
    keeps one manipulator *instance* per legacy (VP1) viewport window in sync
    with the set of open viewports."""

    # The singleton, assigned in startup() and cleared in shutdown().
    _instance = None

    @classmethod
    def create_manipulator(cls, manipulator_class: Type, **kwargs):
        """
        Creates a ViewportManipulator object.
        Args: Arguments need to match manipulator_type's constructor
        """
        return cls._instance._create_manipulator(manipulator_class, **kwargs)

    @classmethod
    def destroy_manipulator(cls, *args, **kwargs):
        """
        Destroys a ViewportManipulator object.
        Args @see `_destroy_manipulator`
        """
        cls._instance._destroy_manipulator(*args, **kwargs)

    def startup(self):
        # Maps each created instance to the ManipulatorPool that owns it.
        self._instances = dict()
        self._manipulators = set()
        self._pools = {}  # Each manipulator type needs a pool for each viewport.
        self._vp1_overlay_windows = {}
        self._viewport = None
        ManipulatorFactory._instance = self
        # Disable VP1 support globally when the legacy viewport is unavailable.
        global DRAW_ON_VP1, SUPPORT_MULTI_VP1
        try:
            import omni.kit.viewport_legacy
            self._viewport = omni.kit.viewport_legacy.acquire_viewport_interface()
        except ImportError:
            DRAW_ON_VP1, SUPPORT_MULTI_VP1 = False, False
        if not DRAW_ON_VP1:
            return
        self._dead_vp1_overlay_windows = []
        self._vp1_instances = []
        # Poll every frame: syncs overlay visibility and detects new viewports.
        self._update_sub = (
            omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update)
        )

    def shutdown(self):
        if DRAW_ON_VP1:
            self._vp1_overlay_windows.clear()
            self._dead_vp1_overlay_windows.clear()
            self._update_sub = None
        ManipulatorFactory._instance = None

    def _create_vp1_scene_view(self, instance):
        """Create an overlay ui.Window + SceneView on top of VP1 *instance* and
        subscribe to its draw events to mirror the camera matrices."""
        name = self._viewport.get_viewport_window_name(instance)
        window = self._viewport.get_viewport_window(instance)
        vp1_scene_window = ui.Window(name, visible=window.is_visible(), detachable=False)
        with vp1_scene_window.frame:
            vp1_scene_view = sc.SceneView(aspect_ratio_policy=sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT)
        draw_event_stream = window.get_ui_draw_event_stream()
        draw_sub = draw_event_stream.create_subscription_to_pop(
            lambda event, instance=instance: self._on_vp_draw(event, instance)
        )
        self._vp1_overlay_windows[instance] = _ViewportWindowObject(vp1_scene_window, vp1_scene_view, draw_sub)
        return vp1_scene_view

    def _create_manipulator(self, manipulator_class: Type, **kwargs) -> ViewportManipulator:
        """
        This function creates a TransformManipulator object that has instances in ALL existing viewports and future created viewports.
        Args: Arguments need to match manipulator_type's constructor
        Return:
            ViewportManipulator object.
        """
        manipulator = ViewportManipulator(manipulator_class, **kwargs)
        pools = self._get_or_create_pools_for_manipulator_class(manipulator_class)
        for pool in pools:
            instance = pool.create()
            self._instances[instance] = pool
            manipulator.add_instance(instance)
        self._manipulators.add(manipulator)
        return manipulator

    def _destroy_manipulator(self, manipulator: ViewportManipulator):
        """
        Destroy the TransformManipulator and all its instances.
        """
        instances = manipulator.get_all_instances()
        for instance in instances:
            # Return each per-viewport instance to the pool that created it.
            self._instances.pop(instance).release(instance)
        manipulator.clear_all_instances()
        self._manipulators.remove(manipulator)

    def _get_or_create_pools_for_manipulator_class(self, manipulator_class: Type) -> List[ManipulatorPool]:
        """Return one pool per viewport for *manipulator_class*, creating them lazily."""
        if manipulator_class not in self._pools:
            self._pools[manipulator_class] = [
                ManipulatorPool(manipulator_class, vp_obj.scene_view) for vp_obj in self._vp1_overlay_windows.values()
            ]
        return self._pools[manipulator_class]

    def _on_update(self, e: carb.events.IEvent):
        """Per-frame sync between VP1 viewports and their overlay windows."""
        if DRAW_ON_VP1:
            # The visible state between vp 1.0 and the dummy ui.scene overlay window needs to be in sync.
            vp_instances_to_remove = []
            for vp_instance, vp_obj in self._vp1_overlay_windows.items():
                vp_window = self._viewport.get_viewport_window(vp_instance)
                if vp_window:
                    visible = vp_window.is_visible()
                    if visible != vp_obj.window.visible:
                        vp_obj.window.visible = visible
                        vp_obj.scene_view.visible = visible
                else:
                    # Viewport is gone: drop its draw-event subscription first
                    # so no further draw callbacks fire for it.
                    vp_obj.draw_sub = None
                    # Hide all ui as well
                    vp_obj.window.visible = False
                    vp_obj.scene_view.visible = False
                    vp_instances_to_remove.append(vp_instance)
            # Clear any pending dead Windows and Scenes
            self._dead_vp1_overlay_windows.clear()
            # Gather all the newly dead Windows and stash them for destruction later
            for vp_instance in vp_instances_to_remove:
                self._dead_vp1_overlay_windows.append(self._vp1_overlay_windows[vp_instance])
                del self._vp1_overlay_windows[vp_instance]
                self._vp1_instances.remove(vp_instance)
            # Since there's no callback for new VP1 creation, we check for change in update
            if SUPPORT_MULTI_VP1:
                vp1_instances = self._viewport.get_instance_list()
                if vp1_instances != self._vp1_instances:
                    # Assuming Kit can only add viewports here; removals were handled above.
                    diff = list(set(vp1_instances) - set(self._vp1_instances))
                    for instance in diff:
                        scene_view = self._create_vp1_scene_view(instance)
                        # make a new pool for the manipulators
                        for manipulator_class, pools in self._pools.items():
                            pools.append(ManipulatorPool(manipulator_class, scene_view))
                        # create a new instance in the new viewport for each existing manipulators
                        for manipulator in self._manipulators:
                            manipulator_class = manipulator.manipulator_class
                            pools = self._get_or_create_pools_for_manipulator_class(manipulator_class)
                            pool = pools[-1]
                            instance = pool.create()
                            self._instances[instance] = pool
                            manipulator.add_instance(instance)
                    self._vp1_instances = vp1_instances

    def _on_vp_draw(self, event: carb.events.IEvent, vp_instance):
        """Push the VP1 view/projection matrices into the overlay SceneView on draw."""
        vp_obj = self._vp1_overlay_windows.get(vp_instance)
        if not vp_obj:
            return
        vm = event.payload["viewMatrix"]
        pm = event.payload["projMatrix"]
        if pm[15] == 0.0:
            # perspective projection matrix
            proj_matrix = sc.Matrix44(pm[0], pm[1], pm[2], pm[3], pm[4], pm[5], pm[6], pm[7], pm[8], pm[9], pm[10], pm[11], pm[12], pm[13], pm[14], pm[15])
            # NOTE(review): this compensation appears to remap the projection's
            # depth/clip conventions for sc.SceneView - confirm before changing.
            compensation = sc.Matrix44(1.0, 0.0, -0.0, 0.0, 0.0, 1.0, 0.0, -0.0, -0.0, 0.0, 1.0, -1.0, 0.0, -0.0, 0.0, -2.0)
            proj_matrix = proj_matrix * compensation
            # Flatten into list for model
            pm = [proj_matrix[0], proj_matrix[1], proj_matrix[2], proj_matrix[3],
                  proj_matrix[4], proj_matrix[5], proj_matrix[6], proj_matrix[7],
                  proj_matrix[8], proj_matrix[9], proj_matrix[10], proj_matrix[11],
                  proj_matrix[12], proj_matrix[13], proj_matrix[14], proj_matrix[15]]
        scene_view = vp_obj.scene_view
        scene_view.model.set_floats("projection", pm)
        scene_view.model.set_floats("view", vm)
| 9,726 | Python | 38.54065 | 155 | 0.604874 |
omniverse-code/kit/exts/omni.kit.manipulator.viewport/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [104.0.7] - 2022-05-25
### Changed
- Fix issue with crash when a legacy Viewport is destroyed.
## [104.0.6] - 2022-05-04
### Changed
- Imported to the kit repo and bumped version to match Kit SDK
## [1.0.6] - 2022-04-20
### Changed
- Delay omni.ui.scene.SceneView creation until app is updating
## [1.0.5] - 2022-04-07
### Added
- Added test waiver.
## [1.0.4] - 2022-04-06
### Changed
- Use omni.ui.SceneView.model to set view and projection
## [1.0.3] - 2021-12-07
### Fixed
- Fixed dangling draw event subscription.
## [1.0.2] - 2021-12-01
### Removed
- Move omni.kit.viewport import to omni.kit.viewport_legacy
## [1.0.1] - 2021-12-01
### Removed
- Made omni.kit.window.viewport an optional dependency.
## [1.0.0] - 2021-09-23
### Added
- Init commit.
| 861 | Markdown | 18.590909 | 80 | 0.659698 |
omniverse-code/kit/exts/omni.kit.manipulator.viewport/docs/README.md | # Viewport Manipulator Extension [omni.kit.manipulator.viewport]
This extension provides a way to create managed manipulators in the Viewport.
| 148 | Markdown | 28.799994 | 80 | 0.817568 |
omniverse-code/kit/exts/omni.kit.test_app_full_nonrtx/omni/kit/test_app_full_nonrtx/__init__.py | from .app_dev_test import *
| 28 | Python | 13.499993 | 27 | 0.714286 |
omniverse-code/kit/exts/omni.kit.test_app_full_nonrtx/omni/kit/test_app_full_nonrtx/app_dev_test.py | import os
import sys
import inspect
import importlib
import carb
import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test
#import omni.kit.test_helpers_gfx
import omni.kit.renderer.bind
from omni.kit.test.teamcity import teamcity_publish_image_artifact
# Directory where captured screenshots for this test module are written.
OUTPUTS_DIR = omni.kit.test.get_test_output_path()
# If True, convert the captured buffer to RGBA tuples; otherwise to a flat channel list.
USE_TUPLES = True
class AppDevCompatTest(omni.kit.test.AsyncTestCase):
async def setUp(self):
self._settings = carb.settings.acquire_settings_interface()
self._app_window_factory = omni.appwindow.acquire_app_window_factory_interface()
self._renderer = omni.kit.renderer.bind.acquire_renderer_interface()
self._module_test_helpers_gfx = importlib.import_module('omni.kit.test_helpers_gfx')
self._captured_buffer = []
self._captured_buffer_w = 0
self._captured_buffer_h = 0
self._captured_buffer_fmt = 0
def __test_name(self) -> str:
return f"{self.__module__}.{self.__class__.__name__}.{inspect.stack()[2][3]}"
async def tearDown(self):
self._renderer = None
self._app_window_factory = None
self._settings = None
def _capture_callback(self, buf, buf_size, w, h, fmt):
if USE_TUPLES:
self._captured_buffer = omni.renderer_capture.convert_raw_bytes_to_rgba_tuples(buf, buf_size, w, h, fmt)
else:
self._captured_buffer = omni.renderer_capture.convert_raw_bytes_to_list(buf, buf_size, w, h, fmt)
self._captured_buffer_w = w
self._captured_buffer_h = h
self._captured_buffer_fmt = fmt
async def test_1_capture_variance(self):
test_name = self.__test_name()
import omni.renderer_capture
app_window = self._app_window_factory.get_default_window()
capture_interface = omni.renderer_capture.acquire_renderer_capture_interface()
capture_interface.capture_next_frame_swapchain_callback(self._capture_callback, app_window)
await omni.kit.app.get_app().next_update_async()
capture_interface.wait_async_capture(app_window)
if "PIL" not in sys.modules.keys():
# Checking if we have Pillow imported
try:
from PIL import Image
except ImportError:
# Install Pillow if it's not installed
import omni.kit.pipapi
omni.kit.pipapi.install("Pillow", module="PIL")
from PIL import Image
from PIL import ImageStat
image = Image.new('RGBA', [self._captured_buffer_w, self._captured_buffer_h])
if USE_TUPLES:
image.putdata(self._captured_buffer)
else:
buf_channel_it = iter(self._captured_buffer)
captured_buffer_tuples = list(zip(buf_channel_it, buf_channel_it, buf_channel_it, buf_channel_it))
image.putdata(captured_buffer_tuples)
# Only compare the standard deviation, since the test failing because of the UI elements shifting around is
# no good. Generally, it is enough to test that:
# 1. the test doesn't crash on iGPUs,
# 2. the test produces anything other than the black screen (the std dev of which is 0.0)
image_stats = ImageStat.Stat(image)
avg_std_dev = sum(image_stats.stddev) / 3.0
# Save image for user verification
test_name = self.__test_name()
TEST_IMG_PATH = os.path.join(OUTPUTS_DIR, test_name + ".png")
image.save(TEST_IMG_PATH)
# The full app.dev has a std dev ~30.0, and the version with broken layout has a std dev of ~18.0
STD_DEV_THRESHOLD = 10.0
if avg_std_dev < STD_DEV_THRESHOLD:
teamcity_publish_image_artifact(TEST_IMG_PATH, "results", "Generated")
carb.log_error("Standard deviation of the produced image is lower than the threshold!")
self.assertTrue(False)
| 3,894 | Python | 36.095238 | 116 | 0.643297 |
omniverse-code/kit/exts/omni.kit.test_app_full_nonrtx/docs/index.rst | omni.kit.test_app_full_nonrtx
#############################
Test app.full in PXR mode.
| 89 | reStructuredText | 13.999998 | 29 | 0.494382 |
omniverse-code/kit/exts/omni.kit.widget.spinner/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.4"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarly for displaying extension info in UI
title = "Spinner widgets"
description="A Spinner widget includes a value field and a set of arrow (up/down or left/right) to change field value."
# URL of the extension source repository.
repository = ""
# Keywords for the extension
keywords = ["kit", "ui", "widget", "spinner", "field"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
category = "Internal"
# We only depend on testing framework currently:
[dependencies]
"omni.ui" = {}
"omni.kit.widget.examples" = { optional = true }
# Main python module this extension provides, it will be publicly available as "import omni.kit.widget.spinner".
[[python.module]]
name = "omni.kit.widget.spinner"
[settings]
| 1,548 | TOML | 35.023255 | 119 | 0.744186 |
omniverse-code/kit/exts/omni.kit.widget.spinner/omni/kit/widget/spinner/spinner.py | import abc
from typing import Optional, Union, Dict
import omni.ui as ui
import omni.kit.app
import carb.events
from .style import UI_STYLE
# First delay time to start a long click (in seconds)
SPINNER_LONG_CLICK_DELAY = 0.6
# Long click interval (in seconds)
SPINNER_LONG_CLICK_INTERVAL = 0.2
class AbstractSpinner:
    """
    A Spinner widget includes a value field and a set of arrow (up/down or left/right) to change field value.

    Pressing an arrow applies one step immediately; holding it repeats the step
    after SPINNER_LONG_CLICK_DELAY seconds and then every
    SPINNER_LONG_CLICK_INTERVAL seconds until the button is released.

    Kwargs:
        model (ui.AbstractValueModel): Value model for value field. Default None.
        step (Union[float, int]): Step value. Default 1.
        min (Union[float, int, None]): Min value. Default None means no min value.
        max (Union[float, int, None]): Max value. Default None means no max value.
        vertical (bool): Vertical spinner (with up/down arrows) if True. Otherwise horizontal spinner (with left/right arrows). Default True.
        width (ui.Length): Widget width. Default ui.Fraction(1).
        style (Dict): Style entries merged over the default UI_STYLE. Default None.
        precision (int): Number of decimal places the value is rounded to after
            each step, to avoid floating point drift. Default 7.
    """

    def __init__(
        self,
        model: ui.AbstractValueModel = None,
        step: Union[float, int] = 1,
        min: Union[float, int, None] = None,  # noqa: A002 - keyword name kept for API compatibility
        max: Union[float, int, None] = None,  # noqa: A002 - keyword name kept for API compatibility
        vertical: bool = True,
        width: ui.Length = ui.Fraction(1),
        style: Dict = None,
        precision: int = 7
    ):
        self._model = model
        self._step = step
        self._width = width
        self._min = min
        self._max = max
        self._vertical = vertical
        self._precision = precision
        self._arrow_size = 6
        # Merge caller-provided style entries over the defaults.
        ui_style = UI_STYLE.copy()
        if style is not None:
            ui_style.update(style)
        self._build_ui(ui_style)

    def destroy(self) -> None:
        """Release the update-event subscription (if a long-click is in progress)."""
        self._sub = None

    # Fixed: previously declared with the deprecated abc.abstractclassmethod,
    # although every override is a plain instance method.
    @abc.abstractmethod
    def _create_field_widget(self, model: Optional[ui.AbstractValueModel], **kwargs) -> ui.AbstractField:
        """Create and return the value-field widget; implemented by subclasses."""

    @property
    def enabled(self) -> bool:
        """Whether the spinner reacts to arrow clicks."""
        return self._container.enabled

    @enabled.setter
    def enabled(self, value: bool) -> None:
        self._container.enabled = value

    def _build_ui(self, style: Dict):
        """Build the field + arrows layout inside a single HStack container."""
        self._container = ui.HStack(width=self._width, height=0, style=style)
        with self._container:
            if self._vertical:
                self._build_vertical_ui()
            else:
                self._build_horizontal_ui()

    def _build_vertical_ui(self):
        """Field on the left, a column of up (+step) / down (-step) arrows on the right."""
        self._field = self._create_field_widget(self._model, style_type_name_override="Spinner.Field")
        ui.Spacer(width=5)
        with ui.ZStack(width=self._arrow_size):
            ui.Rectangle(style_type_name_override="Spinner.Arrow.Background")
            with ui.VStack(width=0, spacing=0):
                ui.Spacer()
                ui.Triangle(
                    width=self._arrow_size,
                    height=self._arrow_size,
                    alignment=ui.Alignment.CENTER_TOP,
                    style_type_name_override="Spinner.Arrow",
                    mouse_pressed_fn=(lambda x, y, key, m: self._begin_change(key, self._step)),
                    mouse_released_fn=(lambda *_: self._end_change()),
                )
                ui.Spacer(height=4)
                ui.Triangle(
                    width=self._arrow_size,
                    height=self._arrow_size,
                    alignment=ui.Alignment.CENTER_BOTTOM,
                    style_type_name_override="Spinner.Arrow",
                    mouse_pressed_fn=(lambda x, y, key, m: self._begin_change(key, -self._step)),
                    mouse_released_fn=(lambda *_: self._end_change()),
                )
                ui.Spacer()

    def _build_horizontal_ui(self):
        """Left (-step) arrow, field in the middle, right (+step) arrow."""
        with ui.VStack(width=0):
            ui.Spacer()
            ui.Triangle(
                width=self._arrow_size,
                height=self._arrow_size,
                alignment=ui.Alignment.LEFT_CENTER,
                style_type_name_override="Spinner.Arrow",
                mouse_pressed_fn=(lambda x, y, key, m: self._begin_change(key, -self._step)),
                mouse_released_fn=(lambda *_: self._end_change()),
            )
            ui.Spacer()
        ui.Spacer(width=4)
        self._field = self._create_field_widget(self._model, style_type_name_override="Spinner.Field")
        ui.Spacer(width=4)
        with ui.VStack(width=0, spacing=0):
            ui.Spacer()
            ui.Triangle(
                width=self._arrow_size,
                height=self._arrow_size,
                alignment=ui.Alignment.RIGHT_CENTER,
                style_type_name_override="Spinner.Arrow",
                mouse_pressed_fn=(lambda x, y, key, m: self._begin_change(key, self._step)),
                mouse_released_fn=(lambda *_: self._end_change()),
            )
            ui.Spacer()

    def _begin_change(self, button: int, step: int):
        """Start stepping the value; *button* 0 is the left mouse button."""
        if not self._container.enabled:
            return
        if button != 0:
            return
        self._delta = step
        self._action_time = 0.0
        self._current_time = 0.0
        # Apply one step immediately, then keep stepping while the button is held.
        self._update_value()
        self._register_update()

    def _end_change(self):
        """Stop the long-click repeat."""
        self._deregister_update()

    def _register_update(self):
        self._sub = (
            omni.kit.app.get_app()
            .get_update_event_stream()
            .create_subscription_to_pop(self._on_update, name="spinner update")
        )

    def _deregister_update(self):
        # Dropping the subscription object unsubscribes from the update stream.
        self._sub = None

    def _on_update(self, event: carb.events.IEvent):
        """Per-frame tick while an arrow is held: implements the long-click repeat."""
        dt = event.payload["dt"]
        self._current_time += dt
        if self._action_time == 0.0:
            # First tick after the press: schedule the first repeat.
            self._action_time = self._current_time + SPINNER_LONG_CLICK_DELAY
        elif self._current_time > self._action_time:
            self._update_value()
            self._action_time += SPINNER_LONG_CLICK_INTERVAL

    def _update_value(self):
        """Step the field value by the current delta, rounded and clamped to [min, max]."""
        new_value = round(self._field.model.as_float + self._delta, self._precision)
        if self._min is not None:
            new_value = max(new_value, self._min)
        if self._max is not None:
            new_value = min(new_value, self._max)
        self._field.model.set_value(new_value)
class FloatSpinner(AbstractSpinner):
    """Spinner whose value field is a ui.FloatField."""

    def _create_field_widget(self, model: Optional[ui.AbstractValueModel], **kwargs) -> ui.AbstractField:
        return ui.FloatField(model, **kwargs)
class IntSpinner(AbstractSpinner):
    """Spinner whose value field is a ui.IntField."""

    def _create_field_widget(self, model: Optional[ui.AbstractValueModel], **kwargs) -> ui.AbstractField:
        return ui.IntField(model, **kwargs)
class IntDragSpinner(AbstractSpinner):
    """Spinner whose value field is a draggable ui.IntDrag."""

    def _create_field_widget(self, model: Optional[ui.AbstractValueModel], **kwargs) -> ui.AbstractField:
        return ui.IntDrag(model, **kwargs)
| 6,714 | Python | 34.909091 | 141 | 0.573131 |
omniverse-code/kit/exts/omni.kit.widget.spinner/omni/kit/widget/spinner/style.py | from omni.ui import color as cl
# Default style table for the spinner widgets. Keys match the
# style_type_name_override names used in AbstractSpinner._build_*_ui; callers
# may override individual entries via the AbstractSpinner(style=...) argument.
UI_STYLE = {
    "Spinner.Field": {},
    "Spinner.Arrow": {"background_color": cl.shade(cl("#D6D6D6"))},
    "Spinner.Arrow:disabled": {"background_color": cl.shade(cl("#A0A0A0"))},
    "Spinner.Arrow.Background": {"background_color": 0},
}
| 275 | Python | 29.666663 | 76 | 0.632727 |
omniverse-code/kit/exts/omni.kit.widget.spinner/omni/kit/widget/spinner/__init__.py | from .spinner import AbstractSpinner, FloatSpinner, IntSpinner, IntDragSpinner
from .example import *
| 102 | Python | 33.333322 | 78 | 0.833333 |
omniverse-code/kit/exts/omni.kit.widget.spinner/omni/kit/widget/spinner/example/spinner.py | from omni import ui
from omni.kit.widget.spinner import FloatSpinner, IntSpinner
from omni.kit.widget.examples import ExamplePage
from typing import List, Optional
class SpinnerPage(ExamplePage):
    """Example page showcasing the spinner widgets (float, int, horizontal, disabled)."""

    def __init__(self):
        super().__init__("Spinner")

    def destroy(self):
        """Destroy every spinner so their update-event subscriptions are released."""
        self._h_float_spinner.destroy()
        self._float_spinner.destroy()
        self._int_spinner.destroy()
        # Bug fix: the disabled spinner was previously never destroyed
        # (and its attribute name was misspelled "_disbled_spinner").
        self._disabled_spinner.destroy()

    def build_page(self):
        with ui.VStack(spacing=5):
            with ui.HStack(height=20, spacing=10):
                ui.Label("Float spinner:", width=100)
                self._float_spinner = FloatSpinner(width=60)
            with ui.HStack(height=20, spacing=10):
                ui.Label("Int spinner:", width=100)
                self._int_spinner = IntSpinner(width=60)
            with ui.HStack(height=20, spacing=10):
                ui.Label("Horizontal:", width=100)
                self._h_float_spinner = FloatSpinner(vertical=False, width=60)
            with ui.HStack(height=20, spacing=10):
                ui.Label("Disabled:", width=100)
                self._disabled_spinner = FloatSpinner(width=60)
                self._disabled_spinner.enabled = False
| 1,186 | Python | 32.914285 | 78 | 0.596965 |
omniverse-code/kit/exts/omni.kit.widget.spinner/omni/kit/widget/spinner/example/__init__.py | import carb
# Register the spinner example page with the (optional) examples extension.
try:
    from omni.kit.widget.examples import register_page

    from .spinner import SpinnerPage

    register_page(SpinnerPage())
except Exception as e:
    # The examples extension is optional; just log why the page is missing.
    # (A redundant trailing `pass` was removed.)
    carb.log_info(f"Failed to add example for spinner: {str(e)}")
| 243 | Python | 19.333332 | 65 | 0.711934 |
omniverse-code/kit/exts/omni.kit.widget.spinner/omni/kit/widget/spinner/tests/__init__.py | from .test_spinner import *
| 28 | Python | 13.499993 | 27 | 0.75 |
omniverse-code/kit/exts/omni.kit.widget.spinner/omni/kit/widget/spinner/tests/test_spinner.py | import omni.kit.test
from omni.kit.widget.spinner import FloatSpinner, IntSpinner, IntDragSpinner
from omni.ui.tests.test_base import OmniUiTest
import omni.kit.app
import asyncio
import carb
import copy
import omni.ui as ui
from omni.ui import color as cl
import asyncio
# Style override used by the tests to exercise the AbstractSpinner(style=...) path.
# NOTE(review): relies on cl.viewport_menubar_light/medium shades being declared
# elsewhere before the widgets are drawn - confirm.
SPINNER_STYLE = {
    "Spinner.Field": {"background_color": 0, "color": cl.viewport_menubar_light},
    "Spinner.Field:disabled": {"color": cl.viewport_menubar_medium},
    "Spinner.Arrow": {"background_color": cl.viewport_menubar_light},
    "Spinner.Arrow:disabled": {"background_color": cl.viewport_menubar_medium},
}
class TestSpinner(OmniUiTest):
    """Unit tests for the spinner widgets: stepping, rounding, clamping,
    ignored mouse buttons, disabled state, and long-click repeat."""

    # Before running each test
    async def setUp(self):
        await super().setUp()

    # After running each test
    async def tearDown(self):
        await super().tearDown()

    async def test_sliderbar(self):
        float_spinner = FloatSpinner(step=0.1, min=0, max=10, style=SPINNER_STYLE)
        self.assertEqual(float_spinner._field.model.as_float, 0.0)
        float_spinner._begin_change(0, 0.1)
        self.assertEqual(float_spinner._field.model.as_float, 0.1)
        float_spinner._end_change()
        # 0.10000001 rounds away at precision=7, so this behaves like value + 0.1.
        float_spinner._begin_change(0, 0.10000001)
        self.assertEqual(float_spinner._field.model.as_float, 0.2)
        float_spinner._end_change()
        # A non-left mouse button (1) must be ignored: value stays at 0.2.
        float_spinner._begin_change(1, 0.10000001)
        self.assertEqual(float_spinner._field.model.as_float, 0.2)
        float_spinner._end_change()
        # A step within precision=7 is applied in full (0.2 -> 0.300001).
        float_spinner._begin_change(0, 0.100001)
        self.assertEqual(float_spinner._field.model.as_float, 0.300001)
        float_spinner._end_change()
        float_spinner.enabled = False
        self.assertFalse(float_spinner.enabled)
        # A disabled spinner must not change its value.
        float_spinner._begin_change(0, 0.100001)
        self.assertEqual(float_spinner._field.model.as_float, 0.300001)
        float_spinner._end_change()
        int_spinner = IntSpinner(step=1, min=0, max=10, vertical=False)
        self.assertEqual(int_spinner._field.model.as_int, 0)
        int_spinner._begin_change(0, 1)
        self.assertEqual(int_spinner._field.model.as_int, 1)
        int_spinner._end_change()
        int_drag_spinner = IntDragSpinner(step=1, min=0, max=10)
        self.assertEqual(int_drag_spinner._field.model.as_int, 0)
        int_drag_spinner._begin_change(0, 1)
        self.assertEqual(int_drag_spinner._field.model.as_int, 1)
        # Check for long click: holding past SPINNER_LONG_CLICK_DELAY (0.6s)
        # applies another step.
        await asyncio.sleep(0.7)
        self.assertEqual(int_drag_spinner._field.model.as_int, 2)
        int_drag_spinner._end_change()
omniverse-code/kit/exts/omni.kit.widget.spinner/docs/CHANGELOG.md | # CHANGELOG
This document records all notable changes to ``omni.kit.widget.spinner`` extension.
This project adheres to `Semantic Versioning <https://semver.org/>`_.
## [1.0.4] - 2022-07-25
### Added
- Add precision parameter to reduce floating point precision issues.
## [1.0.3] - 2022-04-08
### Added
- Add test
## [1.0.2] - 2022-03-07
### Changed
- Do not change the value when disabled
## [1.0.1] - 2022-03-07
### Changed
- Widget height
## [1.0.0] - 2022-03-04
### Added
- Initial version implementation
| 502 | Markdown | 18.346153 | 83 | 0.669323 |
omniverse-code/kit/exts/omni.kit.widget.spinner/docs/README.md | # Spinner widget
To use:
```
from omni.kit.widget.spinner import FloatSpinner, IntSpinner
float_model = ui.SimpleFloatModel()
float_spinner = FloatSpinner(model=float_model)
int_model = ui.SimpleIntModel()
int_spinner = IntSpinner(model=int_model)
```
Cleanup:
```
float_spinner.destroy()
int_spinner.destroy()
``` | 320 | Markdown | 15.049999 | 60 | 0.740625 |
omniverse-code/kit/exts/omni.kit.widget.stage_icons/config/extension.toml | [package]
version = "1.0.2"
authors = ["NVIDIA"]
title = "Icons for Stage Widget"
description = "Default Set of Icons for the Stage Window."
readme = "docs/README.md"
repository = ""
category = "Stage"
keywords = ["kit", "example"]
changelog = "docs/CHANGELOG.md"
icon = "data/icon.png"
[[python.module]]
name = "omni.kit.widget.stage_icons"
# Additional python module with tests, to make them discoverable by test system.
[[python.module]]
name = "omni.kit.widget.stage_icons.tests"
[[test]]
dependencies = [
"omni.kit.widget.stage",
]
| 544 | TOML | 21.708332 | 80 | 0.696691 |
omniverse-code/kit/exts/omni.kit.widget.stage_icons/omni/kit/widget/stage_icons/stage_icons_extension.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from pathlib import Path
import carb.settings
import omni.ext
class StageIconsExtension(omni.ext.IExt):
    """Registers the default prim-type icon set with omni.kit.widget.stage.

    Icons are scanned from ``<ext_root>/icons/<ui_style>/*.svg`` (file stem =
    prim type) once omni.kit.widget.stage is enabled, and unregistered on shutdown.
    """

    # Names of the prim types this extension registered, or None if
    # registration has not happened (yet).
    _icons_registered = None

    @staticmethod
    def get_registered_icons():
        """Return the list of registered icon names, or None before registration."""
        return StageIconsExtension._icons_registered

    def register_icons(self):
        """Scan the style-specific icon folder and register every SVG found."""
        StageIconsExtension._icons_registered = []

        import omni.kit.widget.stage

        stage_icons = omni.kit.widget.stage.StageIcons()
        current_path = Path(__file__).parent
        icon_path = current_path.parent.parent.parent.parent.joinpath("icons")

        style = carb.settings.get_settings().get_as_string("/persistent/app/window/uiStyle") or "NvidiaDark"

        # Read all the svg files in the directory; the file stem is the prim type.
        icons = {icon.stem: str(icon) for icon in icon_path.joinpath(style).glob("*.svg")}
        for prim_type, filename in icons.items():
            stage_icons.set(prim_type, filename)
            StageIconsExtension._icons_registered.append(prim_type)

    def on_startup(self, ext_id):
        StageIconsExtension._icons_registered = None
        manager = omni.kit.app.get_app().get_extension_manager()
        # Defer icon registration until omni.kit.widget.stage is enabled so the
        # StageIcons singleton exists when we write into it.
        self._hook = manager.subscribe_to_extension_enable(
            on_enable_fn=lambda _: self.register_icons(),
            ext_name="omni.kit.widget.stage",
            hook_name="omni.kit.widget.stage_icons",
        )

    def on_shutdown(self):
        if StageIconsExtension._icons_registered:
            try:
                import omni.kit.widget.stage
            except ImportError:
                # Stage widget already unloaded; nothing to unregister.
                # (Previously bound the exception to an unused variable `e`.)
                return

            stage_icons = omni.kit.widget.stage.StageIcons()
            for prim_type in StageIconsExtension._icons_registered:
                stage_icons.set(prim_type, None)
| 2,153 | Python | 36.13793 | 108 | 0.672085 |
omniverse-code/kit/exts/omni.kit.widget.stage_icons/omni/kit/widget/stage_icons/tests/stage_icons_test.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from ..stage_icons_extension import StageIconsExtension
import omni.kit.test
class TestStageIcons(omni.kit.test.AsyncTestCase):
    """Tests that the default icon set is registered and that the extension
    can be disabled and re-enabled cleanly."""

    async def test_registry(self):
        # Every expected prim-type icon must have been registered on enable.
        icons = StageIconsExtension.get_registered_icons()
        self.assertIsInstance(icons, list)
        self.assertIn("Camera", icons)
        self.assertIn("GeomSubset", icons)
        self.assertIn("Instance", icons)
        self.assertIn("Light", icons)
        self.assertIn("Material", icons)
        self.assertIn("Prim", icons)
        self.assertIn("Reference", icons)
        self.assertIn("Scope", icons)
        self.assertIn("Shader", icons)
        self.assertIn("SkelJoint", icons)
        self.assertIn("Xform", icons)

    async def test_disable_extension(self):
        manager = omni.kit.app.get_app().get_extension_manager()
        ext_id = "omni.kit.widget.stage_icons"
        self.assertTrue(ext_id)
        self.assertTrue(manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled(ext_id, False)
        # A few update ticks are required for the disable to take effect.
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        self.assertTrue(not manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled(ext_id, True)
        # And again for the re-enable.
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        self.assertTrue(manager.is_extension_enabled(ext_id))
| 1,969 | Python | 41.826086 | 77 | 0.687659 |
omniverse-code/kit/exts/omni.kit.widget.stage_icons/docs/CHANGELOG.md | # CHANGELOG
This document records all notable changes to ``omni.kit.widget.stage_icons`` extension.
This project adheres to `Semantic Versioning <https://semver.org/>`.
## [1.0.2] - 2021-07-29
### Changes
- Added payload icon
## [1.0.1] - 2021-02-10
### Changes
- Updated StyleUI handling
## [1.0.0] - 2020-10-22
### Added
- The initial extension
| 354 | Markdown | 16.749999 | 87 | 0.677966 |
omniverse-code/kit/exts/omni.kit.widget.stage_icons/docs/README.md | # Default Set of Icons for the Stage Window [omni.kit.widget.stage_icons]
To add an icon, just name it with the type of USD Prim and place it in the icons folder.
| 164 | Markdown | 40.24999 | 88 | 0.756098 |
omniverse-code/kit/exts/omni.kit.raycast.query/omni/kit/raycast/query/__init__.py | from ._omni_kit_raycast_query import *
from .scripts import *
__all__ = []
| 76 | Python | 14.399997 | 38 | 0.644737 |
omniverse-code/kit/exts/omni.kit.raycast.query/omni/kit/raycast/query/_omni_kit_raycast_query.pyi | """pybind11 omni.kit.raycast.query.IRaycastQuery bindings"""
from __future__ import annotations
import omni.kit.raycast.query._omni_kit_raycast_query
import typing
__all__ = [
"IRaycastQuery",
"Ray",
"RayQueryResult",
"Result",
"acquire_raycast_query_interface"
]
class IRaycastQuery():
    """Interface for submitting single raycast queries and managing raycast
    sequences (auto-generated pybind11 stub; signatures mirror the C++ binding)."""

    def add_raycast_sequence(self) -> int: ...
    def get_latest_result_from_raycast_sequence(self, arg0: int) -> typing.Tuple[Ray, RayQueryResult]: ...
    def remove_raycast_sequence(self, arg0: int) -> Result: ...
    def set_raycast_sequence_array_size(self, arg0: int, arg1: int) -> Result: ...
    def submit_ray_to_raycast_sequence(self, arg0: int, arg1: Ray) -> Result: ...
    def submit_raycast_query(self, ray: Ray, callback: typing.Callable[[Ray, RayQueryResult], None]) -> None: ...
    pass
class Ray():
def __init__(self, origin: object, direction: object, min_t: float = 0.0, max_t: float = inf, adjust_for_section: bool = True) -> None:
"""
Create a new Ray
"""
def __repr__(self) -> str: ...
def __str__(self) -> str: ...
@property
def forward(self) -> object:
"""
direction of the ray
:type: object
"""
@forward.setter
def forward(self, arg1: object) -> None:
"""
direction of the ray
"""
@property
def max_t(self) -> float:
"""
t value at end of ray
:type: float
"""
@max_t.setter
def max_t(self, arg1: float) -> None:
"""
t value at end of ray
"""
@property
def min_t(self) -> float:
"""
t value of start of ray
:type: float
"""
@min_t.setter
def min_t(self, arg1: float) -> None:
"""
t value of start of ray
"""
@property
def origin(self) -> object:
"""
origin of the ray
:type: object
"""
@origin.setter
def origin(self, arg1: object) -> None:
"""
origin of the ray
"""
pass
class RayQueryResult():
def __init__(self) -> None:
"""
Create a new RayQueryResult
"""
def __repr__(self) -> str: ...
def __str__(self) -> str: ...
def get_target_usd_path(self) -> str:
"""
This function returns the usd path of geometry that was hit.
Return:
Usd path of object that is the target, or an empty string if nothing was hit
"""
@property
def hit_position(self) -> object:
"""
position of hit
:type: object
"""
@property
def hit_t(self) -> float:
"""
t value of hit position
:type: float
"""
@property
def instance_id(self) -> int:
"""
instance id
:type: int
"""
@property
def normal(self) -> object:
"""
normal of geometry at hit point
:type: object
"""
@property
def primitive_id(self) -> int:
"""
primitive id
:type: int
"""
@property
def valid(self) -> bool:
"""
indicates whether result is valid
:type: bool
"""
pass
class Result():
"""
Result.
Members:
SUCCESS
INVALID_PARAMETER
PARAMETER_IS_NULL
RAYCAST_SEQUENCE_DOES_NOT_EXIST
RAYCAST_QUERY_MANAGER_DOES_NOT_EXIT
RAYCAST_SEQUENCE_ADDITION_FAILED
"""
def __eq__(self, other: object) -> bool: ...
def __getstate__(self) -> int: ...
def __hash__(self) -> int: ...
def __index__(self) -> int: ...
def __init__(self, value: int) -> None: ...
def __int__(self) -> int: ...
def __ne__(self, other: object) -> bool: ...
def __repr__(self) -> str: ...
def __setstate__(self, state: int) -> None: ...
@property
def name(self) -> str:
"""
:type: str
"""
@property
def value(self) -> int:
"""
:type: int
"""
INVALID_PARAMETER: omni.kit.raycast.query._omni_kit_raycast_query.Result # value = <Result.INVALID_PARAMETER: 1>
PARAMETER_IS_NULL: omni.kit.raycast.query._omni_kit_raycast_query.Result # value = <Result.PARAMETER_IS_NULL: 2>
RAYCAST_QUERY_MANAGER_DOES_NOT_EXIT: omni.kit.raycast.query._omni_kit_raycast_query.Result # value = <Result.RAYCAST_QUERY_MANAGER_DOES_NOT_EXIT: 4>
RAYCAST_SEQUENCE_ADDITION_FAILED: omni.kit.raycast.query._omni_kit_raycast_query.Result # value = <Result.RAYCAST_SEQUENCE_ADDITION_FAILED: 5>
RAYCAST_SEQUENCE_DOES_NOT_EXIST: omni.kit.raycast.query._omni_kit_raycast_query.Result # value = <Result.RAYCAST_SEQUENCE_DOES_NOT_EXIST: 3>
SUCCESS: omni.kit.raycast.query._omni_kit_raycast_query.Result # value = <Result.SUCCESS: 0>
__members__: dict # value = {'SUCCESS': <Result.SUCCESS: 0>, 'INVALID_PARAMETER': <Result.INVALID_PARAMETER: 1>, 'PARAMETER_IS_NULL': <Result.PARAMETER_IS_NULL: 2>, 'RAYCAST_SEQUENCE_DOES_NOT_EXIST': <Result.RAYCAST_SEQUENCE_DOES_NOT_EXIST: 3>, 'RAYCAST_QUERY_MANAGER_DOES_NOT_EXIT': <Result.RAYCAST_QUERY_MANAGER_DOES_NOT_EXIT: 4>, 'RAYCAST_SEQUENCE_ADDITION_FAILED': <Result.RAYCAST_SEQUENCE_ADDITION_FAILED: 5>}
pass
def acquire_raycast_query_interface(*args, **kwargs) -> typing.Any:
pass
| 5,306 | unknown | 27.842391 | 418 | 0.563513 |
omniverse-code/kit/exts/omni.kit.raycast.query/omni/kit/raycast/query/scripts/__init__.py | from .utils import * | 20 | Python | 19.99998 | 20 | 0.75 |
omniverse-code/kit/exts/omni.kit.raycast.query/omni/kit/raycast/query/scripts/utils.py | __all__ = ["raycast_from_mouse_ndc"]
import omni.kit.raycast.query as rq
from typing import Sequence, Callable
def raycast_from_mouse_ndc(
    mouse_ndc: Sequence[float],
    viewport_api: "ViewportAPI",
    on_complete_fn: Callable
):
    """Cast a ray into the scene from a mouse position given in NDC space.

    Unprojects the near-plane and far-plane points under the mouse through the
    viewport's inverse view-projection matrix, builds a ray between them, and
    submits it to the raycast query interface.

    Args:
        mouse_ndc: (x, y) mouse position in normalized device coordinates.
        viewport_api: Viewport providing ``view`` and ``projection`` matrices.
        on_complete_fn: Callback invoked with ``(Ray, RayQueryResult)``.
    """
    x, y = mouse_ndc[0], mouse_ndc[1]
    unproject = (viewport_api.view * viewport_api.projection).GetInverse()
    ray_origin = unproject.Transform((x, y, -1))
    far_point = unproject.Transform((x, y, 1))
    ray_direction = (far_point - ray_origin).GetNormalized()
    query_iface = rq.acquire_raycast_query_interface()
    query_iface.submit_raycast_query(rq.Ray(ray_origin, ray_direction), on_complete_fn)
| 658 | Python | 28.954544 | 78 | 0.673252 |
omniverse-code/kit/exts/omni.kit.raycast.query/omni/kit/raycast/query/tests/test_raycast.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
from pathlib import Path
import carb
import carb.settings
import omni.kit.app
import omni.kit.commands
import omni.kit.raycast.query
from omni.kit.raycast.query import utils as rq_utils
import omni.kit.test
import omni.usd
from omni import ui
import omni.kit.ui_test as ui_test
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, UsdGeom
SETTING_SECTION_ENABLED = "/rtx/sectionPlane/enabled"
SETTING_SECTION_PLANE = "/rtx/sectionPlane/plane"
DATA_PATH = Path(carb.tokens.get_tokens_interface().resolve("${omni.kit.raycast.query}/data"))
USD_FILES = DATA_PATH.joinpath("tests").joinpath("usd")
class TestRaycast(omni.kit.test.AsyncTestCase):
    """Integration tests for the omni.kit.raycast.query interface."""

    # Before running each test
    async def setUp(self):
        self._settings = carb.settings.get_settings()
        self._app = omni.kit.app.get_app()
        self._context = omni.usd.get_context()
        self._raycast = omni.kit.raycast.query.acquire_raycast_query_interface()

    async def tearDown(self):
        pass

    async def _setup_simple_scene(self):
        # Fresh stage containing a single cube translated along +X.
        await self._context.new_stage_async()
        stage = self._context.get_stage()
        self.assertTrue(stage)
        CUBE_PATH = "/Cube"
        cube = UsdGeom.Cube.Define(stage, CUBE_PATH)
        UsdGeom.XformCommonAPI(cube.GetPrim()).SetTranslate(Gf.Vec3d(123.45, 0, 0))
        return cube

    async def test_single_raycast_query(self):
        await self._setup_simple_scene()
        viewport_api = get_active_viewport()
        # Let the renderer settle so the BVH/scene is ready for queries.
        await viewport_api.wait_for_rendered_frames(5)
        ray = omni.kit.raycast.query.Ray((1000, 0, 0), (-1, 0, 0))
        future = asyncio.Future()

        def callback(ray, result):
            future.set_result(result)

        self._raycast.submit_raycast_query(ray, callback)
        result = await future
        self.assertTrue(result.valid)
        # Cube center is at x=123.45 with half-extent 1, so the hit is at x=124.45.
        self.assertTrue(Gf.IsClose(Gf.Vec3d(*result.hit_position), Gf.Vec3d(124.45, 0, 0), 1e-4))
        self.assertTrue(Gf.IsClose(result.hit_t, 875.551, 1e-4))
        self.assertTrue(Gf.IsClose(Gf.Vec3d(*result.normal), Gf.Vec3d(1, 0, 0), 1e-4))
        self.assertEqual(result.get_target_usd_path(), "/Cube")

    async def test_ray_with_section_tool(self):
        """Test if raycast section plane awareness"""
        await self._context.new_stage_async()
        # Set up a scene with 2 large flattened cubes as floors, one is 3m higher the other
        cube_bottom_path = "/World/Cube_bottom"
        omni.kit.commands.create(
            "CreatePrim",
            prim_type="Cube",
            prim_path=cube_bottom_path,
            select_new_prim=False,
        ).do()
        omni.kit.commands.create(
            "TransformPrimSRTCommand",
            path=cube_bottom_path,
            new_scale=Gf.Vec3d(1000, 1, 1000)
        ).do()
        cube_top_path = "/World/Cube_top"
        cube_top_height = 300.0
        omni.kit.commands.create(
            "CreatePrim",
            prim_type="Cube",
            prim_path=cube_top_path,
            select_new_prim=False,
        ).do()
        omni.kit.commands.create(
            "TransformPrimSRTCommand",
            path=cube_top_path,
            new_scale=Gf.Vec3d(1000, 1, 1000),
            new_translation=Gf.Vec3d(0, cube_top_height, 0)
        ).do()
        await self._app.next_update_async()
        # Enable a section plane between the two floors; everything above
        # section_plane_height is culled.
        section_plane_height = 150
        self._settings.set(SETTING_SECTION_ENABLED, True)
        self._settings.set(SETTING_SECTION_PLANE, [0, -1, 0, section_plane_height])

        async def submit_ray_and_wait_for_result(ray):
            # Helper: submit the ray and await its result via a Future.
            future = asyncio.Future()

            def callback(ray, result):
                future.set_result(result)

            self._raycast.submit_raycast_query(ray, callback)
            return await future

        #### Test 1, ray origin is culled, and direction points away from section plane ####
        ray = omni.kit.raycast.query.Ray((0, section_plane_height + 10, 0), (0, 1, 0))
        result = await submit_ray_and_wait_for_result(ray)
        # no valid result should return
        self.assertFalse(result.valid)

        #### Test 1.1, ray origin is culled, and direction points away from section plane, but no section awareness ####
        ray = omni.kit.raycast.query.Ray((0, section_plane_height + 10, 0), (0, 1, 0), adjust_for_section=False)
        result = await submit_ray_and_wait_for_result(ray)
        # ray should hit Top Cube even though it's culled
        self.assertTrue(result.valid)
        self.assertAlmostEqual(result.hit_position[1], cube_top_height - 1.0)

        #### Test 2, ray origin is culled, but direction points towards section plane ####
        ray = omni.kit.raycast.query.Ray((0, cube_top_height + 20, 0), (0, -1, 0))
        result = await submit_ray_and_wait_for_result(ray)
        # ray should skip top Cube and hit bottom Cube
        self.assertTrue(result.valid)
        self.assertAlmostEqual(result.hit_position[1], 1.0)

        #### Test 2.1, ray origin is culled, but direction points towards section plane, but no section awareness ####
        ray = omni.kit.raycast.query.Ray((0, cube_top_height + 20, 0), (0, -1, 0), adjust_for_section=False)
        result = await submit_ray_and_wait_for_result(ray)
        # ray should hit top Cube even though it's culled
        self.assertTrue(result.valid)
        self.assertAlmostEqual(result.hit_position[1], cube_top_height + 1.0)

        #### Test 3, ray origin is not culled, but direction points towards section plane ####
        ray = omni.kit.raycast.query.Ray((0, section_plane_height - 10, 0), (0, 1, 0))
        result = await submit_ray_and_wait_for_result(ray)
        # no valid result should return
        self.assertFalse(result.valid)

        #### Test 4, ray origin is not culled, and direction points away from section plane ####
        ray = omni.kit.raycast.query.Ray((0, section_plane_height - 10, 0), (0, -1, 0))
        result = await submit_ray_and_wait_for_result(ray)
        # ray should hit bottom Cube
        self.assertTrue(result.valid)
        self.assertAlmostEqual(result.hit_position[1], 1.0)
        # Restore the section-plane setting so later tests are unaffected.
        self._settings.set(SETTING_SECTION_ENABLED, False)

    async def test_raycast_from_mouse_ndc(self):
        await self._context.new_stage_async()
        omni.kit.commands.create(
            "CreatePrim",
            prim_type="Cube",
            prim_path="/World/Cube",
            select_new_prim=False,
        ).do()
        omni.kit.commands.create(
            "TransformPrimSRTCommand",
            path="/World/Cube",
            new_scale=Gf.Vec3d(1000, 1, 1000)
        ).do()
        viewport_api = get_active_viewport()
        await viewport_api.wait_for_rendered_frames(5)
        future = asyncio.Future()

        def callback(ray, result):
            future.set_result(result)

        # Cast from the center of the viewport (NDC origin).
        rq_utils.raycast_from_mouse_ndc(
            (0, 0),
            viewport_api,
            callback,
        )
        result = await future
        self.assertTrue(result.valid)
        self.assertTrue(Gf.IsClose(Gf.Vec3d(*result.hit_position), Gf.Vec3d(1, 1, 1), 1e-4))
        self.assertTrue(Gf.IsClose(result.hit_t, 863.2943, 1e-4))
        self.assertTrue(Gf.IsClose(Gf.Vec3d(*result.normal), Gf.Vec3d(0, 1, 0), 1e-4))
        self.assertEqual(result.get_target_usd_path(), "/World/Cube")

    async def test_viewport_drag_and_drop(self):
        await self._context.new_stage_async()
        stage = self._context.get_stage()
        # Create a floor
        floor = UsdGeom.Cube.Define(stage, "/World/floor")
        UsdGeom.XformCommonAPI(floor.GetPrim()).SetTranslate(Gf.Vec3d(0, 100, 0))
        UsdGeom.XformCommonAPI(floor.GetPrim()).SetScale(Gf.Vec3f(500, 1, 500))
        await ui_test.wait_n_updates(10)
        # Create an area in test window as drag source
        drag_window_size = 80
        drag_source_window = ui.Window(
            "TestDrag",
            width=drag_window_size,
            height=drag_window_size
        )
        with drag_source_window.frame:
            stack = ui.ZStack()
            with stack:
                ui.Rectangle(width=drag_window_size, height=drag_window_size)

        def on_drag():
            # Payload dropped onto the viewport: path to the test USD file.
            return str(USD_FILES.joinpath("test_drag_drop.usda"))

        stack.set_drag_fn(on_drag)
        # Layout window
        app_window = omni.appwindow.get_default_app_window()
        window_width = app_window.get_width()
        window_height = app_window.get_height()
        vp_window = ui.Workspace.get_window("Viewport Next")
        vp_width = int(window_width / 2)
        vp_height = int(window_height / 2)
        vp_window.position_x = 0
        vp_window.position_y = 0
        vp_window.width = vp_width
        vp_window.height = vp_height
        # Place the drag source just below the viewport so it does not overlap.
        drag_source_window.position_x = 0
        drag_source_window.position_y = vp_height + 10
        drag_source_window.focus()
        await ui_test.wait_n_updates(10)
        # Drag and drop
        rect = ui_test.find("TestDrag//Frame/**/Rectangle[*]")
        await ui_test.human_delay(20)
        vp_center = ui_test.Vec2(
            int(vp_window.position_x + vp_window.width / 2),
            int(vp_window.position_y + vp_window.height / 2)
        )
        await rect.drag_and_drop(vp_center)
        await ui_test.human_delay(20)
        # Check dropped prim
        dropped_path = "/test_drag_drop"
        droped_prim = stage.GetPrimAtPath(dropped_path)
        self.assertTrue(droped_prim)
        world_mtx = omni.usd.get_world_transform_matrix(droped_prim)
        dropped_pos = world_mtx.ExtractTranslation()
        # Expected height = floor height + floor thickness
        self.assertTrue(Gf.IsClose(dropped_pos[1], 101.0, 1e-2))
        await ui_test.human_delay(20)
        if drag_source_window:
            drag_source_window.destroy()
            del drag_source_window
| 10,261 | Python | 35.781362 | 120 | 0.621869 |
omniverse-code/kit/exts/omni.kit.raycast.query/omni/kit/raycast/query/tests/__init__.py | from .test_raycast import *
| 28 | Python | 13.499993 | 27 | 0.75 |
omniverse-code/kit/exts/omni.kit.raycast.query/docs/index.rst | omni.kit.raycast.query
###########################
RTX Raycast Query extension:
.. toctree::
:maxdepth: 1
README.md
CHANGELOG.md
| 144 | reStructuredText | 12.181817 | 28 | 0.541667 |
omniverse-code/kit/exts/omni.kit.window.script_editor/config/extension.toml | [package]
version = "1.7.2"
authors = ["NVIDIA"]
title = "Script Editor"
description = "Window to edit and run python scripts."
repository = ""
category = "Internal"
feature = true
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.ui" = {}
"omni.kit.actions.core" = {}
"omni.kit.hotkeys.core" = {}
"omni.kit.widget.filebrowser" = {}
"omni.kit.window.filepicker" = {}
[[native.library]]
path = "bin/${lib_prefix}omni.kit.window.script_editor.plugin${lib_ext}"
[[python.module]]
name = "omni.kit.window.script_editor"
[settings]
# Folder to look for snippets. Snippets are just .py files, where first line is a comment with snippet name. Snippet
# name can contain one '/' to define category. E.g. # Settings/Get Setting
exts."omni.kit.window.script_editor".snippetFolders = [
"${kit}/snippets",
"${shared_documents}/snippets"
]
# Clear text from the editor tab after it has been executed?
persistent.exts."omni.kit.window.script_editor".clearAfterExecute = false
# Color palette to use. Can be "Dark", "Light" or "Retro Blue".
persistent.exts."omni.kit.window.script_editor".editorPalette = "Dark"
# If you open or reload a script file, should it be automatically executed?
persistent.exts."omni.kit.window.script_editor".executeOnReload = false
# Size, in points, of font used in the log window and editor tabs.
# If the exact size is not supported the nearest one will be used.
persistent.exts."omni.kit.window.script_editor".fontSize = 14
# Set the path to the font of the bottom part of the window
persistent.exts."omni.kit.window.script_editor".font = ""
# Write to a temp file before executing. That allows the inspect and ast standard modules to retrieve source code and work correctly.
exts."omni.kit.window.script_editor".executeInTempFile = true
[[test]]
dependencies = [
"omni.kit.ui_test",
]
stdoutFailPatterns.exclude = [
"*SyntaxError*" # We have syntax error produced on purpose during testing
]
| 1,962 | TOML | 29.671875 | 124 | 0.726809 |
omniverse-code/kit/exts/omni.kit.window.script_editor/omni/kit/window/script_editor/__init__.py | from ._script_editor import *
from .scripts import * | 53 | Python | 16.999994 | 29 | 0.735849 |
omniverse-code/kit/exts/omni.kit.window.script_editor/omni/kit/window/script_editor/scripts/__init__.py | from .script_editor_extension import ScriptEditorExtension, ScriptEditorWindow | 78 | Python | 77.999922 | 78 | 0.897436 |
omniverse-code/kit/exts/omni.kit.window.script_editor/omni/kit/window/script_editor/scripts/script_editor_extension.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ScriptEditorExtension"]
import asyncio
import omni.ext
import omni.kit.ui
import omni.ui as ui
from functools import partial
from .script_editor_window import ScriptEditorWindow
class ScriptEditorExtension(omni.ext.IExt):
    """The entry point for Script Editor Window"""

    WINDOW_NAME = "Script Editor"
    MENU_PATH = f"Window/{WINDOW_NAME}"

    def on_startup(self, ext_id):
        # Extension name is needed later to register/deregister actions and hotkeys.
        self._ext_name = omni.ext.get_extension_name(ext_id)
        # Let the workspace show/hide this window by name (e.g. layout restore).
        ui.Workspace.set_show_window_fn(ScriptEditorExtension.WINDOW_NAME, partial(self.show_window, None))
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            # off by default
            self._menu = editor_menu.add_item(ScriptEditorExtension.MENU_PATH, self.show_window, toggle=True, value=False)
        self._window = None

    def on_shutdown(self):
        self._menu = None
        # Window destruction is deferred one frame; see _destroy_window_async.
        asyncio.ensure_future(self._destroy_window_async())
        ui.Workspace.set_show_window_fn(ScriptEditorExtension.WINDOW_NAME, None)

    def _set_menu(self, value):
        """Set the menu to create this window on and off"""
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.set_value(ScriptEditorExtension.MENU_PATH, value)

    async def _destroy_window_async(self):
        # wait one frame, this is due to the one frame defer
        # in Window::_moveToMainOSWindow()
        await omni.kit.app.get_app().next_update_async()
        if self._window:
            self._window.deregister_hotkeys(self._ext_name)
            self._window.deregister_actions(self._ext_name)
            self._window.destroy()
            self._window = None

    def _visibility_changed_fn(self, visible):
        # Keep the Window menu checkbox in sync with actual window visibility.
        self._set_menu(visible)

    def show_window(self, menu, value):
        if value:
            if self._window:
                # If we already have the window hidden, just show it, rather than creating a new one
                self._window.visible = True
            else:
                self._window = ScriptEditorWindow()
                self._window.set_visibility_changed_listener(self._visibility_changed_fn)
                self._window.register_actions(self._ext_name)
                self._window.register_hotkeys(self._ext_name)
        elif self._window:
            self._window.visible = False
| 2,778 | Python | 35.565789 | 122 | 0.660907 |
omniverse-code/kit/exts/omni.kit.window.script_editor/omni/kit/window/script_editor/scripts/script_editor_menu.py | import carb
import omni.client
from omni.kit.widget.filebrowser import FileBrowserItem
from omni.kit.window.filepicker import FilePickerDialog
import os
def _encode_content(content):
if type(content) == str:
payload = bytes(content.encode("utf-8"))
elif type(content) != type(None):
payload = bytes(content)
else:
payload = bytes()
return payload
class MenuOptions:
    """File-menu actions (Open / Save / Save As) for the Script Editor window.

    Wraps FilePickerDialog to let the user pick a script path, then reads or
    writes the file through omni.client using callbacks supplied by the owner.
    """

    def __init__(self, **kwargs):
        # Currently open FilePickerDialog, if any.
        self._dialog = None
        # Last filename/directory chosen; used to pre-populate the next dialog.
        self._current_filename = None
        self._current_dir = None
        # Owner-supplied callbacks:
        #   load_data(filename, text)   - push loaded text into the editor
        #   save_data(filename) -> str  - pull the text to be written
        #   get_file_path() -> str      - path of the currently open script
        self.load_data = kwargs.pop("load_data", None)
        self.save_data = kwargs.pop("save_data", None)
        self.get_file_path = kwargs.pop("get_file_path", None)

    def destroy(self):
        # Tear down any open dialog; called by the owner before disposal.
        if self._dialog:
            self._dialog.destroy()

    def __on_apply_save(self, filename: str, dir: str):
        """Called when the user press "Export" in the pick filename dialog"""
        # don't accept as long as no filename is selected
        if not filename or not dir:
            return
        self._dialog.hide()
        # add the file extension if missing
        # (filter option 0 is "Python Files (*.py)")
        if self._dialog.current_filter_option == 0 and not filename.lower().endswith(".py"):
            filename += ".py"
        self._current_filename = filename
        self._current_dir = dir
        # add a trailing slash for the client library
        if dir[-1] != os.sep:
            dir = dir + os.sep
        current_export_path = omni.client.combine_urls(dir, filename)
        self.save(current_export_path)

    def __on_apply_load(self, filename: str, dir: str):
        """Called when the user press "Export" in the pick filename dialog"""
        # don't accept as long as no filename is selected
        if not filename or not dir:
            return
        self._dialog.hide()
        # add a trailing slash for the client library
        if dir[-1] != os.sep:
            dir = dir + os.sep
        # OM-93082: Fix issue with omni.client.combine_urls will eat up the subfolder path for omniverse urls if
        # the dirname is ending with "\" on windows
        dir = dir.replace("\\", "/")
        current_path = omni.client.combine_urls(dir, filename)
        # First try the name exactly as typed; if it does not exist, retry
        # with the ".py" extension appended.
        if omni.client.stat(current_path)[0] != omni.client.Result.OK:
            # add the file extension if missing
            if self._dialog.current_filter_option == 0 and not filename.lower().endswith(".py"):
                filename += ".py"
            current_path = omni.client.combine_urls(dir, filename)
            if omni.client.stat(current_path)[0] != omni.client.Result.OK:
                # Still can't find
                return
        self._current_filename = filename
        self._current_dir = dir
        self.open(current_path)

    def __menu_filter_files(self, item: FileBrowserItem) -> bool:
        """Used by pick folder dialog to hide all the files"""
        # Folders (and the null item) are always visible so the user can navigate.
        if not item or item.is_folder:
            return True
        if self._dialog.current_filter_option == 0:
            # Show only files with listed extensions
            if item.path.endswith(".py"):
                return True
            else:
                return False
        else:
            # Show All Files (*)
            return True

    def menu_open(self):
        """Open "Open" dialog"""
        if self._dialog:
            self._dialog.destroy()
        self._dialog = FilePickerDialog(
            "Open...",
            allow_multi_selection=False,
            apply_button_label="Open",
            click_apply_handler=self.__on_apply_load,
            item_filter_options=["Python Files (*.py)", "Any file (*.*)"],
            item_filter_fn=self.__menu_filter_files,
            current_filename=self._current_filename,
            current_directory=self._current_dir,
        )

    def menu_save(self):
        # Save in place if the tab already has a file; otherwise prompt for a path.
        file_path = self.get_file_path()
        if file_path:
            self.save(file_path)
        else:
            self.menu_save_as()

    def menu_save_as(self):
        """Open "Save As" dialog"""
        if self._dialog:
            self._dialog.destroy()
        self._dialog = FilePickerDialog(
            "Save As...",
            allow_multi_selection=False,
            apply_button_label="Save",
            click_apply_handler=self.__on_apply_save,
            item_filter_options=["Python Files (*.py)", "Any file (*.*)"],
            item_filter_fn=self.__menu_filter_files,
            current_filename=self._current_filename,
            current_directory=self._current_dir,
        )

    def save(self, filename: str):
        """Save the current model to external file"""
        data = self.save_data(filename)
        if not data:
            return
        # Save to the file
        result = omni.client.write_file(filename, _encode_content(data))
        if result != omni.client.Result.OK:
            carb.log_error(f"[omni.kit.window.script_editor] Cannot write to (unknown), error code: {result}")
            return
        carb.log_info(f"[omni.kit.window.script_editor] The scripts have saved to (unknown)")

    def open(self, filename: str):
        """Load the model from the file"""
        result, _, content = omni.client.read_file(filename)
        if result != omni.client.Result.OK:
            carb.log_error(f"[omni.kit.window.script_editor] Can't read file (unknown), error code: {result}")
            return
        # omni.client returns a buffer object; decode it to text for the editor.
        data = memoryview(content).tobytes().decode("utf-8")
        self.load_data(filename, data)
| 5,522 | Python | 32.676829 | 112 | 0.573886 |
omniverse-code/kit/exts/omni.kit.window.script_editor/omni/kit/window/script_editor/scripts/script_editor_window.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ScriptEditorWindow"]
import omni.ui as ui
from .._script_editor import ScriptEditorWidget
from .script_editor_menu import MenuOptions
from omni.kit.actions.core import get_action_registry
import omni.kit.hotkeys.core as hotkeys
_DEFAULT_HOTKEY_MAP = {
"Open": "ALT+O",
"Save": "ALT+S",
"Save As": "SHIFT+ALT+S",
}
class ScriptEditorWindow(ui.Window):
    """The Script Editor window"""

    def __init__(self):
        self._title = "Script Editor"
        super().__init__(
            self._title,
            width=800,
            height=600,
            flags=ui.WINDOW_FLAGS_MENU_BAR,
            raster_policy=ui.RasterPolicy.NEVER
        )
        self._visiblity_changed_listener = None
        self.set_visibility_changed_fn(self._visibility_changed_fn)
        # Main content: the native script-editor widget.
        with self.frame:
            self._script_editor_widget = ScriptEditorWidget()
        # File menu, backed by MenuOptions (Open / Save / Save As).
        with self.menu_bar:
            self._menu_option = MenuOptions(
                load_data=self.load_data,
                save_data=self.save_data,
                get_file_path=self.get_file_path,
            )
            with ui.Menu("File"):
                ui.MenuItem("Open", hotkey_text="Alt + O", triggered_fn=self._menu_option.menu_open)
                ui.MenuItem("Save", hotkey_text="Alt + S", triggered_fn=self._menu_option.menu_save)
                ui.MenuItem("Save As...", hotkey_text="Shift + Alt + S", triggered_fn=self._menu_option.menu_save_as)

    def register_actions(self, extension_id: str):
        # Register Open/Save/Save As as global actions so hotkeys can bind to them.
        action_registry = get_action_registry()
        actions_tag = "Script Editor File Actions"
        action_registry.register_action(
            extension_id,
            "Open",
            self._menu_option.menu_open,
            display_name="Open",
            description="Open a script",
            tag=actions_tag,
        )
        action_registry.register_action(
            extension_id,
            "Save",
            self._menu_option.menu_save,
            display_name="Save",
            description="Save the script from the current tab to a file",
            tag=actions_tag,
        )
        action_registry.register_action(
            extension_id,
            "Save As",
            self._menu_option.menu_save_as,
            display_name="Save As...",
            description="Save the script from the current tab as a file",
            tag=actions_tag,
        )

    def deregister_actions(self, extension_id):
        action_registry = get_action_registry()
        action_registry.deregister_all_actions_for_extension(extension_id)

    def register_hotkeys(self, extension_id: str):
        # Bind default key combos (_DEFAULT_HOTKEY_MAP) to the registered actions.
        hotkey_registry = hotkeys.get_hotkey_registry()
        action_registry = get_action_registry()
        ext_actions = action_registry.get_all_actions_for_extension(extension_id)
        for action in ext_actions:
            key = _DEFAULT_HOTKEY_MAP.get(action.id, None)
            # Not all Actions will have default hotkeys
            if not key:
                continue
            hotkey_registry.register_hotkey(
                hotkey_ext_id=extension_id,
                key=key,
                action_ext_id=action.extension_id,
                action_id=action.id,
                filter=None,
            )

    def deregister_hotkeys(self, extension_id: str):
        hotkey_registry = hotkeys.get_hotkey_registry()
        hotkey_registry.deregister_all_hotkeys_for_extension(extension_id)

    def load_data(self, filename, data):
        # Push loaded script text into the editor widget.
        self._script_editor_widget.load_script(filename, data)

    def save_data(self, filename):
        # Pull the current tab's text for writing to disk.
        return self._script_editor_widget.save_script(filename)

    def clear_data(self):
        self._script_editor_widget.clear_script()

    def get_file_path(self):
        return self._script_editor_widget.get_script_path()

    def _visibility_changed_fn(self, visible):
        if self._visiblity_changed_listener:
            self._visiblity_changed_listener(visible)

    def set_visibility_changed_listener(self, listener):
        self._visiblity_changed_listener = listener

    def destroy(self):
        """
        Called by extension before destroying this object. It doesn't happen automatically.
        Without this hot reloading doesn't work.
        """
        if self._script_editor_widget:
            self._script_editor_widget.destroy()
            self._script_editor_widget = None
        self._visiblity_changed_listener = None
        super().destroy()
| 4,914 | Python | 33.612676 | 117 | 0.613553 |
omniverse-code/kit/exts/omni.kit.window.script_editor/omni/kit/window/script_editor/tests/test_script_editor.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.test
import omni.kit.app
from omni.kit.test import AsyncTestCase
import omni.kit.ui_test as ui_test
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
from omni.kit.window.script_editor.scripts import ScriptEditorWindow
import omni.ui as ui
CURRENT_PATH = Path(__file__).parent.joinpath("../../../../../data")
# Use it to receive extraterrestrial messages from executing script
public_mailbox = ""
class TestScriptEditor(AsyncTestCase):
# Before running each test
async def setUp(self):
global public_mailbox
public_mailbox = ""
window = ScriptEditorWindow()
await ui_test.wait_n_updates(2)
self.editor = ui_test.find("Script Editor")
await self.editor.focus()
# Close tab and clear script
await ui_test.emulate_key_combo("CTRL+W")
window.clear_data()
# Focus on text field
await ui_test.emulate_mouse_move_and_click(ui_test.Vec2(700, 500))
async def test_script_run(self):
await ui_test.emulate_char_press("import omni.kit.window.script_editor.tests.test_script_editor as editor\n")
await ui_test.emulate_char_press("editor.public_mailbox = 'who is there?'\n")
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, "who is there?")
# Add another line
await ui_test.emulate_char_press("editor.public_mailbox = 123\n")
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, 123)
# New tab
await ui_test.emulate_key_combo("CTRL+N")
await ui_test.emulate_mouse_move_and_click(ui_test.Vec2(700, 500))
await ui_test.emulate_char_press("import omni.kit.window.script_editor.tests.test_script_editor as editor\n")
await ui_test.emulate_char_press("editor.public_mailbox = 'yellow'\n")
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, "yellow")
async def test_script_fail(self):
await ui_test.emulate_char_press("import omni.kit.window.script_editor.tests.test_script_editor as editor\n")
await ui_test.emulate_char_press("editor.public_mailbox = 555\n")
await ui_test.emulate_char_press("x = \n") # typo
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, "")
# fix it
await ui_test.emulate_key_combo("BACKSPACE")
await ui_test.emulate_key_combo("BACKSPACE")
await ui_test.emulate_char_press("3")
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, 555)
async def test_script_get_source(self):
# That tests: exts."omni.kit.window.script_editor".executeInTempFile = true
# If we are not writing to a temp file ast/inspect doesn't work and throws error that it can't find source.
await ui_test.emulate_char_press("""
import ast
import inspect
import omni.kit.window.script_editor.tests.test_script_editor as editor
foo = lambda y: y
lines = inspect.getsource(foo)
r = ast.parse(lines)
editor.public_mailbox = r is not None
""")
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, True)
async def test_close_and_reopen_window(self):
"""Tests to make sure that content remains in the window even after it has been closed and reopened."""
global public_mailbox
await ui_test.emulate_char_press("import omni.kit.window.script_editor.tests.test_script_editor as editor\n")
await ui_test.emulate_char_press("editor.public_mailbox = 'Hello World'\n")
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, "Hello World")
await ui_test.wait_n_updates(2)
self.editor._widget.visible = False
await ui_test.wait_n_updates(2)
public_mailbox = ""
self.editor._widget.visible = True
await ui_test.wait_n_updates(2)
await ui_test.emulate_key_combo("CTRL+ENTER")
self.assertEqual(public_mailbox, "Hello World")
| 4,815 | Python | 39.813559 | 117 | 0.681412 |
omniverse-code/kit/exts/omni.kit.window.script_editor/omni/kit/window/script_editor/tests/__init__.py | from .test_script_editor import *
| 34 | Python | 16.499992 | 33 | 0.764706 |
omniverse-code/kit/exts/omni.kit.test_suite.context_browser/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
category = "Internal"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "omni.kit.test_suite.context_browser"
description="omni.kit.test_suite.context_browser"
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# Keywords for the extension
keywords = ["kit", "ui", "test"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog = "docs/CHANGELOG.md"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
[[python.module]]
name = "omni.kit.test_suite.context_browser"
[dependencies]
"omni.kit.test" = {}
"omni.kit.test_helpers_gfx" = {}
"omni.kit.renderer.capture" = {}
[[test]]
args = [
"--/renderer/enabled=pxr",
"--/renderer/active=pxr",
"--/renderer/multiGpu/enabled=false",
"--/renderer/multiGpu/autoEnable=false", # Disable mGPU with PXR due to OM-51026, OM-53611
"--/renderer/multiGpu/maxGpuCount=1",
"--/app/asyncRendering=false",
"--/app/file/ignoreUnsavedOnExit=true",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--/persistent/app/omniverse/filepicker/options_menu/show_details=false",
"--/persistent/app/stage/dragDropImport='reference'",
"--no-window"
]
dependencies = [
"omni.kit.mainwindow",
"omni.usd",
"omni.kit.ui_test",
"omni.kit.test_suite.helpers",
"omni.kit.window.stage",
"omni.kit.window.file",
"omni.kit.property.bundle",
"omni.kit.window.status_bar",
"omni.hydra.pxr",
"omni.kit.window.viewport",
"omni.kit.material.library",
"omni.kit.window.content_browser",
]
stdoutFailPatterns.exclude = [
"*HydraRenderer failed to render this frame*", # Can drop a frame or two rendering with OpenGL interop
"*Cannot use omni.hydra.pxr without OpenGL interop*" # Linux TC configs with multi-GPU might not have OpenGL available
]
| 2,211 | TOML | 29.301369 | 122 | 0.695613 |
omniverse-code/kit/exts/omni.kit.test_suite.context_browser/omni/kit/test_suite/context_browser/tests/__init__.py | from .test_content_browser_settings import *
from .test_file_picker_settings import *
| 86 | Python | 27.999991 | 44 | 0.790698 |
omniverse-code/kit/exts/omni.kit.test_suite.context_browser/omni/kit/test_suite/context_browser/tests/test_content_browser_settings.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.usd
import carb
import omni.ui as ui
from omni.kit.test.async_unittest import AsyncTestCase
from omni.kit import ui_test
from omni.kit.test_suite.helpers import get_test_data_path, wait_stage_loading, arrange_windows
class TestContentBrowserSettings(AsyncTestCase):
    """Checks that the content browser's config-menu settings control which items it lists.

    Each scenario applies a settings dictionary, confirms the menu reports the
    same state back, and compares the sorted file listing of the test folder.
    """

    # Before running each test
    async def setUp(self):
        super().setUp()
        await arrange_windows()

    async def _verify_settings(self, browser_helper, get_files, settings, expected_files):
        """Apply ``settings`` via the config menu, assert they round-trip, and assert the listing.

        Args:
            browser_helper: ContentBrowserTestHelper driving the browser UI.
            get_files: async callable returning the sorted item names currently listed.
            settings: dict passed to set_config_menu_settings; the menu must report it back unchanged.
            expected_files: sorted list of item names expected under these settings.
        """
        await browser_helper.set_config_menu_settings(settings)
        menu_state = await browser_helper.get_config_menu_settings()
        self.assertEqual(menu_state, settings)
        self.assertEqual(await get_files(), expected_files)

    async def test_content_browser_settings(self):
        from omni.kit.window.content_browser import get_content_window
        from omni.kit.window.content_browser.test_helper import ContentBrowserTestHelper

        async with ContentBrowserTestHelper() as content_browser_helper:
            path = get_test_data_path(__name__, "folder1/")

            async def get_files():
                return sorted([c.name for c in await content_browser_helper.select_items_async(path, "*")])

            # all off: everything is listed, UDIM tiles shown individually
            await self._verify_settings(
                content_browser_helper,
                get_files,
                {"hide_unknown": False, "hide_thumbnails": False, "show_udim_sequence": False, "show_details": False},
                [".thumbs", "badfile.cheese", "cheese.1041.png", "file.9999.xxx.png", "tile.1001.png", "tile.1200.png", "udim.1001.jpg", "udim.1200.jpg"],
            )
            # unknown enabled: files with unrecognized extensions are hidden
            await self._verify_settings(
                content_browser_helper,
                get_files,
                {"hide_unknown": True, "hide_thumbnails": False, "show_udim_sequence": False, "show_details": False},
                [".thumbs", "cheese.1041.png", "file.9999.xxx.png", "tile.1001.png", "tile.1200.png", "udim.1001.jpg", "udim.1200.jpg"],
            )
            # thumbs enabled: the .thumbs folder is hidden
            await self._verify_settings(
                content_browser_helper,
                get_files,
                {"hide_unknown": False, "hide_thumbnails": True, "show_udim_sequence": False, "show_details": False},
                ["badfile.cheese", "cheese.1041.png", "file.9999.xxx.png", "tile.1001.png", "tile.1200.png", "udim.1001.jpg", "udim.1200.jpg"],
            )
            # udim enabled: numbered tile sequences collapse to one <UDIM> entry
            await self._verify_settings(
                content_browser_helper,
                get_files,
                {"hide_unknown": False, "hide_thumbnails": False, "show_udim_sequence": True, "show_details": False},
                [".thumbs", "badfile.cheese", "cheese.<UDIM>.png", "file.9999.xxx.png", "tile.<UDIM>.png", "udim.<UDIM>.jpg"],
            )
            # all enabled: combined effect of the three filters above
            await self._verify_settings(
                content_browser_helper,
                get_files,
                {"hide_unknown": True, "hide_thumbnails": True, "show_udim_sequence": True, "show_details": False},
                ["cheese.<UDIM>.png", "file.9999.xxx.png", "tile.<UDIM>.png", "udim.<UDIM>.jpg"],
            )
| 4,230 | Python | 67.241934 | 186 | 0.678487 |
omniverse-code/kit/exts/omni.kit.test_suite.context_browser/omni/kit/test_suite/context_browser/tests/test_file_picker_settings.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.usd
import carb
from functools import partial
from omni.kit.test.async_unittest import AsyncTestCase
from omni.kit.window.file_importer import get_file_importer
from omni.kit.test_suite.helpers import get_test_data_path, wait_stage_loading, arrange_windows
from omni.kit import ui_test
class TestFilePickerSettings(AsyncTestCase):
    """Checks that the file importer's config-menu settings control which items its picker lists.

    Each scenario applies a settings dictionary, confirms the menu reports the
    same state back, and compares the sorted file listing of the test folder.
    (Also removes a duplicated ``get_config_menu_settings`` call present in the
    original "all enabled" scenario.)
    """

    # Before running each test
    async def setUp(self):
        super().setUp()
        await arrange_windows()

    async def _verify_settings(self, import_helper, get_files, settings, expected_files):
        """Apply ``settings`` via the config menu, assert they round-trip, and assert the listing.

        Args:
            import_helper: FileImporterTestHelper driving the picker UI.
            get_files: async callable returning the sorted item names currently listed.
            settings: dict passed to set_config_menu_settings; the menu must report it back unchanged.
            expected_files: sorted list of item names expected under these settings.
        """
        await import_helper.set_config_menu_settings(settings)
        menu_state = await import_helper.get_config_menu_settings()
        self.assertEqual(menu_state, settings)
        self.assertEqual(await get_files(), expected_files)

    async def test_file_picker_settings(self):
        from omni.kit.window.content_browser import get_content_window
        from omni.kit.window.file_importer.test_helper import FileImporterTestHelper

        async with FileImporterTestHelper() as file_import_helper:
            path = get_test_data_path(__name__, "folder1/")

            async def get_files():
                return sorted([c.name for c in await file_import_helper.select_items_async(path, "*")])

            file_importer = get_file_importer()
            file_importer.show_window(
                title="File Picker Test",
                import_button_label="Ok",
                file_extension_types=[("*.*", "All Files")],
                filename_url=path,
            )
            await ui_test.human_delay(50)

            # all off: everything is listed, UDIM tiles shown individually
            await self._verify_settings(
                file_import_helper,
                get_files,
                {"hide_thumbnails": False, "show_udim_sequence": False, "show_details": False},
                [".thumbs", "badfile.cheese", "cheese.1041.png", "file.9999.xxx.png", "tile.1001.png", "tile.1200.png", "udim.1001.jpg", "udim.1200.jpg"],
            )
            # thumbs enabled: the .thumbs folder is hidden
            await self._verify_settings(
                file_import_helper,
                get_files,
                {"hide_thumbnails": True, "show_udim_sequence": False, "show_details": False},
                ["badfile.cheese", "cheese.1041.png", "file.9999.xxx.png", "tile.1001.png", "tile.1200.png", "udim.1001.jpg", "udim.1200.jpg"],
            )
            # udim enabled: numbered tile sequences collapse to one <UDIM> entry
            await self._verify_settings(
                file_import_helper,
                get_files,
                {"hide_thumbnails": False, "show_udim_sequence": True, "show_details": False},
                [".thumbs", "badfile.cheese", "cheese.<UDIM>.png", "file.9999.xxx.png", "tile.<UDIM>.png", "udim.<UDIM>.jpg"],
            )
            # all enabled: combined effect of both filters above
            await self._verify_settings(
                file_import_helper,
                get_files,
                {"hide_thumbnails": True, "show_udim_sequence": True, "show_details": False},
                ["badfile.cheese", "cheese.<UDIM>.png", "file.9999.xxx.png", "tile.<UDIM>.png", "udim.<UDIM>.jpg"],
            )

            file_importer.hide_window()
            await ui_test.human_delay(50)
| 3,972 | Python | 56.579709 | 186 | 0.660624 |
omniverse-code/kit/exts/omni.kit.test_suite.context_browser/docs/README.md | # omni.kit.test_suite.context_browser
## omni.kit.test_suite.context_browser
Test Suite
| 92 | Markdown | 10.624999 | 38 | 0.75 |
omniverse-code/kit/exts/omni.kit.test_suite.context_browser/docs/index.rst | omni.kit.test_suite.context_browser
###################################
context browser tests
.. toctree::
:maxdepth: 1
CHANGELOG
| 132 | reStructuredText | 12.299999 | 35 | 0.515152 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.