file_path
stringlengths 22
162
| content
stringlengths 19
501k
| size
int64 19
501k
| lang
stringclasses 1
value | avg_line_length
float64 6.33
100
| max_line_length
int64 18
935
| alphanum_fraction
float64 0.34
0.93
|
---|---|---|---|---|---|---|
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial11/OgnTutorialComplexDataPy.py | """
Implementation of a node handling complex attribute data
"""
# This class exercises access to the DataModel through the generated database class for a representative set of
# complex data types, including tuples, arrays, arrays of tuples, and role-based attributes. More details on
# individual type definitions can be found in the earlier C++ tutorial nodes where each of those types are
# explored in detail.
# Any Python node with array attributes will receive its data wrapped in a numpy array for efficiency.
# Unlike C++ includes, a Python import is not transitive so this has to be explicitly imported here.
import numpy
import omni.graph.core as og
class OgnTutorialComplexDataPy:
    """Exercise a sample of complex data types through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """
        Multiply a float array by a float[3] to yield a float[3] array, using the point3f role.

        Practically speaking the data in the role-based attributes is no different than the underlying raw data
        types. The role only helps you understand what the intention behind the data is, e.g. to differentiate
        surface normals and colours, both of which might have float[3] types.

        Args:
            db: Generated database giving access to the node's attribute values

        Returns:
            True when the compute succeeded
        """
        # Verify that the output array was correctly set up to have a "point" role
        assert db.role.outputs.a_productArray == og.AttributeRole.POSITION

        multiplier = db.inputs.a_vectorMultiplier
        input_array = db.inputs.a_inputArray
        input_array_size = len(input_array)

        # The output array should have the same number of elements as the input array.
        # Setting the size informs fabric that when it retrieves the data it should allocate this much space.
        db.outputs.a_productArray_size = input_array_size

        # The assertions illustrate the type of data that should have been received for inputs and set for outputs
        assert isinstance(multiplier, numpy.ndarray)  # numpy.ndarray is the underlying type of tuples
        assert multiplier.shape == (3,)
        assert isinstance(input_array, numpy.ndarray)  # numpy.ndarray is the underlying type of simple arrays
        assert input_array.shape == (input_array_size,)

        # If the input array is empty then the output is empty and does not need any computing
        if input_array.shape[0] == 0:
            db.outputs.a_productArray = []
            assert db.outputs.a_productArray.shape == (0, 3)
            return True

        # numpy has a nice little method for replicating the multiplier vector the number of times required
        # by the size of the input array.
        # e.g. numpy.tile( [1, 2], (3, 1) ) yields [[1, 2], [1, 2], [1, 2]]
        product = numpy.tile(multiplier, (input_array_size, 1))
        # Scale row i by input element i in one vectorized in-place multiply instead of a Python-level
        # loop over the rows. Broadcasting the input up to a column vector pairs each element with its
        # row; the in-place form also preserves the tiled array's dtype, exactly like the original
        # per-row assignment did.
        product *= input_array[:, numpy.newaxis]
        db.outputs.a_productArray = product

        # Make sure the correct type of array was produced
        assert db.outputs.a_productArray.shape == (input_array_size, 3)

        # Create the output token array by copying the input array with the elements changed to strings
        db.outputs.a_tokenArray = numpy.array([str(x) for x in input_array])
        # Note that at the time of this writing you are not able to assign token arrays element-by-element as you
        # might do for other types of arrays. So this coding method, usable for other types of arrays such as
        # float[], would issue a warning and then fail to set the values on the output:
        #   db.outputs.a_tokenArray_size = input_array_size
        #   for index, element in enumerate(input_array):
        #       db.outputs.a_tokenArray[index] = str(element)
        return True
| 3,862 | Python | 51.202702 | 118 | 0.690316 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial12/OgnTutorialABIPy.py | """
Implementation of the Python node accessing all of the simple data types.
This class exercises access to the DataModel through the generated database class for all simple data types.
It implements the same algorithm as the C++ node OgnTutorialABI.cpp
"""
import colorsys
from contextlib import suppress
import carb
import omni.graph.core as og
class OgnTutorialABIPy:
    """Illustrate overrides of the various ABI functions available to a Python OmniGraph node"""

    @staticmethod
    def compute(context, node) -> bool:
        """
        Convert a color into components using the raw ABI function without the nice Database interface.

        Rarely Overridden:
            Usually you will implement the much more friendly and Pythonic compute(OgnTutorialABIDatabasePy)
            method so that you can have easier access to your data.

        Args:
            context: Graph evaluation context for this compute
            node: The node being evaluated

        Returns:
            True when the compute succeeded
        """
        # Manually acquire the data on the known attributes
        input_color_attr = node.get_attribute("inputs:color")
        # Extract the input value from the controller
        input_color = og.Controller.get(input_color_attr)
        output_hue_attr = node.get_attribute("outputs:h")
        output_saturation_attr = node.get_attribute("outputs:s")
        output_value_attr = node.get_attribute("outputs:v")
        (h, s, v) = colorsys.rgb_to_hsv(*input_color)
        # This exception is triggered if you accidentally reverse the parameters to set_attr_value.
        # The error isn't recovered, to prevent proliferation of inconsistent calls. The exception is
        # thrown to help with debugging. (As this is an example the exception is caught and ignored here.)
        # Only the intentionally incorrect call lives inside the suppression block: when it raises,
        # execution resumes after the block. Previously the correct calls were inside the same block,
        # so the exception skipped them and the outputs were never set.
        with suppress(og.OmniGraphError):
            og.Controller.set(h, output_hue_attr)
        og.Controller.set(output_hue_attr, h)
        og.Controller.set(output_saturation_attr, s)
        og.Controller.set(output_value_attr, v)
        #
        # For comparison, here is the same algorithm implemented using "compute(db)"
        #
        #   def compute(db) -> bool:
        #       (db.outputs.h, db.outputs.s, db.outputs.v) = colorsys.rgb_to_hsv(*db.inputs.color)
        return True

    # ----------------------------------------------------------------------
    @staticmethod
    def get_node_type() -> str:
        """
        Rarely overridden

        This should almost never be overridden as the auto-generated code will handle the name
        """
        carb.log_info("Python ABI override of get_node_type")
        return "omni.graph.tutorials.AbiPy"

    # ----------------------------------------------------------------------
    @staticmethod
    def initialize(graph_context, node):
        """
        Occasionally overridden

        This method might be overridden to set up initial conditions when a node of this type is created.
        Note that overridding this puts the onus on the node writer to set up initial conditions such as
        attribute default values and metadata.
        When a node is created this will be called
        """
        carb.log_info("Python ABI override of initialize")
        # There is no default behaviour on initialize so nothing else is needed for this tutorial to function

    # ----------------------------------------------------------------------
    @staticmethod
    def initialize_type(node_type) -> bool:
        """
        Rarely overridden

        This method might be overridden to set up initial conditions when a node type is registered.
        Note that overriding this puts the onus on the node writer to initialize the attributes and metadata.
        By returning "True" the function is requesting that the attributes and metadata be initialized upon return,
        otherwise the caller will assume that this override has already done that.
        """
        carb.log_info("Python ABI override of initialize_type")
        return True

    # ----------------------------------------------------------------------
    @staticmethod
    def release(node):
        """
        Occasionally overridden

        After a node is removed it will get a release call where anything set up in initialize() can be torn down
        """
        carb.log_info("Python ABI override of release")
        # There is no default behaviour on release so nothing else is needed for this tutorial to function

    # ----------------------------------------------------------------------
    @staticmethod
    def update_node_version(graph_context, node, old_version: int, new_version: int):
        """
        Occasionally overridden

        This is something you do want to override when you have more than version of your node.
        In it you would translate attribute information from older versions into the current one.

        Returns:
            True when an upgrade (old_version < new_version) was requested
        """
        carb.log_info(f"Python ABI override of update_node_version from {old_version} to {new_version}")
        # There is no default behaviour on update_node_version so nothing else is needed for this tutorial to function
        return old_version < new_version

    # ----------------------------------------------------------------------
    @staticmethod
    def on_connection_type_resolve(node):
        """
        Occasionally overridden

        When there is a connection change to this node which results in an extended type attribute being automatically
        resolved, this callback gives the node a change to resolve other extended type attributes. For example a generic
        'Increment' node can resolve its output to an int only after its input has been resolved to an int. Attribute
        types are resolved using omni.graph.attribute.set_resolved_type(), or the utility functions such as
        og.resolve_fully_coupled().
        """
        carb.log_info("Python ABI override of on_connection_type_resolve")
        # There is no default behaviour for on_connection_type_resolve so nothing else is needed for this
        # tutorial to function
| 5,945 | Python | 43.373134 | 120 | 0.632296 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial15/OgnTutorialBundlesPy.py | """
Implementation of the Python node accessing attributes through the bundle in which they are contained.
"""
import omni.graph.core as og
# Types recognized by the integer filter
_INTEGER_TYPES = [og.BaseDataType.INT, og.BaseDataType.UINT, og.BaseDataType.INT64, og.BaseDataType.UINT64]
class OgnTutorialBundlesPy:
    """Exercise the bundled data types through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Implements the same algorithm as the C++ node OgnTutorialBundles.cpp"""
        source_bundle = db.inputs.fullBundle
        candidate_bundle = db.inputs.filteredBundle
        combined_bundle = db.outputs.combinedBundle

        # Copy the entire contents of the full input bundle straight onto the output bundle
        combined_bundle.bundle = source_bundle

        # Decode the filter flags from the tokens in the filters array, once, up front
        active_filters = db.inputs.filters
        skip_big_arrays = "big" in active_filters
        skip_int_types = "int" in active_filters
        skip_names_with_x = "x" in active_filters

        # The "attributes" member is a list that can be iterated. Its members are not real og.Attribute
        # objects, which must always exist; they are wrappers on og.AttributeData objects, which can
        # come and go at runtime.
        for candidate in candidate_bundle.attributes:
            # Apply each filter in turn; any match disqualifies the attribute from being copied
            if skip_int_types and candidate.type.base_type in _INTEGER_TYPES:
                continue
            if skip_names_with_x and "x" in candidate.name:
                continue
            # Bundled attributes report their array size directly (non-arrays report size 1)
            if skip_big_arrays and candidate.size > 10:
                continue
            # Every filter passed, so the attribute is eligible to be copied onto the output
            combined_bundle.insert(candidate)
        return True
| 2,203 | Python | 39.814814 | 113 | 0.671811 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial25/OgnTutorialDynamicAttributesPy.py | """Implementation of the node OgnTutorialDynamicAttributesPy.ogn"""
from contextlib import suppress
from operator import xor
import omni.graph.core as og
class OgnTutorialDynamicAttributesPy:
    """Implementation of the node OgnTutorialDynamicAttributesPy.ogn"""

    @staticmethod
    def compute(db) -> bool:
        """Compute the output based on the input and the presence or absence of dynamic attributes

        Args:
            db: Generated database giving access to the node's attribute values

        Returns:
            True when the compute succeeded
        """
        raw_output = db.inputs.value
        # The suppression of the AttributeError will just skip this section of code if the dynamic attribute
        # is not present
        with suppress(AttributeError):
            # firstBit will invert the bit with its number, if present.
            if 0 <= db.inputs.firstBit <= 31:
                raw_output = xor(raw_output, 2**db.inputs.firstBit)
            else:
                db.log_error(f"Could not xor bit {db.inputs.firstBit}. Must be in [0, 31]")
        with suppress(AttributeError):
            # secondBit will invert the bit with its number, if present
            if 0 <= db.inputs.secondBit <= 31:
                raw_output = xor(raw_output, 2**db.inputs.secondBit)
            else:
                db.log_error(f"Could not xor bit {db.inputs.secondBit}. Must be in [0, 31]")
        with suppress(AttributeError):
            # invert will invert the bits, if the role is set and the attribute access is correct
            _ = db.inputs.invert
            if (
                db.role.inputs.invert == og.AttributeRole.TIMECODE
                and db.attributes.inputs.invert == db.abi_node.get_attribute(db.tokens.invert)
            ):
                raw_output = xor(raw_output, 0xFFFFFFFF)
        db.outputs.result = raw_output
        # The signature declares a bool result; report success explicitly instead of the implicit
        # None every other tutorial compute avoids by returning True.
        return True
| 1,652 | Python | 40.324999 | 108 | 0.621065 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial21/OgnTutorialBundleAddAttributesPy.py | """
Implementation of the Python node adding attributes with a given description to an output bundle.
"""
import omni.graph.core as og
class OgnTutorialBundleAddAttributesPy:
    """Exercise the bundled data types through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Implements the same algorithm as the C++ node OgnTutorialBundleAddAttributes.cpp using the Python
        bindings to the bundle method
        """
        bundle = db.outputs.bundle
        # Every evaluation begins from an empty output bundle
        bundle.clear()

        names_to_add = db.inputs.addedAttributeNames
        names_to_remove = db.inputs.removedAttributeNames

        if db.inputs.useBatchedAPI:
            # Translate each OGN type name into an og.Type, then add and remove in bulk
            types_to_add = [og.AttributeType.type_from_ogn_type_name(type_name) for type_name in db.inputs.typesToAdd]
            bundle.add_attributes(types_to_add, names_to_add)
            bundle.remove_attributes(names_to_remove)
        else:
            # Insert the attributes one at a time as (type, name) pairs
            for type_name, new_name in zip(db.inputs.typesToAdd, names_to_add):
                bundle.insert((og.AttributeType.type_from_ogn_type_name(type_name), new_name))
            # Remove attributes from the bundle that were already added. This is a somewhat contrived
            # operation that allows testing of both adding and removal within a simple environment.
            for doomed_name in names_to_remove:
                bundle.remove(doomed_name)
        return True
| 1,529 | Python | 41.499999 | 116 | 0.689993 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial10/OgnTutorialSimpleDataPy.py | """
Implementation of the Python node accessing all of the simple data types.
This class exercises access to the DataModel through the generated database class for all simple data types.
It implements the same algorithm as the C++ node OgnTutorialSimpleData.cpp
"""
import omni.graph.tools.ogn as ogn
class OgnTutorialSimpleDataPy:
    """Exercise the simple data types through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Perform a trivial computation on all of the simple data types to make testing easy"""
        # The database exposes input values on the contained "inputs" object and output values on the
        # contained "outputs" object. Accessors are named for the attribute with any ":" replaced by "_"
        # (USD uses the colon as a namespace convention, so the substitution is safe). The "inputs:" and
        # "outputs:" prefixes map onto the container names: "inputs:translate:x" is "db.inputs.translate_x"
        # and "outputs:matrix" is "db.outputs.matrix".
        #
        # Each output below differs from its input in a subtle, testable way; the .ogn file describes
        # the tests that verify this.
        db.outputs.a_bool = not db.inputs.a_bool
        db.outputs.a_uchar = 1 + db.inputs.a_uchar
        db.outputs.a_int = 1 + db.inputs.a_int
        db.outputs.a_int64 = 1 + db.inputs.a_int64
        db.outputs.a_uint = 1 + db.inputs.a_uint
        db.outputs.a_uint64 = 1 + db.inputs.a_uint64
        db.outputs.a_half = 1.0 + db.inputs.a_half
        db.outputs.a_float = 1.0 + db.inputs.a_float
        db.outputs.a_double = 1.0 + db.inputs.a_double
        db.outputs.a_string = db.inputs.a_string.replace("hello", "world")
        db.outputs.a_objectId = 1 + db.inputs.a_objectId
        # Tokens can be compared against strings directly; "db.token" also exists for looking up the
        # token ID of a given string when that is needed.
        if db.inputs.a_token == "helloToken":
            db.outputs.a_token = "worldToken"
        # Path just gets a new child named "Child". The string is manipulated directly since this node
        # does not care whether the SdfPath is valid. To do the same through the pxr.Sdf.Path API:
        #
        #   from pxr import Sdf
        #   input_path = Sdf.Path(db.inputs.a_path)
        #   if input_path.IsValid():
        #       db.outputs.a_path = input_path.AppendChild("/Child").GetString()
        #
        db.outputs.a_path = db.inputs.a_path + "/Child"
        # Metadata is reachable through the ABI; the hardcoded metadata keys live in the OmniGraph
        # Python namespace
        assert db.node.get_attribute("inputs:a_bool").get_metadata(ogn.MetadataKeys.UI_NAME) == "Simple Boolean Input"
        # The database interface offers the same metadata lookups
        db.outputs.a_nodeTypeUiName = db.get_metadata(ogn.MetadataKeys.UI_NAME)
        db.outputs.a_a_boolUiName = db.get_metadata(ogn.MetadataKeys.UI_NAME, db.attributes.inputs.a_bool)
        return True
| 3,475 | Python | 52.476922 | 118 | 0.671655 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial13/OgnTutorialStatePy.py | """
Implementation of a Python node that uses internal state information to compute outputs.
There are two types of state information in use here:
- OgnTutorialStatePy.step (per-class state information)
This is inherently dangerous in a multi-threaded multi-hardware evaluation so
it must be used with care. In this case the value is only used when a node is created, which for now is a safe
single-threaded operation
- per-node state information.
"""
class OgnTutorialStatePyInternalState:
    """Convenience class for maintaining per-node state information"""

    def __init__(self):
        """Instantiate the per-node state information.

        Note: For convenience, per-node state data is maintained as members of this class, imposing the minor
        restriction of having no parameters allowed in this constructor.

        The "state" section in the .ogn node is what tells OmniGraph that this node manages some
        per-node state data.
        """
        # Pull this node's step out of the shared per-class counter, then advance the counter so the
        # next node created gets a larger step
        self.node_step = OgnTutorialStatePy.step
        OgnTutorialStatePy.step += 1
        # Every node starts its monotonic value at zero
        self.increment_value = 0

    def update_state(self):
        """Helper function to update the node's internal state based on the previous values and the per-class state"""
        self.increment_value = self.increment_value + self.node_step
class OgnTutorialStatePy:
    """Use internal node state information in addition to inputs"""

    # Simplified per-class state shared by every node of this type. Real applications would likely use a
    # more complex structure, perhaps keyed off of input combinations or real-time information.
    #
    # Each node created bumps this value, and the value at creation time becomes that node's own
    # increment step: the first instance steps its state by 1, the second by 2, and so on...
    step = 1

    # Defining this method, together with the "state" section in the .ogn file, tells OmniGraph that the
    # node keeps opaque per-node state. OmniGraph then schedules evaluation so that the state never
    # compromises thread safety, but updating the values (and any dirty bits) remains the node's job.
    @staticmethod
    def internal_state():
        """Returns an object that will contain per-node state information"""
        return OgnTutorialStatePyInternalState()

    @staticmethod
    def compute(db) -> bool:
        """Compute the output based on inputs and internal state"""
        # Inputs can divert the computation away from the state-based path entirely
        if db.inputs.override:
            db.outputs.monotonic = db.inputs.overrideValue
        else:
            # OmniGraph guarantees the database carries the right internal state object for the node
            # being evaluated; the contents of that state are opaque to OmniGraph itself.
            db.outputs.monotonic = db.internal_state.increment_value
        # Advance the node's internal state for the next evaluation
        db.internal_state.update_state()
        return True
| 3,600 | Python | 46.381578 | 119 | 0.709167 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial19/OgnTutorialExtendedTypesPy.py | """
Implementation of the Python node accessing attributes whose type is determined at runtime.
This class exercises access to the DataModel through the generated database class for all simple data types.
"""
import omni.graph.core as og
# Hardcode each of the expected types for easy comparison
FLOAT_TYPE = og.Type(og.BaseDataType.FLOAT)
TOKEN_TYPE = og.Type(og.BaseDataType.TOKEN)
BOOL_ARRAY_TYPE = og.Type(og.BaseDataType.BOOL, array_depth=1)
FLOAT_ARRAY_TYPE = og.Type(og.BaseDataType.FLOAT, array_depth=1)
FLOAT3_TYPE = og.Type(og.BaseDataType.FLOAT, tuple_count=3)
INT2_TYPE = og.Type(og.BaseDataType.INT, tuple_count=2)
FLOAT3_ARRAY_TYPE = og.Type(og.BaseDataType.FLOAT, tuple_count=3, array_depth=1)
class OgnTutorialExtendedTypesPy:
    """Exercise the runtime data types through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Implements the same algorithm as the C++ node OgnTutorialExtendedTypes.cpp.

        It follows the same code pattern for easier comparison, though in practice you would probably code Python
        nodes differently from C++ nodes to take advantage of the strengths of each language.

        Args:
            db: Generated database giving access to the node's attribute values

        Returns:
            True when every sub-computation succeeded
        """

        def __compare_resolved_types(input_attribute, output_attribute) -> og.Type:
            """Returns the resolved type if they are the same, outputs a warning and returns None otherwise"""
            resolved_input_type = input_attribute.type
            resolved_output_type = output_attribute.type
            if resolved_input_type != resolved_output_type:
                db.log_warn(f"Resolved types do not match {resolved_input_type} -> {resolved_output_type}")
                return None
            # An UNKNOWN base type means the extended attribute is not resolved yet, so there is nothing to do
            return resolved_input_type if resolved_input_type.base_type != og.BaseDataType.UNKNOWN else None

        # ---------------------------------------------------------------------------------------------------
        def _compute_simple_values():
            """Perform the first algorithm on the simple input data types"""
            # Unlike C++ code the Python types are flexible so you must check the data types to do the right thing.
            # This works out better when the operation is the same as you don't even have to check the data type. In
            # this case the "doubling" operation is slightly different for floats and tokens.
            resolved_type = __compare_resolved_types(db.inputs.floatOrToken, db.outputs.doubledResult)
            if resolved_type == FLOAT_TYPE:
                db.outputs.doubledResult.value = db.inputs.floatOrToken.value * 2.0
            elif resolved_type == TOKEN_TYPE:
                # "Doubling" a token means concatenating it with itself
                db.outputs.doubledResult.value = db.inputs.floatOrToken.value + db.inputs.floatOrToken.value
            # A Pythonic way to do the same thing by just applying an operation and checking for compatibility is:
            #   try:
            #       db.outputs.doubledResult = db.inputs.floatOrToken * 2.0
            #   except TypeError:
            #       # Gets in here for token types since multiplying string by float is not legal
            #       db.outputs.doubledResult = db.inputs.floatOrToken + db.inputs.floatOrToken
            return True

        # ---------------------------------------------------------------------------------------------------
        def _compute_array_values():
            """Perform the second algorithm on the array input data types"""
            resolved_type = __compare_resolved_types(db.inputs.toNegate, db.outputs.negatedResult)
            # "Negation" is boolean inversion for bool arrays and arithmetic negation for float arrays
            if resolved_type == BOOL_ARRAY_TYPE:
                db.outputs.negatedResult.value = [not value for value in db.inputs.toNegate.value]
            elif resolved_type == FLOAT_ARRAY_TYPE:
                db.outputs.negatedResult.value = [-value for value in db.inputs.toNegate.value]
            return True

        # ---------------------------------------------------------------------------------------------------
        def _compute_tuple_values():
            """Perform the third algorithm on the 'any' data types"""
            resolved_type = __compare_resolved_types(db.inputs.tuple, db.outputs.tuple)
            # Notice how, since the operation is applied the same for both recognized types, the
            # same code can handle both of them.
            if resolved_type in (FLOAT3_TYPE, INT2_TYPE):
                db.outputs.tuple.value = tuple(-x for x in db.inputs.tuple.value)
            # An unresolved type is a temporary state and okay, resolving to unsupported types means the graph is in
            # an unsupported configuration that needs to be corrected.
            elif resolved_type is not None:
                type_name = resolved_type.get_type_name()
                db.log_error(f"Only float[3] and int[2] types are supported by this node, not {type_name}")
                return False
            return True

        # ---------------------------------------------------------------------------------------------------
        def _compute_flexible_values():
            """Perform the fourth algorithm on the multi-shape data types"""
            resolved_type = __compare_resolved_types(db.inputs.flexible, db.outputs.flexible)
            # Point arrays get each component negated; a token gets its characters reversed
            if resolved_type == FLOAT3_ARRAY_TYPE:
                db.outputs.flexible.value = [(-x, -y, -z) for (x, y, z) in db.inputs.flexible.value]
            elif resolved_type == TOKEN_TYPE:
                db.outputs.flexible.value = db.inputs.flexible.value[::-1]
            return True

        # ---------------------------------------------------------------------------------------------------
        # Run every sub-computation even if an earlier one failed, so that all errors are reported in one pass
        compute_success = _compute_simple_values()
        compute_success = _compute_array_values() and compute_success
        compute_success = _compute_tuple_values() and compute_success
        compute_success = _compute_flexible_values() and compute_success
        # ---------------------------------------------------------------------------------------------------
        # As Python has a much more flexible typing system it can do things in a few lines that require a lot
        # more in C++. One such example is the ability to add two arbitrary data types. Here is an example of
        # how, using "any" type inputs "a", and "b", with an "any" type output "result" you can generically
        # add two elements without explicitly checking the type, failing only when Python cannot support
        # the operation.
        #
        #   try:
        #       db.outputs.result = db.inputs.a + db.inputs.b
        #       return True
        #   except TypeError:
        #       a_type = inputs.a.type().get_type_name()
        #       b_type = inputs.b.type().get_type_name()
        #       db.log_error(f"Cannot add attributes of type {a_type} and {b_type}")
        #       return False
        return True

    @staticmethod
    def on_connection_type_resolve(node) -> None:
        """Resolve the types of this node's coupled extended attributes when any one of them resolves.

        Args:
            node: The node whose connection topology changed
        """
        # There are 4 sets of type-coupled attributes in this node, meaning that the base_type of the attributes
        # must be the same for the node to function as designed.
        #   1. floatOrToken <-> doubledResult
        #   2. toNegate     <-> negatedResult
        #   3. tuple        <-> tuple
        #   4. flexible     <-> flexible
        #
        # The following code uses a helper function to resolve the attribute types of the coupled pairs. Note that
        # without this logic a chain of extended-attribute connections may result in a non-functional graph, due to
        # the requirement that types be resolved before graph evaluation, and the ambiguity of the graph without knowing
        # how the types are related.
        og.resolve_fully_coupled(
            [node.get_attribute("inputs:floatOrToken"), node.get_attribute("outputs:doubledResult")]
        )
        og.resolve_fully_coupled([node.get_attribute("inputs:toNegate"), node.get_attribute("outputs:negatedResult")])
        og.resolve_fully_coupled([node.get_attribute("inputs:tuple"), node.get_attribute("outputs:tuple")])
        og.resolve_fully_coupled([node.get_attribute("inputs:flexible"), node.get_attribute("outputs:flexible")])
| 8,136 | Python | 55.506944 | 120 | 0.599435 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial23/OgnTutorialCpuGpuExtendedPy.py | """
Implementation of the Python node accessing extended attributes whose memory location is determined at runtime.
"""
import omni.graph.core as og
# Only one type of data is handled by the compute - pointf[3][]
POINT_ARRAY_TYPE = og.Type(og.BaseDataType.FLOAT, tuple_count=3, array_depth=1, role=og.AttributeRole.POSITION)
class OgnTutorialCpuGpuExtendedPy:
    """Exercise GPU access for extended attributes through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Implements the same algorithm as the C++ node OgnTutorialCpuGpuExtended.cpp.

        It follows the same code pattern for easier comparison, though in practice you would probably code Python
        nodes differently from C++ nodes to take advantage of the strengths of each language.

        Args:
            db: Generated database giving access to the node's attribute values

        Returns:
            True when the compute succeeded, False when any attribute failed to resolve or the sizes mismatch
        """
        # Find and verify the attributes containing the points
        if db.attributes.inputs.cpuData.get_resolved_type() != POINT_ARRAY_TYPE:
            db.log_warning("Skipping compute - CPU attribute type did not resolve to pointf[3][]")
            return False
        if db.attributes.inputs.gpuData.get_resolved_type() != POINT_ARRAY_TYPE:
            db.log_warning("Skipping compute - GPU attribute type did not resolve to pointf[3][]")
            return False
        if db.attributes.outputs.cpuGpuSum.get_resolved_type() != POINT_ARRAY_TYPE:
            db.log_warning("Skipping compute - Sum attribute type did not resolve to pointf[3][]")
            return False
        # Put accessors into local variables for convenience
        gpu_data = db.inputs.gpuData
        cpu_data = db.inputs.cpuData
        sums = db.outputs.cpuGpuSum
        # Mismatched sizes cannot be computed. Skip the compute as the warning promises; previously the
        # code fell through here and went on to add two arrays of different lengths.
        if gpu_data.size != cpu_data.size:
            db.log_warning(f"Skipping compute - Point arrays are different sizes ({gpu_data.size} and {cpu_data.size})")
            return False
        # Set the output size to match the input point arrays
        sums.size = cpu_data.size
        # Use the correct data access based on whether the output is supposed to be on the GPU or not
        if db.inputs.gpu:
            # The second line is how the values would be extracted if Python supported GPU data extraction.
            # When it does this tutorial will be updated
            sums.cpu_value = cpu_data.value + gpu_data.cpu_value
            # sums.gpu_value = cpu_data.gpu_value + gpu_data.value
        else:
            sums.cpu_value = cpu_data.value + gpu_data.cpu_value
        return True

    @staticmethod
    def on_connection_type_resolve(node: og.Node) -> None:
        """Whenever any of the inputs or the output get a resolved type the others should get the same resolution"""
        attribs = [
            node.get_attribute("inputs:cpuData"),
            node.get_attribute("inputs:gpuData"),
            node.get_attribute("outputs:cpuGpuSum"),
        ]
        og.resolve_fully_coupled(attribs)
| 2,925 | Python | 46.193548 | 120 | 0.666667 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial22/OgnTutorialCpuGpuBundlesPy.py | """
Implementation of the Python node accessing attributes whose memory location is determined at runtime.
"""
import numpy as np
import omni.graph.core as og
# Types to check on bundled attributes
FLOAT_ARRAY_TYPE = og.Type(og.BaseDataType.FLOAT, array_depth=1)
FLOAT3_ARRAY_TYPE = og.Type(og.BaseDataType.FLOAT, tuple_count=3, array_depth=1, role=og.AttributeRole.POSITION)
class OgnTutorialCpuGpuBundlesPy:
    """Exercise bundle members through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Copy the selected input bundle to the output and append row-wise dot products.

        Mirrors the algorithm of the C++ node OgnTutorialCpuGpuBundles.cpp for easier
        comparison: the 'gpu' flag selects which input bundle is forwarded, then the
        dot product of the CPU and GPU point arrays is written to a member named by
        the dotProducts token.

        Returns True on success or when there is nothing to do, False when a
        required bundle member is missing or has the wrong type.
        """
        # Forward whichever input bundle the flag selects; an invalid bundle just
        # means there is nothing to compute, which is not an error
        source_bundle = db.inputs.gpuBundle if db.inputs.gpu else db.inputs.cpuBundle
        if not source_bundle.valid:
            return True
        db.outputs.cpuGpuBundle = source_bundle

        # Locate the point arrays on both input bundles and confirm their types
        points_on_cpu = db.inputs.cpuBundle.attribute_by_name(db.tokens.points)
        if points_on_cpu.type != FLOAT3_ARRAY_TYPE:
            db.log_warning(
                f"Skipping compute - No valid float[3][] attribute named '{db.tokens.points}' on the CPU bundle"
            )
            return False
        points_on_gpu = db.inputs.gpuBundle.attribute_by_name(db.tokens.points)
        if points_on_gpu.type != FLOAT3_ARRAY_TYPE:
            db.log_warning(
                f"Skipping compute - No valid float[3][] attribute named '{db.tokens.points}' on the GPU bundle"
            )
            return False

        # Reuse an existing dotProducts member when present, otherwise create one.
        # Python checks the type explicitly here where C++ would rely on a failed cast.
        products = db.outputs.cpuGpuBundle.attribute_by_name(db.tokens.dotProducts)
        if products is None:
            products = db.outputs.cpuGpuBundle.insert((og.Type(og.BaseDataType.FLOAT, array_depth=1), "dotProducts"))
        elif products.type != FLOAT_ARRAY_TYPE:
            db.log_warning(
                f"Skipping compute - No valid float[] attribute named '{db.tokens.dotProducts}' on the output bundle"
            )
            return False

        # One dot product per point
        products.size = points_on_cpu.size
        # Both the GPU and CPU output paths currently read through the CPU-side
        # accessors; a true GPU write would use gpu_value once Python supports
        # direct GPU data extraction.
        products.cpu_value = np.einsum("ij,ij->i", points_on_cpu.value, points_on_gpu.cpu_value)
        return True
| 3,273 | Python | 45.771428 | 120 | 0.651085 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial16/OgnTutorialBundleDataPy.py | """
Implementation of the Python node accessing attributes through the bundle in which they are contained.
"""
import numpy as np
import omni.graph.core as og
# Types recognized by the integer filter
# NOTE(review): not applied by the Python compute() below, which doubles values of
# any type that supports it - confirm whether this constant should be removed or
# the filter re-enabled to match the C++ node's integer-only behavior.
_INTEGER_TYPES = [og.BaseDataType.INT, og.BaseDataType.UINT, og.BaseDataType.INT64, og.BaseDataType.UINT64]
class OgnTutorialBundleDataPy:
    """Exercise the bundled data types through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Double the value of every bundle member that supports it.

        Implements the same algorithm as the C++ node OgnTutorialBundleData.cpp,
        except that Python's flexibility lets it double any compatible attribute
        type rather than only the integer types.
        """
        # Copy the full input bundle onto the output so the output data can be
        # modified in place
        out_bundle = db.outputs.bundle
        out_bundle.bundle = db.inputs.bundle

        # "attributes" iterates wrappers over og.AttributeData (runtime-created
        # bundle members), not regular og.Attribute objects.
        # No integer filter is applied here - every type that can be doubled is processed.
        for member in out_bundle.attributes:
            member_type = member.type
            if member_type.base_type == og.BaseDataType.TOKEN:
                # Tokens "double" by string concatenation, element-wise for arrays
                if member_type.array_depth > 0:
                    member.value = [f"{element}{element}" for element in member.value]
                else:
                    member.value = f"{member.value}{member.value}"
            elif member_type.role in [og.AttributeRole.TEXT, og.AttributeRole.PATH]:
                # Text and path roles are also string-like, so concatenate as well
                member.value = f"{member.value}{member.value}"
            else:
                # numpy handles scalars, tuples, and arrays of every numeric type
                # uniformly, unlike the per-type handling the C++ node requires
                try:
                    member.value = np.multiply(member.value, 2)
                except TypeError:
                    db.log_error(f"This node does not handle data of type {member_type.get_type_name()}")
        return True
| 2,702 | Python | 48.145454 | 113 | 0.663583 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial27/OgnTutorialCudaDataCpuPy.py | """
Implementation of the Python node accessing CUDA attributes in a way that accesses the GPU arrays with a CPU pointer.
No actual computation is done here as the tutorial nodes are not set up to handle GPU computation.
"""
import ctypes
import omni.graph.core as og
# Only one type of data is handled by the compute - pointf[3][]
# NOTE(review): this constant is not referenced by the code below in this file;
# presumably kept for parity with the sibling CUDA tutorial node - confirm before removing.
POINT_ARRAY_TYPE = og.Type(og.BaseDataType.FLOAT, tuple_count=3, array_depth=1, role=og.AttributeRole.POSITION)
def get_address(attr: og.Attribute) -> int:
    """Return the size_t value stored at the memory location the attribute wraps.

    The attribute's "memory" member is a raw CPU-side address; it is reinterpreted
    as a pointer to size_t and dereferenced to recover the GPU buffer address it
    points at.
    """
    as_pointer = ctypes.cast(attr.memory, ctypes.POINTER(ctypes.c_size_t))
    return as_pointer.contents.value
class OgnTutorialCudaDataCpuPy:
    """Exercise GPU access for extended attributes through a Python OmniGraph node"""

    @staticmethod
    def compute(db) -> bool:
        """Prepare the GPU output buffers and report their CPU-side pointer information.

        The array attributes are wrappers around CPU memory pointers to GPU pointer
        arrays; Python cannot write GPU data directly, so no actual computation is
        performed - the pointers would be handed off to CUDA code in a real node.
        """
        src_points = db.inputs.points
        factor = db.inputs.multiplier
        # The size must come from the data type description rather than the usual
        # len() call because these are CPU pointers to GPU data
        db.outputs.points_size = src_points.dtype.size
        # Memory is allocated lazily after a size change; this access triggers it
        dst_points = db.outputs.points
        # Separately, add a points attribute to an (initially empty) output bundle to
        # show that the CPU-pointer-to-GPU-data arrangement propagates to bundle children
        bundle = db.outputs.outBundle
        bundle.clear()
        bundle.add_attributes([og.Type(og.BaseDataType.FLOAT, 3, 1)], ["points"])
        bundle_points = bundle.attribute_by_name("points")
        # As for the main attributes, setting the size readies a GPU buffer of that size
        bundle_points.size = src_points.dtype.size
        # Normal assignment (e.g. copying input points to output points) is not possible
        # here since the data lives on the GPU; instead dump the memory pointers, which
        # could be passed to CUDA code generated from Python or behind pybind wrappers
        print("Locations in CUDA() should be in GPU memory space")
        print(f"  CPU Location for reference = {hex(id(db))}", flush=True)
        print(f"  Input points are {src_points} at CUDA({hex(get_address(src_points))})", flush=True)
        print(f"  Multiplier is CUDA({factor})", flush=True)
        print(f"  Output points are {dst_points} at CUDA({hex(get_address(dst_points))})", flush=True)
        print(f"  Bundle {bundle_points.gpu_value} at CUDA({hex(get_address(bundle_points.gpu_value))})", flush=True)
        return True
| 3,391 | Python | 50.393939 | 120 | 0.699204 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/extensionTutorial/test_example.py | """Tests for my simple math nodes"""
# To make code more concise use a shortform in the import, much as you would for numpy -> np
import omni.graph.core as og
# This module contains some useful testing utilities
import omni.graph.core.tests as ogts
# By using the standard test case base class you get scene setUp() and tearDown(), and the current set of default
# OmniGraph settings. If you wish to use any non-standard settings, leave the scene in place after the test completes,
# or add your own scoped setting variables see the set of classes that derive from ogts.OmniGraphTestCase and look at
# the test configuration factory function ogts.test_case_class().
class TestMySimpleNodes(ogts.OmniGraphTestCase):
    """Tests for my simple nodes; the base class provides async scene setUp()/tearDown()
    and the current default OmniGraph settings."""

    # ----------------------------------------------------------------------
    async def test_math(self):
        """Run a node network with the math operation (A + 6) * 10 = B

        Exercises the math nodes for addition (my.node.add) and multiplication (my.node.multiply)
        """
        # Shortform for the Controller keys keeps the graph description readable
        keys = og.Controller.Keys
        # Wire up the network: plus.result -> times.a, with plus.a = 6 and times.b = 10,
        # so setting plus.b to A yields (A + 6) * 10 on times.result
        (graph, (plus_node, times_node), _, _) = og.Controller.edit(
            "/mathGraph",
            {
                keys.CREATE_NODES: [("plus", "my.extension.addInts"), ("times", "my.extension.multiplyInts")],
                keys.CONNECT: [("plus.outputs:result", "times.inputs:a")],
                keys.SET_VALUES: [("plus.inputs:a", 6), ("times.inputs:b", 10)],
            },
        )
        # Controllers bound to specific attributes make the repeated access in the
        # loop below slightly faster
        set_input = og.Controller(og.Controller.attribute("inputs:b", plus_node))
        read_output = og.Controller(og.Controller.attribute("outputs:result", times_node))
        # (A, B) pairs that must satisfy (A + 6) * 10 == B
        for value_a, value_b in [(1, 70), (0, 60), (-6, 0)]:
            set_input.set(value_a)
            # Evaluation must run before the results can be checked
            await og.Controller.evaluate(graph)
            self.assertEqual(value_b, read_output.get())
| 2,843 | Python | 52.660376 | 118 | 0.65459 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tutorials/tutorial26/OgnTutorialGenericMathNode.py | import numpy as np
import omni.graph.core as og
# Mappings of possible numpy dtypes from the result data type and back
dtype_from_basetype = {
    og.BaseDataType.INT: np.int32,
    og.BaseDataType.INT64: np.int64,
    og.BaseDataType.HALF: np.float16,
    og.BaseDataType.FLOAT: np.float32,
    og.BaseDataType.DOUBLE: np.float64,
}
# Base types this node can resolve; a type's position in this list is the index
# used by basetype_resolution_table below
supported_basetypes = [
    og.BaseDataType.INT,
    og.BaseDataType.INT64,
    og.BaseDataType.HALF,
    og.BaseDataType.FLOAT,
    og.BaseDataType.DOUBLE,
]
# basetype_resolution_table[a][b] is an index into supported_basetypes giving the
# result base type for operands whose base types sit at indices a and b
basetype_resolution_table = [
    [0, 1, 3, 3, 4],  # Int
    [1, 1, 4, 4, 4],  # Int64
    [3, 4, 2, 3, 4],  # Half
    [3, 4, 3, 3, 4],  # Float
    [4, 4, 4, 4, 4],  # Double
]
class OgnTutorialGenericMathNode:
    """Node to multiply two values of any type"""

    @staticmethod
    def compute(db) -> bool:
        """Compute the product of two values, if the types are all resolved.

        Returns True on success; when the types are not compatible for
        multiplication (or with the resolved output type) the error is logged
        and the method returns False.
        """
        try:
            # To support multiplying array of vectors by array of scalars we need to broadcast the scalars to
            # match the shape of the vector array, and we will convert the result to whatever the result is
            # resolved to
            atype = db.inputs.a.type
            btype = db.inputs.b.type
            rtype = db.outputs.product.type
            result_dtype = dtype_from_basetype.get(rtype.base_type, None)
            # Use numpy to perform the multiplication in order to automatically handle both scalar and array
            # types and automatically convert to the resolved output type
            if atype.array_depth > 0 and btype.array_depth > 0 and btype.tuple_count < atype.tuple_count:
                # Array-of-scalars times array-of-tuples: add an axis so numpy broadcasts per element
                result = np.multiply(db.inputs.a.value, db.inputs.b.value[:, np.newaxis], dtype=result_dtype)
            else:
                result = np.multiply(db.inputs.a.value, db.inputs.b.value, dtype=result_dtype)
            db.outputs.product.value = result
        except TypeError as error:
            db.log_error(f"Multiplication could not be performed: {error}")
            return False
        return True

    @staticmethod
    def on_connection_type_resolve(node) -> None:
        """Resolve the output's type from the two input types once both are known."""
        atype = node.get_attribute("inputs:a").get_resolved_type()
        btype = node.get_attribute("inputs:b").get_resolved_type()
        productattr = node.get_attribute("outputs:product")
        producttype = productattr.get_resolved_type()
        # The output type can be only inferred when both input types are resolved
        # and the output is not resolved yet
        if (
            atype.base_type != og.BaseDataType.UNKNOWN
            and btype.base_type != og.BaseDataType.UNKNOWN
            and producttype.base_type == og.BaseDataType.UNKNOWN
        ):
            # Resolve the base type using the lookup table, falling back to DOUBLE for
            # base types outside the table.
            # BUG FIX: list.index() raises ValueError for values not in the list, so the
            # previous `a_index >= 0 and b_index >= 0` guard was dead code and unsupported
            # base types crashed instead of using the intended DOUBLE fallback.
            try:
                a_index = supported_basetypes.index(atype.base_type)
                b_index = supported_basetypes.index(btype.base_type)
                base_type = supported_basetypes[basetype_resolution_table[a_index][b_index]]
            except ValueError:
                base_type = og.BaseDataType.DOUBLE
            productattr.set_resolved_type(
                og.Type(base_type, max(atype.tuple_count, btype.tuple_count), max(atype.array_depth, btype.array_depth))
            )
| 3,448 | Python | 37.322222 | 120 | 0.633991 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialTupleData.py | import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene
class TestOgn(ogts.OmniGraphTestCase):
    """Regression tests for the omni.tutorials.TupleData node.

    Each TEST_DATA entry describes one evaluation: the input values to author
    before the graph runs and the output values expected afterwards, as
    [attribute path, value, is_gpu] triples.
    """

    TEST_DATA = [
        {
            'outputs': [
                ['outputs:a_double2', [2.1, 3.2], False],
                ['outputs:a_float2', [5.4, 6.5], False],
                ['outputs:a_half2', [8.0, 9.0], False],
                ['outputs:a_int2', [11, 12], False],
                ['outputs:a_float3', [7.6, 8.7, 9.8], False],
                ['outputs:a_double3', [2.1, 3.2, 4.3], False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_double2', [2.1, 3.2], False],
                ['inputs:a_float2', [5.1, 6.2], False],
                ['inputs:a_half2', [8.0, 9.0], False],
                ['inputs:a_int2', [11, 12], False],
                ['inputs:a_float3', [7.1, 8.2, 9.3], False],
                ['inputs:a_double3', [10.1, 11.2, 12.3], False],
            ],
            'outputs': [
                ['outputs:a_double2', [3.1, 4.2], False],
                ['outputs:a_float2', [6.1, 7.2], False],
                ['outputs:a_half2', [9.0, 10.0], False],
                ['outputs:a_int2', [12, 13], False],
                ['outputs:a_float3', [8.1, 9.2, 10.3], False],
                ['outputs:a_double3', [11.1, 12.2, 13.3], False],
            ],
        },
    ]

    async def test_generated(self):
        """Run every TEST_DATA case through a fresh single-instance graph."""
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_tutorials_TupleData", "omni.tutorials.TupleData", test_run, test_info)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.tutorials.TupleData User test case #{i+1}")

    async def test_vectorized_generated(self):
        """Same as test_generated but with 16 vectorized instances per node."""
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_tutorials_TupleData", "omni.tutorials.TupleData", test_run, test_info, 16)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.tutorials.TupleData User test case #{i+1}", 16)

    async def test_thread_safety(self):
        """Evaluate 24 independent graph instances concurrently and verify each one.

        BUG FIX: the per-instance setup results are now stored back into
        instance_setup and each instance is verified against its own info.
        Previously the setup loop discarded its results and the verification loop
        reused the leaked `test_info` variable, so only the last-configured
        instance was actually verified (24 times over).
        """
        import omni.kit

        # Generate multiple instances of the test setup to run them concurrently
        instance_setup = {f"/TestGraph_{n}": _TestGraphAndNode() for n in range(24)}
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            for key in instance_setup:
                instance_setup[key] = await _test_setup_scene(self, og.Controller(allow_exists_prim=True), key, "TestNode_omni_tutorials_TupleData", "omni.tutorials.TupleData", test_run, instance_setup[key])
            self.assertEqual(len(og.get_all_graphs()), 24)
            # We want to evaluate all graphs concurrently. Kick them all.
            # Evaluate multiple times to skip 2 serial frames and increase chances for a race condition.
            for _ in range(10):
                await omni.kit.app.get_app().next_update_async()
            for key, test_instance in instance_setup.items():
                _test_verify_scene(self, og.Controller(), test_run, test_instance, f"omni.tutorials.TupleData User test case #{i+1}, instance{key}")

    async def test_data_access(self):
        """Load the template scene and check attribute defaults via USD and the database."""
        from omni.graph.tutorials.ogn.OgnTutorialTupleDataDatabase import OgnTutorialTupleDataDatabase
        test_file_name = "OgnTutorialTupleDataTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_omni_tutorials_TupleData")
        database = OgnTutorialTupleDataDatabase(test_node)
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
            test_type = "USD Load" if usd_test else "Database Access"
            return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"

        # Input attributes: both the USD-authored value and the database accessor
        # must match the template defaults
        input_expectations = [
            ("a_double2", [1.1, 2.2]),
            ("a_double3", [1.1, 2.2, 3.3]),
            ("a_float2", [4.4, 5.5]),
            ("a_float3", [6.6, 7.7, 8.8]),
            ("a_half2", [7.0, 8.0]),
            ("a_int2", [10, 11]),
        ]
        for name, expected_value in input_expectations:
            self.assertTrue(test_node.get_attribute_exists(f"inputs:{name}"))
            attribute = test_node.get_attribute(f"inputs:{name}")
            db_value = getattr(database.inputs, name)
            actual_value = og.Controller.get(attribute)
            ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
            ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))

        # Output attributes are computed at runtime, so only existence and the
        # database accessors are exercised
        for name in ("a_double2", "a_double3", "a_float2", "a_float3", "a_half2", "a_int2"):
            self.assertTrue(test_node.get_attribute_exists(f"outputs:{name}"))
            attribute = test_node.get_attribute(f"outputs:{name}")
            db_value = getattr(database.outputs, name)
| 8,539 | Python | 48.364162 | 187 | 0.65031 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialSimpleDataPy.py | import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene
class TestOgn(ogts.OmniGraphTestCase):
    """Regression tests for the omni.graph.tutorials.SimpleDataPy node.

    Each TEST_DATA entry lists the input values to author before evaluation and
    the output values expected afterwards, as [attribute path, value, is_gpu]
    triples.
    """

    TEST_DATA = [
        {
            'inputs': [
                ['inputs:a_bool', False, False],
            ],
            'outputs': [
                ['outputs:a_bool', True, False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_bool', True, False],
            ],
            'outputs': [
                ['outputs:a_bool', False, False],
                ['outputs:a_a_boolUiName', "Simple Boolean Input", False],
                ['outputs:a_nodeTypeUiName', "Tutorial Python Node: Attributes With Simple Data", False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_path', "/World/Domination", False],
            ],
            'outputs': [
                ['outputs:a_path', "/World/Domination/Child", False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_bool', False, False],
                ['inputs:a_double', 1.1, False],
                ['inputs:a_float', 3.3, False],
                ['inputs:a_half', 5.0, False],
                ['inputs:a_int', 7, False],
                ['inputs:a_int64', 9, False],
                ['inputs:a_token', "helloToken", False],
                ['inputs:a_string', "helloString", False],
                ['inputs:a_objectId', 10, False],
                ['inputs:a_uchar', 11, False],
                ['inputs:a_uint', 13, False],
                ['inputs:a_uint64', 15, False],
            ],
            'outputs': [
                ['outputs:a_bool', True, False],
                ['outputs:a_double', 2.1, False],
                ['outputs:a_float', 4.3, False],
                ['outputs:a_half', 6.0, False],
                ['outputs:a_int', 8, False],
                ['outputs:a_int64', 10, False],
                ['outputs:a_token', "worldToken", False],
                ['outputs:a_string', "worldString", False],
                ['outputs:a_objectId', 11, False],
                ['outputs:a_uchar', 12, False],
                ['outputs:a_uint', 14, False],
                ['outputs:a_uint64', 16, False],
            ],
        },
    ]

    async def test_generated(self):
        """Run every TEST_DATA case through a fresh single-instance graph."""
        graph_info = _TestGraphAndNode()
        ctrl = og.Controller()
        for case_index, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            graph_info = await _test_setup_scene(self, ctrl, "/TestGraph", "TestNode_omni_graph_tutorials_SimpleDataPy", "omni.graph.tutorials.SimpleDataPy", test_run, graph_info)
            await ctrl.evaluate(graph_info.graph)
            _test_verify_scene(self, ctrl, test_run, graph_info, f"omni.graph.tutorials.SimpleDataPy User test case #{case_index+1}")

    async def test_vectorized_generated(self):
        """Same as test_generated but with 16 vectorized instances per node."""
        graph_info = _TestGraphAndNode()
        ctrl = og.Controller()
        for case_index, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            graph_info = await _test_setup_scene(self, ctrl, "/TestGraph", "TestNode_omni_graph_tutorials_SimpleDataPy", "omni.graph.tutorials.SimpleDataPy", test_run, graph_info, 16)
            await ctrl.evaluate(graph_info.graph)
            _test_verify_scene(self, ctrl, test_run, graph_info, f"omni.graph.tutorials.SimpleDataPy User test case #{case_index+1}", 16)

    async def test_data_access(self):
        """Load the template scene and validate attribute defaults via USD and the database."""
        from omni.graph.tutorials.ogn.OgnTutorialSimpleDataPyDatabase import OgnTutorialSimpleDataPyDatabase
        test_file_name = "OgnTutorialSimpleDataPyTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_omni_graph_tutorials_SimpleDataPy")
        database = OgnTutorialSimpleDataPyDatabase(test_node)
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
            test_type = "USD Load" if usd_test else "Database Access"
            return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"

        # Input attributes with the default values authored in the template file;
        # both the USD value and the database accessor must match
        input_defaults = [
            ("a_constant_input", 0),
            ("a_double", 0),
            ("a_float", 0),
            ("a_half", 0.0),
            ("a_int", 0),
            ("a_int64", 0),
            ("a_objectId", 0),
            ("a_path", ""),
            ("a_string", "helloString"),
            ("a_token", "helloToken"),
            ("a_uchar", 0),
            ("a_uint", 0),
            ("a_uint64", 0),
        ]
        for name, expected_value in input_defaults:
            self.assertTrue(test_node.get_attribute_exists(f"inputs:{name}"))
            attribute = test_node.get_attribute(f"inputs:{name}")
            db_value = getattr(database.inputs, name)
            actual_value = og.Controller.get(attribute)
            ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
            ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))

        # Output attributes are computed at runtime, so only existence and the
        # database accessors are exercised
        output_names = (
            "a_a_boolUiName", "a_bool", "a_double", "a_float", "a_half", "a_int", "a_int64",
            "a_nodeTypeUiName", "a_objectId", "a_path", "a_string", "a_token", "a_uchar",
            "a_uint", "a_uint64",
        )
        for name in output_names:
            self.assertTrue(test_node.get_attribute_exists(f"outputs:{name}"))
            attribute = test_node.get_attribute(f"outputs:{name}")
            db_value = getattr(database.outputs, name)
| 13,304 | Python | 47.915441 | 186 | 0.63402 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialRoleData.py | import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene
class TestOgn(ogts.OmniGraphTestCase):
    # Generated table of test cases for the omni.graph.tutorials.RoleData node.
    # Each case is a dict with 'inputs' and 'outputs' lists whose entries are
    # [attribute path, value, gpu-flag] triples.  Comparing the rows shows the
    # node's expected behavior: every component of every role-based input
    # (colors, matrices, normals, points, quaternions, texcoords, timecode,
    # vectors) appears incremented by one in the matching output row.
    TEST_DATA = [
        {
            'inputs': [
                ['inputs:a_color3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_color3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_color3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_color4d', [1.0, 2.0, 3.0, 4.0], False],
                ['inputs:a_color4f', [11.0, 12.0, 13.0, 14.0], False],
                ['inputs:a_color4h', [21.0, 22.0, 23.0, 24.0], False],
                ['inputs:a_frame', [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0], False],
                ['inputs:a_matrix2d', [1.0, 2.0, 3.0, 4.0], False],
                ['inputs:a_matrix3d', [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], False],
                ['inputs:a_matrix4d', [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0], False],
                ['inputs:a_normal3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_normal3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_normal3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_point3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_point3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_point3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_quatd', [1.0, 2.0, 3.0, 4.0], False],
                ['inputs:a_quatf', [11.0, 12.0, 13.0, 14.0], False],
                ['inputs:a_quath', [21.0, 22.0, 23.0, 24.0], False],
                ['inputs:a_texcoord2d', [1.0, 2.0], False],
                ['inputs:a_texcoord2f', [11.0, 12.0], False],
                ['inputs:a_texcoord2h', [21.0, 22.0], False],
                ['inputs:a_texcoord3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_texcoord3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_texcoord3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_timecode', 10.0, False],
                ['inputs:a_vector3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_vector3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_vector3h', [21.0, 22.0, 23.0], False],
            ],
            # Expected results: each value is the corresponding input plus one.
            'outputs': [
                ['outputs:a_color3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_color3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_color3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_color4d', [2.0, 3.0, 4.0, 5.0], False],
                ['outputs:a_color4f', [12.0, 13.0, 14.0, 15.0], False],
                ['outputs:a_color4h', [22.0, 23.0, 24.0, 25.0], False],
                ['outputs:a_frame', [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], False],
                ['outputs:a_matrix2d', [2.0, 3.0, 4.0, 5.0], False],
                ['outputs:a_matrix3d', [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0], False],
                ['outputs:a_matrix4d', [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], False],
                ['outputs:a_normal3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_normal3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_normal3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_point3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_point3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_point3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_quatd', [2.0, 3.0, 4.0, 5.0], False],
                ['outputs:a_quatf', [12.0, 13.0, 14.0, 15.0], False],
                ['outputs:a_quath', [22.0, 23.0, 24.0, 25.0], False],
                ['outputs:a_texcoord2d', [2.0, 3.0], False],
                ['outputs:a_texcoord2f', [12.0, 13.0], False],
                ['outputs:a_texcoord2h', [22.0, 23.0], False],
                ['outputs:a_texcoord3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_texcoord3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_texcoord3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_timecode', 11.0, False],
                ['outputs:a_vector3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_vector3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_vector3h', [22.0, 23.0, 24.0], False],
            ],
        },
    ]
async def test_generated(self):
test_info = _TestGraphAndNode()
controller = og.Controller()
for i, test_run in enumerate(self.TEST_DATA):
await _test_clear_scene(self, test_run)
test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_RoleData", "omni.graph.tutorials.RoleData", test_run, test_info)
await controller.evaluate(test_info.graph)
_test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.RoleData User test case #{i+1}")
async def test_vectorized_generated(self):
test_info = _TestGraphAndNode()
controller = og.Controller()
for i, test_run in enumerate(self.TEST_DATA):
await _test_clear_scene(self, test_run)
test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_RoleData","omni.graph.tutorials.RoleData", test_run, test_info, 16)
await controller.evaluate(test_info.graph)
_test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.RoleData User test case #{i+1}", 16)
async def test_thread_safety(self):
import omni.kit
# Generate multiple instances of the test setup to run them concurrently
instance_setup = dict()
for n in range(24):
instance_setup[f"/TestGraph_{n}"] = _TestGraphAndNode()
for i, test_run in enumerate(self.TEST_DATA):
await _test_clear_scene(self, test_run)
for (key, test_info) in instance_setup.items():
test_info = await _test_setup_scene(self, og.Controller(allow_exists_prim=True), key, "TestNode_omni_graph_tutorials_RoleData", "omni.graph.tutorials.RoleData", test_run, test_info)
self.assertEqual(len(og.get_all_graphs()), 24)
# We want to evaluate all graphs concurrently. Kick them all.
# Evaluate multiple times to skip 2 serial frames and increase chances for a race condition.
for _ in range(10):
await omni.kit.app.get_app().next_update_async()
for (key, test_instance) in instance_setup.items():
_test_verify_scene(self, og.Controller(), test_run, test_info, f"omni.graph.tutorials.RoleData User test case #{i+1}, instance{key}")
async def test_data_access(self):
from omni.graph.tutorials.ogn.OgnTutorialRoleDataDatabase import OgnTutorialRoleDataDatabase
test_file_name = "OgnTutorialRoleDataTemplate.usda"
usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
if not os.path.exists(usd_path):
self.assertTrue(False, f"{usd_path} not found for loading test")
(result, error) = await ogts.load_test_file(usd_path)
self.assertTrue(result, f'{error} on {usd_path}')
test_node = og.Controller.node("/TestGraph/Template_omni_graph_tutorials_RoleData")
database = OgnTutorialRoleDataDatabase(test_node)
self.assertTrue(test_node.is_valid())
node_type_name = test_node.get_type_name()
self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)
def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
test_type = "USD Load" if usd_test else "Database Access"
return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"
self.assertTrue(test_node.get_attribute_exists("inputs:a_color3d"))
attribute = test_node.get_attribute("inputs:a_color3d")
db_value = database.inputs.a_color3d
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_color3f"))
attribute = test_node.get_attribute("inputs:a_color3f")
db_value = database.inputs.a_color3f
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_color3h"))
attribute = test_node.get_attribute("inputs:a_color3h")
db_value = database.inputs.a_color3h
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_color4d"))
attribute = test_node.get_attribute("inputs:a_color4d")
db_value = database.inputs.a_color4d
expected_value = [0.0, 0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_color4f"))
attribute = test_node.get_attribute("inputs:a_color4f")
db_value = database.inputs.a_color4f
expected_value = [0.0, 0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_color4h"))
attribute = test_node.get_attribute("inputs:a_color4h")
db_value = database.inputs.a_color4h
expected_value = [0.0, 0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_frame"))
attribute = test_node.get_attribute("inputs:a_frame")
db_value = database.inputs.a_frame
expected_value = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix2d"))
attribute = test_node.get_attribute("inputs:a_matrix2d")
db_value = database.inputs.a_matrix2d
expected_value = [[1.0, 0.0], [0.0, 1.0]]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix3d"))
attribute = test_node.get_attribute("inputs:a_matrix3d")
db_value = database.inputs.a_matrix3d
expected_value = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix4d"))
attribute = test_node.get_attribute("inputs:a_matrix4d")
db_value = database.inputs.a_matrix4d
expected_value = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_normal3d"))
attribute = test_node.get_attribute("inputs:a_normal3d")
db_value = database.inputs.a_normal3d
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_normal3f"))
attribute = test_node.get_attribute("inputs:a_normal3f")
db_value = database.inputs.a_normal3f
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_normal3h"))
attribute = test_node.get_attribute("inputs:a_normal3h")
db_value = database.inputs.a_normal3h
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_point3d"))
attribute = test_node.get_attribute("inputs:a_point3d")
db_value = database.inputs.a_point3d
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_point3f"))
attribute = test_node.get_attribute("inputs:a_point3f")
db_value = database.inputs.a_point3f
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_point3h"))
attribute = test_node.get_attribute("inputs:a_point3h")
db_value = database.inputs.a_point3h
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_quatd"))
attribute = test_node.get_attribute("inputs:a_quatd")
db_value = database.inputs.a_quatd
expected_value = [0.0, 0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_quatf"))
attribute = test_node.get_attribute("inputs:a_quatf")
db_value = database.inputs.a_quatf
expected_value = [0.0, 0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_quath"))
attribute = test_node.get_attribute("inputs:a_quath")
db_value = database.inputs.a_quath
expected_value = [0.0, 0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord2d"))
attribute = test_node.get_attribute("inputs:a_texcoord2d")
db_value = database.inputs.a_texcoord2d
expected_value = [0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord2f"))
attribute = test_node.get_attribute("inputs:a_texcoord2f")
db_value = database.inputs.a_texcoord2f
expected_value = [0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord2h"))
attribute = test_node.get_attribute("inputs:a_texcoord2h")
db_value = database.inputs.a_texcoord2h
expected_value = [0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord3d"))
attribute = test_node.get_attribute("inputs:a_texcoord3d")
db_value = database.inputs.a_texcoord3d
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord3f"))
attribute = test_node.get_attribute("inputs:a_texcoord3f")
db_value = database.inputs.a_texcoord3f
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord3h"))
attribute = test_node.get_attribute("inputs:a_texcoord3h")
db_value = database.inputs.a_texcoord3h
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_timecode"))
attribute = test_node.get_attribute("inputs:a_timecode")
db_value = database.inputs.a_timecode
expected_value = 1.0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_vector3d"))
attribute = test_node.get_attribute("inputs:a_vector3d")
db_value = database.inputs.a_vector3d
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_vector3f"))
attribute = test_node.get_attribute("inputs:a_vector3f")
db_value = database.inputs.a_vector3f
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_vector3h"))
attribute = test_node.get_attribute("inputs:a_vector3h")
db_value = database.inputs.a_vector3h
expected_value = [0.0, 0.0, 0.0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("outputs:a_color3d"))
attribute = test_node.get_attribute("outputs:a_color3d")
db_value = database.outputs.a_color3d
self.assertTrue(test_node.get_attribute_exists("outputs:a_color3f"))
attribute = test_node.get_attribute("outputs:a_color3f")
db_value = database.outputs.a_color3f
self.assertTrue(test_node.get_attribute_exists("outputs:a_color3h"))
attribute = test_node.get_attribute("outputs:a_color3h")
db_value = database.outputs.a_color3h
self.assertTrue(test_node.get_attribute_exists("outputs:a_color4d"))
attribute = test_node.get_attribute("outputs:a_color4d")
db_value = database.outputs.a_color4d
self.assertTrue(test_node.get_attribute_exists("outputs:a_color4f"))
attribute = test_node.get_attribute("outputs:a_color4f")
db_value = database.outputs.a_color4f
self.assertTrue(test_node.get_attribute_exists("outputs:a_color4h"))
attribute = test_node.get_attribute("outputs:a_color4h")
db_value = database.outputs.a_color4h
self.assertTrue(test_node.get_attribute_exists("outputs:a_frame"))
attribute = test_node.get_attribute("outputs:a_frame")
db_value = database.outputs.a_frame
self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix2d"))
attribute = test_node.get_attribute("outputs:a_matrix2d")
db_value = database.outputs.a_matrix2d
self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix3d"))
attribute = test_node.get_attribute("outputs:a_matrix3d")
db_value = database.outputs.a_matrix3d
self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix4d"))
attribute = test_node.get_attribute("outputs:a_matrix4d")
db_value = database.outputs.a_matrix4d
self.assertTrue(test_node.get_attribute_exists("outputs:a_normal3d"))
attribute = test_node.get_attribute("outputs:a_normal3d")
db_value = database.outputs.a_normal3d
self.assertTrue(test_node.get_attribute_exists("outputs:a_normal3f"))
attribute = test_node.get_attribute("outputs:a_normal3f")
db_value = database.outputs.a_normal3f
self.assertTrue(test_node.get_attribute_exists("outputs:a_normal3h"))
attribute = test_node.get_attribute("outputs:a_normal3h")
db_value = database.outputs.a_normal3h
self.assertTrue(test_node.get_attribute_exists("outputs:a_point3d"))
attribute = test_node.get_attribute("outputs:a_point3d")
db_value = database.outputs.a_point3d
self.assertTrue(test_node.get_attribute_exists("outputs:a_point3f"))
attribute = test_node.get_attribute("outputs:a_point3f")
db_value = database.outputs.a_point3f
self.assertTrue(test_node.get_attribute_exists("outputs:a_point3h"))
attribute = test_node.get_attribute("outputs:a_point3h")
db_value = database.outputs.a_point3h
self.assertTrue(test_node.get_attribute_exists("outputs:a_quatd"))
attribute = test_node.get_attribute("outputs:a_quatd")
db_value = database.outputs.a_quatd
self.assertTrue(test_node.get_attribute_exists("outputs:a_quatf"))
attribute = test_node.get_attribute("outputs:a_quatf")
db_value = database.outputs.a_quatf
self.assertTrue(test_node.get_attribute_exists("outputs:a_quath"))
attribute = test_node.get_attribute("outputs:a_quath")
db_value = database.outputs.a_quath
self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord2d"))
attribute = test_node.get_attribute("outputs:a_texcoord2d")
db_value = database.outputs.a_texcoord2d
self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord2f"))
attribute = test_node.get_attribute("outputs:a_texcoord2f")
db_value = database.outputs.a_texcoord2f
self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord2h"))
attribute = test_node.get_attribute("outputs:a_texcoord2h")
db_value = database.outputs.a_texcoord2h
self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord3d"))
attribute = test_node.get_attribute("outputs:a_texcoord3d")
db_value = database.outputs.a_texcoord3d
self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord3f"))
attribute = test_node.get_attribute("outputs:a_texcoord3f")
db_value = database.outputs.a_texcoord3f
self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord3h"))
attribute = test_node.get_attribute("outputs:a_texcoord3h")
db_value = database.outputs.a_texcoord3h
self.assertTrue(test_node.get_attribute_exists("outputs:a_timecode"))
attribute = test_node.get_attribute("outputs:a_timecode")
db_value = database.outputs.a_timecode
self.assertTrue(test_node.get_attribute_exists("outputs:a_vector3d"))
attribute = test_node.get_attribute("outputs:a_vector3d")
db_value = database.outputs.a_vector3d
self.assertTrue(test_node.get_attribute_exists("outputs:a_vector3f"))
attribute = test_node.get_attribute("outputs:a_vector3f")
db_value = database.outputs.a_vector3f
self.assertTrue(test_node.get_attribute_exists("outputs:a_vector3h"))
attribute = test_node.get_attribute("outputs:a_vector3h")
db_value = database.outputs.a_vector3h
| 25,764 | Python | 52.123711 | 197 | 0.662669 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialDefaults.py | import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene
class TestOgn(ogts.OmniGraphTestCase):
    # Generated table of test cases for the omni.graph.tutorials.Defaults node.
    # The single case authors no inputs and checks only 'outputs' entries, each
    # a [attribute path, expected value, gpu-flag] triple: every output is
    # expected to hold its type's default (zeros, empty string/array, identity
    # matrix) after the node computes.
    TEST_DATA = [
        {
            'outputs': [
                ['outputs:a_bool', False, False],
                ['outputs:a_double', 0.0, False],
                ['outputs:a_float', 0.0, False],
                ['outputs:a_half', 0.0, False],
                ['outputs:a_int', 0, False],
                ['outputs:a_int64', 0, False],
                ['outputs:a_string', "", False],
                ['outputs:a_token', "", False],
                ['outputs:a_uchar', 0, False],
                ['outputs:a_uint', 0, False],
                ['outputs:a_uint64', 0, False],
                ['outputs:a_int2', [0, 0], False],
                ['outputs:a_matrix', [1.0, 0.0, 0.0, 1.0], False],
                ['outputs:a_array', [], False],
            ],
        },
    ]
async def test_generated(self):
test_info = _TestGraphAndNode()
controller = og.Controller()
for i, test_run in enumerate(self.TEST_DATA):
await _test_clear_scene(self, test_run)
test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_Defaults", "omni.graph.tutorials.Defaults", test_run, test_info)
await controller.evaluate(test_info.graph)
_test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.Defaults User test case #{i+1}")
async def test_vectorized_generated(self):
test_info = _TestGraphAndNode()
controller = og.Controller()
for i, test_run in enumerate(self.TEST_DATA):
await _test_clear_scene(self, test_run)
test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_Defaults","omni.graph.tutorials.Defaults", test_run, test_info, 16)
await controller.evaluate(test_info.graph)
_test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.Defaults User test case #{i+1}", 16)
async def test_thread_safety(self):
import omni.kit
# Generate multiple instances of the test setup to run them concurrently
instance_setup = dict()
for n in range(24):
instance_setup[f"/TestGraph_{n}"] = _TestGraphAndNode()
for i, test_run in enumerate(self.TEST_DATA):
await _test_clear_scene(self, test_run)
for (key, test_info) in instance_setup.items():
test_info = await _test_setup_scene(self, og.Controller(allow_exists_prim=True), key, "TestNode_omni_graph_tutorials_Defaults", "omni.graph.tutorials.Defaults", test_run, test_info)
self.assertEqual(len(og.get_all_graphs()), 24)
# We want to evaluate all graphs concurrently. Kick them all.
# Evaluate multiple times to skip 2 serial frames and increase chances for a race condition.
for _ in range(10):
await omni.kit.app.get_app().next_update_async()
for (key, test_instance) in instance_setup.items():
_test_verify_scene(self, og.Controller(), test_run, test_info, f"omni.graph.tutorials.Defaults User test case #{i+1}, instance{key}")
async def test_data_access(self):
from omni.graph.tutorials.ogn.OgnTutorialDefaultsDatabase import OgnTutorialDefaultsDatabase
test_file_name = "OgnTutorialDefaultsTemplate.usda"
usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
if not os.path.exists(usd_path):
self.assertTrue(False, f"{usd_path} not found for loading test")
(result, error) = await ogts.load_test_file(usd_path)
self.assertTrue(result, f'{error} on {usd_path}')
test_node = og.Controller.node("/TestGraph/Template_omni_graph_tutorials_Defaults")
database = OgnTutorialDefaultsDatabase(test_node)
self.assertTrue(test_node.is_valid())
node_type_name = test_node.get_type_name()
self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)
def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
test_type = "USD Load" if usd_test else "Database Access"
return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"
self.assertTrue(test_node.get_attribute_exists("inputs:a_array"))
attribute = test_node.get_attribute("inputs:a_array")
db_value = database.inputs.a_array
expected_value = []
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_bool"))
attribute = test_node.get_attribute("inputs:a_bool")
db_value = database.inputs.a_bool
expected_value = False
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_double"))
attribute = test_node.get_attribute("inputs:a_double")
db_value = database.inputs.a_double
expected_value = 0.0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_float"))
attribute = test_node.get_attribute("inputs:a_float")
db_value = database.inputs.a_float
expected_value = 0.0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_half"))
attribute = test_node.get_attribute("inputs:a_half")
db_value = database.inputs.a_half
expected_value = 0.0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_int"))
attribute = test_node.get_attribute("inputs:a_int")
db_value = database.inputs.a_int
expected_value = 0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_int2"))
attribute = test_node.get_attribute("inputs:a_int2")
db_value = database.inputs.a_int2
expected_value = [0, 0]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_int64"))
attribute = test_node.get_attribute("inputs:a_int64")
db_value = database.inputs.a_int64
expected_value = 0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix"))
attribute = test_node.get_attribute("inputs:a_matrix")
db_value = database.inputs.a_matrix
expected_value = [[1.0, 0.0], [0.0, 1.0]]
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_string"))
attribute = test_node.get_attribute("inputs:a_string")
db_value = database.inputs.a_string
expected_value = ""
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_token"))
attribute = test_node.get_attribute("inputs:a_token")
db_value = database.inputs.a_token
expected_value = ""
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_uchar"))
attribute = test_node.get_attribute("inputs:a_uchar")
db_value = database.inputs.a_uchar
expected_value = 0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_uint"))
attribute = test_node.get_attribute("inputs:a_uint")
db_value = database.inputs.a_uint
expected_value = 0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("inputs:a_uint64"))
attribute = test_node.get_attribute("inputs:a_uint64")
db_value = database.inputs.a_uint64
expected_value = 0
actual_value = og.Controller.get(attribute)
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
self.assertTrue(test_node.get_attribute_exists("outputs:a_array"))
attribute = test_node.get_attribute("outputs:a_array")
db_value = database.outputs.a_array
self.assertTrue(test_node.get_attribute_exists("outputs:a_bool"))
attribute = test_node.get_attribute("outputs:a_bool")
db_value = database.outputs.a_bool
self.assertTrue(test_node.get_attribute_exists("outputs:a_double"))
attribute = test_node.get_attribute("outputs:a_double")
db_value = database.outputs.a_double
self.assertTrue(test_node.get_attribute_exists("outputs:a_float"))
attribute = test_node.get_attribute("outputs:a_float")
db_value = database.outputs.a_float
self.assertTrue(test_node.get_attribute_exists("outputs:a_half"))
attribute = test_node.get_attribute("outputs:a_half")
db_value = database.outputs.a_half
self.assertTrue(test_node.get_attribute_exists("outputs:a_int"))
attribute = test_node.get_attribute("outputs:a_int")
db_value = database.outputs.a_int
self.assertTrue(test_node.get_attribute_exists("outputs:a_int2"))
attribute = test_node.get_attribute("outputs:a_int2")
db_value = database.outputs.a_int2
self.assertTrue(test_node.get_attribute_exists("outputs:a_int64"))
attribute = test_node.get_attribute("outputs:a_int64")
db_value = database.outputs.a_int64
self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix"))
attribute = test_node.get_attribute("outputs:a_matrix")
db_value = database.outputs.a_matrix
self.assertTrue(test_node.get_attribute_exists("outputs:a_string"))
attribute = test_node.get_attribute("outputs:a_string")
db_value = database.outputs.a_string
self.assertTrue(test_node.get_attribute_exists("outputs:a_token"))
attribute = test_node.get_attribute("outputs:a_token")
db_value = database.outputs.a_token
self.assertTrue(test_node.get_attribute_exists("outputs:a_uchar"))
attribute = test_node.get_attribute("outputs:a_uchar")
db_value = database.outputs.a_uchar
self.assertTrue(test_node.get_attribute_exists("outputs:a_uint"))
attribute = test_node.get_attribute("outputs:a_uint")
db_value = database.outputs.a_uint
self.assertTrue(test_node.get_attribute_exists("outputs:a_uint64"))
attribute = test_node.get_attribute("outputs:a_uint64")
db_value = database.outputs.a_uint64
| 12,650 | Python | 47.84556 | 197 | 0.686087 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_python_interface.py | """Basic tests of the Python interface to C++ nodes generated from a .ogn file"""
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.tutorials.ogn.OgnTutorialSimpleDataDatabase import OgnTutorialSimpleDataDatabase
# ======================================================================
class TestOmniGraphPythonInterface(ogts.OmniGraphTestCase):
    """Exercises the generated Python database interface wrapping a C++ tutorial node"""

    # ----------------------------------------------------------------------
    async def test_setting_in_ogn_python_api(self):
        """Set every input through the database wrapper and confirm the computed outputs"""
        (_, [simple_node], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                og.Controller.Keys.CREATE_NODES: ("PyTest_SimpleNode", "omni.graph.tutorials.SimpleData"),
            },
        )
        await og.Controller.evaluate()
        db = OgnTutorialSimpleDataDatabase(simple_node)
        # Drive the inputs through the generated database accessors.
        # (The token attribute is intentionally left out, matching the original coverage.)
        input_values = [
            ("a_bool", False),
            ("a_half", 2.0),
            ("a_int", 3),
            ("a_int64", 4),
            ("a_float", 5.0),
            ("a_double", 6.0),
            ("unsigned_a_uchar", 7),
            ("unsigned_a_uint", 8),
            ("unsigned_a_uint64", 9),
        ]
        for attr_name, value in input_values:
            setattr(db.inputs, attr_name, value)
        # Evaluate once so the node's compute runs with the new inputs
        await og.Controller.evaluate()
        # The node inverts the bool and adds one to every numeric input
        self.assertEqual(db.outputs.a_bool, True)
        for attr_name, expected in [("a_half", 3.0), ("a_float", 6.0), ("a_double", 7.0)]:
            self.assertAlmostEqual(getattr(db.outputs, attr_name), expected)
        exact_expectations = [
            ("a_int", 4),
            ("a_int64", 5),
            ("unsigned_a_uchar", 8),
            ("unsigned_a_uint", 9),
            ("unsigned_a_uint64", 10),
        ]
        for attr_name, expected in exact_expectations:
            self.assertEqual(getattr(db.outputs, attr_name), expected)

    # ----------------------------------------------------------------------
    async def test_check_has_state(self):
        """Confirm the has_state flag is reported correctly for stateless and stateful node types"""
        self.assertFalse(og.get_node_type("omni.graph.tutorials.SimpleData").has_state())
        self.assertTrue(og.get_node_type("omni.graph.tutorials.StatePy").has_state())
| 2,753 | Python | 46.482758 | 106 | 0.625499 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_api.py | """Testing the stability of the API in this module"""
import omni.graph.core.tests as ogts
import omni.graph.tutorials as ogtu
from omni.graph.tools.tests.internal_utils import _check_module_api_consistency, _check_public_api_contents
# ======================================================================
class _TestOmniGraphTutorialsApi(ogts.OmniGraphTestCase):
    """Guard tests that fail when the module's public API surface drifts"""
    # Submodules that are intentionally excluded from the published API surface
    _UNPUBLISHED = ["bindings", "ogn", "tests"]
    async def test_api(self):
        """Check that the module and its test submodule expose a consistent API"""
        _check_module_api_consistency(ogtu, self._UNPUBLISHED)  # noqa: PLW0212
        _check_module_api_consistency(ogtu.tests, is_test_module=True)  # noqa: PLW0212
    async def test_api_features(self):
        """Test that the known public API features continue to exist"""
        _check_public_api_contents(ogtu, [], self._UNPUBLISHED, only_expected_allowed=True)  # noqa: PLW0212
        _check_public_api_contents(ogtu.tests, [], [], only_expected_allowed=True)  # noqa: PLW0212
| 936 | Python | 48.315787 | 108 | 0.65812 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_tutorial_extended_types.py | """
Tests for the omni.graph.tutorials.ExtendedTypes and omni.graph.tutorials.ExtendedTypesPy nodes
"""
import omni.graph.core as og
import omni.graph.core.tests as ogts
class TestTutorialExtendedTypes(ogts.OmniGraphTestCase):
    """Extended attribute type tests require multiple nodes, not supported in the .ogn test framework"""
    # ----------------------------------------------------------------------
    async def _test_tutorial_extended_attributes_node(self, test_node_type_name: str):
        """Test basic operation of the tutorial node containing extended attributes

        Args:
            test_node_type_name: Node type to instantiate for the two extended-attribute nodes
                (allows this test to be shared between the C++ and Python implementations)
        """
        # Set up a graph that creates full type resolution for simple and array types of the extended attribute node
        #
        #  SimpleIn ----=> Extended1 ----=> SimpleOut
        #          \      /          \      /
        #           X                 X
        #          /      \          /      \
        #  ArrayIn ----=> Extended2 ----=> ArrayOut
        #
        # Extended1's attributes resolve to float/float[] via the Simple connections,
        # Extended2's resolve to token/bool[] via the token and gates connections.
        keys = og.Controller.Keys
        (_, [simple_node, _, extended_node_1, extended_node_2, array_node, _], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: [
                    ("SimpleIn", "omni.graph.tutorials.SimpleData"),
                    ("SimpleOut", "omni.graph.tutorials.SimpleData"),
                    ("Extended1", test_node_type_name),
                    ("Extended2", test_node_type_name),
                    ("ArrayIn", "omni.graph.tutorials.ArrayData"),
                    ("ArrayOut", "omni.graph.tutorials.ArrayData"),
                ],
                keys.CONNECT: [
                    ("SimpleIn.outputs:a_float", "Extended1.inputs:floatOrToken"),
                    ("Extended1.outputs:doubledResult", "SimpleOut.inputs:a_float"),
                    ("ArrayIn.outputs:result", "Extended1.inputs:toNegate"),
                    ("Extended1.outputs:negatedResult", "ArrayOut.inputs:original"),
                    ("SimpleIn.outputs:a_token", "Extended2.inputs:floatOrToken"),
                    ("Extended2.outputs:doubledResult", "SimpleOut.inputs:a_token"),
                    ("ArrayIn.outputs:negativeValues", "Extended2.inputs:toNegate"),
                    ("Extended2.outputs:negatedResult", "ArrayOut.inputs:gates"),
                ],
                keys.SET_VALUES: [
                    ("SimpleIn.inputs:a_float", 5.0),
                    ("SimpleIn.inputs:a_token", "hello"),
                    ("ArrayIn.inputs:multiplier", 2.0),
                    ("ArrayIn.inputs:original", [5.0, -6.0]),
                    ("ArrayIn.inputs:gates", [False, True]),
                ],
            },
        )
        await og.Controller.evaluate()
        # Check that the inputs into the extended type nodes are correct
        self.assertEqual(6.0, og.Controller.get(("outputs:a_float", simple_node)))
        self.assertEqual("world", og.Controller.get(("outputs:a_token", simple_node)))
        self.assertCountEqual([5.0, -12.0], og.Controller.get(("outputs:result", array_node)))
        self.assertCountEqual([False, True], og.Controller.get(("outputs:negativeValues", array_node)))
        # Check the extended simple value outputs
        self.assertEqual(12.0, og.Controller.get(("outputs:doubledResult", extended_node_1)))
        self.assertEqual("worldworld", og.Controller.get(("outputs:doubledResult", extended_node_2)))
        # Check the extended array value outputs
        self.assertCountEqual([-5.0, 12.0], og.Controller.get(("outputs:negatedResult", extended_node_1)))
        self.assertCountEqual([True, False], og.Controller.get(("outputs:negatedResult", extended_node_2)))
    # ----------------------------------------------------------------------
    async def test_tutorial_extended_attributes_node_cpp(self):
        """Test basic operation of the C++ tutorial node containing extended attributes."""
        await self._test_tutorial_extended_attributes_node("omni.graph.tutorials.ExtendedTypes")
    # ----------------------------------------------------------------------
    async def test_tutorial_extended_attributes_node_python(self):
        """Test basic operation of the Python tutorial node containing extended attributes."""
        await self._test_tutorial_extended_attributes_node("omni.graph.tutorials.ExtendedTypesPy")
    # ----------------------------------------------------------------------
    async def _test_tutorial_extended_attributes_tuples(self, test_node_type_name: str):
        """Test basic operation of the tutorial node containing extended attributes on its tuple-accepting attributes

        Args:
            test_node_type_name: Node type to instantiate for the two extended-attribute nodes
        """
        # Set up a graph that creates full type resolution for the tuple types of the extended attribute node with
        # two different resolved types.
        #
        keys = og.Controller.Keys
        (
            _,
            [tuple_node, _, extended_node_1, extended_node_2, tuple_array_node, _, simple_node, _],
            _,
            _,
        ) = og.Controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: [
                    ("TupleIn", "omni.tutorials.TupleData"),
                    ("TupleOut", "omni.tutorials.TupleData"),
                    ("Extended1", test_node_type_name),
                    ("Extended2", test_node_type_name),
                    ("TupleArrayIn", "omni.graph.tutorials.TupleArrays"),
                    ("TupleArrayOut", "omni.graph.tutorials.TupleArrays"),
                    ("SimpleIn", "omni.graph.tutorials.SimpleData"),
                    ("SimpleOut", "omni.graph.tutorials.SimpleData"),
                ],
                keys.CONNECT: [
                    ("TupleIn.outputs:a_int2", "Extended1.inputs:tuple"),
                    ("Extended1.outputs:tuple", "TupleOut.inputs:a_int2"),
                    ("TupleArrayIn.inputs:a", "Extended1.inputs:flexible"),
                    ("Extended1.outputs:flexible", "TupleArrayOut.inputs:a"),
                    ("TupleIn.outputs:a_float3", "Extended2.inputs:tuple"),
                    ("Extended2.outputs:tuple", "TupleOut.inputs:a_float3"),
                    ("SimpleIn.outputs:a_token", "Extended2.inputs:flexible"),
                    ("Extended2.outputs:flexible", "SimpleOut.inputs:a_token"),
                ],
                keys.SET_VALUES: [
                    ("TupleIn.inputs:a_int2", [4, 2]),
                    ("TupleIn.inputs:a_float3", (4.0, 10.0, 2.0)),
                    ("TupleArrayIn.inputs:a", [[2.0, 3.0, 7.0], [21.0, 14.0, 6.0]]),
                    ("TupleArrayIn.inputs:b", [[21.0, 14.0, 6.0], [2.0, 3.0, 7.0]]),
                    ("SimpleIn.inputs:a_token", "hello"),
                ],
            },
        )
        await og.Controller.evaluate()
        # Check that the inputs into the extended type nodes are correct
        self.assertEqual("world", og.Controller.get(("outputs:a_token", simple_node)))
        self.assertCountEqual([5, 3], og.Controller.get(("outputs:a_int2", tuple_node)))
        self.assertCountEqual([5.0, 11.0, 3.0], og.Controller.get(("outputs:a_float3", tuple_node)))
        self.assertCountEqual([126.0, 126.0], og.Controller.get(("outputs:result", tuple_array_node)))
        # Check the resulting values from resolving the "any" type to tuples
        self.assertCountEqual([-5, -3], og.Controller.get(("outputs:tuple", extended_node_1)))
        self.assertCountEqual([-5.0, -11.0, -3.0], og.Controller.get(("outputs:tuple", extended_node_2)))
        # Check the resulting values from resolving the flexible type as both of its types
        self.assertEqual("dlrow", og.Controller.get(("outputs:flexible", extended_node_2)))
        # Arrays of tuples need elementwise comparison since assertCountEqual is not recursive
        list_expected = [[-2.0, -3.0, -7.0], [-21.0, -14.0, -6.0]]
        list_computed = og.Controller.get(("outputs:flexible", extended_node_1))
        for expected, computed in zip(list_expected, list_computed):
            self.assertCountEqual(expected, computed)
    # ----------------------------------------------------------------------
    async def test_tutorial_extended_attributes_tuples_cpp(self):
        """Test basic operation of the C++ tutorial node containing extended attributes."""
        await self._test_tutorial_extended_attributes_tuples("omni.graph.tutorials.ExtendedTypes")
    # ----------------------------------------------------------------------
    async def test_tutorial_extended_attributes_tuples_python(self):
        """Test basic operation of the Python tutorial node containing extended attributes."""
        await self._test_tutorial_extended_attributes_tuples("omni.graph.tutorials.ExtendedTypesPy")
| 8,633 | Python | 55.431372 | 120 | 0.554269 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_deletion.py | """OmniGraph deletion tests"""
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.kit.commands
import omni.kit.test
import omni.usd
class TestOmniGraphDeletion(ogts.OmniGraphTestCase):
    """Tests covering various aspects of deleting things from OmniGraph"""
    async def test_omnigraph_usd_deletion(self):
        """Verify that deleting a node does not delete a sibling with a similar-looking path"""
        # In this test we set up 2 very similar looking nodes:
        # /new_node and /new_node_01. We then delete /new_node
        # The idea is that this looks very similar to cases like
        # /parent/path/stuff/mynode where we delete /parent/path
        # In that case we want to delete mynode, but in our case
        # we do not want to delete /new_node_01 when /new_node is
        # deleted.
        keys = og.Controller.Keys
        (graph, [new_node, new_node_01], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: [
                    ("new_node", "omni.graph.tutorials.Empty"),
                    ("new_node_01", "omni.graph.tutorials.Empty"),
                ]
            },
        )
        await og.Controller.evaluate()
        self.assertIsNotNone(new_node_01)
        self.assertTrue(new_node_01.is_valid())
        self.assertIsNotNone(new_node)
        self.assertTrue(new_node.is_valid())
        og.Controller.delete_node(new_node)
        # The sibling must survive the deletion; only the deleted node becomes invalid
        new_node_01 = graph.get_node("/TestGraph/new_node_01")
        self.assertIsNotNone(new_node_01)
        self.assertTrue(new_node_01.is_valid())
        new_node = graph.get_node("/TestGraph/new_node")
        self.assertFalse(new_node.is_valid())
        # Undoing the deletion should restore the deleted node to a valid state
        omni.kit.undo.undo()
        new_node_01 = graph.get_node("/TestGraph/new_node_01")
        self.assertIsNotNone(new_node_01)
        self.assertTrue(new_node_01.is_valid())
        new_node = graph.get_node("/TestGraph/new_node")
        self.assertIsNotNone(new_node)
        self.assertTrue(new_node.is_valid())
    # --------------------------------------------------------------------------------------------------------------
    async def test_fabric_dangling_connections(self):
        """Verify that disconnecting attributes actually breaks the connection in the fabric"""
        # In this test we create two nodes, connect them together. The output of the first node drives the input
        # of the second node. When we break the connection, we need to verify that the output of the second node
        # now takes its input from its own value, rather than the connected value (ie. the connection is actually
        # broken in the fabric).
        keys = og.Controller.Keys
        (graph, [node_a, node_b], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: [
                    ("node_a", "omni.graph.tutorials.SimpleData"),
                    ("node_b", "omni.graph.tutorials.SimpleData"),
                ]
            },
        )
        await og.Controller.evaluate()
        # Re-fetch the nodes by path to confirm they are reachable through the graph as well
        node_a = graph.get_node("/TestGraph/node_a")
        self.assertIsNotNone(node_a)
        self.assertTrue(node_a.is_valid())
        node_b = graph.get_node("/TestGraph/node_b")
        self.assertIsNotNone(node_b)
        self.assertTrue(node_b.is_valid())
        upstream_attr = node_a.get_attribute("outputs:a_int")
        downstream_attr = node_b.get_attribute("inputs:a_int")
        og.Controller.connect(upstream_attr, downstream_attr)
        await og.Controller.evaluate()
        # This node "omni.graph.tutorials.SimpleData" add 1 to the input. The default value of a_int is 0, so the output
        # of the first node is 1. When this is used as the input to the second node, we expect the value to be 2:
        value = og.Controller.get(node_b.get_attribute("outputs:a_int"))
        self.assertEqual(value, 2)
        og.Controller.disconnect(upstream_attr, downstream_attr)
        await og.Controller.evaluate()
        # Now that the connection is broken, the value should now be 1. However, if it didn't actually break the
        # connection in the fabric, the graph would still "think" it's connected and output 2.
        value = og.Controller.get(node_b.get_attribute("outputs:a_int"))
        self.assertEqual(value, 1)
| 4,140 | Python | 42.135416 | 120 | 0.602657 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_tutorial_state.py | """
Tests for the omnigraph.tutorial.state node
"""
import omni.graph.core as og
import omni.graph.core.tests as ogts
class TestTutorialState(ogts.OmniGraphTestCase):
    """Multi-evaluation checks of per-node internal state (.ogn tests only evaluate once)"""

    # ----------------------------------------------------------------------
    async def test_tutorial_state_node(self):
        """Verify each state node keeps its own monotonically increasing internal counter"""
        self.assertTrue(og.get_node_type("omni.graph.tutorials.State").has_state(), "Tutorial state node has state")
        # Create the state nodes one at a time, evaluating after each creation so that
        # every node's state information is properly initialized before the next appears.
        # If that requirement weren't there this would just be done in one call.
        state_nodes = []
        for index in range(5):
            (graph, new_nodes, _, _) = og.Controller.edit(
                "/StateGraph",
                {
                    og.Controller.Keys.CREATE_NODES: [
                        (f"State{index}", "omni.graph.tutorials.State"),
                    ]
                },
            )
            state_nodes.append(new_nodes[0])
            await og.Controller.evaluate(graph)
        monotonic_attrs = [node.get_attribute("outputs:monotonic") for node in state_nodes]
        first_pass = [og.Controller(attr).get() for attr in monotonic_attrs]
        # Nodes created later were initialized later, so their counters must be strictly larger
        for earlier, later in zip(first_pass, first_pass[1:]):
            self.assertLess(earlier, later, "Comparing state of other nodes")
        await og.Controller.evaluate(graph)
        second_pass = [og.Controller(attr).get() for attr in monotonic_attrs]
        for earlier, later in zip(second_pass, second_pass[1:]):
            self.assertLess(earlier, later, "Comparing second state of other nodes")
        # Each node's counter must have advanced between the two evaluations
        for before, after in zip(first_pass, second_pass):
            self.assertLess(before, after, "Comparing node state updates")
| 1,998 | Python | 45.488371 | 117 | 0.605105 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_tutorial_state_attributes_py.py | """
Tests for the omnigraph.tutorial.stateAttributesPy node
"""
import omni.graph.core as og
import omni.graph.core.tests as ogts
class TestTutorialStateAttributesPy(ogts.OmniGraphTestCase):
    """.ogn tests only run once while state tests require multiple evaluations, handled here"""
    # ----------------------------------------------------------------------
    async def test_tutorial_state_attributes_py_node(self):
        """Test basic operation of the Python tutorial node containing state attributes"""
        keys = og.Controller.Keys
        # Create the node with state:reset initially True so the first compute performs a reset
        (_, [state_node], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: ("StateNode", "omni.graph.tutorials.StateAttributesPy"),
                keys.SET_VALUES: ("StateNode.state:reset", True),
            },
        )
        await og.Controller.evaluate()
        reset_attr = og.Controller.attribute("state:reset", state_node)
        monotonic_attr = og.Controller.attribute("state:monotonic", state_node)
        # First evaluation consumed the reset: counter back to zero, flag cleared
        self.assertEqual(og.Controller.get(reset_attr), False, "Reset attribute set back to False")
        self.assertEqual(og.Controller.get(monotonic_attr), 0, "Monotonic attribute reset to start")
        await og.Controller.evaluate()
        self.assertEqual(og.Controller.get(reset_attr), False, "Reset attribute still False")
        self.assertEqual(og.Controller.get(monotonic_attr), 1, "Monotonic attribute incremented once")
        await og.Controller.evaluate()
        self.assertEqual(og.Controller.get(monotonic_attr), 2, "Monotonic attribute incremented twice")
        # Setting the reset flag again should restart the counter on the next evaluation
        og.Controller.set(reset_attr, True)
        await og.Controller.evaluate()
        self.assertEqual(og.Controller.get(reset_attr), False, "Reset again set back to False")
        self.assertEqual(og.Controller.get(monotonic_attr), 0, "Monotonic attribute again reset to start")
| 1,890 | Python | 46.274999 | 106 | 0.655026 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_bundles.py | """
Tests for attribute bundles
"""
import unittest
from contextlib import suppress
from typing import Any, List, Tuple
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.graph.nodes.tests as ognts
import omni.graph.tools as ogt
from omni.kit.test.teamcity import is_running_in_teamcity
from pxr import Gf
# ==============================================================================================================
def multiply_elements(multiplier, original: Any) -> Any:
    """Recursively multiply every element of a possibly nested value by a constant.

    Args:
        multiplier: Constant each numeric leaf value is multiplied by
        original: A number, or an arbitrarily nested tuple/list of numbers

    Returns:
        The scaled result; tuple and list containers both come back as tuples
    """
    # The original had two byte-identical branches for tuple and list; both
    # normalize the container to a tuple while recursing into the elements.
    if isinstance(original, (tuple, list)):
        return tuple(multiply_elements(multiplier, element) for element in original)
    return multiplier * original
# ==============================================================================================================
def as_tuple(value: Any) -> Tuple:
    """Coerce a value to a tuple where possible; simple scalars are returned unchanged.

    Tuples pass through untouched, lists become tuples, quaternions expand to
    (i, j, k, real), 4x4 matrices become nested row tuples, and any other object
    reporting a length greater than one is converted with tuple().
    """
    if isinstance(value, tuple):
        return value
    if isinstance(value, list):
        return tuple(value)
    if isinstance(value, (Gf.Quatd, Gf.Quatf, Gf.Quath)):
        imaginary = value.GetImaginary()
        return (*imaginary, value.GetReal())
    if isinstance(value, Gf.Matrix4d):
        return tuple(tuple(list(row)) for row in value)
    # Generic sequence fallback; objects without len() raise TypeError and
    # drop through to be returned as-is
    with suppress(TypeError):
        if len(value) > 1:
            return tuple(value)
    return value
# ==============================================================================================================
class TestOmniGraphBundles(ogts.OmniGraphTestCase):
"""Attribute bundles do not yet have the ability to log tests directly in a .ogn file so it's done here"""
# --------------------------------------------------------------------------------------------------------------
def _compare_results(self, expected_raw: Any, actual_raw: Any, test_info: str):
"""Loose comparison of two types of compatible values
Args:
expected_raw: Expected results. Can be simple values, tuples, lists, or pxr.Gf types
actual_raw: Actual results. Can be simple values, tuples, lists, or pxr.Gf types
test_info: String to accompany error messages
Returns:
Error encountered when mismatched values found, None if everything matched
"""
expected = as_tuple(expected_raw)
actual = as_tuple(actual_raw)
self.assertEqual(
type(expected), type(actual), f"{test_info} Mismatched types - expected {expected_raw}, got {actual_raw}"
)
if isinstance(expected, tuple):
for expected_element, actual_element in zip(expected, actual):
self._compare_results(expected_element, actual_element, test_info)
else:
self.assertEqual(expected, actual, f"{test_info} Expected {expected}, got {actual}")
# ----------------------------------------------------------------------
    async def _test_bundle_contents(self, node_type_to_test: str):
        """Test access to bundle attribute manipulation for C++ and Python implementations

        Args:
            node_type_to_test: Bundle-manipulation node type, shared by the C++/Python variants
        """
        keys = og.Controller.Keys
        # Two prim-backed bundles feed the manipulator; its combined output feeds a BundleInspector
        (graph, [bundle_node, inspector_node, _, _], [_, filtered_prim], _) = og.Controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: [
                    ("BundleManipulator", node_type_to_test),
                    ("Inspector", "omni.graph.nodes.BundleInspector"),
                ],
                keys.CREATE_PRIMS: [
                    ("FullPrim", {"fullInt": ("int", 2), "floatArray": ("float[]", [3.0, 4.0, 5.0])}),
                    (
                        "FilteredPrim",
                        {
                            "int_1": ("int", 6),
                            "uint_1": ("uint", 7),
                            "int64_x": ("int64", 8),
                            "uint64_1": ("uint64", 9),
                            "int_3": ("int[3]", (10, 11, 12)),
                            "int_array": ("int[]", [13, 14, 15]),
                            "float_1": ("float", 3.0),
                            "double_x": ("double", 4.0),
                            "big_int": ("int[]", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
                        },
                    ),
                ],
                keys.EXPOSE_PRIMS: [
                    (og.Controller.PrimExposureType.AS_BUNDLE, "/FullPrim", "FullPrimExtract"),
                    (og.Controller.PrimExposureType.AS_BUNDLE, "/FilteredPrim", "FilteredPrimExtract"),
                ],
                keys.CONNECT: [
                    ("FullPrimExtract.outputs_primBundle", "BundleManipulator.inputs:fullBundle"),
                    ("FilteredPrimExtract.outputs_primBundle", "BundleManipulator.inputs:filteredBundle"),
                    ("BundleManipulator.outputs_combinedBundle", "Inspector.inputs:bundle"),
                ],
            },
        )
        _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node)
        await og.Controller.evaluate()
        # Expected inspector rows keyed by attribute name, as (type, tuple count, array depth, role, value)
        expected_results = {
            "big_int": ("int", 1, 1, "none", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
            "int_1": ("int", 1, 0, "none", 6),
            "uint_1": ("uint", 1, 0, "none", 7),
            "int64_x": ("int64", 1, 0, "none", 8),
            "uint64_1": ("uint64", 1, 0, "none", 9),
            "int_3": ("int", 3, 0, "none", (10, 11, 12)),
            "int_array": ("int", 1, 1, "none", [13, 14, 15]),
            "float_1": ("float", 1, 0, "none", 3.0),
            "double_x": ("double", 1, 0, "none", 4.0),
            "fullInt": ("int", 1, 0, "none", 2),
            "floatArray": ("float", 1, 1, "none", [3.0, 4.0, 5.0]),
            "sourcePrimPath": ("token", 1, 0, "none", str(filtered_prim.GetPrimPath())),
            "sourcePrimType": ("token", 1, 0, "none", str(filtered_prim.GetTypeName())),
        }
        def __result_subset(elements_removed: List[str]):
            """Return the set of results with the index list of elements removed"""
            return {key: value for key, value in expected_results.items() if key not in elements_removed}
        # Test data consists of a list of individual test configurations with the filters to set and the
        # expected contents of the output bundle using those filters.
        test_data = [
            ([], expected_results),
            (["x"], __result_subset(["int64_x", "double_x"])),
            (["int"], __result_subset(["big_int", "int_1", "uint_1", "int64_x", "uint64_1", "int_3", "int_array"])),
            (["big"], __result_subset(["big_int"])),
            (
                ["x", "big", "int"],
                __result_subset(
                    ["big_int", "int_1", "uint_1", "int64_x", "uint64_1", "int_3", "int_array", "double_x"]
                ),
            ),
        ]
        # NOTE: the loop rebinds expected_results; __result_subset was already evaluated above
        for (filters, expected_results) in test_data:
            og.Controller.edit(
                graph,
                {
                    keys.SET_VALUES: (("inputs:filters", bundle_node), filters),
                },
            )
            await og.Controller.evaluate()
            try:
                ognts.verify_bundles_are_equal(
                    ognts.filter_bundle_inspector_results(
                        ognts.bundle_inspector_results(inspector_node), [], filter_for_inclusion=False
                    ),
                    ognts.filter_bundle_inspector_results(
                        (len(expected_results), expected_results), [], filter_for_inclusion=False
                    ),
                )
            except ValueError as error:
                self.assertTrue(False, error)
# ----------------------------------------------------------------------
    async def test_bundle_contents_cpp(self):
        """Test access to bundle attribute manipulation on the C++ implemented node"""
        await self._test_bundle_contents("omni.graph.tutorials.BundleManipulation")
# ----------------------------------------------------------------------
    async def test_bundle_contents_py(self):
        """Test bundle attribute manipulation on the Python implemented node"""
        await self._test_bundle_contents("omni.graph.tutorials.BundleManipulationPy")
# ----------------------------------------------------------------------
    async def _test_simple_bundled_data(self, node_type_to_test: str):
        """Test access to attributes with simple data types within bundles for both C++ and Python implementations

        Args:
            node_type_to_test: Bundle-data node type, shared by the C++/Python variants
        """
        controller = og.Controller()
        keys = og.Controller.Keys
        # A prim with one attribute of every supported type, exposed as a bundle into the modifier node
        prim_definition = ognts.prim_with_everything_definition()
        (_, [_, inspector_node, _], _, _) = controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: [
                    ("BundledDataModifier", node_type_to_test),
                    ("Inspector", "omni.graph.nodes.BundleInspector"),
                ],
                keys.CREATE_PRIMS: ("TestPrim", prim_definition),
                keys.EXPOSE_PRIMS: (og.Controller.PrimExposureType.AS_BUNDLE, "TestPrim", "TestBundle"),
                keys.CONNECT: [
                    ("TestBundle.outputs_primBundle", "BundledDataModifier.inputs:bundle"),
                    ("BundledDataModifier.outputs_bundle", "Inspector.inputs:bundle"),
                ],
            },
        )
        _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node)
        await controller.evaluate()
        (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node)
        # Prim will not have the "sourcePrimXX" attributes so it is smaller by 2
        self.assertEqual(len(prim_definition) + 2, bundled_count)
        for name, (ogn_type, array_depth, tuple_count, role, actual_output) in bundled_results.items():
            if name in ["sourcePrimType", "sourcePrimPath"]:
                continue
            if not node_type_to_test.endswith("Py") and ogn_type not in ["int", "int64", "uint", "uint64"]:
                # The C++ node only handles integer types, in the interest of brevity
                continue
            # Build a human-readable description of the attribute for assertion messages
            test_info = "Bundled"
            if tuple_count > 1:
                test_info += f" tuple[{tuple_count}]"
            if array_depth > 0:
                test_info += " array"
            test_info += f" attribute {name} of type {ogn_type}"
            if role != "none":
                test_info += f" (role {role})"
            if ogn_type == "token" or role == "text":
                # String-like attributes are doubled by concatenation rather than multiplied
                expected_output = prim_definition[name][1]
                if isinstance(expected_output, str):
                    expected_output += expected_output
                else:
                    expected_output = [f"{element}{element}" for element in expected_output]
                self._compare_results(expected_output, actual_output, test_info)
            else:
                # Booleans pass through unchanged; all other numeric types are doubled
                if ogn_type in ["bool", "bool[]"]:
                    expected_output = as_tuple(prim_definition[name][1])
                else:
                    expected_output = multiply_elements(2, as_tuple(prim_definition[name][1]))
                self._compare_results(expected_output, actual_output, test_info)
# ----------------------------------------------------------------------
    async def test_simple_bundled_data_cpp(self):
        """Test access to attributes with simple data types within bundles on the C++ implemented node"""
        await self._test_simple_bundled_data("omni.graph.tutorials.BundleData")
# ----------------------------------------------------------------------
    async def test_simple_bundled_data_py(self):
        """Test access to attributes with simple data types within bundles on the Python implemented node"""
        await self._test_simple_bundled_data("omni.graph.tutorials.BundleDataPy")
# ----------------------------------------------------------------------
    async def _test_add_attributes_to_bundle(self, node_type_to_test: str):
        """Test basic operation of the tutorial node that adds new attributes to a bundle

        Args:
            node_type_to_test: Attribute-adding node type, shared by the C++/Python variants
        """
        keys = og.Controller.Keys
        (graph, [add_node, inspector_node], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                keys.CREATE_NODES: [
                    ("AddAttributes", node_type_to_test),
                    ("Inspector", "omni.graph.nodes.BundleInspector"),
                ],
                keys.CONNECT: ("AddAttributes.outputs_bundle", "Inspector.inputs:bundle"),
            },
        )
        await og.Controller.evaluate()
        # Inputs on the node under test that drive which attributes are added/removed
        types_attribute = og.Controller.attribute("inputs:typesToAdd", add_node)
        added_names_attribute = og.Controller.attribute("inputs:addedAttributeNames", add_node)
        removed_names_attribute = og.Controller.attribute("inputs:removedAttributeNames", add_node)
        _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node)
        # List of test data configurations to run. The individual test entries consist of:
        #     - list of the types for the attributes to be added
        #     - list of the base types corresponding to the main types
        #     - list of names for the attributes to be added
        #     - list of values expected for the new bundle as (count, roles, arrayDepths, tupleCounts, values)
        test_data = [
            [
                ["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"],
                ["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"],
                [f"output{n}" for n in range(9)],
                (9, ["none"] * 9, [0] * 9, [1] * 9, [0.0, 0.0, 0, 0, 0, 0, 0, False, ""]),
            ],
            [
                ["float[]", "double[]", "int[]", "int64[]", "uchar[]", "uint[]", "uint64[]", "token[]"],
                ["float", "double", "int", "int64", "uchar", "uint", "uint64", "token"],
                [f"output{n}" for n in range(8)],
                (8, ["none"] * 8, [1] * 8, [1] * 8, [[], [], [], [], [], [], [], []]),
            ],
            [
                ["float[3]", "double[2]", "int[4]"],
                ["float", "double", "int"],
                [f"output{n}" for n in range(3)],
                (3, ["none"] * 3, [0] * 3, [3, 2, 4], [[0.0, 0.0, 0.0], [0.0, 0.0], [0, 0, 0, 0]]),
            ],
            [
                ["float[3][]", "double[2][]", "int[4][]"],
                ["float", "double", "int"],
                [f"output{n}" for n in range(3)],
                (3, ["none"] * 3, [1] * 3, [3, 2, 4], [[], [], []]),
            ],
            [
                ["any"],
                ["token"],
                ["output0"],
                (1, ["none"], [0], [1], [""]),
            ],
            [
                ["colord[3]", "colorf[4]", "colorh[3]"],
                ["double", "float", "half"],
                [f"output{n}" for n in range(3)],
                (3, ["color"] * 3, [0] * 3, [3, 4, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
            ],
            [
                ["normald[3]", "normalf[3]", "normalh[3]"],
                ["double", "float", "half"],
                [f"output{n}" for n in range(3)],
                (3, ["normal"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
            ],
            [
                ["pointd[3]", "pointf[3]", "pointh[3]"],
                ["double", "float", "half"],
                [f"output{n}" for n in range(3)],
                (3, ["point"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
            ],
            [
                ["quatd[4]", "quatf[4]", "quath[4]"],
                ["double", "float", "half"],
                [f"output{n}" for n in range(3)],
                (
                    3,
                    ["quat"] * 3,
                    [0] * 3,
                    [4, 4, 4],
                    [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
                ),
            ],
            [
                ["texcoordd[3]", "texcoordf[2]", "texcoordh[3]"],
                ["double", "float", "half"],
                [f"output{n}" for n in range(3)],
                (3, ["texcoord"] * 3, [0] * 3, [3, 2, 3], [[0.0, 0.0, 0.0], [0.0, 0.0], [0.0, 0.0, 0.0]]),
            ],
            [
                ["vectord[3]", "vectorf[3]", "vectorh[3]"],
                ["double", "float", "half"],
                [f"output{n}" for n in range(3)],
                (3, ["vector"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
            ],
            [
                ["timecode"],
                ["double"],
                ["output0"],
                (1, ["timecode"], [0], [1], [0.0]),
            ],
        ]
        for (types, base_types, names, results) in test_data:
            og.Controller.edit(
                graph,
                {
                    keys.SET_VALUES: [
                        (types_attribute, types),
                        (added_names_attribute, names),
                    ]
                },
            )
            await og.Controller.evaluate()
            (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node)
            self.assertEqual(bundled_count, results[0])
            self.assertCountEqual(list(bundled_results.keys()), names)
            for index, name in enumerate(names):
                error = f"Checking {{}} for attribute {name}"
                named_results = bundled_results[name]
                self.assertEqual(
                    named_results[ognts.BundleResultKeys.TYPE_IDX], base_types[index], error.format("type")
                )
                self.assertEqual(
                    named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX],
                    results[2][index],
                    error.format("array depth"),
                )
                self.assertEqual(
                    named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX],
                    results[3][index],
                    error.format("tuple count"),
                )
                self.assertEqual(
                    named_results[ognts.BundleResultKeys.ROLE_IDX], results[1][index], error.format("role")
                )
        # One final test run that also includes a remove of the 2nd, 4th, and 7th attributes in the first test list
        (types, base_types, names, results) = test_data[0]
        og.Controller.edit(
            graph,
            {
                keys.SET_VALUES: [
                    (types_attribute, types),
                    (added_names_attribute, names),
                    (removed_names_attribute, [names[2], names[4], names[7]]),
                ]
            },
        )
        await og.Controller.evaluate()
        def pruned_list(original: List) -> List:
            """Remove the elements 2, 4, 7 from the list and return the result"""
            return [item for index, item in enumerate(original) if index not in [2, 4, 7]]
        (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node)
        # Modify the expected results to account for the removed attributes
        self.assertEqual(bundled_count, results[0] - 3)
        names_expected = pruned_list(names)
        base_types_expected = pruned_list(base_types)
        array_depths_expected = pruned_list(results[2])
        tuple_counts_expected = pruned_list(results[3])
        roles_expected = pruned_list(results[1])
        self.assertCountEqual(list(bundled_results.keys()), names_expected)
        for index, name in enumerate(names_expected):
            error = f"Checking {{}} for attribute {name}"
            named_results = bundled_results[name]
            self.assertEqual(
                named_results[ognts.BundleResultKeys.TYPE_IDX], base_types_expected[index], error.format("type")
            )
            self.assertEqual(
                named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX],
                array_depths_expected[index],
                error.format("array depth"),
            )
            self.assertEqual(
                named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX],
                tuple_counts_expected[index],
                error.format("tuple count"),
            )
            self.assertEqual(
                named_results[ognts.BundleResultKeys.ROLE_IDX], roles_expected[index], error.format("role")
            )
# ----------------------------------------------------------------------
async def test_add_attributes_to_bundle_cpp(self):
"""Test adding attributes to bundles on the C++ implemented node"""
await self._test_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributes")
# ----------------------------------------------------------------------
async def test_add_attributes_to_bundle_py(self):
"""Test adding attributes to bundles on the Python implemented node"""
await self._test_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributesPy")
# ----------------------------------------------------------------------
async def _test_batched_add_attributes_to_bundle(self, node_type_to_test: str):
"""Test basic operation of the tutorial node that adds new attributes to a bundle"""
keys = og.Controller.Keys
(graph, [add_node, inspector_node], _, _) = og.Controller.edit(
"/TestGraph",
{
keys.CREATE_NODES: [
("AddAttributes", node_type_to_test),
("Inspector", "omni.graph.nodes.BundleInspector"),
],
keys.CONNECT: ("AddAttributes.outputs_bundle", "Inspector.inputs:bundle"),
},
)
await og.Controller.evaluate()
types_attribute = og.Controller.attribute("inputs:typesToAdd", add_node)
added_names_attribute = og.Controller.attribute("inputs:addedAttributeNames", add_node)
removed_names_attribute = og.Controller.attribute("inputs:removedAttributeNames", add_node)
batched_api_attribute = og.Controller.attribute("inputs:useBatchedAPI", add_node)
_ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node)
# List of test data configurations to run. The individual test entries consist of:
# - list of the types for the attributes to be added
# - list of the base types corresponding to the main types
# - list of names for the attributes to be added
# - list of values expected for the new bundle as (count, roles, arrayDepths, tupleCounts, values)
test_data = [
[
["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"],
["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"],
[f"output{n}" for n in range(9)],
(9, ["none"] * 9, [0] * 9, [1] * 9, [0.0, 0.0, 0, 0, 0, 0, 0, False, ""]),
],
[
["float[]", "double[]", "int[]", "int64[]", "uchar[]", "uint[]", "uint64[]", "token[]"],
["float", "double", "int", "int64", "uchar", "uint", "uint64", "token"],
[f"output{n}" for n in range(8)],
(8, ["none"] * 8, [1] * 8, [1] * 8, [[], [], [], [], [], [], [], []]),
],
[
["float[3]", "double[2]", "int[4]"],
["float", "double", "int"],
[f"output{n}" for n in range(3)],
(3, ["none"] * 3, [0] * 3, [3, 2, 4], [[0.0, 0.0, 0.0], [0.0, 0.0], [0, 0, 0, 0]]),
],
[
["float[3][]", "double[2][]", "int[4][]"],
["float", "double", "int"],
[f"output{n}" for n in range(3)],
(3, ["none"] * 3, [1] * 3, [3, 2, 4], [[], [], []]),
],
[
["any"],
["token"],
["output0"],
(1, ["none"], [0], [1], [""]),
],
[
["colord[3]", "colorf[4]", "colorh[3]"],
["double", "float", "half"],
[f"output{n}" for n in range(3)],
(3, ["color"] * 3, [0] * 3, [3, 4, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
],
[
["normald[3]", "normalf[3]", "normalh[3]"],
["double", "float", "half"],
[f"output{n}" for n in range(3)],
(3, ["normal"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
],
[
["pointd[3]", "pointf[3]", "pointh[3]"],
["double", "float", "half"],
[f"output{n}" for n in range(3)],
(3, ["point"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
],
[
["quatd[4]", "quatf[4]", "quath[4]"],
["double", "float", "half"],
[f"output{n}" for n in range(3)],
(
3,
["quat"] * 3,
[0] * 3,
[4, 4, 4],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
),
],
[
["texcoordd[3]", "texcoordf[2]", "texcoordh[3]"],
["double", "float", "half"],
[f"output{n}" for n in range(3)],
(3, ["texcoord"] * 3, [0] * 3, [3, 2, 3], [[0.0, 0.0, 0.0], [0.0, 0.0], [0.0, 0.0, 0.0]]),
],
[
["vectord[3]", "vectorf[3]", "vectorh[3]"],
["double", "float", "half"],
[f"output{n}" for n in range(3)],
(3, ["vector"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
],
[
["timecode"],
["double"],
["output0"],
(1, ["timecode"], [0], [1], [0.0]),
],
]
for (types, base_types, names, results) in test_data:
og.Controller.edit(
graph,
{
keys.SET_VALUES: [
(types_attribute, types),
(added_names_attribute, names),
(batched_api_attribute, True),
]
},
)
await og.Controller.evaluate()
(bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node)
self.assertEqual(bundled_count, results[0])
self.assertCountEqual(list(bundled_results.keys()), names)
for index, name in enumerate(names):
error = f"Checking {{}} for attribute {name}"
named_results = bundled_results[name]
self.assertEqual(
named_results[ognts.BundleResultKeys.TYPE_IDX], base_types[index], error.format("type")
)
self.assertEqual(
named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX],
results[2][index],
error.format("array depth"),
)
self.assertEqual(
named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX],
results[3][index],
error.format("tuple count"),
)
self.assertEqual(
named_results[ognts.BundleResultKeys.ROLE_IDX], results[1][index], error.format("role")
)
# One final test run that also includes a remove of the 2nd, 4th, and 7th attributes in the first test list
(types, base_types, names, results) = test_data[0]
og.Controller.edit(
graph,
{
keys.SET_VALUES: [
(types_attribute, types),
(added_names_attribute, names),
(removed_names_attribute, [names[2], names[4], names[7]]),
]
},
)
await og.Controller.evaluate()
def pruned_list(original: List) -> List:
"""Remove the elements 2, 4, 7 from the list and return the result"""
return [item for index, item in enumerate(original) if index not in [2, 4, 7]]
await og.Controller.evaluate()
(bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node)
self.assertEqual(bundled_count, results[0] - 3)
# Modify the expected results to account for the removed attributes
names_expected = pruned_list(names)
base_types_expected = pruned_list(base_types)
array_depths_expected = pruned_list(results[2])
tuple_counts_expected = pruned_list(results[3])
roles_expected = pruned_list(results[1])
self.assertCountEqual(list(bundled_results.keys()), names_expected)
for index, name in enumerate(names_expected):
error = f"Checking {{}} for attribute {name}"
named_results = bundled_results[name]
self.assertEqual(
named_results[ognts.BundleResultKeys.TYPE_IDX], base_types_expected[index], error.format("type")
)
self.assertEqual(
named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX],
array_depths_expected[index],
error.format("array depth"),
)
self.assertEqual(
named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX],
tuple_counts_expected[index],
error.format("tuple count"),
)
self.assertEqual(
named_results[ognts.BundleResultKeys.ROLE_IDX], roles_expected[index], error.format("role")
)
# ----------------------------------------------------------------------
async def test_batched_add_attributes_to_bundle_cpp(self):
"""Test adding attributes to bundles on the C++ implemented node"""
await self._test_batched_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributes")
# ----------------------------------------------------------------------
async def test_batched_add_attributes_to_bundle_py(self):
"""Test adding attributes to bundles on the Python implemented node"""
await self._test_batched_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributesPy")
# ----------------------------------------------------------------------
    async def _test_bundle_gpu(self, node_type_to_test: str):
        """Test basic operation of the tutorial node that accesses bundle data on the GPU.

        Two prims with point arrays are exposed as bundles; one is wired to the node's CPU
        bundle input and the other to its GPU bundle input. The node's output bundle is then
        checked through a BundleInspector for both values of the node's inputs:gpu toggle.

        Args:
            node_type_to_test: Fully qualified name of the node type implementation under test
        """
        controller = og.Controller()
        keys = og.Controller.Keys
        # First edit: create the prims and the nodes (prims must exist before they can be exposed)
        (graph, (bundle_node, inspector_node), _, _) = controller.edit(
            "/TestGraph",
            {
                keys.CREATE_PRIMS: [
                    ("Prim1", {"points": ("pointf[3][]", [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])}),
                    ("Prim2", {"points": ("pointf[3][]", [[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])}),
                ],
                keys.CREATE_NODES: [
                    ("GpuBundleNode", node_type_to_test),
                    ("Inspector", "omni.graph.nodes.BundleInspector"),
                ],
            },
        )
        # Second edit: expose the prims as bundles and wire them through the node to the inspector
        controller.edit(
            graph,
            {
                keys.EXPOSE_PRIMS: [
                    (og.GraphController.PrimExposureType.AS_BUNDLE, "Prim1", "Prim1Extract"),
                    (og.GraphController.PrimExposureType.AS_BUNDLE, "Prim2", "Prim2Extract"),
                ],
                keys.CONNECT: [
                    ("Prim1Extract.outputs_primBundle", "GpuBundleNode.inputs:cpuBundle"),
                    ("Prim2Extract.outputs_primBundle", "GpuBundleNode.inputs:gpuBundle"),
                    ("GpuBundleNode.outputs_cpuGpuBundle", "Inspector.inputs:bundle"),
                ],
            },
        )
        _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node)
        await controller.evaluate(graph)
        # The expected dot products are the same on both paths:
        #   [1,2,3].[4,5,6] = 4+10+18 = 32 and [4,5,6].[7,8,9] = 28+40+54 = 122
        for gpu in [False, True]:
            controller.attribute("inputs:gpu", bundle_node, graph).set(gpu)
            await controller.evaluate()
            (_, bundled_values) = ognts.bundle_inspector_results(inspector_node)
            self.assertEqual(bundled_values["dotProducts"][ognts.BundleResultKeys.VALUE_IDX], [32.0, 122.0])
# ----------------------------------------------------------------------
async def test_bundle_gpu_cpp(self):
"""Test access to bundle contents on GPU on the C++ implemented node"""
await self._test_bundle_gpu("omni.graph.tutorials.CpuGpuBundles")
# ----------------------------------------------------------------------
async def test_bundle_gpu_py(self):
"""Test bundle contents on GPU on the Python implemented node"""
await self._test_bundle_gpu("omni.graph.tutorials.CpuGpuBundlesPy")
# ----------------------------------------------------------------------
    @unittest.skipIf(is_running_in_teamcity(), "This is a manual test so it only needs to run locally")
    async def test_cuda_pointers_py(self):
        """Run a simple test on a node that extracts CUDA pointers from the GPU. Inspect the output for information"""
        # NOTE(review): this is a manual test -- it wires up the graph and evaluates it but makes
        # no programmatic assertions; the tester is expected to inspect the inspector's output.
        controller = og.Controller()
        keys = og.Controller.Keys
        # Create the source prim with point data plus the node under test and an inspector
        (graph, (_, inspector_node), _, _) = controller.edit(
            "/TestGraph",
            {
                keys.CREATE_PRIMS: [
                    ("Prim1", {"points": ("float[3][]", [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])}),
                ],
                keys.CREATE_NODES: [
                    ("GpuBundleNode", "omni.graph.tutorials.CudaCpuArraysPy"),
                    ("Inspector", "omni.graph.nodes.BundleInspector"),
                ],
            },
        )
        # Expose the prim as individual attributes (not as a bundle) so the points can be
        # connected directly to the node's points input
        controller.edit(
            graph,
            {
                keys.EXPOSE_PRIMS: [
                    (og.GraphController.PrimExposureType.AS_ATTRIBUTES, "Prim1", "Prim1Extract"),
                ],
            },
        )
        _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node)
        # Evaluate once before connecting so the exposed attributes are populated
        await controller.evaluate(graph)
        controller.edit(
            graph,
            {
                keys.CONNECT: [
                    ("Prim1Extract.outputs:points", "GpuBundleNode.inputs:points"),
                    ("GpuBundleNode.outputs_outBundle", "Inspector.inputs:bundle"),
                ],
            },
        )
| 34,927 | Python | 45.632844 | 118 | 0.474361 |
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_metadata.py | """Basic tests of the metadata bindings found in OmniGraphBindingsPython.cpp"""
import omni.graph.core as og
import omni.graph.core.tests as ogts
import omni.graph.tools.ogn as ogn
# ======================================================================
class TestOmniGraphMetadata(ogts.OmniGraphTestCase):
    """Encapsulate simple tests that exercise metadata access"""
    # ----------------------------------------------------------------------
    def __node_type_metadata_test(self, node_type: og.NodeType):
        """Test a node type object for metadata access.

        Sets a uniquely named key, verifies it through both single and bulk metadata getters,
        then removes it again so the test can be run repeatedly against the same node type.
        Finally verifies the hardcoded metadata expected on omni.tutorials.TupleData.

        Args:
            node_type: The node type whose metadata accessors are being exercised
        """
        original_count = node_type.get_metadata_count()
        key = "_node_type_metadata"
        value = "_test_value"
        node_type.set_metadata(key, value)
        # The new metadata key should now have the new value
        self.assertEqual(value, node_type.get_metadata(key))
        new_metadata = node_type.get_all_metadata()
        # The new key/value pair should now be part of the entire metadata list
        self.assertTrue(key in new_metadata)
        self.assertEqual(value, new_metadata[key])
        # Since a unique key was chosen there should be one extra metadata value
        self.assertEqual(original_count + 1, node_type.get_metadata_count())
        # Setting a value to None should remove the metadata from the node type.
        # Do this last so that the test can run multiple times successfully.
        node_type.set_metadata(key, None)
        self.assertEqual(original_count, node_type.get_metadata_count())
        # Test silent success of trying to set the metadata to illegal data
        node_type.set_metadata(None, value)
        # Verify the hardcoded metadata type names
        self.assertTrue(ogn.MetadataKeys.UI_NAME in new_metadata)
        self.assertTrue(ogn.MetadataKeys.TAGS in new_metadata)
        self.assertEqual("Tutorial Node: Tuple Attributes", new_metadata[ogn.MetadataKeys.UI_NAME])
        self.assertEqual("tuple,tutorial,internal", new_metadata[ogn.MetadataKeys.TAGS])
    # ----------------------------------------------------------------------
    async def test_node_type_metadata(self):
        """Test metadata features in the NodeType class"""
        # The TupleData tutorial node happens to have special metadata types defined
        node_type = og.get_node_type("omni.tutorials.TupleData")
        self.assertIsNotNone(node_type, "Empty node type to be used for test was not registered")
        self.__node_type_metadata_test(node_type)
    # ----------------------------------------------------------------------
    async def test_node_metadata(self):
        """Test metadata access through the node class"""
        # The TupleData tutorial node happens to have special metadata types defined
        (_, [test_node], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                og.Controller.Keys.CREATE_NODES: ("TupleNode", "omni.tutorials.TupleData"),
            },
        )
        await og.Controller.evaluate()
        # Same checks as test_node_type_metadata, but reached via a node instance
        self.__node_type_metadata_test(test_node.get_node_type())
    # ----------------------------------------------------------------------
    async def test_attribute_metadata(self):
        """Test metadata features at the attribute level (set/get/remove plus hardcoded keys)"""
        # Any node type will do for metadata tests so use a simple one
        (_, [test_node], _, _) = og.Controller.edit(
            "/TestGraph",
            {
                og.Controller.Keys.CREATE_NODES: ("SimpleNode", "omni.graph.tutorials.SimpleData"),
            },
        )
        await og.Controller.evaluate()
        self.assertIsNotNone(test_node, "Simple data node type to be used for test was not registered")
        test_attribute = test_node.get_attribute("inputs:a_bool")
        self.assertIsNotNone(test_attribute, "Boolean input on simple data node type not found")
        original_count = test_attribute.get_metadata_count()
        key = "_test_attribute_metadata"
        value = "_test_value"
        test_attribute.set_metadata(key, value)
        # The new metadata key should now have the new value
        self.assertEqual(value, test_attribute.get_metadata(key))
        new_metadata = test_attribute.get_all_metadata()
        # The new key/value pair should now be part of the entire metadata list
        self.assertTrue(key in new_metadata)
        self.assertEqual(value, new_metadata[key])
        # Since a unique key was chosen there should be one extra metadata value
        self.assertEqual(original_count + 1, test_attribute.get_metadata_count())
        # Setting a value to None should remove the metadata from the node type.
        # Do this last so that the test can run multiple times successfully.
        test_attribute.set_metadata(key, None)
        self.assertEqual(original_count, test_attribute.get_metadata_count())
        # Test silent success of trying to set the metadata to illegal data
        test_attribute.set_metadata(None, value)
        # ObjectId types get special metadata hardcoded
        for attribute_name in ["inputs:a_objectId", "outputs:a_objectId"]:
            object_id_attribute = test_node.get_attribute(attribute_name)
            self.assertIsNotNone(object_id_attribute, f"ObjectId {attribute_name} on simple data node type not found")
            object_id_metadata = object_id_attribute.get_all_metadata()
            self.assertTrue(ogn.MetadataKeys.OBJECT_ID in object_id_metadata)
        # Check on the constant attribute
        constant_attribute = test_node.get_attribute("inputs:a_constant_input")
        self.assertEqual("1", constant_attribute.get_metadata(ogn.MetadataKeys.OUTPUT_ONLY))
| 5,658 | Python | 49.079646 | 118 | 0.627253 |
omniverse-code/kit/exts/omni.audiorecorder/omni/audiorecorder/__init__.py | """
This module contains bindings to the C++ omni::audio::IAudioRecorder interface.
This provides functionality for simple audio recording to file or using a callback.
Recording can be done in 16 bit, 32 bit or float PCM if a callback is required.
Recording can be done in any format when only recording to a file.
Recording to a file while simultaneously receiving data callbacks is also possible.
"""
# recorder bindings depend on some types from carb.audio
import carb.audio
from ._audiorecorder import *
| 550 | Python | 41.384612 | 91 | 0.732727 |
omniverse-code/kit/exts/omni.audiorecorder/omni/audiorecorder/tests/__init__.py | from .test_audio_recorder import * # pragma: no cover
| 56 | Python | 17.999994 | 54 | 0.714286 |
omniverse-code/kit/exts/omni.audiorecorder/omni/audiorecorder/tests/test_audio_recorder.py | import pathlib
import time
import os
import math
from PIL import Image
import omni.kit.test_helpers_gfx.compare_utils
import carb.tokens
import carb.audio
import omni.audiorecorder
import omni.usd.audio
import omni.kit.audiodeviceenum
import omni.kit.test
import omni.log
OUTPUTS_DIR = pathlib.Path(omni.kit.test.get_test_output_path())
# boilerplate to work around python lacking references
class IntReference:  # pragma: no cover
    """Mutable wrapper around an integer, emulating pass-by-reference for callbacks."""
    def __init__(self, v):
        self._v = v
    @property
    def value(self):
        """The wrapped integer value."""
        return self._v
    @value.setter
    def value(self, v):
        self._v = v
def wait_for_data(received: IntReference, ms: int, expected_data: int):  # pragma: no cover
    """Busy-wait until `received.value` exceeds `expected_data`, polling once per millisecond.

    Args:
        received: Shared counter that the recorder callbacks increment with frames received.
        ms: Maximum number of milliseconds to wait before giving up.
        expected_data: Frame count that must be strictly exceeded for the wait to end.

    Raises:
        TimeoutError: If the counter does not exceed `expected_data` within `ms` milliseconds.
            (The original code called `self.assertTrue` here, but `self` is undefined at
            module scope, so a timeout produced a confusing NameError instead of a clear
            failure message.)
    """
    for i in range(ms):
        # NOTE: strictly greater-than (at least expected_data + 1 frames) -- preserved from
        # the original behavior so a single period of exactly expected_data keeps waiting.
        if received.value > expected_data:
            # log this so we can see timing margins on teamcity
            omni.log.warn("finished waiting after " + str(i / 1000.0) + " seconds")
            break
        if i > 0 and i % 1000 == 0:
            omni.log.warn("waited " + str(i // 1000) + " seconds")
        if i == ms - 1:
            raise TimeoutError("timeout of " + str(ms) + "ms waiting for " + str(expected_data) + " frames")
        time.sleep(0.001)
class TestAudioRecorder(omni.kit.test.AsyncTestCase): # pragma: no cover
    """Exercise omni.audiorecorder: callback capture, file capture, error handling, and waveform drawing."""
    def setUp(self):
        """Create a fresh recorder and resolve the extension's test-data directories."""
        self._recorder = omni.audiorecorder.create_audio_recorder()
        self.assertIsNotNone(self._recorder)
        extension_path = carb.tokens.get_tokens_interface().resolve("${omni.audiorecorder}")
        self._test_path = pathlib.Path(extension_path).joinpath("data").joinpath("tests").absolute()
        self._golden_path = self._test_path.joinpath("golden")
    def tearDown(self):
        """Drop the recorder reference so it is destroyed between tests."""
        self._recorder = None
    def test_callback_recording(self):
        """Record via data callbacks in float/int16/int32 and validate the reported formats."""
        # Callbacks deliver whole periods, so any multiple of the 4800-frame period is valid
        valid = []
        for i in range(1, 16):
            valid.append(4800 * i)
        received = IntReference(0)
        def read_validation(data):
            """Accumulate the received frame count and verify the chunk size is a whole period."""
            nonlocal received
            received.value += len(data)
            self.assertTrue(len(data) in valid)
        def float_read_callback(data):
            """Validate a chunk delivered by the float recording API."""
            read_validation(data)
            self.assertTrue(isinstance(data[0], float))
        def int_read_callback(data):
            """Validate a chunk delivered by either integer recording API."""
            read_validation(data)
            self.assertTrue(isinstance(data[0], int))
        # if there are no devices, this won't work
        if omni.kit.audiodeviceenum.acquire_audio_device_enum_interface().get_device_count(omni.kit.audiodeviceenum.Direction.CAPTURE) == 0:
            self.assertFalse(self._recorder.begin_recording_float(
                callback = float_read_callback,
            ))
            return
        # not open => should be some valid default format
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertGreaterEqual(fmt.channels, carb.audio.MIN_CHANNELS)
        self.assertLessEqual(fmt.channels, carb.audio.MAX_CHANNELS)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)
        self.assertTrue(self._recorder.begin_recording_float(
            callback = float_read_callback,
            buffer_length = 4800 * 16, # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))
        # try again and it will fail because a recording is already in progress
        self.assertFalse(self._recorder.begin_recording_float(
            callback = float_read_callback,
            buffer_length = 4800 * 16, # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))
        # not much we can test here aside from it being valid
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertEqual(fmt.channels, 1)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM_FLOAT)
        # this timeout seems absurd, but anything's possible with teamcity's timing
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()
        # try again with int16
        received.value = 0
        self.assertTrue(self._recorder.begin_recording_int16(
            callback = int_read_callback,
            buffer_length = 4800 * 16, # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))
        # not much we can test here aside from it being valid
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertEqual(fmt.channels, 1)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM16)
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()
        # try again with int32
        received.value = 0
        self.assertTrue(self._recorder.begin_recording_int32(
            callback = int_read_callback,
            buffer_length = 4800 * 16, # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))
        # not much we can test here aside from it being valid
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertEqual(fmt.channels, 1)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM32)
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()
        # test that the format is set as specified
        self.assertTrue(self._recorder.begin_recording_float(
            callback = float_read_callback,
            channels = 1, # 1 channel because that's all we can guarantee
                          # when we upgrade IAudioCapture, this should no longer
                          # be an issue
            frame_rate = 43217,
            buffer_length = 4800 * 16, # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES
        ))
        fmt = self._recorder.get_format()
        self.assertEqual(fmt.channels, 1)
        self.assertEqual(fmt.frame_rate, 43217)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM_FLOAT)
        self._recorder.stop_recording()
    def _validate_sound(self, path):
        """Verify a recorded file is playable by loading it into a sound prim on a new stage."""
        # to validate that this is a working sound, load it into a sound prim
        # and check if the sound asset loaded successfully
        context = omni.usd.get_context()
        self.assertIsNotNone(context)
        context.new_stage()
        stage = context.get_stage()
        self.assertIsNotNone(stage)
        prim = stage.DefinePrim("/sound", "OmniSound")
        self.assertIsNotNone(prim)
        prim.GetAttribute("filePath").Set(path)
        audio = omni.usd.audio.get_stage_audio_interface()
        self.assertIsNotNone(audio)
        # Poll the async asset load, giving up after roughly 5 seconds
        i = 0
        while audio.get_sound_asset_status(prim) == omni.usd.audio.AssetLoadStatus.IN_PROGRESS:
            time.sleep(0.001)
            if i > 5000:
                raise Exception("asset load timed out")
            i += 1
        self.assertEqual(audio.get_sound_asset_status(prim), omni.usd.audio.AssetLoadStatus.DONE)
    def test_recording_to_file(self):
        """Record to files in several output formats, with and without a data callback."""
        received = IntReference(0)
        def read_callback(data):
            """Count frames so wait_for_data() can tell when enough audio has arrived."""
            nonlocal received
            received.value += len(data)
        # With no capture devices both entry points must fail up front
        if omni.kit.audiodeviceenum.acquire_audio_device_enum_interface().get_device_count(omni.kit.audiodeviceenum.Direction.CAPTURE) == 0:
            self.assertFalse(self._recorder.begin_recording_float(
                callback = read_callback,
                filename = str(OUTPUTS_DIR.joinpath("test.oga"))
            ))
            self.assertFalse(self._recorder.begin_recording_to_file(
                filename = str(OUTPUTS_DIR.joinpath("test.oga"))
            ))
            return
        # Simultaneous callback + file output, Opus encoded
        self.assertTrue(self._recorder.begin_recording_float(
            callback = read_callback,
            buffer_length = 4800 * 16, # enormous buffer for test reliability
            period = 4800,
            channels = 1,
            frame_rate = 48000,
            length_type = carb.audio.UnitType.FRAMES,
            output_format = carb.audio.SampleFormat.OPUS,
            filename = str(OUTPUTS_DIR.joinpath("test.opus.oga"))
        ))
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.opus.oga")))
        # try again with a default output format
        self.assertTrue(self._recorder.begin_recording_float(
            callback = read_callback,
            buffer_length = 4800 * 16, # enormous buffer for test reliability
            period = 4800,
            channels = 1,
            frame_rate = 48000,
            length_type = carb.audio.UnitType.FRAMES,
            filename = str(OUTPUTS_DIR.joinpath("test.default.0.wav"))
        ))
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.default.0.wav")))
        # File-only recording (no callback), Vorbis encoded at an unusual frame rate
        self.assertTrue(self._recorder.begin_recording_to_file(
            frame_rate = 73172,
            channels = 1,
            filename = str(OUTPUTS_DIR.joinpath("test.vorbis.oga")),
            output_format = carb.audio.SampleFormat.VORBIS
        ))
        time.sleep(2.0)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.vorbis.oga")))
        # try again with a default output format
        self.assertTrue(self._recorder.begin_recording_to_file(
            frame_rate = 73172,
            channels = 1,
            filename = str(OUTPUTS_DIR.joinpath("test.default.1.wav")),
        ))
        time.sleep(2.0)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.default.1.wav")))
    def test_bad_parameters(self):
        """Verify begin_recording_float() rejects invalid formats, paths, and ranges."""
        def read_callback(data):
            """No-op callback; these calls are all expected to fail before data arrives."""
            pass
        # MP3 is not supported
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            output_format = carb.audio.SampleFormat.MP3,
            filename = str(OUTPUTS_DIR.joinpath("test.mp3"))
        ))
        # bad format
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            output_format = carb.audio.SampleFormat.RAW,
            filename = str(OUTPUTS_DIR.joinpath("test.mp3"))
        ))
        # bad file name
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            output_format = carb.audio.SampleFormat.OPUS,
            filename = "a/b/c/d/e/f/g/h/i/j.oga"
        ))
        # Channel count and frame rate beyond the supported maxima must be rejected
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            channels = carb.audio.MAX_CHANNELS + 1
        ))
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            frame_rate = carb.audio.MAX_FRAMERATE + 1
        ))
    def test_draw_waveform(self):
        """Render the same sine wave from float/int32/int16 blobs and compare to the golden image."""
        # toggle in case you want to regenerate waveforms
        GENERATE_GOLDEN_IMAGES = False
        # Build one 4800-sample sine cycle set in all three sample formats
        samples_int16 = []
        samples_int32 = []
        samples_float = []
        for i in range(4800):
            samples_float.append(math.sin(i / 48.0))
            samples_int16.append(int(samples_float[-1] * (2 ** 15 - 1)))
            samples_int32.append(int(samples_float[-1] * (2 ** 31 - 1)))
        W = 256
        H = 256
        # Each draw call returns raw RGBX pixels (4 bytes per pixel)
        raw = omni.audiorecorder.draw_waveform_from_blob_float(samples_float, 1, W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
        self.assertEqual(len(raw), W * H * 4)
        with Image.frombytes("RGBX", (W, H), bytes(raw), 'raw') as img:
            img.convert("RGB").save(str(pathlib.Path(OUTPUTS_DIR).joinpath("waveform.float.png")))
        if not GENERATE_GOLDEN_IMAGES:
            self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.float.png"),
                self._golden_path.joinpath("waveform.png"),
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.float.png.diff.png")),
                0.1)
        raw = omni.audiorecorder.draw_waveform_from_blob_int32(samples_int32, 1, W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
        self.assertEqual(len(raw), W * H * 4)
        with Image.frombytes("RGBX", (W, H), bytes(raw), 'raw') as img:
            img.convert("RGB").save(str(pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int32.png")))
        if not GENERATE_GOLDEN_IMAGES:
            self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int32.png"),
                self._golden_path.joinpath("waveform.png"),
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int32.png.diff.png")),
                0.1)
        raw = omni.audiorecorder.draw_waveform_from_blob_int16(samples_int16, 1, W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
        self.assertEqual(len(raw), W * H * 4)
        with Image.frombytes("RGBX", (W, H), bytes(raw), 'raw') as img:
            img.convert("RGB").save(str(pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int16.png")))
        if not GENERATE_GOLDEN_IMAGES:
            # 1 pixel of difference was 756.0 difference
            self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int16.png"),
                self._golden_path.joinpath("waveform.png"),
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int16.png.diff.png")),
                1024.0)
| 14,313 | Python | 36.276042 | 140 | 0.604066 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/ensure_node_types_in_toml.py | """
Given a collection of node metadata, ensure that all nodes appear in the [omnigraph.node_types] section of the
extension.toml used by the owning extension to register its information.
Invoke this script by passing the location of the .json file containing the metadata for all node types and the location
of the extension.toml file to which the metadata should be populated.
python ensure_node_types_in_toml.py
--nodeInfo $BUILD/exts/omni.my.extension/ogn/nodes.json
--toml $SRC/extensions/omni.my.extension/config/extension.toml
The extension.toml file will be modified to included a generated section with this format:
# === GENERATED BY ensure_node_types_in_toml.py -- DO NOT MODIFY ===
[omnigraph.node_types]
"omni.my.extension.MyNodeType" = 1
"omni.my.extension.MyOtherNodeType" = 1
# === END OF GENERATED CODE ===
or if the "--allData" flag is set then this more verbose format will be used:
# === GENERATED BY ensure_node_types_in_toml.py -- DO NOT MODIFY ===
[omnigraph.node_types."omni.my.extension.MyNodeType"]
version = 1
language = "C++"
description = "This is my node"
[omnigraph.node_types."omni.my.extension.MyOtherNodeType"]
version = 1
language = "C++"
description = "This is my other node"
# === END OF GENERATED CODE ===
Note that this script explicitly does not handle the case of multiple versions of the same node type in the same
extension as that is also not handled by OmniGraph proper.
You might also want to use an intermediate directory, which will create an explicit tag when the .toml is regenerated
so that you can safely handle the case of regeneration after a direct edit of the .toml file itself. This will ensure
that a user cannot accidentally delete a node definition from the automatically generated section;
python ensure_node_types_in_toml.py
--nodeInfo $BUILD/exts/omni.my.extension/ogn/nodes.json
--toml $SRC/extensions/omni.my.extension/config/extension.toml
--intermediate $TOP/_build/intermediate
Lastly, the support for the toml package is only available through repo_man in the build, not in the standard path.
You can pass in the root directory of the repo_man module if it is needed to find the toml package.
python ensure_node_types_in_toml.py
--nodeInfo $BUILD/exts/omni.my.extension/ogn/nodes.json
--toml $SRC/extensions/omni.my.extension/config/extension.toml
--intermediate $TOP/_build/intermediate
--repoMan $TOP/_repo/deps/repo_man
"""
import argparse
import json
import logging
import os
import sys
from pathlib import Path
from node_generator.utils import WritableDir
# Create a logger and selectively turn on logging if the OGN debugging environment variable is set
logger = logging.getLogger("add_nodes_to_toml")
logging_handler = logging.StreamHandler(sys.stdout)
logging_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(logging_handler)
logger.setLevel(logging.INFO if os.getenv("OGN_PARSE_DEBUG") else logging.WARN)

# The marker strings below are written into the .toml file around the generated section. They must remain stable
# across versions of this script so that previously generated files can be recognized and their generated section
# replaced in place on regeneration (see TomlModifier.write_file, which scans for these exact lines).
START_MARKER = "# === GENERATED BY ensure_node_types_in_toml.py -- DO NOT MODIFY ==="
"""Special marker text identifying the start of the generated node type identifier code"""
SECTION_NAME = "omnigraph.node_types"
"""Name of the generated section of the .toml file"""
END_MARKER = "# === END OF GENERATED CODE ==="
"""Special marker text identifying the end of the generated node type identifier code"""
# ======================================================================
class TomlModifier:
    """Encapsulates the reading, modifying, and writing of the .toml file with the node information.

    Attributes:
        changes_made: True iff processing the .toml file resulted in changes to it
        toml: The toml module root - a member of the class because it may need to be imported from an alternate location

    Internal Attributes:
        __all_data: If True then the .toml will include language and description in addition to type name and version
        __existing_types: Dictionary of node_type_name:version_number for node types found in the original .toml file
        __node_info: Dictionary of node_type_name:node_type_info generated by the build for this extension
        __node_info_path: Path pointing to the generated file containing the node type information for this extension
        __tag_path: Path pointing to a file to use to tag the operation as complete (so that edits to the .toml can
            trigger regeneration)
        __toml_path: Path pointing to the .toml file to be modified
    """

    # --------------------------------------------------------------------------------------------------------------
    def __init__(self) -> None:
        """Set up the information required for the operations - do nothing just yet.

        Parses the command line arguments, resolves all file paths, and attempts to locate the toml module either
        natively or through the repo_man directory supplied on the command line. No file I/O beyond path resolution
        happens here.
        """
        # Construct the parsing information. Run the script with "--help" to see the usage.
        parser = argparse.ArgumentParser(
            description="Ensure that the supplied .toml file contains metadata for all of the nodes defined"
            " in the extension",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        parser.add_argument(
            "-a",
            "--allData",
            action="store_true",
            help="Dump all metadata to the .toml file instead of just the version and node type name",
        )
        parser.add_argument(
            "-in",
            "--intermediate",
            action=WritableDir,
            const=None,
            type=Path,
            metavar="INTERMEDIATE_DIRECTORY",
            help="Directory into which temporary build information is stored",
        )
        parser.add_argument(
            "-ni",
            "--nodeInfoFile",
            type=Path,
            metavar="NODE_INFO_FILE.json",
            help=".json file generated by the build that contains the registration information for all of the nodes",
        )
        parser.add_argument(
            "-rm",
            "--repoMan",
            type=Path,
            metavar="REPO_MAN_DIR",
            help="Path to the repo_man support directory",
        )
        parser.add_argument(
            "-t",
            "--tomlFile",
            type=Path,
            metavar="TOML_FILE",
            help=".toml file that will contain the node information",
        )
        parser.add_argument("-v", "--verbose", action="store_true", help="Output the steps the script is performing")
        args = parser.parse_args()

        # If the script steps are to be echoed enable the logger and dump the script arguments as a first step
        if args.verbose:
            logger.setLevel(logging.DEBUG)
        logger.info("Processing the arguments")
        logger.info(" Args = %s", args)

        # Set up the internal attributes. All path files are resolved to absolute paths for convenience.
        self.__all_data = args.allData
        self.__toml_path = args.tomlFile.resolve()
        self.__node_info_path = args.nodeInfoFile.resolve()
        self.__tag_path = args.intermediate / "extension.toml.built" if args.intermediate is not None else None
        if self.__tag_path is not None:
            self.__tag_path = self.__tag_path.resolve()
        self.changes_made = False
        self.__in_team_city = False
        self.__existing_types = {}
        self.__node_info = {}

        # The toml package is installed as part of repo_man, not directly available in the build, so it may not be
        # available here and may have to be found through a location supplied by the script arguments.
        try:
            import toml

            self.toml = toml
        except ModuleNotFoundError:
            self.toml = None

        # There is some information to get from repoMan when running through a build so that we can successfully
        # determine when a failure is fatal and when it is just part of the normal generation process.
        if args.repoMan is not None:
            try:
                python_path = args.repoMan.resolve().as_posix()
                sys.path.append(python_path)
                import omni.repo.man

                # Only use repo_man's toml module when the native import above failed
                if self.toml is None:
                    toml = omni.repo.man.get_toml_module()
                    self.toml = toml
                self.__in_team_city = omni.repo.man.is_running_in_teamcity()
            except (ModuleNotFoundError, AttributeError):
                # If repoMan is inaccessible then issue a warning but continue on to avoid spurious failures
                if self.toml is None:
                    logger.warning(
                        "toml module could not be found natively or at path '%s', parsing cannot happen", python_path
                    )
                else:
                    logger.warning(
                        "Not able to determine if running in TeamCity without module at '%s', assuming not.",
                        python_path,
                    )
        logger.info(" Team City run = %s", self.__in_team_city)

    # --------------------------------------------------------------------------------------------------------------
    @property
    def needs_generation(self) -> bool:
        """Returns True iff the extension.toml is older than either the generator script itself or the nodes.json file.

        This is done here after several unsuccessful attempts to get the build structure to recognize when the file
        needs rebuilding. At worst it means running this script when it isn't necessary, but then returning immediately
        after checking the file modification times so hopefully no big deal.
        """
        # If the toml cannot be parsed no generation can happen
        if self.toml is None:
            return False
        this_file = Path(__file__)
        # If the nodes.json does not exist then no generation is needed because there are no nodes
        if not self.__node_info_path.exists():
            logger.info("Skipping generation - no nodes.json file")
            return False
        # If the .toml file does not exist it definitely needs generation
        if not self.__toml_path.exists():
            logger.info("Forcing generation - no .toml file")
            return True
        # If the tag file does not exist but should then generation has never been done so it needs to be done now
        if self.__tag_path is not None and not self.__tag_path.exists():
            logger.info("Forcing generation - missing tag file")
            return True
        # All of the relevant files exist. Regeneration is only needed if the tag file is not the newest one.
        # When no tag file was configured a tag mtime of 0 forces regeneration on every run.
        this_file_mtime = this_file.stat().st_mtime
        node_info_mtime = self.__node_info_path.stat().st_mtime
        toml_mtime = self.__toml_path.stat().st_mtime
        tag_mtime = self.__tag_path.stat().st_mtime if self.__tag_path is not None else 0
        if tag_mtime < toml_mtime:
            logger.info("Forcing generation - .toml is newer %s than the tag file %s", toml_mtime, tag_mtime)
            return True
        if tag_mtime < node_info_mtime:
            logger.info("Forcing generation - tag file is older than nodes.json")
            return True
        if tag_mtime < this_file_mtime:
            logger.info("Forcing generation - tag file is older than the generation script")
            return True
        # No good reason was found to regenerate, so don't
        logger.info("Skipping generation - the .toml file is up to date")
        return False

    # --------------------------------------------------------------------------------------------------------------
    def read_files(self) -> None:
        """Read in the contents of the .toml and .json files and parse them for modification.

        Populates __existing_types with the node types already present in the generated .toml section (empty if the
        section is absent) and __node_info with the node registration data produced by the build (empty on any
        read/parse failure so processing can continue as if there were no nodes).
        """
        logger.info("Reading the extension's .toml file")
        contents = self.toml.load(self.__toml_path)
        try:
            # Walk the dotted section name down through the nested dictionaries to the node type table
            sections = SECTION_NAME.split(".")
            self.__existing_types = contents
            for section in sections:
                self.__existing_types = self.__existing_types[section]
        except KeyError:
            self.__existing_types = {}
        logger.info("Reading the extension's .json node information file")
        try:
            with open(self.__node_info_path, "r", encoding="utf-8") as json_fd:
                self.__node_info = json.load(json_fd)["nodes"]
        except (IOError, json.JSONDecodeError):
            self.__node_info = {}

    # --------------------------------------------------------------------------------------------------------------
    def __existing_version_matches(self, node_type_name: str, version: int) -> bool:
        """Returns True iff the node type name has a version number in the .toml file matching the one passed in"""
        if node_type_name not in self.__existing_types:
            return False
        node_type_info = self.__existing_types[node_type_name]
        # If the abbreviated version of the metadata was used the version number is all there is
        if isinstance(node_type_info, int):
            return version == node_type_info
        # Otherwise extract the version number from the metadata dictionary
        try:
            return version == node_type_info["version"]
        except KeyError:
            return False

    # --------------------------------------------------------------------------------------------------------------
    def add_nodes(self) -> None:
        """Ensure the nodes that were passed in are present in the .toml file.

        Sets changes_made to True if any node type had to be added or had a version mismatch with the existing entry.
        The entry written is either a bare version integer or, when --allData was requested, a dictionary with
        version, language, and description.
        """
        logger.info("Ensuring the node types are in the file")
        for node_type_name, node_type_info in self.__node_info.items():
            version = int(node_type_info["version"])
            if not self.__existing_version_matches(node_type_name, version):
                self.changes_made = True
                if self.__all_data:
                    new_item = {
                        "version": version,
                        "language": node_type_info["language"],
                        "description": node_type_info["description"],
                    }
                else:
                    new_item = version
                logger.info(" Found an unregistered type - %s = %s", node_type_name, new_item)
                self.__existing_types[node_type_name] = new_item

    # --------------------------------------------------------------------------------------------------------------
    def write_file(self) -> None:
        """Write the new contents of the .toml file back to the original location.

        The toml library dump() method cannot be used here as it would lose information like comments and formatting
        so instead it narrows its focus to the [[omnigraph]] section, surrounding it with fixed markers so that it
        can be easily identified and replaced using a text-based edit.

        Raises:
            AttributeError: When running in TeamCity, where the .toml should already have been committed up to date
            ValueError: When the .toml has a start marker without a matching end marker
            IOError: When the modified .toml could not be written back out
        """
        logger.info("Writing the file")
        if self.__in_team_city:
            raise AttributeError(
                f"The file {self.__toml_path} was not up to date in the merge request. Rebuild and add it."
            )
        with open(self.__toml_path, "r", encoding="utf-8") as toml_fd:
            raw_contents = toml_fd.readlines()
            raw_line_count = len(raw_contents)
        # Convert the node type list to a .toml format
        logger.info(" Inserting new section %s", self.__existing_types)
        # Build the structure from the bottom up to ensure the .toml has the correct nesting
        section_dict = self.__existing_types
        sections = SECTION_NAME.split(".")
        sections.reverse()
        for section in sections:
            # Sorting the entries keeps the generated section deterministic between runs
            section_dict = {section: dict(sorted(section_dict.items()))}
        inserted_section = self.toml.dumps(section_dict)
        # Scan the file to see if/where the generated section currently resides
        in_section = False
        section_start_index = -1
        section_end_index = -1
        for line_index, line in enumerate(raw_contents):
            if in_section and line.rstrip() == END_MARKER:
                in_section = False
                section_end_index = line_index
            if line.rstrip() == START_MARKER:
                in_section = True
                section_start_index = line_index
        logger.info(" Existing section location was %s, %s", section_start_index, section_end_index)
        if section_start_index >= 0 and section_end_index == -1:
            raise ValueError(
                f"The .toml file '{self.__toml_path}' was illegal - it had a start marker but no end marker"
            )
        if section_start_index < 0:
            # No existing generated section - append the new one at the end of the file
            section_start_index = raw_line_count
            section_end_index = section_start_index
        # Write the modified contents with the new generated section
        try:
            with open(self.__toml_path, "w", encoding="utf-8") as toml_fd:
                toml_fd.writelines(raw_contents[0:section_start_index])
                # If inserting at the end of the file then insert a blank line for readability
                if section_start_index == raw_line_count:
                    toml_fd.write("\n")
                toml_fd.write(f"{START_MARKER}\n")
                toml_fd.write(inserted_section)
                toml_fd.write(f"{END_MARKER}\n")
                toml_fd.writelines(raw_contents[section_end_index + 1 : raw_line_count])
                toml_fd.flush()  # Required to ensure the mtime is earlier than the tag file's
        except IOError as error:
            raise IOError(f"Failed to write back the .toml file '{self.__toml_path}'") from error

    # --------------------------------------------------------------------------------------------------------------
    def touch_tag_file(self) -> None:
        """Forces update of the tag file mtime."""
        try:
            # Tag the conversion as being complete so that a build process can properly manage dependencies.
            # This has to happen last to avoid a false positive where the tag file is older than the .toml
            logger.info("Touching the tag file %s", self.__tag_path)
            if self.__tag_path is not None:
                with open(self.__tag_path, "w", newline="\n", encoding="utf-8") as tag_fd:
                    tag_fd.write("This file tags the last time its .toml file was processed with node metadata")
        except IOError as error:
            raise IOError(f"Failed to write back the tag file '{self.__tag_path}'") from error
# ==============================================================================================================
def main_update_extension_toml():
    """Drive the full update: check timestamps, merge the node type data into the .toml, and write it back out."""
    modifier = TomlModifier()
    # Guard clause: when everything is already up to date there is nothing at all to do
    if not modifier.needs_generation:
        return
    modifier.read_files()
    modifier.add_nodes()
    if modifier.changes_made:
        modifier.write_file()
    # Touch the tag even when nothing changed, otherwise every subsequent run would pointlessly attempt (and skip)
    # the same regeneration again.
    modifier.touch_tag_file()
# ==============================================================================================================
if __name__ == "__main__":
    # Entry point when invoked directly as a script (e.g. from a build step)
    main_update_extension_toml()
| 19,319 | Python | 48.035533 | 120 | 0.591076 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_internal.py | """Imports for support code that will used internally by OmniGraph but should not be used elsewhere.
These are all subject to change without supported deprecation paths.
"""
__all__ = [
"cache_location",
"Compatibility",
"ExtensionContentsBase",
"ExtensionContentsStandalone",
"ExtensionContentsV118",
"ExtensionContentsV119",
"ExtensionVersion_t",
"extension_contents_factory",
"FileType",
"find_ogn_build_directory",
"full_cache_path",
"GENERATED_FILE_CONFIG_NAMES",
"GenerationVersions",
"get_generator_extension_version",
"get_module_path",
"get_ogn_file_name",
"get_ogn_type_and_node",
"get_target_extension_version",
"import_tests_in_directory",
"load_module_from_file",
"LOG",
"NodeTypeDefinition",
"OmniGraphExtensionError",
"set_registration_logging",
"Settings",
"TemporaryCacheLocation",
"TemporaryLogLocation",
"VersionProperties",
"walk_with_excludes",
]
from ._impl.internal.cache_utils import TemporaryCacheLocation, cache_location, full_cache_path
from ._impl.internal.extension_contents_1_18 import ExtensionContentsV118
from ._impl.internal.extension_contents_1_19 import ExtensionContentsV119
from ._impl.internal.extension_contents_base import ExtensionContentsBase
from ._impl.internal.extension_contents_factory import extension_contents_factory
from ._impl.internal.extension_contents_standalone import ExtensionContentsStandalone
from ._impl.internal.file_utils import (
GENERATED_FILE_CONFIG_NAMES,
FileType,
find_ogn_build_directory,
get_module_path,
get_ogn_file_name,
get_ogn_type_and_node,
load_module_from_file,
walk_with_excludes,
)
from ._impl.internal.logging_utils import LOG, OmniGraphExtensionError, TemporaryLogLocation, set_registration_logging
from ._impl.internal.node_type_definition import NodeTypeDefinition
from ._impl.internal.versions import (
Compatibility,
ExtensionVersion_t,
GenerationVersions,
VersionProperties,
get_generator_extension_version,
get_target_extension_version,
)
from ._impl.node_generator.generate_test_imports import import_tests_in_directory
from ._impl.node_generator.utils import Settings
| 2,234 | Python | 33.384615 | 118 | 0.744405 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/__init__.py | """Tools that support all of OmniGraph in general, and the .ogn format in particular.
General tools can be imported directly with the top level import:
.. code-block:: python
import omni.graph.tools as ogt
help(ogt.deprecated_function)
This module also supports a submodule just for the .ogn handling.
.. code-block:: python
# Support for the parsing and creation of the .ogn format
import omni.graph.tools.ogn as ogn
"""
from . import ogn
from ._impl.debugging import destroy_property, function_trace
from ._impl.deprecate import (
DeprecatedClass,
DeprecatedDictConstant,
DeprecatedImport,
DeprecatedStringConstant,
DeprecateMessage,
DeprecationError,
DeprecationLevel,
RenamedClass,
deprecated_constant_object,
deprecated_function,
)
from ._impl.extension import _PublicExtension # noqa: F401
from ._impl.node_generator.utils import IndentedOutput, shorten_string_lines_to
# ==============================================================================================================
__all__ = [
"dbg_gc",
"dbg_ui",
"dbg",
"deprecated_constant_object",
"deprecated_function",
"DeprecatedClass",
"DeprecatedDictConstant",
"DeprecatedImport",
"DeprecatedStringConstant",
"DeprecateMessage",
"DeprecationError",
"DeprecationLevel",
"destroy_property",
"function_trace",
"import_tests_in_directory",
"IndentedOutput",
"OGN_DEBUG",
"RenamedClass",
"shorten_string_lines_to",
"supported_attribute_type_names",
]
# ==============================================================================================================
# Soft-deprecated imports. Kept around for backward compatibility for one version.
# _____ ______ _____ _____ ______ _____ _______ ______ _____
# | __ \ | ____|| __ \ | __ \ | ____|/ ____| /\ |__ __|| ____|| __ \
# | | | || |__ | |__) || |__) || |__ | | / \ | | | |__ | | | |
# | | | || __| | ___/ | _ / | __| | | / /\ \ | | | __| | | | |
# | |__| || |____ | | | | \ \ | |____| |____ / ____ \ | | | |____ | |__| |
# |_____/ |______||_| |_| \_\|______|\_____|/_/ \_\|_| |______||_____/
#
from ._impl.debugging import OGN_DEBUG, dbg, dbg_gc, dbg_ui
from ._impl.node_generator.attributes.management import supported_attribute_type_names as _moved_to_ogn
from ._impl.node_generator.generate_test_imports import import_tests_in_directory
@deprecated_function("supported_attribute_type_names() has moved to omni.graph.tools.ogn")
def supported_attribute_type_names(*args, **kwargs):
    """Deprecated pass-through that forwards all arguments to omni.graph.tools.ogn.supported_attribute_type_names."""
    return _moved_to_ogn(*args, **kwargs)
| 2,683 | Python | 34.315789 | 112 | 0.530004 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_1_11.py | """Backward compatible module for omni.graph.tools version 1.11 and earlier.
This module contains everything that was formerly visible by default but will no longer be part of the Python API
for omni.graph.tools. If there is something here you rely on contact the OmniGraph team and let them know.
Currently the module is in pre-deprecation, meaning you can still access everything here from the main module with
.. code-block:: python
import omni.graph.tools as ogt
ogt.pre_deprecated_but_still_visible()
Once the soft deprecation is enabled you will only be able to access the deprecated function with an explicit import:
.. code-block:: python
import omni.graph.tools as ogt
import omni.graph.tools._1_11 as ogt1_11
if i_want_v1_11:
ogt1_11.soft_deprecated_but_still_accessible()
else:
ogt.current_function()
When hard deprecation is in place all functionality will be removed and import of this module will fail:
.. code-block:: python
import omni.graph.tools._1_11 as ot1_11
# Raises DeprecationError
"""
from ._impl.debugging import OGN_DEBUG, OGN_EVAL_DEBUG, OGN_GC_DEBUG, OGN_UI_DEBUG, dbg, dbg_eval, dbg_gc, dbg_ui
from ._impl.node_generator.attributes.management import ATTRIBUTE_MANAGERS
from ._impl.node_generator.attributes.parsing import sdf_type_name
from ._impl.node_generator.generate_test_imports import import_tests_in_directory
# Code that should be retired
from ._impl.node_generator.keys import GraphSetupKeys_V1
from ._impl.node_generator.type_definitions import apply_type_definitions
# Code that should be refactored and moved to an appropriate location
from ._impl.node_generator.utils import (
OGN_PARSE_DEBUG,
OGN_REG_DEBUG,
Settings,
dbg_parse,
dbg_reg,
is_unwritable,
shorten_string_lines_to,
)
# Code that is entirely internal to the node description editor and should be made local
from ._impl.ogn_types import _OGN_TO_SDF_BASE_NAME as OGN_TO_SDF_BASE_NAME
from ._impl.ogn_types import _SDF_BASE_NAME_TO_OGN as SDF_BASE_NAME_TO_OGN
from ._impl.ogn_types import _SDF_TO_OGN as SDF_TO_OGN
__all__ = [
"apply_type_definitions",
"ATTRIBUTE_MANAGERS",
"dbg_eval",
"dbg_gc",
"dbg_parse",
"dbg_reg",
"dbg_ui",
"dbg",
"GraphSetupKeys_V1",
"import_tests_in_directory",
"is_unwritable",
"OGN_DEBUG",
"OGN_EVAL_DEBUG",
"OGN_GC_DEBUG",
"OGN_PARSE_DEBUG",
"OGN_REG_DEBUG",
"OGN_TO_SDF_BASE_NAME",
"OGN_UI_DEBUG",
"SDF_BASE_NAME_TO_OGN",
"SDF_TO_OGN",
"sdf_type_name",
"Settings",
"shorten_string_lines_to",
]
| 2,611 | Python | 30.853658 | 117 | 0.711605 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/generate_node.py | """Command line script to run the node generator scripts
Mainly a separate script to create a package for the node generator scripts so that the files can use shorter names
and relative imports. See node_generator/README.md for the usage information.
"""
from _impl.node_generator import main
main.main()
| 307 | Python | 33.222219 | 115 | 0.791531 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/ogn.py | """Tools that support interacting with the .ogn format, including parsing and creation.
General tools can be imported directly with the top level import:
.. code-block:: python
import omni.graph.tools.ogn as ogn
help(ogn)
"""
from ._impl.node_generator.attributes.AttributeManager import AttributeManager
from ._impl.node_generator.attributes.management import (
ALL_ATTRIBUTE_TYPES,
ATTRIBUTE_UNION_GROUPS,
expand_attribute_union_groups,
get_attribute_manager,
get_attribute_manager_type,
split_attribute_type_name,
supported_attribute_type_names,
)
from ._impl.node_generator.code_generation import code_generation
from ._impl.node_generator.generate_cpp import generate_cpp
from ._impl.node_generator.generate_documentation import generate_documentation
from ._impl.node_generator.generate_python import generate_python
from ._impl.node_generator.generate_template import generate_template
from ._impl.node_generator.generate_test_imports import generate_test_imports
from ._impl.node_generator.generate_tests import generate_tests
from ._impl.node_generator.generate_usd import generate_usd
from ._impl.node_generator.keys import (
AttributeKeys,
CategoryTypeValues,
CudaPointerValues,
ExclusionTypeValues,
GraphSetupKeys,
IconKeys,
LanguageTypeValues,
MemoryTypeValues,
MetadataKeys,
NodeTypeKeys,
TestKeys,
)
from ._impl.node_generator.nodes import NodeGenerationError
from ._impl.node_generator.parse_scheduling import SchedulingHints
from ._impl.node_generator.utils import (
CarbLogError,
DebugError,
ParseError,
UnimplementedError,
to_cpp_comment,
to_python_comment,
to_usd_comment,
to_usd_docs,
)
from ._impl.ogn_types import ogn_to_sdf, sdf_to_ogn
__all__ = [
"ALL_ATTRIBUTE_TYPES",
"ATTRIBUTE_UNION_GROUPS",
"AttributeKeys",
"AttributeManager",
"CarbLogError",
"CategoryTypeValues",
"code_generation",
"CudaPointerValues",
"DebugError",
"ExclusionTypeValues",
"expand_attribute_union_groups",
"generate_cpp",
"generate_documentation",
"generate_python",
"generate_template",
"generate_test_imports",
"generate_tests",
"generate_usd",
"get_attribute_manager_type",
"get_attribute_manager",
"GraphSetupKeys",
"IconKeys",
"LanguageTypeValues",
"MemoryTypeValues",
"MetadataKeys",
"NodeGenerationError",
"NodeTypeKeys",
"ogn_to_sdf",
"ParseError",
"SchedulingHints",
"sdf_to_ogn",
"split_attribute_type_name",
"supported_attribute_type_names",
"TestKeys",
"to_cpp_comment",
"to_python_comment",
"to_usd_comment",
"to_usd_docs",
"UnimplementedError",
]
# ==============================================================================================================
# These are symbols that should technically be prefaced with an underscore because they are used internally but
# not part of the public API but that would cause a lot of refactoring work so for now they are just added to the
# module contents but not the module exports.
# _ _ _____ _____ _____ ______ _ _
# | | | |_ _| __ \| __ \| ____| \ | |
# | |__| | | | | | | | | | | |__ | \| |
# | __ | | | | | | | | | | __| | . ` |
# | | | |_| |_| |__| | |__| | |____| |\ |
# |_| |_|_____|_____/|_____/|______|_| \_|
#
from ._impl.node_generator.attributes.management import validate_attribute_type_name # noqa: F401
from ._impl.node_generator.attributes.naming import ATTR_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.attributes.naming import ATTR_UI_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.attributes.naming import INPUT_GROUP # noqa: F401
from ._impl.node_generator.attributes.naming import INPUT_NS # noqa: F401
from ._impl.node_generator.attributes.naming import OUTPUT_GROUP # noqa: F401
from ._impl.node_generator.attributes.naming import OUTPUT_NS # noqa: F401
from ._impl.node_generator.attributes.naming import STATE_GROUP # noqa: F401
from ._impl.node_generator.attributes.naming import STATE_NS # noqa: F401
from ._impl.node_generator.attributes.naming import assemble_attribute_type_name # noqa: F401
from ._impl.node_generator.attributes.naming import attribute_name_as_python_property # noqa: F401
from ._impl.node_generator.attributes.naming import attribute_name_in_namespace # noqa: F401
from ._impl.node_generator.attributes.naming import attribute_name_without_port # noqa: F401
from ._impl.node_generator.attributes.naming import check_attribute_name # noqa: F401
from ._impl.node_generator.attributes.naming import check_attribute_ui_name # noqa: F401
from ._impl.node_generator.attributes.naming import is_input_name # noqa: F401
from ._impl.node_generator.attributes.naming import is_output_name # noqa: F401
from ._impl.node_generator.attributes.naming import is_state_name # noqa: F401
from ._impl.node_generator.attributes.naming import namespace_of_group # noqa: F401
from ._impl.node_generator.attributes.NumericAttributeManager import NumericAttributeManager # noqa: F401
from ._impl.node_generator.attributes.parsing import attributes_as_usd # noqa: F401
from ._impl.node_generator.attributes.parsing import separate_ogn_role_and_type # noqa: F401
from ._impl.node_generator.attributes.parsing import usd_type_name # noqa: F401
from ._impl.node_generator.generate_test_imports import import_file_contents # noqa: F401
from ._impl.node_generator.nodes import NODE_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.nodes import NODE_UI_NAME_REQUIREMENT # noqa: F401
from ._impl.node_generator.nodes import NodeInterface # noqa: F401
from ._impl.node_generator.nodes import NodeInterfaceWrapper # noqa: F401
from ._impl.node_generator.nodes import check_node_language # noqa: F401
from ._impl.node_generator.nodes import check_node_name # noqa: F401
from ._impl.node_generator.nodes import check_node_ui_name # noqa: F401
from ._impl.node_generator.OmniGraphExtension import OmniGraphExtension # noqa: F401
from ._impl.node_generator.utils import _EXTENDED_TYPE_ANY as EXTENDED_TYPE_ANY # noqa: F401
from ._impl.node_generator.utils import _EXTENDED_TYPE_REGULAR as EXTENDED_TYPE_REGULAR # noqa: F401
from ._impl.node_generator.utils import _EXTENDED_TYPE_UNION as EXTENDED_TYPE_UNION # noqa: F401
from ._impl.node_generator.utils import OGN_PARSE_DEBUG # noqa: F401
from ._impl.node_generator.utils import GeneratorConfiguration # noqa: F401
from ._impl.node_generator.utils import check_memory_type # noqa: F401
# By placing this in an internal list and exporting the list the backward compatibility code can make use of it
# to allow access to the now-internal objects in a way that looks like they are still published.
_HIDDEN = [
"assemble_attribute_type_name",
"ATTR_NAME_REQUIREMENT",
"ATTR_UI_NAME_REQUIREMENT",
"attribute_name_as_python_property",
"attribute_name_in_namespace",
"attribute_name_without_port",
"attributes_as_usd",
"check_attribute_name",
"check_attribute_ui_name",
"check_memory_type",
"check_node_language",
"check_node_name",
"check_node_ui_name",
"EXTENDED_TYPE_ANY",
"EXTENDED_TYPE_REGULAR",
"EXTENDED_TYPE_UNION",
"GeneratorConfiguration",
"import_file_contents",
"INPUT_GROUP",
"INPUT_NS",
"is_input_name",
"is_output_name",
"is_state_name",
"namespace_of_group",
"NODE_NAME_REQUIREMENT",
"NODE_UI_NAME_REQUIREMENT",
"NodeInterface",
"NodeInterfaceWrapper",
"NumericAttributeManager",
"OGN_PARSE_DEBUG",
"OmniGraphExtension",
"OUTPUT_GROUP",
"OUTPUT_NS",
"separate_ogn_role_and_type",
"STATE_GROUP",
"STATE_NS",
"usd_type_name",
"validate_attribute_type_name",
]
| 7,821 | Python | 40.386243 | 113 | 0.70234 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/make_docs_toc.py | """
Create a table of contents file in index.rst that references all of the OmniGraph node generated
documentation files that live in that directory.
This processing is highly tied to the formatting of the OGN generated documentation files so if they
change this has to as well.
The table of contents will be in two sections.
A table consisting of columns with [node name, node version, link to node doc file, link to node appendix entry]
An appendix with headers consisting of the node name and body consisting of the node's description
"""
from _impl.node_generator import main_docs
main_docs.main_docs()
| 619 | Python | 37.749998 | 116 | 0.781906 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/parse_scheduling.py | from omni.graph.tools._impl.node_generator.parse_scheduling import * # noqa: F401,PLW0401,PLW0614
| 99 | Python | 48.999976 | 98 | 0.787879 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_python.py | from omni.graph.tools._impl.node_generator.generate_python import * # noqa: F401,PLW0401,PLW0614
| 98 | Python | 48.499976 | 97 | 0.785714 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/main_docs.py | from omni.graph.tools._impl.node_generator.main_docs import * # noqa: F401,PLW0401,PLW0614
| 92 | Python | 45.499977 | 91 | 0.771739 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_node_info.py | from omni.graph.tools._impl.node_generator.generate_node_info import * # noqa: F401,PLW0401,PLW0614
| 101 | Python | 49.999975 | 100 | 0.782178 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/main.py | from omni.graph.tools._impl.node_generator.main import * # noqa: F401,PLW0401,PLW0614
| 87 | Python | 42.999979 | 86 | 0.770115 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_template.py | from omni.graph.tools._impl.node_generator.generate_template import * # noqa: F401,PLW0401,PLW0614
| 100 | Python | 49.499975 | 99 | 0.79 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/type_definitions.py | from omni.graph.tools._impl.node_generator.type_definitions import * # noqa: F401,PLW0401,PLW0614
| 99 | Python | 48.999976 | 98 | 0.787879 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_test_imports.py | from omni.graph.tools._impl.node_generator.generate_test_imports import * # noqa: F401,PLW0401,PLW0614
| 104 | Python | 51.499974 | 103 | 0.788462 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/__init__.py | import traceback
from carb import log_warn
# Capture the caller's stack so the deprecation warning pinpoints where the moved module was imported from
_trace = "".join(traceback.format_stack())
log_warn(f"The OmniGraph Node Generator has moved. Use 'import omni.graph.tools.ogn as ogn' to access it.\n{_trace}")
| 206 | Python | 28.571424 | 117 | 0.742718 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_cpp.py | from omni.graph.tools._impl.node_generator.generate_cpp import * # noqa: F401,PLW0401,PLW0614
| 95 | Python | 46.999977 | 94 | 0.778947 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/register_ogn_nodes.py | from omni.graph.tools._impl.node_generator.register_ogn_nodes import * # noqa: F401,PLW0401,PLW0614
| 101 | Python | 49.999975 | 100 | 0.782178 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_tests.py | from omni.graph.tools._impl.node_generator.generate_tests import * # noqa: F401,PLW0401,PLW0614
| 97 | Python | 47.999976 | 96 | 0.783505 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_usd.py | from omni.graph.tools._impl.node_generator.generate_usd import * # noqa: F401,PLW0401,PLW0614
| 95 | Python | 46.999977 | 94 | 0.778947 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/utils.py | from omni.graph.tools._impl.node_generator.utils import * # noqa: F401,PLW0401,PLW0614
| 88 | Python | 43.499978 | 87 | 0.772727 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/ThreadsafeOpen.py | from omni.graph.tools._impl.node_generator.ThreadsafeOpen import * # noqa: F401,PLW0401,PLW0614
| 97 | Python | 47.999976 | 96 | 0.793814 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/keys.py | from omni.graph.tools._impl.node_generator.keys import * # noqa: F401,PLW0401,PLW0614
| 87 | Python | 42.999979 | 86 | 0.770115 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_documentation.py | from omni.graph.tools._impl.node_generator.generate_documentation import * # noqa: F401,PLW0401,PLW0614
| 105 | Python | 51.999974 | 104 | 0.8 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/nodes.py | from omni.graph.tools._impl.node_generator.nodes import * # noqa: F401,PLW0401,PLW0614
| 88 | Python | 43.499978 | 87 | 0.772727 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/OmniGraphExtension.py | from omni.graph.tools._impl.node_generator.OmniGraphExtension import * # noqa: F401,PLW0401,PLW0614
| 101 | Python | 49.999975 | 100 | 0.80198 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/code_generation.py | from omni.graph.tools._impl.node_generator.code_generation import * # noqa: F401,PLW0401,PLW0614
| 98 | Python | 48.499976 | 97 | 0.785714 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_icon.py | from omni.graph.tools._impl.node_generator.generate_icon import * # noqa: F401,PLW0401,PLW0614
| 96 | Python | 47.499976 | 95 | 0.78125 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/category_definitions.py | from omni.graph.tools._impl.node_generator.category_definitions import * # noqa: F401,PLW0401,PLW0614
| 103 | Python | 50.999975 | 102 | 0.796116 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/FloatAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.FloatAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TokenAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.TokenAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/StringAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.StringAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 116 | Python | 57.499971 | 115 | 0.818966 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TimeCodeAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.TimeCodeAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 118 | Python | 58.499971 | 117 | 0.822034 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/parsing.py | from omni.graph.tools._impl.node_generator.attributes.parsing import * # noqa: F401,PLW0401,PLW0614
| 101 | Python | 49.999975 | 100 | 0.792079 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ObjectIdAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.ObjectIdAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 118 | Python | 58.499971 | 117 | 0.822034 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/naming.py | from omni.graph.tools._impl.node_generator.attributes.naming import * # noqa: F401,PLW0401,PLW0614
| 100 | Python | 49.499975 | 99 | 0.79 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/DoubleAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.DoubleAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 116 | Python | 57.499971 | 115 | 0.818966 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/NumericAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.NumericAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 117 | Python | 57.999971 | 116 | 0.820513 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/AnyAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.AnyAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 113 | Python | 55.999972 | 112 | 0.814159 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/FrameAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.FrameAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/NormalAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.NormalAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 116 | Python | 57.499971 | 115 | 0.818966 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/VectorAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.VectorAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 116 | Python | 57.499971 | 115 | 0.818966 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/Int64AttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.Int64AttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UIntAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.UIntAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 114 | Python | 56.499972 | 113 | 0.815789 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ExecutionAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.ExecutionAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 119 | Python | 58.999971 | 118 | 0.823529 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/BundleAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.BundleAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 116 | Python | 57.499971 | 115 | 0.818966 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/RoleAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.RoleAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 114 | Python | 56.499972 | 113 | 0.815789 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/management.py | from omni.graph.tools._impl.node_generator.attributes.management import * # noqa: F401,PLW0401,PLW0614
| 104 | Python | 51.499974 | 103 | 0.798077 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ColorAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.ColorAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/BoolAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.BoolAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 114 | Python | 56.499972 | 113 | 0.815789 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/PathAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.PathAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 114 | Python | 56.499972 | 113 | 0.815789 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UInt64AttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.UInt64AttributeManager import * # noqa: F401,PLW0401,PLW0614
| 116 | Python | 57.499971 | 115 | 0.818966 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UnionAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.UnionAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UCharAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.UCharAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/MatrixAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.MatrixAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 116 | Python | 57.499971 | 115 | 0.818966 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/IntAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.IntAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 113 | Python | 55.999972 | 112 | 0.814159 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/PointAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.PointAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 115 | Python | 56.999972 | 114 | 0.817391 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/HalfAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.HalfAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 114 | Python | 56.499972 | 113 | 0.815789 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/AttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.AttributeManager import * # noqa: F401,PLW0401,PLW0614
| 110 | Python | 54.499973 | 109 | 0.809091 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TexCoordAttributeManager.py | from omni.graph.tools._impl.node_generator.attributes.TexCoordAttributeManager import * # noqa: F401,PLW0401,PLW0614
| 118 | Python | 58.499971 | 117 | 0.822034 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/debugging.py | """
Collection of tools to help with debugging the operation of scripts.
Mainly lives here so that all OGN-related files can access it, though the tools are pretty general.
"""
import os
import weakref
from contextlib import suppress
from functools import partial, wraps
from typing import Dict, List
from omni.ext import get_dangling_references
__all__ = []
# ======================================================================
# Environment variable gating display and execution of debugging information
# - The value "1" sets OGN_DEBUG for general debugging
# - Any string containing "eval" sets OGN_EVAL_DEBUG
# - Either "1" or a string containing "gc" sets OGN_GC_DEBUG
# - Either "1" or a string containing "ui" sets OGN_UI_DEBUG
# e.g. you could enable UI and GC by setting it to "gc, ui"
_ogn_debug_env_var = os.getenv("OGN_DEBUG")
has_debugging = _ogn_debug_env_var is not None
# Normalized lower-case copy of the variable used for the substring checks below ("" when unset)
_ogn_debug_tokens = _ogn_debug_env_var.lower() if has_debugging else ""
OGN_DEBUG = _ogn_debug_env_var == "1"
OGN_EVAL_DEBUG = has_debugging and "eval" in _ogn_debug_tokens
OGN_GC_DEBUG = has_debugging and (OGN_DEBUG or "gc" in _ogn_debug_tokens)
OGN_UI_DEBUG = has_debugging and (OGN_DEBUG or "ui" in _ogn_debug_tokens)
# ======================================================================
def __dbg(gate_variable: bool, message: str, *args, **kwargs):
    """
    Print a debugging message when gate_variable is enabled; any additional args/kwargs are
    applied to the message with str.format before printing.
    """
    if not gate_variable:
        return
    text = message.format(*args, **kwargs) if (args or kwargs) else message
    print(f"DBG: {text}", flush=True)
# Pre-gated helpers for the standard debugging categories.
# Even more efficient use pattern is "OGN_DEBUG and dbg(X)" to prevent side effects.
dbg = partial(__dbg, OGN_DEBUG)
dbg_eval = partial(__dbg, OGN_EVAL_DEBUG)
dbg_gc = partial(__dbg, OGN_GC_DEBUG)
dbg_ui = partial(__dbg, OGN_UI_DEBUG)
# ======================================================================
# String used for indenting debugging information, so that nested function calls are visually distinct
INDENT = ""
# Number of characters added to (and removed from) INDENT per nested traced call. The previous code
# added a single space but stripped two, slowly corrupting the indentation of nested traces; both
# operations now share this constant.
_INDENT_STEP = 2
# ======================================================================
def function_trace(env_var=None):
    """
    Debugging decorator that adds function call tracing, potentially gated by an environment variable.
    Use as a normal function decorator:
    .. code-block:: python
        @function_trace()
        def my_function(value: str) -> str:
            return value + value
    Calling my_function("X") with debugging enabled will print this:
        Calling my_function('X')
        'my_function' returned 'XX'
    The extra parameter lets you selectively disable it based on environment variables:
    .. code-block:: python
        @function_trace("OGN_DEBUG")
        def my_function(value: str) -> str:
            return value + value
    This version only enables debugging if the environment variable "OGN_DEBUG" is set
    Args:
        env_var: If not None, tracing only happens when this environment variable is set (checked per call)
    """
    def inner_decorator(func):
        """Having an inner decorator allows parameters to be passed to the outer one"""
        @wraps(func)
        def wrapper_debug(*args, **kwargs):
            """Wrapper function to add debugging information before and after forwarding calls"""
            # Tracing is active when unconditionally enabled (env_var is None) or when the gating
            # environment variable is set to any value; otherwise forward the call untouched.
            if env_var is not None and os.getenv(env_var) is None:
                return func(*args, **kwargs)
            global INDENT
            args_repr = [repr(a) for a in args]
            kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
            signature = ", ".join(args_repr + kwargs_repr)
            print(f"{INDENT}Calling {func.__name__}({signature})")
            INDENT += " " * _INDENT_STEP
            try:
                value = func(*args, **kwargs)
                # The "returned" line is printed at the nested indent level, matching the old output
                print(f"{INDENT}{func.__name__!r} returned {value!r}")
            finally:
                # Restore the indentation even when func raises, so one exception does not
                # permanently skew the trace output of every later call.
                INDENT = INDENT[:-_INDENT_STEP]
            return value
        return wrapper_debug
    return inner_decorator
# ======================================================================
def __validate_property_destruction(weak_property, name: str):
    """Check that the weak reference to a property value references a destroyed value
    Args:
        weak_property: Weak reference returned by __destroy_property_member (may be None)
        name: Name of the property, used only in the diagnostic output
    """
    # Check to see if the property value is still being referenced
    if OGN_GC_DEBUG:
        # suppress covers weak_property being None (TypeError when called) or otherwise uncallable
        with suppress(AttributeError, TypeError):
            if weak_property() is not None:
                print(f"Property {name} destroy failed: {get_dangling_references(weak_property())}", flush=True)
# ----------------------------------------------------------------------
def __destroy_property_member(obj_property, name: str):
    """Try to call destroy for the obj_property - returns a weak reference to it for later use
    Args:
        obj_property: Property value whose destroy() method is to be called
        name: Name of the property, used only in the diagnostic output
    Returns:
        A weak reference to the property value, or None if it could not be destroyed or weak-referenced
    """
    dbg(f"Destroying member {name} on {obj_property}")
    try:
        # Use a weak reference to perform a simple test for "real" destruction
        weak_property = weakref.ref(obj_property)
        obj_property.destroy()
    except AttributeError:
        # Not every property type implements destroy(); that is not an error
        dbg_gc(f"...obj_property {name} has no destroy method")
        weak_property = None
    except TypeError:
        # Some basic types cannot be weak referenced
        dbg_gc(f"...obj_property {name} cannot be weak referenced")
        weak_property = None
    return weak_property
# ----------------------------------------------------------------------
def __destroy_property_list(property_list: List, base_name: str):
    """Walk a list of properties, recursively destroying them
    Args:
        property_list: List of property values to destroy; emptied as a side effect
        base_name: Name prefix used to build the per-member debugging names
    """
    dbg_gc(f"Destroying list {property_list} as {base_name}")
    index = 0
    # The non-standard loop is to make sure this execution frame does not retain references to the objects
    while property_list:
        property_member = property_list.pop(0)
        debug_name = f"{base_name}[{index}]"
        index += 1
        dbg_gc(f"...destroying member {debug_name}")
        if isinstance(property_member, list):
            dbg_gc("...(as list)")
            __destroy_property_list(property_member, debug_name)
        elif isinstance(property_member, dict):
            dbg_gc("...(as dictionary)")
            __destroy_property_dict(property_member, debug_name)
        else:
            dbg_gc("...(as object)")
            weak_property = __destroy_property_member(property_member, debug_name)
            # Drop the local reference before checking that destruction really happened
            property_member = None
            __validate_property_destruction(weak_property, debug_name)
# ----------------------------------------------------------------------
def __destroy_property_dict(property_dict: Dict, base_name: str):
    """Walk a dictionary of properties, recursively destroying them
    Args:
        property_dict: Dictionary of property values to destroy; emptied as a side effect
        base_name: Name prefix used to build the per-member debugging names
    """
    dbg_gc(f"Destroying dictionary {property_dict} as {base_name}")
    # The non-standard loop is to make sure this execution frame does not retain references to the objects
    while property_dict:
        property_key, property_member = property_dict.popitem()
        debug_name = f"{base_name}[{property_key}]"
        dbg_gc(f"...destroying member {debug_name}")
        if isinstance(property_member, list):
            dbg_gc("...(as list)")
            __destroy_property_list(property_member, debug_name)
        elif isinstance(property_member, dict):
            dbg_gc("...(as dictionary)")
            __destroy_property_dict(property_member, debug_name)
        else:
            dbg_gc("...(as object)")
            weak_property = __destroy_property_member(property_member, debug_name)
            # Drop the local reference before checking that destruction really happened
            property_member = None
            __validate_property_destruction(weak_property, debug_name)
# ----------------------------------------------------------------------
def destroy_property(self, property_name: str):
    """Call the destroy method on a property and set it to None - helps with garbage collection
    In a class's destroy() or __del__ method you can call this to generically handle member destruction
    when such things do not happen automatically (e.g. when you cross into the C++-bindings, or the
    objects have circular references)
    def destroy(self):
        destroy_property(self, "_widget")
    If the property is a list then the list members are individually destroyed.
    If the property is a dictionary then the values of the dictionary are individually destroyed.
    NOTE: Only call this if you are the owner of the property, otherwise just set it to None.
    Args:
        self: The object owning the property to be destroyed (can be anything with a destroy() method)
        property_name: Name of the property to be destroyed
    """
    debug_name = f"{type(self).__name__}.{property_name}"
    # If the property name uses the double-underscore convention for "internal" data then the name must
    # be embellished with the class name to allow access, since this function is not part of the class.
    property_to_access = property_name if property_name[0:2] != "__" else f"_{type(self).__name__}{property_name}"
    obj_property = getattr(self, property_to_access, None)
    if obj_property is None:
        dbg_gc(f"Destroyed None member {debug_name} {self} {property_to_access}")
        return
    dbg_gc(f"Destroy property {debug_name}")
    # Containers are recursively destroyed and the attribute is reset to an empty container of the same kind;
    # plain objects are destroyed, the attribute is set to None, and the destruction is verified.
    if isinstance(obj_property, list):
        dbg_gc("(as list)")
        __destroy_property_list(obj_property, debug_name)
        setattr(self, property_to_access, [])
    elif isinstance(obj_property, dict):
        dbg_gc("(as dictionary)")
        __destroy_property_dict(obj_property, debug_name)
        setattr(self, property_to_access, {})
    else:
        dbg_gc("(as object)")
        weak_property = __destroy_property_member(obj_property, debug_name)
        setattr(self, property_to_access, None)
        # Release the local reference so the weak-reference validation below is meaningful
        obj_property = None
        __validate_property_destruction(weak_property, debug_name)
| 9,727 | Python | 41.854625 | 114 | 0.607793 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/deprecate.py | """Manage deprecation for Python features for common use
All deprecation functions can be accessed from the top module level.
The :py:class:`omni.graph.tools.DeprecateMessage` class provides a simple way of logging a message that will only
show up once per session.
The :py:class:`omni.graph.tools.DeprecatedClass` decorator provides a method to emit a deprecation message when the
deprecated class is accessed.
The :py:class:`omni.graph.tools.RenamedClass` decorator is a slightly more sophisticated method of deprecating a
class when the deprecation is simply a name change.
The :py:function:`omni.graph.tools.deprecated_function` decorator provides a method to emit a deprecation message
when the old function is called.
The :py:function:`omni.graph.tools.DeprecatedImport` decorator provides a method to emit a deprecation message
when an entire deprecated file is imported for use. This should not be used for imports that will be included
in the API for backward compatibility, nor should these files be moved as they must continue to exist at the
same import location in order to remain compatible.
"""
import functools
import inspect
import re
import traceback
from typing import Optional, Set
from carb import log_warn, settings
__all__ = []
# ==============================================================================================================
class DeprecationError(Exception):
    """Raised in place of a deprecation warning when deprecations are configured as hard errors.
    Acts as a final notice for users who kept using a deprecated import, class, or function after
    ignoring the earlier deprecation warnings.
    """
# ==============================================================================================================
# begin-deprecate-message
class DeprecateMessage:
    """Manager for deprecation messages, to make it efficient to prevent multiple logging of the same
    deprecation messages.
    The default settings for output are usually enough to help you find where deprecated code is referenced.
    If more information is desired these per-class variables can be set to reduce the filtering being done. The
    message should contain an action item for the user to upgrade from the deprecated functionality:
    .. code-block:: python
        DeprecateMessage.deprecated("Install the latest version instead")
    # Although it's not usually necessary the class can be tuned using these class variable
    SILENCE_LOG = False # When set the output does not go to the console log; useful to disable for testing
    SHOW_STACK = True # Report stack trace in the deprecation message - can be turned off if it is too verbose
    MAX_STACK_LEVELS = 3 # Maximum number of stack levels to report, after filtering
    RE_IGNORE = re.compile("deprecate.py|bindings-python|importlib") # Ignore stack levels matching these patterns
    You can use some Python features to handle simple deprecation cases directly such as:
    .. code-block:: python
        # Rename constant from A to B
        A = (DeprecateMessage("A has been renamed to B") and False) or B
        # Constant A will be removed
        A = (DeprecateMessage("A will be removed, use B instead") and False) or B
    """
    # end-deprecate-message
    MESSAGES_LOGGED = set()  # Messages already emitted this session; deprecated() shows each one only once
    SILENCE_LOG = False  # When True the message is recorded but not sent to the console log
    SHOW_STACK = True  # When True a filtered stack trace is appended to each message
    MAX_STACK_LEVELS = 3  # Maximum number of filtered stack levels to include in the output
    RE_IGNORE = re.compile("deprecate.py|bindings-python|importlib")  # Stack lines matching this are dropped
    class NoLogging:
        """Context manager class to let you import a bunch of known deprecated functions without logging warnings.
        Typical use would be in providing backward compatibility in a module where submodules have moved.
        with DeprecateMessage.NoLogging():
            import .v1_0.my_old_function as my_old_function
        """
        def __init__(self, *args, **kwargs):
            # Holds the SILENCE_LOG value that was active before entering the context
            self.__original_logging = None
        def __enter__(self):
            """Disable logging for the duration of the context"""
            self.__original_logging = DeprecateMessage.SILENCE_LOG
            DeprecateMessage.SILENCE_LOG = True
        def __exit__(self, exit_type, value, exit_traceback):
            """Restore the original logging state"""
            DeprecateMessage.SILENCE_LOG = self.__original_logging
    # --------------------------------------------------------------------------------------------------------------
    @classmethod
    def messages_logged(cls) -> Set[str]:
        """Returns the set of messages that have been logged so far"""
        return cls.MESSAGES_LOGGED
    # --------------------------------------------------------------------------------------------------------------
    @classmethod
    def clear_messages(cls):
        """Clear the logged messages so that they can be logged again"""
        cls.MESSAGES_LOGGED = set()
    # --------------------------------------------------------------------------------------------------------------
    @classmethod
    def deprecations_are_errors(cls) -> bool:
        """Returns True if deprecations are currently being treated as errors"""
        return settings.get_settings().get("/persistent/omnigraph/deprecationsAreErrors")
    @classmethod
    def set_deprecations_are_errors(cls, make_errors: bool):
        """Enable or disable treating deprecations as errors instead of warnings"""
        settings.get_settings().set("/persistent/omnigraph/deprecationsAreErrors", make_errors)
    # --------------------------------------------------------------------------------------------------------------
    @classmethod
    def deprecated(cls, message: str):
        """Log the deprecation message if it has not yet been logged, otherwise do nothing
        Args:
            message: Message to display; only displays once even if this is called many times
        Adds stack trace information if the class member SHOW_STACK is True.
        Skips the Carbonite logging if the class member SILENCE_LOG is True (mostly useful for testing when a
        warning is the expected result).
        Raises:
            DeprecationError: If deprecations_are_errors() is enabled
        """
        if message in cls.MESSAGES_LOGGED:
            return
        stack = ""
        try:
            try:
                full_stack = traceback.format_stack() if cls.SHOW_STACK else []
            except SyntaxError as error:
                full_stack = [f"Error encountered when retrieving call stack - {error}"]
            if full_stack:
                # Drop stack lines from this module and interpreter machinery, keep the deepest few
                filtered_stack = filter(lambda stack: not cls.RE_IGNORE.search(stack), full_stack)
                stack = "\n" + "".join(list(filtered_stack)[-cls.MAX_STACK_LEVELS :])
        except SyntaxError as error:
            stack = f"Stack trace not accessible - {error}"
        # NOTE: The raise happens before the message is recorded, so in error mode every call raises again
        if cls.deprecations_are_errors():
            raise DeprecationError(f"{message}{stack}")
        # Short-circuit: when SILENCE_LOG is True the log_warn call is skipped entirely
        _ = cls.SILENCE_LOG or log_warn(f"{message}{stack}")
        cls.MESSAGES_LOGGED.add(message)
# ==============================================================================================================
# begin-deprecated-class
class DeprecatedClass:
    """Decorator to deprecate a class. Takes one argument that is a string to describe the action the user is to
    take to avoid the deprecated class. A deprecation message will be shown once, the first time the deprecated
    class is instantiated.
    .. code-block:: python
        @DeprecatedClass("After version 1.5.0 use og.NewerClass instead")
        class OlderClass:
            pass
    """
    # end-deprecated-class
    def __init__(self, deprecation_message: str):
        """Remember the message and only report it on initialization
        Args:
            deprecation_message: A description of the action the user is to take to avoid the deprecated class.
        """
        self.__deprecation_message = deprecation_message
    def message(self, deprecated_cls, deprecated_member: Optional[str] = None):
        """Emit a deprecation message with useful information attached
        Args:
            deprecated_cls: The deprecated class, used for its (old) name
            deprecated_member: If not None, the name of the static/class method whose access triggered the message
        """
        # Classes wrapped by RenamedClass carry the original name in __old_name__; prefer it in the message
        try:
            old_name = deprecated_cls.__old_name__
        except AttributeError:
            old_name = deprecated_cls.__name__
        what_is_deprecated = old_name if deprecated_member is None else f"{old_name}.{deprecated_member}"
        DeprecateMessage.deprecated(f"{what_is_deprecated} is deprecated: {self.__deprecation_message}")
    def __call__(self, deprecated_cls):
        """Report the deprecation message if it hasn't already been reported"""
        def wrapper(*args, **kwargs):
            """Redirect function calls to the real class"""
            self.message(deprecated_cls)
            result = deprecated_cls(*args, **kwargs)
            return result
        # Do some magic here by copying any static methods on the class to the wrapper function object.
        # This handles the case where a deprecated class has static or class methods.
        for member_name in dir(deprecated_cls):
            if isinstance(inspect.getattr_static(deprecated_cls, member_name), staticmethod):
                def static_function(cls, method, *sf_args, **sf_kwargs):
                    """Wrapper that will give deprecation messages for calling static methods too"""
                    self.message(cls, method)
                    return getattr(cls, method)(*sf_args, **sf_kwargs)
                # functools.partial binds the current loop values, avoiding the late-binding closure pitfall
                setattr(wrapper, member_name, functools.partial(static_function, deprecated_cls, member_name))
            elif isinstance(inspect.getattr_static(deprecated_cls, member_name), classmethod):
                def class_function(cls, method, *cl_args, **cl_kwargs):
                    """Wrapper that will give deprecation messages for calling class methods too"""
                    self.message(cls, method)
                    return getattr(cls, method)(*cl_args, **cl_kwargs)
                # functools.partial binds the current loop values, avoiding the late-binding closure pitfall
                setattr(wrapper, member_name, functools.partial(class_function, deprecated_cls, member_name))
        return wrapper
# ==============================================================================================================
# begin-renamed-class
def RenamedClass(cls, old_class_name: str, rename_message: Optional[str] = None) -> object:  # noqa: N802
    """Syntactic sugar to provide a class deprecation that is a simple renaming, where all of the functions in
    the old class are still present in backwards compatible form in the new class.
    Args:
        old_class_name: The name of the class that was renamed
        rename_message: If not None, what to use instead of the old class. If None then assume the new class is used.
    Usage:
    .. code-block:: python
        MyDeprecatedClass = RenamedClass(MyNewClass, "MyDeprecatedClass")
    """
    # end-renamed-class
    replacement = cls.__name__ if rename_message is None else rename_message
    class _RenamedClass(cls):
        # The original class name, reported by DeprecatedClass.message in place of the subclass name
        __old_name__ = old_class_name
    return DeprecatedClass(f"Use {replacement} instead")(_RenamedClass)
# ==============================================================================================================
# begin-deprecated-function
def deprecated_function(deprecation_message: str, is_property: bool = False):
    """Decorator to deprecate a function.
    Args:
        deprecation_message: A description of the action the user is to take to avoid the deprecated function.
        is_property: Set this True if the function is a property getter or setter.
    A deprecation message will only be shown once, the first time the deprecated function is called.
    .. code-block:: python
        @deprecated_function("After version 1.5.0 use og.newer_function() instead")
        def older_function():
            pass
    For property getters/setters use this decorator *after* the property decorator.
    .. code-block:: python
        @property
        @deprecated_function("use 'your_prop' instead.", is_property=True)
        def my_prop(self):
            return self.your_prop
        @my_prop.setter
        @deprecated_function("use 'your_prop' instead.", is_property=True)
        def my_prop(self, value):
            self.your_prop = value
    """
    # end-deprecated-function
    def decorator_deprecated(func):
        """Wrap func so that its first call logs the deprecation message."""
        # functools.wraps keeps the wrapped function transparent to help() and introspection
        @functools.wraps(func)
        def wrapper_deprecated(*args, **kwargs):
            # Properties are reported as 'name'; plain functions as name()
            name = func.__name__
            label = f"'{name}'" if is_property else f"{name}()"
            DeprecateMessage.deprecated(f"{label} is deprecated: {deprecation_message}")
            return func(*args, **kwargs)
        return wrapper_deprecated
    return decorator_deprecated
# ==============================================================================================================
# begin-deprecated-import
def DeprecatedImport(deprecation_message: str):  # noqa: N802
    """Decorator to deprecate a specific file or module import. Usually the functionality has been deprecated and
    moved to a different file.
    Args:
        deprecation_message: String with the action the user is to perform to avoid the deprecated import
    Usage:
    .. code-block:: python
        '''This is the top line of the imported file'''
        import omni.graph.tools as og
        og.DeprecatedImport("Import 'omni.graph.tools as og' and use og.new_function() instead")
        # The rest of the file can be left as-is for best backward compatibility, or import non-deprecated versions
        # of objects from their new location to avoid duplication.
    """
    # end-deprecated-import
    # f_back is the frame of the module body calling this function, so its local __name__ is the dotted
    # name of the module being imported; that name is included in the deprecation warning.
    this_module = inspect.currentframe().f_back.f_locals["__name__"]
    DeprecateMessage.deprecated(f"{this_module} is deprecated: {deprecation_message}")
| 13,789 | Python | 43.340836 | 119 | 0.618174 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/extension.py | """Extension management support"""
import omni.ext
__all__ = []
class _PublicExtension(omni.ext.IExt):
    """Dummy extension class that just serves to register and deregister the extension.
    Kit discovers this IExt subclass when the extension loads; no extra work is needed here because the module
    has no startup/shutdown side effects beyond its imports.
    """

    def on_startup(self):
        """Callback when the extension is starting up"""

    def on_shutdown(self):
        """Callback when the extension is shutting down"""
| 367 | Python | 23.533332 | 89 | 0.675749 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/ogn_types.py | """Helper which contains utilities and data for converting between type representations"""
from typing import Optional
from pxr import Sdf
__all__ = []
# Maping of pxr.Sdf.ValueTypeNames to corresponding OGN types (not including the Array/[] suffixes)
_SDF_BASE_NAME_TO_OGN = {
"Bool": "bool",
"Color3d": "colord[3]",
"Color3f": "colorf[3]",
"Color3h": "colorh[3]",
"Color4d": "colord[4]",
"Color4f": "colorf[4]",
"Color4h": "colorh[4]",
"Double": "double",
"Double2": "double[2]",
"Double3": "double[3]",
"Double4": "double[4]",
"Float": "float",
"Float2": "float[2]",
"Float3": "float[3]",
"Float4": "float[4]",
"Frame4d": "framed[4]",
"Half": "half",
"Half2": "half[2]",
"Half3": "half[3]",
"Half4": "half[4]",
"Int": "int",
"Int2": "int[2]",
"Int3": "int[3]",
"Int4": "int[4]",
"Int64": "int64",
"Matrix2d": "matrixd[2]",
"Matrix3d": "matrixd[3]",
"Matrix4d": "matrixd[4]",
"Normal3d": "normald[3]",
"Normal3f": "normalf[3]",
"Normal3h": "normalh[3]",
"Point3d": "pointd[3]",
"Point3f": "pointf[3]",
"Point3h": "pointh[3]",
"Quatd": "quatd[4]",
"Quatf": "quatf[4]",
"Quath": "quath[4]",
"String": "string",
"TexCoord2d": "texcoordd[2]",
"TexCoord2f": "texcoordf[2]",
"TexCoord2h": "texcoordh[2]",
"TexCoord3d": "texcoordd[3]",
"TexCoord3f": "texcoordf[3]",
"TexCoord3h": "texcoordh[3]",
"TimeCode": "timecode",
"Token": "token",
"UChar": "uchar",
"UInt": "uint",
"UInt64": "uint64",
"Vector3d": "vectord[3]",
"Vector3f": "vectorf[3]",
"Vector3h": "vectorh[3]",
}
# Mapping of OGN types to SDF - not all OGN types can be translated directly
_OGN_TO_SDF_BASE_NAME = {value: key for key, value in _SDF_BASE_NAME_TO_OGN.items()}
# As the Sdf.ValueTypeNames are static Boost objects create a mapping of them back to OGN to avoid linear lookup
_SDF_TO_OGN = {getattr(Sdf.ValueTypeNames, key): value for key, value in _SDF_BASE_NAME_TO_OGN.items()}
_SDF_TO_OGN.update(
{getattr(Sdf.ValueTypeNames, f"{key}Array"): f"{value}[]" for key, value in _SDF_BASE_NAME_TO_OGN.items()}
)
# ================================================================================
def ogn_to_sdf(ogn_type: str) -> Optional[Sdf.ValueTypeNames]:
    """Convert an OGN type string to the equivalent SDF value type name
    Args:
        ogn_type: String representation of the OGN type as described in its documentation
    Return:
        Equivalent pxr.Sdf.ValueTypeNames value, or None if there is no equivalent
    """
    # Peel the optional "[]" suffix off the OGN name; array types map onto the "...Array" SDF attribute names
    base_type = ogn_type
    suffix = ""
    if base_type.endswith("[]"):
        base_type = base_type[:-2]
        suffix = "Array"
    try:
        # An unknown base type raises KeyError; a composed name missing from Sdf falls back to None via getattr
        return getattr(Sdf.ValueTypeNames, _OGN_TO_SDF_BASE_NAME[base_type] + suffix, None)
    except KeyError:
        return None
# ================================================================================
def sdf_to_ogn(sdf_type: Sdf.ValueTypeName) -> Optional[str]:
    """Convert an SDF type to the equivalent OGN type name
    Args:
        sdf_type: The Sdf.ValueTypeName to convert
    Return:
        Equivalent OGN string name value, or None if there is no equivalent
    """
    # _SDF_TO_OGN already contains entries for both scalar and array SDF types (the array entries carry the "[]"
    # suffix), so a single lookup is all that is needed. The previous implementation additionally appended "[]"
    # whenever str(sdf_type) ended in "Array", which would double the suffix for any array type that matched;
    # str() of an Sdf.ValueTypeName yields the USD token form (e.g. "float[]"), so that check was not a reliable
    # array test in the first place.
    return _SDF_TO_OGN.get(sdf_type)
| 3,657 | Python | 28.983606 | 112 | 0.563577 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/parse_scheduling.py | """Support for the parsing and interpretation of scheduling hints in the .ogn file"""
from __future__ import annotations # For the forward class reference type in compare()
import re
from enum import Enum
from typing import List, Optional, Union
from .utils import IndentedOutput, ParseError
# ======================================================================
class _AccessType(Enum):
"""Access type for a given scheduling flag
ALL = The data will be both read and written to
READ = The data will only be read
WRITE = The data will only be written
"""
ALL = "ReadWrite"
READ = "ReadOnly"
WRITE = "WriteOnly"
@classmethod
def flag_access_type(cls, flag_name: str):
"""Returns the type of access the flag name implies"""
if flag_name.endswith("-read"):
return cls.READ
if flag_name.endswith("-write"):
return cls.WRITE
return cls.ALL
@classmethod
def as_cpp_enum(cls, access_type: _AccessType) -> str:
"""Returns the C++ enum value corresponding to the access type string taken from the class data values"""
if access_type == cls.READ:
return "eAccessType::eRead"
if access_type == cls.WRITE:
return "eAccessType::eWrite"
return "eAccessType::eReadWrite"
@classmethod
def as_python_enum(cls, access_type: _AccessType) -> str:
"""Returns the Python enum value corresponding to the access type string taken from the class data values"""
if access_type == cls.READ:
return "og.eAccessType.E_READ"
if access_type == cls.WRITE:
return "og.eAccessType.E_WRITE"
return "og.eAccessType.E_READ_WRITE"
# ======================================================================
class _ComputeRule(Enum):
"""Compute Rule for the scheduling flag
DEFAULT = Evaluator default rule
ON_REQUEST = Compute skipped until INode::onRequest
"""
DEFAULT = "compute-default"
ON_REQUEST = "compute-on-request"
@classmethod
def flag_compute_rule(cls, flag_name: str):
"""Returns the type of compute-rule the flag name implies"""
if flag_name == cls.ON_REQUEST.value:
return cls.ON_REQUEST
return cls.DEFAULT
@classmethod
def as_cpp_enum(cls, compute_rule: _ComputeRule) -> str:
"""Returns the C++ enum value corresponding to the string taken from the class data values"""
if compute_rule == cls.ON_REQUEST:
return "eComputeRule::eOnRequest"
return "eComputeRule::eDefault"
@classmethod
def as_python_enum(cls, compute_rule: _ComputeRule) -> str:
"""Returns the Python enum value corresponding to the access type string taken from the class data values"""
if compute_rule == cls.ON_REQUEST:
return "og.eComputeRule.E_ON_REQUEST"
return "og.eComputeRule.E_DEFAULT"
# ======================================================================
# begin-scheduling-hints
class SchedulingHints:
    """Class managing the scheduling hints.
    The keywords are case-independent during parsing, specified in lower case here for easy checking.
    When there is a -read and -write variant only one of them should be specified at a time:
        no suffix: The item in question is accessed for both read and write
        -read suffix: The item in question is accessed only for reading
        -write suffix: The item in question is accessed only for writing
    These class static values list the possible values for the "scheduling" lists in the .ogn file.
    # Set when the node accesses other global data, i.e. data stored outside of the node, including the data
    # on other nodes.
    GLOBAL_DATA = "global"
    GLOBAL_DATA_READ = "global-read"
    GLOBAL_DATA_WRITE = "global-write"
    # Set when a node accesses static data, i.e. data shared among all nodes of the same type
    STATIC_DATA = "static"
    STATIC_DATA_READ = "static-read"
    STATIC_DATA_WRITE = "static-write"
    # Set when the node is a threadsafe function, i.e. it can be scheduled in parallel with any other nodes, including
    # nodes of the same type. This flag is not allowed to coexist with any of the other types since they all denote
    # unsafe threaded data access.
    THREADSAFE = "threadsafe"
    # Set when the node accesses the graph topology, e.g. connections, attributes, or nodes
    TOPOLOGY = "topology"
    TOPOLOGY_READ = "topology-read"
    TOPOLOGY_WRITE = "topology-write"
    # Set when the node accesses the USD stage data (for read-only, write-only, or both read and write)
    USD = "usd"
    USD_READ = "usd-read"
    USD_WRITE = "usd-write"
    # Set when the scheduling of the node compute may be modified from the evaluator default.
    COMPUTERULE_DEFAULT = "compute-default"
    COMPUTERULE_ON_REQUEST = "compute-on-request"
    """

    # end-scheduling-hints
    GLOBAL_DATA = "global"
    GLOBAL_DATA_READ = "global-read"
    GLOBAL_DATA_WRITE = "global-write"
    STATIC_DATA = "static"
    STATIC_DATA_READ = "static-read"
    STATIC_DATA_WRITE = "static-write"
    THREADSAFE = "threadsafe"
    TOPOLOGY = "topology"
    TOPOLOGY_READ = "topology-read"
    TOPOLOGY_WRITE = "topology-write"
    USD = "usd"
    USD_READ = "usd-read"
    USD_WRITE = "usd-write"
    COMPUTERULE_DEFAULT = "compute-default"
    COMPUTERULE_ON_REQUEST = "compute-on-request"

    def __init__(self, scheduling_hints: Union[List[str], str]):
        """Initialize the scheduling hints from the .ogn description"""
        # Each member is None until the corresponding flag is parsed; None therefore means "not specified"
        self.global_data = None
        self.static_data = None
        self.threadsafe = None
        self.topology = None
        self.usd = None
        self.compute_rule = None
        # All of the uppercase class members above form the set of legal flag tokens
        self._allowed_tokens = [
            getattr(self, token_name) for token_name in dir(SchedulingHints) if token_name.isupper()
        ]
        if not isinstance(scheduling_hints, list) and not isinstance(scheduling_hints, str):
            raise ParseError("Scheduling hints must be a comma-separated string or a list of strings")
        if isinstance(scheduling_hints, str):
            # This trick allows lists to be delimited by arbitrary combinations of commas and spaces, so that the
            # user doesn't have to remember which one to use
            scheduling_hints = [element for element in re.split(" |, |,", scheduling_hints) if element]
        for hints in scheduling_hints:
            self.set_flag(hints)

    # --------------------------------------------------------------------------------------------------------------
    def __str__(self) -> str:
        """Returns a string with the set of flags currently set"""
        result = []
        result.append(f"GLOBAL={None if self.global_data is None else self.global_data.value}")
        result.append(f"STATIC={None if self.static_data is None else self.static_data.value}")
        result.append(f"THREADSAFE={False if self.threadsafe is None else self.threadsafe}")
        result.append(f"TOPOLOGY={None if self.topology is None else self.topology.value}")
        result.append(f"USD={None if self.usd is None else self.usd.value}")
        result.append(f'COMPUTE_RULE="{None if self.compute_rule is None else self.compute_rule.value}"')
        return ", ".join(result)

    # --------------------------------------------------------------------------------------------------------------
    def parse_error(self, message: str):
        """Raises a parse error with common information attached to the given message"""
        raise ParseError(f"{message} - [{self}]")

    # --------------------------------------------------------------------------------------------------------------
    def set_flag(self, flag_to_set: str):
        """Tries to enable the named flag.
        Raises ParseError if the flag is not legal or not compatible with current flags"""
        flag_to_set = flag_to_set.lower()
        if flag_to_set not in self._allowed_tokens:
            self.parse_error(f"Scheduling flag '{flag_to_set}' not in allowed list {self._allowed_tokens}")
        # NOTE(review): the compatibility checks below are order-sensitive - "threadsafe" followed by a read-only
        # flag (e.g. "threadsafe, usd-read") is accepted, while the reverse order rejects "threadsafe" because the
        # other flag is already set. Confirm this asymmetry is intended before relying on it.
        if flag_to_set == self.THREADSAFE:
            if [self.usd, self.global_data, self.static_data, self.topology] != [None, None, None, None]:
                self.parse_error(f"'{flag_to_set}' scheduling type not compatible with any data modification flags")
            self.threadsafe = True
        elif flag_to_set in [self.USD, self.USD_READ, self.USD_WRITE]:
            if self.usd is not None:
                self.parse_error(f"{flag_to_set} must be the only USD flag set")
            if self.threadsafe and flag_to_set != self.USD_READ:
                self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
            self.usd = _AccessType.flag_access_type(flag_to_set)
        elif flag_to_set in [self.STATIC_DATA, self.STATIC_DATA_READ, self.STATIC_DATA_WRITE]:
            if self.static_data is not None:
                self.parse_error(f"{flag_to_set} must be the only static_data flag set")
            if self.threadsafe and flag_to_set != self.STATIC_DATA_READ:
                self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
            self.static_data = _AccessType.flag_access_type(flag_to_set)
        elif flag_to_set in [self.GLOBAL_DATA, self.GLOBAL_DATA_READ, self.GLOBAL_DATA_WRITE]:
            if self.global_data is not None:
                self.parse_error(f"{flag_to_set} must be the only global data flag set")
            if self.threadsafe and flag_to_set != self.GLOBAL_DATA_READ:
                self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
            self.global_data = _AccessType.flag_access_type(flag_to_set)
        elif flag_to_set in [self.TOPOLOGY, self.TOPOLOGY_READ, self.TOPOLOGY_WRITE]:
            if self.topology is not None:
                self.parse_error(f"{flag_to_set} must be the only topology flag set")
            if self.threadsafe and flag_to_set != self.TOPOLOGY_READ:
                self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag")
            self.topology = _AccessType.flag_access_type(flag_to_set)
        elif flag_to_set in [self.COMPUTERULE_DEFAULT, self.COMPUTERULE_ON_REQUEST]:
            if self.compute_rule is not None:
                self.parse_error(f"{flag_to_set} must be the only compute-rule flag set")
            self.compute_rule = _ComputeRule.flag_compute_rule(flag_to_set)

    # --------------------------------------------------------------------------------------------------------------
    def compare(self, other: SchedulingHints) -> List[str]:
        """Compare this object against another of the same type to see if their flag configurations match.
        If they don't match then a list of differences is returned, otherwise an empty list
        """
        errors = []
        if self.usd != other.usd:
            errors.append(f"usd flag mismatch '{self.usd}' != '{other.usd}'")
        if self.global_data != other.global_data:
            errors.append(f"global_data flag mismatch '{self.global_data}' != '{other.global_data}'")
        if self.topology != other.topology:
            errors.append(f"topology flag mismatch '{self.topology}' != '{other.topology}'")
        if self.static_data != other.static_data:
            errors.append(f"static_data flag mismatch '{self.static_data}' != '{other.static_data}'")
        if self.threadsafe != other.threadsafe:
            errors.append(f"threadsafe flag mismatch '{self.threadsafe}' != '{other.threadsafe}'")
        if self.compute_rule != other.compute_rule:
            errors.append(f"compute-rule flag mismatch '{self.compute_rule}' != '{other.compute_rule}'")
        return errors

    # --------------------------------------------------------------------------------------------------------------
    def has_values_set(self) -> bool:
        """Returns True if any of the scheduling hints values have been set"""
        return [self.threadsafe, self.global_data, self.static_data, self.topology, self.usd, self.compute_rule] != [
            None
        ] * 6

    # --------------------------------------------------------------------------------------------------------------
    def cpp_includes_required(self) -> List[str]:
        """Returns a list of files required to be included for the generated C++ code to work"""
        return ["#include <omni/graph/core/ISchedulingHints.h>"] if self.has_values_set() else []

    # --------------------------------------------------------------------------------------------------------------
    def emit_cpp(self, out: IndentedOutput) -> bool:
        """Write the C++ initialization code to the given output stream, writing nothing if no flags were set.
        Assumes there is a local variable called nodeTypeObj that contains the NodeTypeObj definition.
        Returns True if anything was written.
        """
        if not self.has_values_set():
            return False
        out.write("auto __schedulingInfo = nodeTypeObj.iNodeType->getSchedulingHints(nodeTypeObj);")
        out.write('CARB_ASSERT(__schedulingInfo, "Could not acquire the scheduling hints");')
        out.write("if (__schedulingInfo)")
        if out.indent("{"):
            # self.threadsafe can only be None or True after parsing, so the elif branch emitting eUnsafe appears
            # unreachable from set_flag alone; it is kept for callers that assign the member directly.
            if self.threadsafe:
                out.write("__schedulingInfo->setThreadSafety(eThreadSafety::eSafe);")
            elif self.threadsafe is not None:
                out.write("__schedulingInfo->setThreadSafety(eThreadSafety::eUnsafe);")
            if self.global_data is not None:
                out.write(
                    "__schedulingInfo->setDataAccess(eAccessLocation::eGlobal,"
                    f" {_AccessType.as_cpp_enum(self.global_data)});"
                )
            if self.static_data is not None:
                out.write(
                    "__schedulingInfo->setDataAccess(eAccessLocation::eStatic,"
                    f" {_AccessType.as_cpp_enum(self.static_data)});"
                )
            if self.topology is not None:
                out.write(
                    "__schedulingInfo->setDataAccess(eAccessLocation::eTopology,"
                    f" {_AccessType.as_cpp_enum(self.topology)});"
                )
            if self.usd is not None:
                out.write(
                    f"__schedulingInfo->setDataAccess(eAccessLocation::eUsd, {_AccessType.as_cpp_enum(self.usd)});"
                )
            if self.compute_rule is not None:
                out.write(f"__schedulingInfo->setComputeRule({_ComputeRule.as_cpp_enum(self.compute_rule)});")
            out.exdent("}")
        return True

    # --------------------------------------------------------------------------------------------------------------
    def emit_python(self, out: IndentedOutput) -> bool:
        """Write the Python initialization code to the given output stream, writing nothing if no flags were set.
        Assumes there is a local variable called node_type that contains the Py_NodeType definition.
        Returns True if anything was written.
        """
        if not self.has_values_set():
            return False
        out.write("__hints = node_type.get_scheduling_hints()")
        if out.indent("if __hints is not None:"):
            if self.threadsafe:
                out.write("__hints.thread_safety = og.eThreadSafety.E_SAFE")
            elif self.threadsafe is not None:
                out.write("__hints.thread_safety = og.eThreadSafety.E_UNSAFE")
            if self.global_data is not None:
                out.write(
                    "__hints.set_data_access(og.eAccessLocation.E_GLOBAL,"
                    f" {_AccessType.as_python_enum(self.global_data)})"
                )
            if self.static_data is not None:
                out.write(
                    "__hints.set_data_access(og.eAccessLocation.E_STATIC,"
                    f" {_AccessType.as_python_enum(self.static_data)})"
                )
            if self.topology is not None:
                out.write(
                    "__hints.set_data_access(og.eAccessLocation.E_TOPOLOGY,"
                    f" {_AccessType.as_python_enum(self.topology)})"
                )
            if self.usd is not None:
                out.write(f"__hints.set_data_access(og.eAccessLocation.E_USD, {_AccessType.as_python_enum(self.usd)})")
            if self.compute_rule is not None:
                out.write(f"__hints.compute_rule = {_ComputeRule.as_python_enum(self.compute_rule)}")
            out.exdent()
        return True

    # --------------------------------------------------------------------------------------------------------------
    @classmethod
    def illegal_configurations(cls) -> List[str]:
        """Returns a list of illegal parsing configurations for testing purposes. Keeps the data local"""
        return [
            '{"not": "a list or string"}',
            '["foo"]',  # List with bad values
            '"usd, bar"',  # String with bad values
            '["usd", "usd-read"]',  # Lists with incompatible values
            '["global-write", "global-read"]',
            '["topology", "topology-write"]',
            '["static", "static-read"]',
            '"threadsafe, static"',  # String with incompatible values
            '["compute-default", "compute-on-request"]',
        ]

    # --------------------------------------------------------------------------------------------------------------
    @classmethod
    def legal_configurations(cls) -> List[str]:
        """Returns a list of legal parsing configurations and expected results for testing purposes.
        The data is a list of pairs where the first element is the flags to be set on the scheduling hints
        in the .ogn file (possibly with extra information as needed) and the second element is a SchedulingHints
        object configured with the expected results. It has a compare operation so the test will use that to
        confirm results
        """

        def from_flags(
            global_data: Optional[_AccessType] = None,
            threadsafe: Optional[bool] = None,
            static_data: Optional[_AccessType] = None,
            topology: Optional[_AccessType] = None,
            usd: Optional[_AccessType] = None,
            compute_rule: Optional[_ComputeRule] = None,
        ) -> SchedulingHints:
            """Returns a SchedulingHints object whose flags are set to the ones passed in"""
            scheduling = SchedulingHints([])
            scheduling.global_data = global_data
            scheduling.threadsafe = threadsafe
            scheduling.static_data = static_data
            scheduling.topology = topology
            scheduling.usd = usd
            scheduling.compute_rule = compute_rule
            return scheduling

        return [
            ('"global"', from_flags(global_data=_AccessType.ALL)),
            ('"threadsafe"', from_flags(threadsafe=True)),
            ('"static-read"', from_flags(static_data=_AccessType.READ)),
            ('"topology-write"', from_flags(topology=_AccessType.WRITE)),
            (
                '"usd,global-write,topology-read"',
                from_flags(usd=_AccessType.ALL, global_data=_AccessType.WRITE, topology=_AccessType.READ),
            ),
            (
                '["usd", "global-read", "topology-write"]',
                from_flags(usd=_AccessType.ALL, global_data=_AccessType.READ, topology=_AccessType.WRITE),
            ),
            ('"compute-on-request"', from_flags(compute_rule=_ComputeRule.ON_REQUEST)),
            ('"compute-default"', from_flags(compute_rule=_ComputeRule.DEFAULT)),
        ]
| 19,726 | Python | 48.3175 | 119 | 0.57812 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_python.py | """Support for generating a pythonic interface class for OmniGraph Nodes.
Exports:
generate_python: Create a NODE.ogn.py file containing a pythonic interface for the node data
"""
import json
import re
from contextlib import suppress
from typing import List, Optional
from .attributes.AttributeManager import AttributeManager
from .attributes.naming import INPUT_NS, OUTPUT_NS, PORT_NAMES, STATE_NS
from .keys import CudaPointerValues, LanguageTypeValues, MemoryTypeValues, MetadataKeyOutput, MetadataKeys
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, logger, shorten_string_lines_to
__all__ = ["generate_python"]
class NodePythonGenerator(NodeInterfaceGenerator):
"""Manage the functions required to generate a Python interface for a node"""
    def __init__(self, configuration: GeneratorConfiguration):  # noqa: PLW0246
        """Set up the generator and output the Python interface code for the node
        Just passes the initialization on to the parent class. See the argument and exception descriptions there.
        """
        # noqa PLW0246 (useless parent delegation): kept explicit so this class documents its construction contract
        super().__init__(configuration)
# ----------------------------------------------------------------------
def interface_file_name(self) -> str:
"""Return the path to the name of the Python file"""
return self.base_name + "Database.py"
# ----------------------------------------------------------------------
def database_class_name(self) -> str:
"""Return the name of the generated database class, which is what will be passed to the compute method"""
return f"{self.base_name}Database"
# ----------------------------------------------------------------------
def _value_class_name(self, namespace: str) -> str:
"""Return the name of the internal class that holds attributes in the given namespace"""
return f"ValuesFor{namespace.capitalize()}"
# ----------------------------------------------------------------------
def _pre_class_spacing(self):
"""Writes out spacing before class names - follows Flake8 in verbose mode, nothing otherwise"""
if self.verbose:
self.out.write()
self.out.write()
# ----------------------------------------------------------------------
def _pre_function_spacing(self):
"""Writes out spacing before function definitions - follows Flake8 in verbose mode, nothing otherwise"""
if self.verbose:
self.out.write()
# ----------------------------------------------------------------------
def _filter_out_batched_attributes(self, attribute_list: List[AttributeManager], namespace: str):
"""
Args:
attribute_list: List of attributes belonging to the generated class
namespace: Namespace of attributes in the list. Assumption is that all attributes have the same answer.
Returns:
Two lists of attributes: batched attributes and the filtered list without batched attributes"""
if namespace == STATE_NS:
return [], attribute_list
batched_attribute_list = []
filtered_attribute_list = []
for attribute in attribute_list:
# batching of attributes is not supported for runtime types
# batching of array attributes wouldn't be the most efficient. best is to acquire the right size
# numpy.array once and work with it directly currently only limited to CPU memory
if (
attribute.ogn_base_type() not in ["bundle", "any", "union"]
and attribute.array_depth == 0
and attribute.memory_storage() == MemoryTypeValues.CPU
):
batched_attribute_list.append(attribute)
else:
filtered_attribute_list.append(attribute)
return batched_attribute_list, filtered_attribute_list
# ----------------------------------------------------------------------
    def _generate_attribute_class(self, attribute_list: List[AttributeManager], namespace: str) -> Optional[str]:
        """Output a nested class that provides database access for the node's input or output attributes.
        Args:
            attribute_list: List of attributes belonging to the generated class
            namespace: Namespace of attributes in the list. Assumption is that all attributes have the same answer.
                Passed explicitly to allow for the possibility of an empty list.
        Returns:
            The name of the class that was generated (None if not generated)
        The attribute classes have two members per attribute:
            attr_PROPERTY: Holds a reference to the node's Attribute member for this attribute
            PROPERTY: A property through which the attribute values are accessed
        """
        # This method is called with all attributes in the same namespace so it's safe to use the first one
        # to extract the common definition.
        attribute_class = self._value_class_name(namespace)
        is_read_only = namespace == INPUT_NS
        # For correct syntax the namespace name must be singular
        namespace_for_comment = namespace[:-1] if namespace.endswith("s") else namespace
        self._pre_function_spacing()
        if self.out.indent(f"class {attribute_class}(og.DynamicAttributeAccess):"):
            batched_attribute_list, filtered_attribute_list = self._filter_out_batched_attributes(
                attribute_list, namespace
            )
            has_batched_attributes = len(batched_attribute_list) > 0
            if has_batched_attributes:
                # LOCAL_PROPERTY_NAMES lets the generated __getattr__/__setattr__ overrides (emitted below)
                # bypass the dynamic-attribute machinery for the batched members
                local_property_list = [attribute.python_property_name() for attribute in batched_attribute_list]
                if namespace == INPUT_NS:
                    local_property_list += ["_setting_locked", "_batchedReadAttributes", "_batchedReadValues"]
                elif namespace == OUTPUT_NS:
                    local_property_list += ["_batchedWriteValues"]
                batched_str = "{" + ", ".join(f'"{attribute}"' for attribute in local_property_list) + "}"
                self.out.write(f"LOCAL_PROPERTY_NAMES = {batched_str}")
            elif namespace != STATE_NS:
                self.out.write("LOCAL_PROPERTY_NAMES = { }")
            self.out.write(
                f'"""Helper class that creates natural hierarchical access to {namespace_for_comment} attributes"""'
            )
            if self.out.indent(
                "def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):"
            ):
                self.out.write('"""Initialize simplified access for the attribute data"""')
                self.out.write("context = node.get_graph().get_default_graph_context()")
                self.out.write("super().__init__(context, node, attributes, dynamic_attributes)")
                # Bundle attributes stored on the GPU need their CUDA pointer kind recorded for the container
                has_bundles = False
                gpu_bundles = []
                gpu_ptr_kinds = {}
                for attribute in attribute_list:
                    if attribute.ogn_base_type() == "bundle":
                        has_bundles = True
                        if attribute.memory_type != MemoryTypeValues.CPU:
                            gpu_bundles.append(attribute.usd_name())
                            with suppress(KeyError):
                                gpu_ptr_kinds[attribute.usd_name()] = CudaPointerValues.PYTHON[
                                    self.node_interface.cuda_pointer_type
                                ]
                if has_bundles:
                    gpu_ptr_str = "{" + ",".join(f'"{key}": {value}' for key, value in gpu_ptr_kinds.items()) + "}"
                    self.out.write(
                        f"self.__bundles = og.BundleContainer(context, node, attributes, {gpu_bundles},"
                        f" read_only={is_read_only}, gpu_ptr_kinds={gpu_ptr_str})"
                    )
                # Output arrays will need a size since that has to be set when the user gets their values.
                # This puts the onus on the caller to set the size before calling get(). For safety, the sizes
                # are initialized to None so that failure to set values can generate a sensible error message.
                if not is_read_only:
                    for attribute in attribute_list:
                        if attribute.fabric_needs_counter():
                            default_size = "None" if attribute.default is None else len(attribute.default)
                            self.out.write(f"self.{attribute.python_property_name()}_size = {default_size}")
                # Initialize storage for batched values
                if namespace == INPUT_NS:
                    batched_str = (
                        "["
                        + ", ".join(f"self.{attribute.python_attribute_name()}" for attribute in batched_attribute_list)
                        + "]"
                    )
                    self.out.write(f"self._batchedReadAttributes = {batched_str}")
                    batched_str = (
                        "["
                        + ", ".join(f"{attribute.python_default_value()}" for attribute in batched_attribute_list)
                        + "]"
                    )
                    self.out.write(f"self._batchedReadValues = {batched_str}")
                elif namespace == OUTPUT_NS:
                    self.out.write("self._batchedWriteValues = { }")
                self.out.exdent()
            for attribute in filtered_attribute_list:
                # Emit the getters and setters for the attributes.
                attribute.generate_python_property_code(self.out)
            for index, attribute in enumerate(batched_attribute_list):
                # Emit the getters and setters for batched read or write
                attribute.generate_python_batched_property_code(index, self.out)
            if has_batched_attributes:
                # Override any dynamic getters and setters for batched attributes to remove the overhead
                self.out.write()
                if self.out.indent("def __getattr__(self, item: str):"):
                    if self.out.indent("if item in self.LOCAL_PROPERTY_NAMES:"):
                        self.out.write("return object.__getattribute__(self, item)")
                    self.out.exdent()
                    if self.out.indent("else:"):
                        self.out.write("return super().__getattr__(item)")
                    self.out.exdent()
                self.out.exdent()
                self.out.write()
                if self.out.indent("def __setattr__(self, item: str, new_value):"):
                    if self.out.indent("if item in self.LOCAL_PROPERTY_NAMES:"):
                        self.out.write("object.__setattr__(self, item, new_value)")
                    self.out.exdent()
                    if self.out.indent("else:"):
                        self.out.write("super().__setattr__(item, new_value)")
                    self.out.exdent()
                self.out.exdent()
            # _prefetch/_commit are the entry points the generated database uses to move batched values in bulk
            if namespace == INPUT_NS:
                self.out.write()
                if self.out.indent("def _prefetch(self):"):
                    self.out.write("readAttributes = self._batchedReadAttributes")
                    self.out.write("newValues = _og._prefetch_input_attributes_data(readAttributes)")
                    if self.out.indent("if len(readAttributes) == len(newValues):"):
                        self.out.write("self._batchedReadValues = newValues")
                    self.out.exdent()
                    self.out.exdent()
            elif namespace == OUTPUT_NS:
                self.out.write()
                if self.out.indent("def _commit(self):"):
                    self.out.write("_og._commit_output_attributes_data(self._batchedWriteValues)")
                    self.out.write("self._batchedWriteValues = { }")
                    self.out.exdent()
            self.out.exdent()
        return attribute_class
# ----------------------------------------------------------------------
    def _generate_shared_node_type_initialize(self):
        """
        Output the code to set up any shared node type information, like adding attributes and setting metadata.
        Assumes this is part of a method where the variable "node_type" contains the node type object to initialize
        """
        # Set the metadata for this node type
        self.out.write(f"node_type.set_metadata(ogn.MetadataKeys.EXTENSION, {json.dumps(self.extension)})")
        for key, value in self.node_interface.metadata.items():
            # Known keys are emitted as their symbolic ogn.MetadataKeys name; unknown keys as a quoted literal
            python_key = MetadataKeyOutput.python_name_from_key(key)
            if python_key is None:
                python_key = json.dumps(key)
            # Handle lists as a comma-separated string
            if isinstance(value, list):
                value = '"' + ",".join([x.replace('"', '\\"') for x in value]) + '"'
            else:
                value = json.dumps(value)
            self.out.write(f"node_type.set_metadata({python_key}, {value})")
        if self.node_interface.memory_type != MemoryTypeValues.CPU:
            self.out.write(f'node_type.set_metadata(ogn.MetadataKeys.MEMORY_TYPE, "{self.node_interface.memory_type}")')
        # The icon path is relative to the extension path, which is only known at runtime, so build it up then.
        # To the user it will appear as an absolute path, which they can modify if they wish to.
        if self.node_interface.icon_path is not None:
            icon_path = json.dumps(self.node_interface.icon_path)
            self.out.write(f'icon_path = carb.tokens.get_tokens_interface().resolve("${{{self.extension}}}")')
            # Using os.path.join here causes problems due to the backslash path separator on Windows. The components
            # both have forward slashes by design so just insert the missing one.
            self.out.write(f"icon_path = icon_path + '/' + {icon_path}")
            self.out.write("node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)")
        # If any of the scheduling hints flags have been defined then set them here
        if self.node_interface.scheduling_hints is not None:
            self.node_interface.scheduling_hints.emit_python(self.out)
        # Generate the initialization of attributes, including setting defaults and adding them to the node type
        if self.node_interface.has_attributes():
            self.out.write(f"{self.database_class_name()}.INTERFACE.add_to_node_type(node_type)")
        if self.node_interface.all_state_attributes() or self.node_interface.has_state:
            self.out.write("node_type.set_has_state(True)")
# ----------------------------------------------------------------------
def _generate_node_registration(self):
    """
    Output the definition of the node type's registration support method

    By having the node type class object be a static class member a circular import can be avoided.
    The node implementation will call OgnTheNodeDatabase.register(OgnTheNode) to handle registration and the
    automatic override of any ABI methods that OgnTheNode might implement.
    """
    database_name = self.database_class_name()
    self._pre_function_spacing()
    self.out.write("NODE_TYPE_CLASS = None")
    # Imprint the generator and target versions so the generated file records its provenance
    self.out.write(f"GENERATOR_VERSION = {self.generator_version}")
    self.out.write(f"TARGET_VERSION = {self.target_version}")
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def register(node_type_class):"):
        # Stash the implementing class, then hand the ABI wrapper to OmniGraph for registration
        self.out.write(f"{database_name}.NODE_TYPE_CLASS = node_type_class")
        self.out.write(f"og.register_node_type({database_name}.abi, {self.node_interface.version})")
        self.out.exdent()
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def deregister():"):
        self.out.write(f'og.deregister_node_type("{self.node_interface.name}")')
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_get_node_type(self):
    """Output the abi implementation of the get_node_type method"""
    database_name = self.database_class_name()
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def get_node_type():"):
        # Prefer the node's own override when one exists; otherwise fall back to the hardcoded name
        self.out.write(f"get_node_type_function = getattr({database_name}.NODE_TYPE_CLASS, 'get_node_type', None)")
        if self.out.indent("if callable(get_node_type_function):"):
            self.out.write("return get_node_type_function()")
        self.out.exdent(f"return '{self.node_interface.name}'")
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_compute(self):
    """Output the abi implementation of the compute method into the generated Python database file"""
    db_class_name = self.database_class_name()

    def __generate_attribute_validate(attribute_list: List[AttributeManager]):
        """Write out any code that verifies the validity of attributes before trying to compute"""
        for attribute in attribute_list:
            attribute.generate_python_validation(self.out)

    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def compute(context, node):"):
        # Construct the database that accesses the Fabric data in a Pythonic way
        if self.out.indent("try:"):
            # Reuse the cached database instance for this node when one was already created
            self.out.write(f"per_node_data = {db_class_name}.PER_NODE_DATA[node.node_id()]")
            self.out.write("db = per_node_data.get('_db')")
            if self.out.indent("if db is None:"):
                self.out.write(f"db = {db_class_name}(node)")
                self.out.write("per_node_data['_db'] = db")
                self.out.exdent()
            self.out.exdent()
        # Currently with hot reload we are not getting PER_NODE_DATA initialized. Just generate the db on the fly.
        if self.out.indent("except:"):
            self.out.write(f"db = {db_class_name}(node)")
            self.out.exdent()
        self.out.write()
        if self.out.indent("try:"):
            # Emit the per-attribute validation code so the compute only runs against legal data
            __generate_attribute_validate(self.node_interface.all_input_attributes())
            __generate_attribute_validate(self.node_interface.all_output_attributes())
            __generate_attribute_validate(self.node_interface.all_state_attributes())
            # The ABI compute method has the same name as the generated compute method to be called, so use
            # the fact that the ABI method has more parameters to figure out which one the node has defined.
            self.out.write(f"compute_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'compute', None)")
            if self.out.indent("if callable(compute_function) and compute_function.__code__.co_argcount > 1:"):
                self.out.write("return compute_function(context, node)")
                self.out.exdent()
            self.out.write()
            # Fetch input attributes registered for batch read
            self.out.write("db.inputs._prefetch()")
            # Special flag that prevents inputs from being modified inside a compute method, which avoids
            # synchronization problems. In C++ this is enforced by returning const values; this is equivalent.
            # Suppress the error that occurs if no inputs were generated.
            self.out.write("db.inputs._setting_locked = True")
            # If the node attempted to write a const value the compute will throw AttributeError saying why
            if self.out.indent("with og.in_compute():"):
                self.out.write(f"return {db_class_name}.NODE_TYPE_CLASS.compute(db)")
                self.out.exdent()
            self.out.exdent()
        # For this error only the name of the attribute is returned, to minimize duplication of strings
        if self.out.indent("except Exception as error:"):
            self.out.write('stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))')
            self.out.write(
                "db.log_error(f'Assertion raised in compute - {error}\\n{stack_trace}', add_context=False)"
            )
            self.out.exdent()
        if self.out.indent("finally:"):
            # Always unlock the inputs, even when the compute raised
            self.out.write("db.inputs._setting_locked = False")
            # Commit output attributes registered for batch write
            self.out.write("db.outputs._commit()")
            self.out.exdent()
        self.out.write("return False")
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_initialize(self):
    """Output the abi implementation of the initialize method"""
    database_name = self.database_class_name()
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def initialize(context, node):"):
        # Give the database a chance to cache away any node-specific data that will not change each evaluation,
        # then forward to the node's own initialize override when one is defined
        self.out.write(f"{database_name}._initialize_per_node_data(node)")
        self.out.write(f"initialize_function = getattr({database_name}.NODE_TYPE_CLASS, 'initialize', None)")
        if self.out.indent("if callable(initialize_function):"):
            self.out.write("initialize_function(context, node)")
            self.out.exdent()
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_release(self):
    """Output the abi implementation of the release method"""
    database_name = self.database_class_name()
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def release(node):"):
        # Forward to the node's own release override when one is defined
        self.out.write(f"release_function = getattr({database_name}.NODE_TYPE_CLASS, 'release', None)")
        if self.out.indent("if callable(release_function):"):
            self.out.write("release_function(node)")
            self.out.exdent()
        # Discard any node-specific data that was cached by the initialize function
        self.out.write(f"{database_name}._release_per_node_data(node)")
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_update_node_version(self):
    """Output the abi implementation of the update_node_version method"""
    database_name = self.database_class_name()
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def update_node_version(context, node, old_version, new_version):"):
        self.out.write(
            f"update_node_version_function = getattr({database_name}.NODE_TYPE_CLASS, 'update_node_version', None)"
        )
        if self.out.indent("if callable(update_node_version_function):"):
            self.out.write("return update_node_version_function(context, node, old_version, new_version)")
        # Nodes without an override report that no version migration was performed
        self.out.exdent("return False")
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_initialize_type(self):
    """Output the abi implementation of the initialize_type method"""
    database_name = self.database_class_name()
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def initialize_type(node_type):"):
        self.out.write(
            f"initialize_type_function = getattr({database_name}.NODE_TYPE_CLASS, 'initialize_type', None)"
        )
        self.out.write("needs_initializing = True")
        if self.out.indent("if callable(initialize_type_function):"):
            self.out.write("needs_initializing = initialize_type_function(node_type)")
            self.out.exdent()
        # By returning a bool the initialize_type override can request attribute additions from the parent
        # rather than a full override.
        if self.out.indent("if needs_initializing:"):
            self._generate_shared_node_type_initialize()
            self.out.exdent()
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_abi_on_connection_type_resolve(self):
    """Output the abi implementation of the on_connection_type_resolve method"""
    database_name = self.database_class_name()
    self._pre_function_spacing()
    self.out.write("@staticmethod")
    if self.out.indent("def on_connection_type_resolve(node):"):
        # Forward to the node's own resolution callback when one is defined; there is no default behavior
        self.out.write(
            "on_connection_type_resolve_function = "
            f"getattr({database_name}.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)"
        )
        if self.out.indent("if callable(on_connection_type_resolve_function):"):
            self.out.write("on_connection_type_resolve_function(node)")
            self.out.exdent()
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_database_abi(self):
    """Output a registration method and subclass that handles ABI access for the Python node"""
    self._pre_function_spacing()
    if self.out.indent("class abi:"):
        self.out.write('"""Class defining the ABI interface for the node type"""')
        # Emit each ABI entry point in turn; every one forwards to a matching override
        # on the node class when the node defines one
        for emit_abi_method in (
            self._generate_abi_get_node_type,
            self._generate_abi_compute,
            self._generate_abi_initialize,
            self._generate_abi_release,
            self._generate_abi_update_node_version,
            self._generate_abi_initialize_type,
            self._generate_abi_on_connection_type_resolve,
        ):
            emit_abi_method()
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_token_help(self):
    """Generate the help information showing how to access any hardcoded tokens in the file

    Writes nothing when the node declares no tokens.
    """
    if not self.node_interface.tokens:
        return
    self.out.write()
    if self.out.indent("Predefined Tokens:"):
        # Only the token names are documented here, so iterate the keys directly
        # instead of unpacking .items() and discarding the values
        for token_name in self.node_interface.tokens:
            self.out.write(f"tokens.{token_name}")
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_tokens(self):
    """Generate the code required to define and initialize any hardcoded tokens in the file"""
    if not self.node_interface.tokens:
        return
    self._pre_function_spacing()
    if self.out.indent("class tokens:"):
        # Each token becomes a class attribute whose value is the JSON-quoted token string
        for token_name, token_value in self.node_interface.tokens.items():
            self.out.write(f"{token_name} = {json.dumps(token_value)}")
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_attribute_definitions(self):
    """Output the database class member that describes unchanging attribute data"""
    self._pre_function_spacing()
    self.out.write("# This is an internal object that provides per-class storage of a per-node data dictionary")
    self.out.write("PER_NODE_DATA = {}")
    all_attributes = self.node_interface.all_attributes()
    self._pre_function_spacing()
    self.out.write("# This is an internal object that describes unchanging attributes in a generic way")
    self.out.write("# The values in this list are in no particular order, as a per-attribute tuple")
    self.out.write("# Name, Type, ExtendedTypeIndex, UiName, Description, Metadata,")
    self.out.write("# Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg")
    self.out.write("# You should not need to access any of this data directly, use the defined database interfaces")
    if self.out.indent("INTERFACE = og.Database._get_interface(["):
        # Template with one None per tuple slot; copied and filled in for each attribute below
        empty_list = [None, None, None, None, None, None, None, None, None, None]
        for attribute in all_attributes:
            attribute_data = empty_list[:]
            attribute_data[0] = attribute.name
            # type_info is only populated for extended (union/any) attribute types
            (extended_type, type_info) = attribute.python_extended_type()
            attribute_data[1] = attribute.create_type_name() if type_info is None else type_info
            attribute_data[2] = extended_type
            # UI name and description are optional pieces of metadata; missing keys leave the slot as None
            with suppress(KeyError):
                attribute_data[3] = attribute.metadata[MetadataKeys.UI_NAME]
            with suppress(KeyError):
                attribute_data[4] = attribute.metadata[MetadataKeys.DESCRIPTION]
            # Remaining metadata is passed through as a dictionary, with keys mapped to their
            # Python names where such a mapping exists
            metadata = {}
            for key, value in attribute.metadata.items():
                if key not in [MetadataKeys.UI_NAME, MetadataKeys.DESCRIPTION]:
                    python_key = MetadataKeyOutput.python_name_from_key(key)
                    if python_key is None:
                        python_key = key
                    metadata[python_key] = value
            attribute_data[5] = metadata
            attribute_data[6] = attribute.is_required
            attribute_data[7] = attribute.default
            attribute_data[8] = attribute.is_deprecated
            attribute_data[9] = attribute.deprecation_msg if attribute.is_deprecated else ""
            raw_output = f"{tuple(attribute_data)},"
            # ogn.MetadataKeys is an object name so make sure it is not quoted
            raw_output = re.sub(r'"(ogn.MetadataKeys[^"]*)"', r"\1", raw_output)
            raw_output = re.sub(r"'(ogn.MetadataKeys[^']*)'", r"\1", raw_output)
            self.out.write(raw_output)
        self.out.exdent("])")
# ----------------------------------------------------------------------
def _generate_role_definition_method(self):
    """Output the method that initializes role-based data, when any attributes have roles to set"""
    # Gather attributes with non-default roles as {NAMESPACED_ATTRIBUTE: ROLE_NAME}
    roles_to_output = {
        f"{attribute.namespace}.{attribute.python_property_name()}": attribute.python_role_name()
        for attribute in self.node_interface.all_attributes()
        if attribute.python_role_name()
    }
    # The base class implementation suffices when there are no roles to set
    if not roles_to_output:
        return
    self._pre_function_spacing()
    self.out.write("@classmethod")
    if self.out.indent("def _populate_role_data(cls):"):
        self.out.write('"""Populate a role structure with the non-default roles on this node type"""')
        self.out.write("role_data = super()._populate_role_data()")
        for attribute_name, role in roles_to_output.items():
            self.out.write(f"role_data.{attribute_name} = {role}")
        self.out.write("return role_data")
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_attribute_access_help(self):
    """Output the help information describing attribute properties available on this node type"""
    if not self.node_interface.has_attributes():
        return

    def __document_attribute_group(attribute_group: List[AttributeManager]):
        """Output the documentation for a single section of attributes (input/output/state)"""
        if not attribute_group:
            return
        # Every attribute in a group shares a namespace, so the first one names the section
        if self.out.indent(f"{attribute_group[0].namespace.capitalize()}:"):
            for attribute in attribute_group:
                self.out.write(f"{attribute.namespace}.{attribute.python_property_name()}")
            self.out.exdent()

    self.out.write()
    if self.out.indent("Attribute Value Properties:"):
        __document_attribute_group(self.node_interface.all_input_attributes())
        __document_attribute_group(self.node_interface.all_output_attributes())
        __document_attribute_group(self.node_interface.all_state_attributes())
        self.out.exdent()
# ----------------------------------------------------------------------
def _generate_database_class(self):
    """Output a class that provides database access for the node's compute method.

    The class has nested class members called "inputs", "outputs", and "state" that make access to attribute values
    more natural:
        inputValue = Node.inputs.InputAttribute
        Node.outputs.OutputAttribute = inputValue * 2
    """
    db_class_name = self.database_class_name()
    self._pre_class_spacing()
    if self.out.indent(f"class {db_class_name}(og.Database):"):
        # Open the generated class's docstring; the closing quotes are written after the
        # member documentation sections below
        self.out.write(
            f'"""Helper class providing simplified access to data on nodes of type {self.node_interface.name}'
        )
        self.out.write()
        if self.out.indent("Class Members:"):
            self.out.write("node: Node being evaluated")
            self.out.exdent()
        # Document the attribute properties and any predefined tokens inside the docstring
        self._generate_attribute_access_help()
        self._generate_token_help()
        self.out.write('"""')
        self._generate_attribute_definitions()
        self._generate_tokens()
        self._generate_role_definition_method()
        # Emit one value-holder class per attribute group; the returned name is None when the group is empty
        input_class_name = self._generate_attribute_class(
            self.node_interface.all_input_attributes(), namespace=INPUT_NS
        )
        output_class_name = self._generate_attribute_class(
            self.node_interface.all_output_attributes(), namespace=OUTPUT_NS
        )
        state_class_name = self._generate_attribute_class(
            self.node_interface.all_state_attributes(), namespace=STATE_NS
        )
        self._pre_function_spacing()
        if self.out.indent("def __init__(self, node):"):
            self.out.write("super().__init__(node)")
            # Instantiate only the attribute groups that actually have a generated class
            for (value_class_name, namespace) in [
                (input_class_name, INPUT_NS),
                (output_class_name, OUTPUT_NS),
                (state_class_name, STATE_NS),
            ]:
                if value_class_name is not None:
                    self.out.write(
                        f"dynamic_attributes = self.dynamic_attribute_data(node, {PORT_NAMES[namespace]})"
                    )
                    self.out.write(
                        f"self.{namespace} = {db_class_name}.{value_class_name}"
                        f"(node, self.attributes.{namespace}, dynamic_attributes)"
                    )
            self.out.exdent()
        # When the node is written in Python there are some helper methods to add
        if self.node_interface.language == LanguageTypeValues.PYTHON:
            self._generate_database_abi()
        # By having the node type class object be a static class member a circular import can be avoided.
        # The node implementation will call OgnTheNodeDatabase.register(OgnTheNode) to handle registration and the
        # automatic override of any ABI methods that OgnTheNode might implement.
        self._generate_node_registration()
# ----------------------------------------------------------------------
def generate_node_interface(self):
    """Output a Python script containing interface and database support for an OmniGraph node

    Raises:
        NodeGenerationError: When there is a failure in the generation of the Python class
    """
    # Module docstring of the generated file: node type name plus its wrapped description
    self.out.write(f'"""Support for simplified access to data on nodes of type {self.node_interface.name}')
    self.out.write()
    for line in shorten_string_lines_to(self.node_interface.description, 120):
        self.out.write(line)
    self.out.write('"""')
    self.out.write()
    # These imports are always required by the generated database class
    self.out.write("import omni.graph.core as og")
    self.out.write("import omni.graph.core._omni_graph_core as _og")
    self.out.write("import omni.graph.tools.ogn as ogn")
    imports = []
    # Icon path resolution requires more imports
    if self.node_interface.icon_path is not None:
        imports.append("import carb")
    # Python-implemented nodes need access to stack information for compute error reporting
    if self.node_interface.language == LanguageTypeValues.PYTHON:
        imports.append("import sys")
        imports.append("import traceback")
    # Imports required by the attributes
    for attribute in self.node_interface.all_attributes():
        imports += attribute.python_imports()
    # Deduplicate with a stable order so the generated file is byte-for-byte reproducible;
    # iterating a raw set() would emit the imports in a nondeterministic order
    for import_statement in sorted(set(imports)):
        self.out.write(import_statement)
    # Both Python and C++ nodes benefit from the use of the Pythonic database class
    self._generate_database_class()
# ======================================================================
def generate_python(configuration: GeneratorConfiguration) -> Optional[str]:
    """Create support files for the pythonic interface to a node

    Args:
        configuration: Information defining how and where the documentation will be generated

    Returns:
        String containing the generated Python database definition or None if its generation was not enabled

    Raises:
        NodeGenerationError: When there is a failure in the generation of the Python database
    """
    # Respect the per-node opt-out of Python generation
    if not configuration.node_interface.can_generate("python"):
        return None
    logger.info("Generating Python Database")
    python_generator = NodePythonGenerator(configuration)
    python_generator.generate_interface()
    return str(python_generator.out)
| 38,485 | Python | 52.011019 | 120 | 0.571417 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/main_docs.py | """
Create a table off contents file in index.rst that references all of the OmniGraph node generated
documentation files that live in that directory.
This processing is highly tied to the formatting of the OGN generated documentation files so if they
change this has to as well.
The table of contents will be in two sections.
A table consisting of columns with [node name, node version, link to node doc file, link to node appendix entry]
An appendix with headers consisting of the node name and body consisting of the node's description
"""
import argparse
import logging
import os
from pathlib import Path
from typing import List, Optional
from .generate_documentation import (
RE_OGN_BODY_MARKER,
RE_OGN_DESCRIPTION_TITLE,
RE_OGN_DOC_FILENAME,
RE_OGN_INPUTS_TITLE,
RE_OGN_NAME_INFO,
)
from .utils import WritableDir, logger, rst_table, rst_title
# If True then perform more aggressive directory checks, not safe in a multi-threaded environment
SAFE_DIRECTORY_CREATION = False
# Name of the generated index file
INDEX_FILENAME = "index.rst"
# Selectively turn on logging if the OGN debugging environment variable is set
logger.setLevel(logging.DEBUG if os.getenv("OGN_DEBUG") else logging.WARN)
# ======================================================================
def construct_parser() -> argparse.ArgumentParser:
    """Construct and return the parser for the script arguments

    Returns:
        argparse.ArgumentParser configured with the options the index generator accepts
    """
    # If no output directory is specified generated files will end up in the current directory
    default_output_dir = Path.cwd()
    # This helps format the usage information in a nicer way.
    # NOTE: os.putenv() only changes the C-level environment and is NOT reflected in os.environ,
    # which is what argparse consults (via shutil.get_terminal_size) when wrapping help text,
    # so the value must be set through os.environ for it to have any effect on this process.
    os.environ["COLUMNS"] = "120"
    # Construct the parsing information. Run the script with "--help" to see the usage.
    parser = argparse.ArgumentParser(
        description="Read a directory of OGN documentation files and create an index for them",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "-od",
        "--ognDirectory",
        action=WritableDir,
        const=default_output_dir,
        type=Path,
        metavar="DIR",
        help="directory containing the OGN documentation files, where the index will be generated",
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="output the steps the script is performing")
    return parser
# ======================================================================
class OgnIndexCreator:
    """Handler to analyze OGN documentation files and generate a Table of Contents for them

    Attributes:
        documentation_files: List of documentation files found in the directory
        index_file_path: Full path to the generated index file
        ogn_directory: Path where the table of contents will be generated
    """

    def __init__(self, ogn_directory: Path):
        """Read the contents of the docs directory and prepare it for generation"""
        logger.info("Scanning documentation directory %s", ogn_directory)
        self.ogn_directory = ogn_directory
        self.index_file_path = self.ogn_directory / INDEX_FILENAME
        # Only files whose names match the OGN documentation pattern participate in the index
        self.documentation_files = [
            path_object
            for path_object in self.ogn_directory.glob("**/*")
            if path_object.is_file() and RE_OGN_DOC_FILENAME.search(str(path_object))
        ]

    # ----------------------------------------------------------------------
    def extract_node_information(self, ogn_doc_path: str):
        """Read the OGN documentation file and extract the information to use for the index

        Patterns assumed, in order:
            One line matching RE_OGN_NAME_INFO with the node name
            One line matching RE_OGN_DESCRIPTION_TITLE, followed immediately by...
            ...one line with the title RST (probably dashes)
            An undefined number of lines containing the node description
            A blank line (to be omitted)
            One line matching RE_OGN_INPUTS_TITLE, marking the end of the description
        The rest will be ignored as there is no more relevant information

        Args:
            ogn_doc_path: Path to the node's documentation file

        Returns:
            (Marker, Name, Description) tuple with the node's information; Marker or Name may be
            None and Description may be empty for a malformed file (errors are logged)
        """
        in_body = False
        name = None
        description = []
        marker = None
        found_description = False  # True after the description title was found
        in_description = False  # True after the description body is entered
        try:
            with open(ogn_doc_path, "r", encoding="utf-8") as doc_fd:
                for line in doc_fd:
                    if not in_body:
                        body_marker_match = RE_OGN_BODY_MARKER.match(line)
                        if body_marker_match:
                            in_body = True
                            marker = body_marker_match.group(1)
                    elif name is None:
                        name_match = RE_OGN_NAME_INFO.match(line)
                        if name_match:
                            name = name_match.group(1)
                    elif found_description:
                        # This line is the RST title underline; the description body starts next
                        found_description = False
                        in_description = True
                    elif in_description:
                        if RE_OGN_INPUTS_TITLE.search(line):
                            in_description = False
                            break
                        description.append(line[:-1])
                    elif RE_OGN_DESCRIPTION_TITLE.search(line):
                        found_description = True
            # If attributes were not found then an extra blank line is needed to separate sections
            if in_description:
                description.append("\n")
            if marker is None:
                logger.error("Marker not found in %s", ogn_doc_path)
            if name is None:
                logger.error("Name not found in %s", ogn_doc_path)
            if not description:
                logger.error("Description not found in %s", ogn_doc_path)
        except Exception as error:  # noqa: PLW0703
            # Report the failure but continue processing
            logger.error("Error processing %s: %s", ogn_doc_path, error)
        # Guard the strip - a malformed file leaves name as None, which previously crashed on .rstrip()
        return (marker, name.rstrip() if name is not None else None, description)

    # ----------------------------------------------------------------------
    def index_is_out_of_date(self) -> bool:
        """Returns True if the index file is older than any of the other files in the directory"""
        if not self.index_file_path.is_file():
            return True
        index_modified_time = self.index_file_path.lstat().st_mtime
        return any(
            index_modified_time < documentation_file.lstat().st_mtime for documentation_file in self.documentation_files
        )

    # ----------------------------------------------------------------------
    def construct_index(self):
        """Construct the table of contents in an index file"""
        if not self.index_is_out_of_date():
            logger.info("Documentation is up to date. Index generation skipped")
            return
        # Dictionary containing the information needed to generate the index file
        # Key = Node File, Value = (Marker, Name, Description)
        node_information = {}
        for ogn_doc_file in self.documentation_files:
            logger.info("Processing %s", ogn_doc_file)
            node_information[ogn_doc_file] = self.extract_node_information(ogn_doc_file)
        # Sort by node name; a name of None (malformed file, already reported above) sorts first
        sorted_keys = sorted(node_information.keys(), key=lambda key: node_information[key][1] or "")
        rows = [["Node", "Detailed Documentation"]]
        if not node_information:
            # Avoid a table with no contents, as that will generate a syntax error
            rows.append(["", ""])
        else:
            for ogn_doc_file in sorted_keys:
                (marker, node_name, _) = node_information[ogn_doc_file]
                rows.append([f"`{node_name}`_", f":ref:`{marker}`"])
        try:
            with open(self.index_file_path, "w", newline="\n", encoding="utf-8") as index_file:
                index_file.write(rst_title("OGN Node List", 0))
                index_file.write("\n\n.. tabularcolumns:: |l|l|\n\n")
                index_file.write(rst_table(rows))
                index_file.write("\n")
                index_file.write(rst_title("Node Descriptions", 1))
                index_file.write("\n")
                for ogn_doc_file in sorted_keys:
                    (_, node_name, node_documentation) = node_information[ogn_doc_file]
                    index_file.write(f"{rst_title(node_name, 1)}\n")
                    index_file.write("\n".join(node_documentation))
        except Exception as error:  # noqa: PLW0703
            logger.error("Cannot write to index file %s : %s", self.index_file_path, error)
# ======================================================================
def main_docs(args_to_parse: Optional[List] = None):
    """Parse the contents of sys.args and perform the requested function."""
    args = construct_parser().parse_args(args_to_parse)
    # If the script steps are to be echoed enable the logger and dump the script arguments as a first step
    logger.setLevel(logging.DEBUG if args.verbose else logging.WARN)
    logger.info("ognDirectory = %s", args.ognDirectory)
    OgnIndexCreator(args.ognDirectory).construct_index()
# ======================================================================
# Allow direct invocation of this module as a script
if __name__ == "__main__":
    main_docs()
| 9,594 | Python | 42.416289 | 120 | 0.58818 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_node_info.py | """
Support for updating the node information file for OmniGraph Nodes.
Exported Methods:
generate_node_info
"""
import json
from json.decoder import JSONDecodeError
from typing import Optional
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, ParseError, ensure_writable_directory, logger
__all__ = [
"generate_node_info",
]
# ======================================================================
class NodeInfoGenerator(NodeInterfaceGenerator):
    """Generator that accumulates the per-extension node information file (nodes.json)

    Each run merges the current node's metadata into whatever node information file already
    exists in the destination directory, so repeated invocations build up one entry per node.
    (The previous docstring incorrectly described this as a C++ interface generator.)
    """

    def __init__(self, configuration: GeneratorConfiguration):
        """Set up the generator and read any existing node information file

        Just passes the initialization on to the parent class. See the argument and exception
        descriptions there.

        Raises:
            ParseError: When the node information directory could not be created or made writable
        """
        logger.info("Creating NodeInfoGenerator")
        try:
            ensure_writable_directory(configuration.destination_directory)
        except Exception as error:
            raise ParseError("Unable to create node information directory") from error
        super().__init__(configuration)
        try:
            with open(self.output_path, "r", encoding="utf-8") as output_fd:
                self.node_information = json.load(output_fd)
        except (FileNotFoundError, KeyError, JSONDecodeError):
            # A missing or unparseable file starts fresh with an empty node dictionary
            self.node_information = {"nodes": {}}

    # ----------------------------------------------------------------------
    def interface_file_name(self) -> str:
        """Return the path to the name of the node information file, relative to the configured directory"""
        return "nodes.json"

    # ----------------------------------------------------------------------
    def generate_node_interface(self):
        """Merge this node's information into the accumulated contents and emit them as JSON"""
        logger.info("Generating node information for node %s", self.node_interface.name)
        this_nodes_information = {
            self.node_interface.name: {
                "description": self.node_interface.description,
                "version": self.node_interface.version,
                "extension": self.extension,
                "language": self.node_interface.language,
            }
        }
        self.node_information["nodes"].update(this_nodes_information)
        node_info_as_json = json.dumps(self.node_information, indent=4)
        self.out.write(node_info_as_json)
# ======================================================================
def generate_node_info(configuration: GeneratorConfiguration) -> Optional[str]:
    """Create or modify the extension's node information file

    Args:
        configuration: Information defining how and where the node information file will be generated

    Returns:
        String containing the generated/updated node information

    Raises:
        NodeGenerationError: When there is a failure in the generation of the node information file
    """
    logger.info("Generating node information")
    info_generator = NodeInfoGenerator(configuration)
    info_generator.generate_interface()
    return str(info_generator.out)
| 3,126 | Python | 37.604938 | 113 | 0.615803 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/main.py | # noqa: E501,PLW1203
"""Generate code and documentation for an OmniGraph Node description file.
Takes a JSON file containing information describing the configuration of an OmniGraph node and generates
a header file implementing a simplified interface to the graph ABI.
Run this script with the arg "--help" to see available functions in this form, followed by the current list
of supported attribute types:
usage: generate_node.py [-h] [-cd DIR] [-c [DIR]] [-d [DIR]]
[-e EXTENSION_NAME] [-i [DIR]]
[-in [INTERMEDIATE_DIRECTORY]]
[-m [PYTHON_IMPORT_MODULE]] [-n [FILE.ogn]] [-p [DIR]]
[-s SETTING_NAME] [-t [DIR]] [-td FILE.json]
[-tp [DIR]] [-u] [-usd [DIR]] [-uw [DIR]] [-v]
Parse a node interface description file and generate code or documentation
optional arguments:
-h, --help show this help message and exit
-cd DIR, --configDirectory DIR
the directory containing the code generator configuration files (default is current)
-c [DIR], --cpp [DIR]
generate the C++ interface class into the specified directory (default is current)
-d [DIR], --docs [DIR]
generate the node documentation into the specified directory (default is current)
-e EXTENSION_NAME, --extension EXTENSION_NAME
name of the extension requesting the generation
-i [DIR], --icons [DIR]
directory into which to install the icon, if one is found
-in [INTERMEDIATE_DIRECTORY], --intermediate [INTERMEDIATE_DIRECTORY]
directory into which temporary build information is stored
-m [PYTHON_IMPORT_MODULE], --module [PYTHON_IMPORT_MODULE]
Python module where the Python node files live
-n [FILE.ogn], --nodeFile [FILE.ogn]
file containing the node description (use stdin if file name is omitted)
-p [DIR], --python [DIR]
generate the Python interface class into the specified directory (default is current)
-s SETTING_NAME, --settings SETTING_NAME
define one or more build-specific settings that can be used to change the generated code at runtime
-t [DIR], --tests [DIR]
generate a file containing basic operational tests for this node
-td FILE.json, --typeDefinitions FILE.json
file name containing the mapping to use from OGN type names to generated code types
-tp [DIR], --template [DIR]
generate an annotated template for the C++ node class into the specified directory (default is current)
-u, --unitTests run the unit tests on this file
-usd [DIR], --usdPath [DIR]
generate a file containing a USD template for nodes of this type
-uw [DIR], --unwritable [DIR]
mark the generated directory as unwritable at runtime
-v, --verbose output the steps the script is performing as it performs them
"""
import argparse
import logging
import os
import sys
from pathlib import Path
from typing import List, Optional
from .attributes.management import formatted_supported_attribute_type_names
from .category_definitions import get_category_definitions
from .generate_cpp import generate_cpp
from .generate_documentation import generate_documentation
from .generate_icon import generate_icon
from .generate_node_info import generate_node_info
from .generate_python import generate_python
from .generate_template import generate_template
from .generate_tests import generate_tests
from .generate_usd import generate_usd
from .keys import LanguageTypeValues
from .nodes import NodeInterfaceWrapper
from .type_definitions import apply_type_definitions
from .utils import (
UNWRITABLE_TAG_FILE,
GeneratorConfiguration,
ParseError,
Settings,
UnimplementedError,
ensure_writable_directory,
logger,
)
# Public API of this module: only the command-line entry point is exported
__all__ = ["main"]
# ======================================================================
def construct_parser() -> argparse.ArgumentParser:
    """Construct and return the parser for the script arguments.

    Returns:
        argparse.ArgumentParser configured with every option the generator understands
    """

    class ReadableDir(argparse.Action):
        """Helper class for the parser to check for a readable directory"""

        def __call__(self, parser, namespace, values, option_string=None):
            """Function called by the arg parser to verify that a directory exists and is readable

            Args:
                parser: argparser required argument, ignored
                namespace: argparser required argument, ignored
                values: The path to the directory being checked for readability
                option_string: argparser required argument, ignored
            Raises:
                argparse.ArgumentTypeError if the requested directory cannot be read
            """
            prospective_dir = values
            try:
                # If the directory can't be read then listdir will raise an exception.
                # The result is deliberately ignored: an empty directory is still readable,
                # so the attribute must be set unconditionally once listdir succeeds.
                # (Previously the setattr was skipped when the directory was empty.)
                os.listdir(prospective_dir)
                setattr(namespace, self.dest, prospective_dir)
            except Exception as error:
                raise argparse.ArgumentTypeError(str(error)) from error

    class WritableDir(argparse.Action):
        """Helper class for the parser to check for a writable directory"""

        def __call__(self, parser, namespace, values, option_string=None):
            """Function called by the arg parser to verify that a directory exists and is writable

            Args:
                parser: argparser required argument, ignored
                namespace: argparser required argument, ignored
                values: The path to the directory being checked for writability
                option_string: argparser required argument, ignored
            Raises:
                argparse.ArgumentTypeError if the requested directory cannot be found or created in writable mode
            """
            prospective_dir = values
            try:
                ensure_writable_directory(prospective_dir)
                setattr(namespace, self.dest, prospective_dir)
            except Exception as error:
                raise argparse.ArgumentTypeError(str(error)) from error

    # If no output directory is specified generated files will end up in the current directory
    default_output_dir = os.path.realpath(os.getcwd())

    # This helps format the usage information in a nicer way. os.putenv would only affect
    # child processes; argparse determines the help width via the terminal size lookup,
    # which consults os.environ, so the variable must be set there to take effect here.
    os.environ["COLUMNS"] = "120"

    # Generate a message enumerating the set of attribute types currently supported
    available_attribute_types = formatted_supported_attribute_type_names()
    formatted_types = "\n\t".join(available_attribute_types)
    epilog = "Available attribute types:\n\t" + formatted_types
    available_settings = Settings().all()
    if available_settings:
        epilog += "\nAvailable settings:\n\t" + "\n\t".join(
            [f"{name}: {description}" for name, (_, description) in available_settings.items()]
        )

    # Construct the parsing information. Run the script with "--help" to see the usage.
    parser = argparse.ArgumentParser(
        description="Parse a node interface description file and generate code or documentation",
        formatter_class=argparse.RawTextHelpFormatter,
        epilog=epilog,
    )
    parser.add_argument(
        "-cd",
        "--configDirectory",
        action=ReadableDir,
        const=default_output_dir,
        metavar="DIR",
        help="the directory containing the code generator configuration files (default is current)",
    )
    parser.add_argument(
        "-c",
        "--cpp",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="generate the C++ interface class into the specified directory (default is current)",
    )
    parser.add_argument(
        "-d",
        "--docs",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="generate the node documentation into the specified directory (default is current)",
    )
    parser.add_argument(
        "-e",
        "--extension",
        action="store",
        metavar="EXTENSION_NAME",
        default=None,
        help="name of the extension requesting the generation",
    )
    # Notice how, unlike other directory names, this one is not a "WritableDir" as the directory should only
    # be created if the node happens to have an icon, which isn't discovered until parse time.
    parser.add_argument(
        "-i",
        "--icons",
        action="store",
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="directory into which to install the icon, if one is found",
    )
    parser.add_argument(
        "-in",
        "--intermediate",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="INTERMEDIATE_DIRECTORY",
        help="directory into which temporary build information is stored",
    )
    parser.add_argument(
        "-m",
        "--module",
        nargs="?",
        action="store",
        metavar="PYTHON_IMPORT_MODULE",
        help="Python module where the Python node files live",
    )
    parser.add_argument(
        "-n",
        "--nodeFile",
        nargs="?",
        type=argparse.FileType("r"),
        const=sys.stdin,
        help="file containing the node description (use stdin if file name is omitted)",
        metavar="FILE.ogn",
    )
    parser.add_argument(
        "-p",
        "--python",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="generate the Python interface class into the specified directory (default is current)",
    )
    parser.add_argument(
        "-s",
        "--settings",
        type=str,
        action="append",
        metavar="SETTING_NAME",
        help="define one or more build-specific settings that can be used to change the generated code at runtime",
    )
    parser.add_argument(
        "-t",
        "--tests",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="generate a file containing basic operational tests for this node",
    )
    parser.add_argument(
        "-td",
        "--typeDefinitions",
        action="store",
        default=None,
        help="file name containing the mapping to use from OGN type names to generated code types",
        metavar="FILE.json",
    )
    parser.add_argument(
        "-tp",
        "--template",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="generate an annotated template for the C++ node class into the specified directory (default is current)",
    )
    parser.add_argument("-u", "--unitTests", action="store_true", help="run the unit tests on this file")
    parser.add_argument(
        "-usd",
        "--usdPath",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="generate a file containing a USD template for nodes of this type",
    )
    parser.add_argument(
        "-uw",
        "--unwritable",
        action=WritableDir,
        nargs="?",
        const=default_output_dir,
        metavar="DIR",
        help="mark the generated directory as unwritable at runtime",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="output the steps the script is performing as it performs them"
    )
    return parser
# ======================================================================
def main(args_to_parse: Optional[List] = None):
    """Parse the contents of the argument list and perform the requested function. Uses sys.argv if None.

    Args:
        args_to_parse: Optional list of command-line arguments; sys.argv is used when None

    Raises:
        ParseError: When the node file cannot be parsed or any generation step fails
    """
    parser = construct_parser()
    args = parser.parse_args(args_to_parse)

    # If the script steps are to be echoed enable the logger and dump the script arguments as a first step
    logger.setLevel(logging.DEBUG if args.verbose else logging.WARN)
    logger.info("cpp == %s", args.cpp)
    logger.info("configDirectory == %s", args.configDirectory)
    logger.info("template == %s", args.template)
    logger.info("docs == %s", args.docs)
    logger.info("extension == %s", args.extension)
    logger.info("icons == %s", args.icons)
    logger.info("intermediate == %s", args.intermediate)
    logger.info("module == %s", args.module)
    logger.info("nodeFile == %s", args.nodeFile)
    logger.info("python == %s", args.python)
    logger.info("settings == %s", args.settings)
    logger.info("tests == %s", args.tests)
    logger.info("typeDefinitions == %s", args.typeDefinitions)
    logger.info("unitTests == %s", args.unitTests)
    logger.info("unwritable == %s", args.unwritable)
    logger.info("usdPath == %s", args.usdPath)
    logger.info("verbose == %s", args.verbose)

    if args.unitTests:
        logger.info("Running unit tests")
        from ..tests.test_node_generator import run_tests as run_tests_general  # noqa: PLE0402
        from ..tests.test_node_generator_attributes import run_tests as run_tests_attributes  # noqa: PLE0402
        from ..tests.test_node_generator_data_types import run_tests as run_tests_data_types  # noqa: PLE0402
        from ..tests.test_node_generator_illegal import run_tests as run_tests_illegal  # noqa: PLE0402

        run_tests_general()
        run_tests_data_types()
        run_tests_illegal()
        run_tests_attributes()

    # Create the settings object from the list of settings specified on the command line.
    # Every setting keyword is assumed to be a boolean, set to true when it is passed in.
    settings = Settings()
    if args.settings is not None:
        for setting in args.settings:
            try:
                setattr(settings, setting, True)
            except AttributeError as error:
                raise ParseError(f"{setting} is not in the known settings list [{settings}]") from error

    # Without a node file there is nothing more to do. Returning here (instead of only when
    # generation flags were given) prevents the code below from dereferencing
    # args.nodeFile.name on a None value.
    node_interface_wrapper = None
    if not args.nodeFile:
        if args.docs or args.cpp or args.template or args.python or args.tests:
            logger.error("Cannot generate code unless you specify a nodeFile")
        return

    try:
        # Read in the standard set of category definitions if it can be found
        categories_allowed = {}
        if args.configDirectory is not None:
            config_dir_type_path = Path(args.configDirectory, "CategoryConfiguration.json")
            if config_dir_type_path.is_file():
                categories_allowed = get_category_definitions(config_dir_type_path)

        base_name, node_ext = os.path.splitext(os.path.basename(args.nodeFile.name))
        if node_ext != ".ogn":
            logger.error("Node files must have the .ogn extension")
            return
        if (args.python or args.docs or args.tests) and not args.module:
            logger.error("When generating Python code or documentation you must include the 'module' argument")
            return
        node_interface_wrapper = NodeInterfaceWrapper(
            args.nodeFile,
            extension=args.extension,
            config_directory=args.configDirectory,
            categories_allowed=categories_allowed,
        )
        logger.info("Parsed interface for %s", node_interface_wrapper.node_interface.name)
        try:
            all_supported = True
            node_interface_wrapper.check_support()
        except UnimplementedError as error:
            all_supported = False
            logger.warning("Some attributes are not supported. Only documentation will be generated.\n\t%s", error)

        # Applying the type definitions make them take immediate effect, which means adding/modifying members of
        # the AttributeManager class hierarchy.
        if args.typeDefinitions is not None:
            type_definition_path = Path(args.typeDefinitions)
            if type_definition_path.is_file():
                apply_type_definitions(args.typeDefinitions)
            elif not type_definition_path.is_absolute():
                config_dir_type_path = Path(args.configDirectory, args.typeDefinitions)
                if config_dir_type_path.is_file():
                    apply_type_definitions(config_dir_type_path)
                else:
                    raise ParseError(
                        f"Type definitions '{args.typeDefinitions}' not found in"
                        f" config directory '{args.configDirectory}'"
                    )
            else:
                raise ParseError(f"Absolute type definition path '{args.typeDefinitions}' not found")

        # Sanity check to see if there is a Python file of the same name as the .ogn file but the language was
        # not specified as Python.
        if node_interface_wrapper.node_interface.language != LanguageTypeValues.PYTHON:
            python_file_name = args.nodeFile.name.replace(".ogn", ".py")
            if os.path.isfile(python_file_name):
                raise ParseError(f"Python node file {python_file_name} exists but language was not set to Python")

        # If there is no generation happening then emit a message indicating the success of the parse.
        # (Failure of the parse would have already been indicated by a ParseError exception)
        if not args.docs and not args.cpp and not args.python:
            print(f"Node file {args.nodeFile.name} successfully validated")

        configuration = GeneratorConfiguration(
            args.nodeFile.name,
            node_interface_wrapper.node_interface,
            args.extension,
            args.module,
            base_name,
            None,
            args.verbose,
            settings,
        )
        # The node interface may have an override on the path - get rid of it if the icon isn't being generated
        configuration.destination_directory = args.icons
        node_interface_wrapper.node_interface.icon_path = generate_icon(configuration) if args.icons else None

        configuration.destination_directory = args.docs
        _ = generate_documentation(configuration) if args.docs else None
        if args.docs and args.icons:
            # Path(None) would raise TypeError, so only derive the node-info location when an
            # icon directory was actually supplied
            configuration.destination_directory = str(Path(args.icons).parent)
            _ = generate_node_info(configuration)
        configuration.destination_directory = args.cpp
        _ = generate_cpp(configuration, all_supported) if args.cpp else None
        configuration.destination_directory = args.template
        _ = generate_template(configuration) if args.template else None
        configuration.destination_directory = args.python
        _ = generate_python(configuration) if args.python and all_supported else None
        configuration.destination_directory = args.tests
        _ = generate_tests(configuration) if args.tests and all_supported else None
        configuration.destination_directory = args.usdPath
        _ = generate_usd(configuration) if args.usdPath and all_supported else None

        # The intermediate directory contains a tag file per-node that can be used to determine if the code generator
        # has been run since the last time the .ogn file was modified. The cost is that deletion of generated files
        # will not trigger their rebuild, but as the information of which files are generated is only known after
        # processing that is an acceptable tradeoff. (The alternative would be a much more verbose system that creates
        # a separate tag per generated file with all of the extra build dependencies required to make that work.)
        if args.intermediate:
            logger.info("Tagging the file as being built")
            intermediate_tag_path = os.path.join(args.intermediate, f"{os.path.basename(args.nodeFile.name)}.built")
            with open(intermediate_tag_path, "w", newline="\n", encoding="utf-8") as tag_fd:
                tag_fd.write("The presence of this file tags the last time its .ogn file was processed")
        if args.unwritable:
            logger.info("Tagging the generated directory as unwritable")
            unwritable_tag_path = os.path.join(args.unwritable, UNWRITABLE_TAG_FILE)
            with open(unwritable_tag_path, "w", newline="\n", encoding="utf-8") as tag_fd:
                tag_fd.write("The presence of this file ensures the directory will not regenerate at runtime")
    except Exception as error:
        raise ParseError(f"{os.path.basename(args.nodeFile.name)} failed") from error
# Allow the module to be executed directly as a script
if __name__ == "__main__":
    main()
| 20,790 | Python | 43.61588 | 119 | 0.631746 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_template.py | """Support for generating an annotated C++ template class for OmniGraph Nodes.
Exports:
generate_template: Create a NODE_template.cpp file containing sample uses of the generated interface
"""
from typing import List, Optional
from .attributes.AttributeManager import AttributeManager
from .attributes.naming import INPUT_GROUP, OUTPUT_GROUP, STATE_GROUP, namespace_of_group
from .keys import LanguageTypeValues
from .nodes import NodeInterfaceGenerator
from .utils import GeneratorConfiguration, UnimplementedError, logger, to_comment, to_cpp_comment
__all__ = ["generate_template"]
class NodeTemplateGenerator(NodeInterfaceGenerator):
    """Manage the functions required to generate an annotated template class for a node"""

    def __init__(self, configuration: GeneratorConfiguration):
        """Set up the generator and output the annotated template class for the node

        Checks the language support.

        Args:
            configuration: Information defining how and where the template will be generated

        Raises:
            UnimplementedError: When the node's language does not support template generation
        """
        self.template_extension = None
        if configuration.node_interface.language == LanguageTypeValues.CPP:
            self.template_extension = "cpp"
        elif configuration.node_interface.language == LanguageTypeValues.PYTHON:
            self.template_extension = "py"
        else:
            # The base class __init__ has not run yet so the language must be read from the
            # configuration; self.node_interface does not exist at this point and reading it
            # would mask the intended UnimplementedError with an AttributeError.
            language_name = "|".join(LanguageTypeValues.ALL[configuration.node_interface.language])
            raise UnimplementedError(f"Template generation not supported for '{language_name}' files")

        # This needs the extension set to properly define the interface file name so do it after that
        super().__init__(configuration)

    # ----------------------------------------------------------------------
    def interface_file_name(self) -> str:
        """Return the path to the name of the template file"""
        return self.base_name + "." + self.template_extension

    # ----------------------------------------------------------------------
    def generate_cpp_attribute_info(self, attribute_list: List[AttributeManager], attribute_group: str):
        """Generate the comments explaining how to access the values of attributes in the list

        Args:
            attribute_list: List of attributes for which explanations are to be emitted
            attribute_group: Enum with the attribute's group (input, output, or state)
        """
        namespace = namespace_of_group(attribute_group)
        for attribute in attribute_list:
            self.out.write()
            if attribute_group != INPUT_GROUP:
                # Array outputs need their size set first so that fabric can allocate space
                if attribute.fabric_needs_counter():
                    self.out.write("// Before setting array outputs you must first set their size to allocate space")
                    self.out.write(f"// db.{namespace}.{attribute.base_name}.size() = newOutputSize;")
                self.out.write(f"// auto& output{attribute.base_name} = db.{namespace}.{attribute.base_name}();")
            else:
                self.out.write(f"// const auto& input_value = db.{namespace}.{attribute.base_name}();")
            role = attribute.cpp_role_name()
            if role:
                self.out.write("// Roles for role-based attributes can be found by name using this member")
                self.out.write(f"// auto roleName = db.{namespace}.{attribute.base_name}.role();")

    # ----------------------------------------------------------------------
    def generate_cpp_template(self):
        """Write out a template for a C++ node describing use of the current OGN configuration.

        Raises:
            NodeGenerationError: When there is a failure in the generation of the C++ interface
        """
        # Rely on the formatter to insert the copyright here
        node_description = to_cpp_comment(self.node_interface.description)
        self.out.write(f"{node_description}")
        self.out.write(f"#include <{self.base_name}Database.h>")
        self.out.write(f"class {self.base_name}:")
        self.out.write("{")
        if self.out.indent("public:"):
            self.out.write(f"static bool compute({self.base_name}Database& db)")
            if self.out.indent("{"):
                input_attributes = self.node_interface.all_input_attributes()
                if input_attributes:
                    self.out.write("// ======================================================================")
                    self.out.write("// Use these methods to access the input values")
                    self.out.write("// ======================================================================")
                    self.generate_cpp_attribute_info(self.node_interface.all_input_attributes(), INPUT_GROUP)
                    self.out.write()
                output_attributes = self.node_interface.all_output_attributes()
                if output_attributes:
                    self.out.write("// ======================================================================")
                    self.out.write("// Use these methods to set the output values")
                    self.out.write("// ======================================================================")
                    self.generate_cpp_attribute_info(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
                    self.out.write()
                state_attributes = self.node_interface.all_state_attributes()
                if state_attributes:
                    self.out.write("// ======================================================================")
                    self.out.write("// Use these methods to set the state values")
                    self.out.write("// ======================================================================")
                    self.generate_cpp_attribute_info(state_attributes, STATE_GROUP)
                    self.out.write()
                self.out.write("// ======================================================================")
                self.out.write("// If you have predefined any tokens you can access them by name like this")
                self.out.write("// ======================================================================")
                self.out.write("auto myColorToken = db.tokens.color;")
                self.out.write()
                self.out.write("return true;")
                self.out.exdent("}")
            self.out.exdent("}")

    # ----------------------------------------------------------------------
    def generate_python_attribute_info(self, attribute_list: List[AttributeManager], attribute_group: str):
        """Generate the comments explaining how to access the values of attributes in the list

        Args:
            attribute_list: List of attributes for which explanations are to be emitted
            attribute_group: Enum with the attribute's group (input, output, or state)
        """
        namespace = namespace_of_group(attribute_group)
        for attribute in attribute_list:
            self.out.write()
            if attribute_group != INPUT_GROUP:
                # Array outputs need their size set first so that fabric can allocate space
                if attribute.fabric_needs_counter():
                    self.out.write("# Before setting array outputs you must first set their size to allocate space")
                    self.out.write(f"# db.{namespace}.{attribute.base_name}_size = new_output_size")
                self.out.write(f"# db.{namespace}.{attribute.base_name} = new_output_value")
            else:
                self.out.write(f"# input_value = db.{namespace}.{attribute.base_name}")
            role = attribute.python_role_name()
            if role:
                self.out.write("# Roles for role-based attributes can be found by name using this member")
                self.out.write(f"# role_name = db.role.{namespace}.{attribute.base_name}")

    # ----------------------------------------------------------------------
    def generate_python_template(self):
        """Write out the code associated with the node.

        Raises:
            NodeGenerationError: When there is a failure in the generation of the Python interface
        """
        self.out.write('"""')
        self.out.write(f"This is the implementation of the OGN node defined in {self.base_name}.ogn")
        self.out.write('"""')
        self.out.write()
        self.out.write("# Array or tuple values are accessed as numpy arrays so you probably need this import")
        self.out.write("import numpy")
        self.out.write()
        self.out.write()
        if self.out.indent(f"class {self.base_name}:"):
            node_description = to_comment("", self.node_interface.description, 1)
            self.out.write('"""')
            self.out.write(node_description)
            self.out.write('"""')
            self.out.write("@staticmethod")
            if self.out.indent("def compute(db) -> bool:"):
                self.out.write('"""Compute the outputs from the current input"""\n')
                if self.out.indent("try:"):
                    self.out.write("# With the compute in a try block you can fail the compute by raising an exception")
                    input_attributes = self.node_interface.all_input_attributes()
                    if input_attributes:
                        self.out.write("# ======================================================================")
                        self.out.write("# Use these methods to access the input values")
                        self.out.write("# ======================================================================")
                        self.generate_python_attribute_info(self.node_interface.all_input_attributes(), INPUT_GROUP)
                        self.out.write()
                    output_attributes = self.node_interface.all_output_attributes()
                    if output_attributes:
                        self.out.write("# ======================================================================")
                        self.out.write("# Use these methods to set the output values")
                        self.out.write("# ======================================================================")
                        self.generate_python_attribute_info(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
                        self.out.write()
                    state_attributes = self.node_interface.all_state_attributes()
                    if state_attributes:
                        self.out.write("# ======================================================================")
                        self.out.write("# Use these methods to set the state values")
                        self.out.write("# ======================================================================")
                        self.generate_python_attribute_info(state_attributes, STATE_GROUP)
                        self.out.write()
                    self.out.write("pass")
                    self.out.exdent()
                if self.out.indent("except Exception as error:"):
                    self.out.write("# If anything causes your compute to fail report the error and return False")
                    self.out.write("db.log_error(str(error))")
                    self.out.write("return False")
                    self.out.exdent()
                self.out.write()
                self.out.write("# Even if inputs were edge cases like empty arrays, correct outputs mean success")
                self.out.write("return True")
                self.out.exdent()
            self.out.exdent()

    # ----------------------------------------------------------------------
    def generate_node_interface(self):
        """Write out a template implementation of the node in the requested language.

        Raises:
            NodeGenerationError: When there is a failure in the generation of the template
        """
        if self.node_interface.language == LanguageTypeValues.CPP:
            self.generate_cpp_template()
        elif self.node_interface.language == LanguageTypeValues.PYTHON:
            self.generate_python_template()
# ======================================================================
def generate_template(configuration: GeneratorConfiguration) -> Optional[str]:
    """Create support files for the C++ interface to a node

    For now only a header file is generated for the C++ interface, though there will probably be multiple files
    generated in the future. For that reason this single point of contact was created for outside callers.

    Args:
        configuration: Information defining how and where the template will be generated

    Returns:
        String containing the generated template class definition or None if its generation was not enabled

    Raises:
        NodeGenerationError: When there is a failure in the generation of the header
        UnimplementedError: When the language of the node does not support template generation
    """
    if configuration.node_interface.can_generate("template"):
        logger.info("Generating Template Node Implementation Class")
        template_generator = NodeTemplateGenerator(configuration)
        template_generator.generate_interface()
        return str(template_generator.out)
    return None
| 13,115 | Python | 54.812766 | 120 | 0.535951 |
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/type_definitions.py | """Handle the mapping of OGN types onto the various generated code types"""
import json
from contextlib import suppress
from io import TextIOWrapper
from pathlib import Path
from typing import IO, Dict, List, Tuple, Union
from .attributes.AttributeManager import CppConfiguration
from .attributes.management import get_attribute_manager_type
from .keys import NodeTypeKeys
from .utils import ParseError, is_comment
class __TypeDefinitions:
    """Use the function apply_type_definitions instead of directly instantiating this class"""

    def __init__(self, type_definitions: Union[str, IO, Dict, Path, None]):
        """Initialize the type definition maps based on a JSON definition

        Args:
            type_definitions: Source of the definitions - raw JSON text, an already-parsed
                dictionary, an open text file, a path to a JSON file, or None for no definitions

        Raises:
            ParseError: When the definition source cannot be read or is not valid JSON

        Internal:
            __definitions: Dictionary of type information read from the definition description
        """
        try:
            self.__definitions = {}
            if type_definitions is None:
                pass
            elif isinstance(type_definitions, str):
                self.__definitions = json.loads(type_definitions)[NodeTypeKeys.TYPE_DEFINITIONS]
            elif isinstance(type_definitions, Dict):
                self.__definitions = type_definitions[NodeTypeKeys.TYPE_DEFINITIONS]
            elif isinstance(type_definitions, TextIOWrapper):
                self.__definitions = json.load(type_definitions)[NodeTypeKeys.TYPE_DEFINITIONS]
            elif isinstance(type_definitions, Path):
                # Use a context manager so the file handle is closed promptly instead of leaked
                with type_definitions.open("r") as definition_fd:
                    self.__definitions = json.load(definition_fd)[NodeTypeKeys.TYPE_DEFINITIONS]
            else:
                raise ParseError(f"Type definition type not handled - {type_definitions}")
        except OSError as error:
            raise ParseError(f"File error when parsing type definitions {type_definitions} - {error}") from None
        except json.decoder.JSONDecodeError as error:
            raise ParseError(f"Invalid JSON formatting in file {type_definitions} - {error}") from None

    # --------------------------------------------------------------------------------------------------------------
    def __apply_cpp_definitions(self, configuration_information: Dict[str, Tuple[str, List[str]]]):
        """Apply type definitions from the definition to the C++ types on the attribute managers

        Args:
            configuration_information: Dictionary whose keys are the names of attribute types and whose values are
                a tuple of the C++ data type name for that attribute type and a list of files to be included to use it

        Raises:
            ParseError: When a configuration entry is malformed or names an unknown attribute type
        """
        for attribute_type_name, attribute_type_configuration in configuration_information.items():
            # Empty configuration means leave it as-is
            if not attribute_type_configuration:
                continue
            if is_comment(attribute_type_name):
                continue
            # Take a single string to mean the type definition, with no extra includes required
            if isinstance(attribute_type_configuration, str):
                if attribute_type_configuration:
                    attribute_type_configuration = [attribute_type_configuration]
                else:
                    attribute_type_configuration = []
            attribute_manager = get_attribute_manager_type(attribute_type_name)
            if attribute_manager is None:
                raise ParseError(f"Could not find attribute manager type for configuration of {attribute_type_name}")
            # If there is a change it will have a type and include file list, else skip this one
            with suppress(AttributeError, KeyError):
                cast_type = attribute_type_configuration[0]
                include_files = [] if len(attribute_type_configuration) < 2 else attribute_type_configuration[1]
                if not isinstance(cast_type, str):
                    raise ParseError(
                        f"Cast type for attribute type {attribute_type_name} must be a string, not {cast_type}"
                    )
                if not isinstance(include_files, list):
                    raise ParseError(
                        f"Include files for attribute type {attribute_type_name} must be a list, not {include_files}"
                    )
                attribute_manager.override_cpp_configuration(cast_type, include_files, cast_required=False)
                attribute_manager.CPP_CONFIGURATION[attribute_manager.tuple_count] = CppConfiguration(
                    base_type_name=cast_type, include_files=include_files
                )

    # --------------------------------------------------------------------------------------------------------------
    def apply_definitions(self):
        """Apply any type definitions to the attribute manager to which they apply"""
        for language, configuration_information in self.__definitions.items():
            if language == "c++":
                self.__apply_cpp_definitions(configuration_information)
            elif not is_comment(language):
                raise ParseError(f"Configuration for language '{language}' is not supported")
# ==============================================================================================================
def apply_type_definitions(type_definitions: Union[str, IO, Dict, Path, None]):
    """Parse the given type definition source and apply it to the matching attribute managers"""
    __TypeDefinitions(type_definitions).apply_definitions()
| 5,428 | Python | 52.752475 | 117 | 0.607222 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.