Dataset schema:
    file_path: string (length 21 to 202)
    content: string (length 12 to 1.02M)
    size: int64 (12 to 1.02M)
    lang: string (9 classes)
    avg_line_length: float64 (3.33 to 100)
    max_line_length: int64 (10 to 993)
    alphanum_fraction: float64 (0.27 to 0.93)
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialTupleData.py
import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene


class TestOgn(ogts.OmniGraphTestCase):

    TEST_DATA = [
        {
            'outputs': [
                ['outputs:a_double2', [2.1, 3.2], False],
                ['outputs:a_float2', [5.4, 6.5], False],
                ['outputs:a_half2', [8.0, 9.0], False],
                ['outputs:a_int2', [11, 12], False],
                ['outputs:a_float3', [7.6, 8.7, 9.8], False],
                ['outputs:a_double3', [2.1, 3.2, 4.3], False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_double2', [2.1, 3.2], False],
                ['inputs:a_float2', [5.1, 6.2], False],
                ['inputs:a_half2', [8.0, 9.0], False],
                ['inputs:a_int2', [11, 12], False],
                ['inputs:a_float3', [7.1, 8.2, 9.3], False],
                ['inputs:a_double3', [10.1, 11.2, 12.3], False],
            ],
            'outputs': [
                ['outputs:a_double2', [3.1, 4.2], False],
                ['outputs:a_float2', [6.1, 7.2], False],
                ['outputs:a_half2', [9.0, 10.0], False],
                ['outputs:a_int2', [12, 13], False],
                ['outputs:a_float3', [8.1, 9.2, 10.3], False],
                ['outputs:a_double3', [11.1, 12.2, 13.3], False],
            ],
        },
    ]

    async def test_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_tutorials_TupleData", "omni.tutorials.TupleData", test_run, test_info)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.tutorials.TupleData User test case #{i+1}")

    async def test_vectorized_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_tutorials_TupleData", "omni.tutorials.TupleData", test_run, test_info, 16)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.tutorials.TupleData User test case #{i+1}", 16)

    async def test_thread_safety(self):
        import omni.kit

        # Generate multiple instances of the test setup to run them concurrently
        instance_setup = dict()
        for n in range(24):
            instance_setup[f"/TestGraph_{n}"] = _TestGraphAndNode()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            for (key, test_info) in instance_setup.items():
                test_info = await _test_setup_scene(self, og.Controller(allow_exists_prim=True), key, "TestNode_omni_tutorials_TupleData", "omni.tutorials.TupleData", test_run, test_info)
            self.assertEqual(len(og.get_all_graphs()), 24)
            # We want to evaluate all graphs concurrently. Kick them all.
            # Evaluate multiple times to skip 2 serial frames and increase chances for a race condition.
            for _ in range(10):
                await omni.kit.app.get_app().next_update_async()
            for (key, test_instance) in instance_setup.items():
                _test_verify_scene(self, og.Controller(), test_run, test_info, f"omni.tutorials.TupleData User test case #{i+1}, instance{key}")

    async def test_data_access(self):
        from omni.graph.tutorials.ogn.OgnTutorialTupleDataDatabase import OgnTutorialTupleDataDatabase
        test_file_name = "OgnTutorialTupleDataTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_omni_tutorials_TupleData")
        database = OgnTutorialTupleDataDatabase(test_node)
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
            test_type = "USD Load" if usd_test else "Database Access"
            return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"

        self.assertTrue(test_node.get_attribute_exists("inputs:a_double2"))
        attribute = test_node.get_attribute("inputs:a_double2")
        db_value = database.inputs.a_double2
        expected_value = [1.1, 2.2]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_double3"))
        attribute = test_node.get_attribute("inputs:a_double3")
        db_value = database.inputs.a_double3
        expected_value = [1.1, 2.2, 3.3]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_float2"))
        attribute = test_node.get_attribute("inputs:a_float2")
        db_value = database.inputs.a_float2
        expected_value = [4.4, 5.5]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_float3"))
        attribute = test_node.get_attribute("inputs:a_float3")
        db_value = database.inputs.a_float3
        expected_value = [6.6, 7.7, 8.8]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_half2"))
        attribute = test_node.get_attribute("inputs:a_half2")
        db_value = database.inputs.a_half2
        expected_value = [7.0, 8.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_int2"))
        attribute = test_node.get_attribute("inputs:a_int2")
        db_value = database.inputs.a_int2
        expected_value = [10, 11]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("outputs:a_double2"))
        attribute = test_node.get_attribute("outputs:a_double2")
        db_value = database.outputs.a_double2
        self.assertTrue(test_node.get_attribute_exists("outputs:a_double3"))
        attribute = test_node.get_attribute("outputs:a_double3")
        db_value = database.outputs.a_double3
        self.assertTrue(test_node.get_attribute_exists("outputs:a_float2"))
        attribute = test_node.get_attribute("outputs:a_float2")
        db_value = database.outputs.a_float2
        self.assertTrue(test_node.get_attribute_exists("outputs:a_float3"))
        attribute = test_node.get_attribute("outputs:a_float3")
        db_value = database.outputs.a_float3
        self.assertTrue(test_node.get_attribute_exists("outputs:a_half2"))
        attribute = test_node.get_attribute("outputs:a_half2")
        db_value = database.outputs.a_half2
        self.assertTrue(test_node.get_attribute_exists("outputs:a_int2"))
        attribute = test_node.get_attribute("outputs:a_int2")
        db_value = database.outputs.a_int2
size: 8,539 | lang: Python | avg_line_length: 48.364162 | max_line_length: 187 | alphanum_fraction: 0.65031
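For orientation, a minimal hand-written sketch of the behavior the TEST_DATA above encodes: omni.tutorials.TupleData adds 1 to each component of its tuple inputs (e.g. inputs:a_double2 [2.1, 3.2] becomes outputs:a_double2 [3.1, 4.2]). The graph path and function name here are illustrative, not part of the dataset; the og.Controller calls mirror the pattern used in the test files in this dump.

import omni.graph.core as og

async def check_tuple_data_by_hand():
    keys = og.Controller.Keys
    # Build a one-node graph and set a single tuple input (names are illustrative)
    (graph, [tuple_node], _, _) = og.Controller.edit(
        "/SketchGraph",
        {
            keys.CREATE_NODES: ("TupleNode", "omni.tutorials.TupleData"),
            keys.SET_VALUES: [("TupleNode.inputs:a_double2", [2.1, 3.2])],
        },
    )
    await og.Controller.evaluate(graph)
    result = og.Controller.get(("outputs:a_double2", tuple_node))
    # Expect roughly [3.1, 4.2]; compare with a tolerance rather than ==
    assert all(abs(r - e) < 1e-6 for r, e in zip(result, [3.1, 4.2]))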
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialSimpleDataPy.py
import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene


class TestOgn(ogts.OmniGraphTestCase):

    TEST_DATA = [
        {
            'inputs': [
                ['inputs:a_bool', False, False],
            ],
            'outputs': [
                ['outputs:a_bool', True, False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_bool', True, False],
            ],
            'outputs': [
                ['outputs:a_bool', False, False],
                ['outputs:a_a_boolUiName', "Simple Boolean Input", False],
                ['outputs:a_nodeTypeUiName', "Tutorial Python Node: Attributes With Simple Data", False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_path', "/World/Domination", False],
            ],
            'outputs': [
                ['outputs:a_path', "/World/Domination/Child", False],
            ],
        },
        {
            'inputs': [
                ['inputs:a_bool', False, False],
                ['inputs:a_double', 1.1, False],
                ['inputs:a_float', 3.3, False],
                ['inputs:a_half', 5.0, False],
                ['inputs:a_int', 7, False],
                ['inputs:a_int64', 9, False],
                ['inputs:a_token', "helloToken", False],
                ['inputs:a_string', "helloString", False],
                ['inputs:a_objectId', 10, False],
                ['inputs:a_uchar', 11, False],
                ['inputs:a_uint', 13, False],
                ['inputs:a_uint64', 15, False],
            ],
            'outputs': [
                ['outputs:a_bool', True, False],
                ['outputs:a_double', 2.1, False],
                ['outputs:a_float', 4.3, False],
                ['outputs:a_half', 6.0, False],
                ['outputs:a_int', 8, False],
                ['outputs:a_int64', 10, False],
                ['outputs:a_token', "worldToken", False],
                ['outputs:a_string', "worldString", False],
                ['outputs:a_objectId', 11, False],
                ['outputs:a_uchar', 12, False],
                ['outputs:a_uint', 14, False],
                ['outputs:a_uint64', 16, False],
            ],
        },
    ]

    async def test_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_SimpleDataPy", "omni.graph.tutorials.SimpleDataPy", test_run, test_info)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.SimpleDataPy User test case #{i+1}")

    async def test_vectorized_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_SimpleDataPy", "omni.graph.tutorials.SimpleDataPy", test_run, test_info, 16)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.SimpleDataPy User test case #{i+1}", 16)

    async def test_data_access(self):
        from omni.graph.tutorials.ogn.OgnTutorialSimpleDataPyDatabase import OgnTutorialSimpleDataPyDatabase
        test_file_name = "OgnTutorialSimpleDataPyTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_omni_graph_tutorials_SimpleDataPy")
        database = OgnTutorialSimpleDataPyDatabase(test_node)
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
            test_type = "USD Load" if usd_test else "Database Access"
            return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"

        self.assertTrue(test_node.get_attribute_exists("inputs:a_constant_input"))
        attribute = test_node.get_attribute("inputs:a_constant_input")
        db_value = database.inputs.a_constant_input
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_double"))
        attribute = test_node.get_attribute("inputs:a_double")
        db_value = database.inputs.a_double
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_float"))
        attribute = test_node.get_attribute("inputs:a_float")
        db_value = database.inputs.a_float
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_half"))
        attribute = test_node.get_attribute("inputs:a_half")
        db_value = database.inputs.a_half
        expected_value = 0.0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_int"))
        attribute = test_node.get_attribute("inputs:a_int")
        db_value = database.inputs.a_int
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_int64"))
        attribute = test_node.get_attribute("inputs:a_int64")
        db_value = database.inputs.a_int64
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_objectId"))
        attribute = test_node.get_attribute("inputs:a_objectId")
        db_value = database.inputs.a_objectId
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_path"))
        attribute = test_node.get_attribute("inputs:a_path")
        db_value = database.inputs.a_path
        expected_value = ""
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_string"))
        attribute = test_node.get_attribute("inputs:a_string")
        db_value = database.inputs.a_string
        expected_value = "helloString"
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_token"))
        attribute = test_node.get_attribute("inputs:a_token")
        db_value = database.inputs.a_token
        expected_value = "helloToken"
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_uchar"))
        attribute = test_node.get_attribute("inputs:a_uchar")
        db_value = database.inputs.a_uchar
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_uint"))
        attribute = test_node.get_attribute("inputs:a_uint")
        db_value = database.inputs.a_uint
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_uint64"))
        attribute = test_node.get_attribute("inputs:a_uint64")
        db_value = database.inputs.a_uint64
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("outputs:a_a_boolUiName"))
        attribute = test_node.get_attribute("outputs:a_a_boolUiName")
        db_value = database.outputs.a_a_boolUiName
        self.assertTrue(test_node.get_attribute_exists("outputs:a_bool"))
        attribute = test_node.get_attribute("outputs:a_bool")
        db_value = database.outputs.a_bool
        self.assertTrue(test_node.get_attribute_exists("outputs:a_double"))
        attribute = test_node.get_attribute("outputs:a_double")
        db_value = database.outputs.a_double
        self.assertTrue(test_node.get_attribute_exists("outputs:a_float"))
        attribute = test_node.get_attribute("outputs:a_float")
        db_value = database.outputs.a_float
        self.assertTrue(test_node.get_attribute_exists("outputs:a_half"))
        attribute = test_node.get_attribute("outputs:a_half")
        db_value = database.outputs.a_half
        self.assertTrue(test_node.get_attribute_exists("outputs:a_int"))
        attribute = test_node.get_attribute("outputs:a_int")
        db_value = database.outputs.a_int
        self.assertTrue(test_node.get_attribute_exists("outputs:a_int64"))
        attribute = test_node.get_attribute("outputs:a_int64")
        db_value = database.outputs.a_int64
        self.assertTrue(test_node.get_attribute_exists("outputs:a_nodeTypeUiName"))
        attribute = test_node.get_attribute("outputs:a_nodeTypeUiName")
        db_value = database.outputs.a_nodeTypeUiName
        self.assertTrue(test_node.get_attribute_exists("outputs:a_objectId"))
        attribute = test_node.get_attribute("outputs:a_objectId")
        db_value = database.outputs.a_objectId
        self.assertTrue(test_node.get_attribute_exists("outputs:a_path"))
        attribute = test_node.get_attribute("outputs:a_path")
        db_value = database.outputs.a_path
        self.assertTrue(test_node.get_attribute_exists("outputs:a_string"))
        attribute = test_node.get_attribute("outputs:a_string")
        db_value = database.outputs.a_string
        self.assertTrue(test_node.get_attribute_exists("outputs:a_token"))
        attribute = test_node.get_attribute("outputs:a_token")
        db_value = database.outputs.a_token
        self.assertTrue(test_node.get_attribute_exists("outputs:a_uchar"))
        attribute = test_node.get_attribute("outputs:a_uchar")
        db_value = database.outputs.a_uchar
        self.assertTrue(test_node.get_attribute_exists("outputs:a_uint"))
        attribute = test_node.get_attribute("outputs:a_uint")
        db_value = database.outputs.a_uint
        self.assertTrue(test_node.get_attribute_exists("outputs:a_uint64"))
        attribute = test_node.get_attribute("outputs:a_uint64")
        db_value = database.outputs.a_uint64
size: 13,304 | lang: Python | avg_line_length: 47.915441 | max_line_length: 186 | alphanum_fraction: 0.63402
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialRoleData.py
import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene


class TestOgn(ogts.OmniGraphTestCase):

    TEST_DATA = [
        {
            'inputs': [
                ['inputs:a_color3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_color3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_color3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_color4d', [1.0, 2.0, 3.0, 4.0], False],
                ['inputs:a_color4f', [11.0, 12.0, 13.0, 14.0], False],
                ['inputs:a_color4h', [21.0, 22.0, 23.0, 24.0], False],
                ['inputs:a_frame', [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0], False],
                ['inputs:a_matrix2d', [1.0, 2.0, 3.0, 4.0], False],
                ['inputs:a_matrix3d', [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], False],
                ['inputs:a_matrix4d', [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0], False],
                ['inputs:a_normal3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_normal3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_normal3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_point3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_point3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_point3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_quatd', [1.0, 2.0, 3.0, 4.0], False],
                ['inputs:a_quatf', [11.0, 12.0, 13.0, 14.0], False],
                ['inputs:a_quath', [21.0, 22.0, 23.0, 24.0], False],
                ['inputs:a_texcoord2d', [1.0, 2.0], False],
                ['inputs:a_texcoord2f', [11.0, 12.0], False],
                ['inputs:a_texcoord2h', [21.0, 22.0], False],
                ['inputs:a_texcoord3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_texcoord3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_texcoord3h', [21.0, 22.0, 23.0], False],
                ['inputs:a_timecode', 10.0, False],
                ['inputs:a_vector3d', [1.0, 2.0, 3.0], False],
                ['inputs:a_vector3f', [11.0, 12.0, 13.0], False],
                ['inputs:a_vector3h', [21.0, 22.0, 23.0], False],
            ],
            'outputs': [
                ['outputs:a_color3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_color3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_color3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_color4d', [2.0, 3.0, 4.0, 5.0], False],
                ['outputs:a_color4f', [12.0, 13.0, 14.0, 15.0], False],
                ['outputs:a_color4h', [22.0, 23.0, 24.0, 25.0], False],
                ['outputs:a_frame', [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], False],
                ['outputs:a_matrix2d', [2.0, 3.0, 4.0, 5.0], False],
                ['outputs:a_matrix3d', [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0], False],
                ['outputs:a_matrix4d', [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], False],
                ['outputs:a_normal3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_normal3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_normal3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_point3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_point3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_point3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_quatd', [2.0, 3.0, 4.0, 5.0], False],
                ['outputs:a_quatf', [12.0, 13.0, 14.0, 15.0], False],
                ['outputs:a_quath', [22.0, 23.0, 24.0, 25.0], False],
                ['outputs:a_texcoord2d', [2.0, 3.0], False],
                ['outputs:a_texcoord2f', [12.0, 13.0], False],
                ['outputs:a_texcoord2h', [22.0, 23.0], False],
                ['outputs:a_texcoord3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_texcoord3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_texcoord3h', [22.0, 23.0, 24.0], False],
                ['outputs:a_timecode', 11.0, False],
                ['outputs:a_vector3d', [2.0, 3.0, 4.0], False],
                ['outputs:a_vector3f', [12.0, 13.0, 14.0], False],
                ['outputs:a_vector3h', [22.0, 23.0, 24.0], False],
            ],
        },
    ]

    async def test_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_RoleData", "omni.graph.tutorials.RoleData", test_run, test_info)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.RoleData User test case #{i+1}")

    async def test_vectorized_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_RoleData", "omni.graph.tutorials.RoleData", test_run, test_info, 16)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.RoleData User test case #{i+1}", 16)

    async def test_thread_safety(self):
        import omni.kit

        # Generate multiple instances of the test setup to run them concurrently
        instance_setup = dict()
        for n in range(24):
            instance_setup[f"/TestGraph_{n}"] = _TestGraphAndNode()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            for (key, test_info) in instance_setup.items():
                test_info = await _test_setup_scene(self, og.Controller(allow_exists_prim=True), key, "TestNode_omni_graph_tutorials_RoleData", "omni.graph.tutorials.RoleData", test_run, test_info)
            self.assertEqual(len(og.get_all_graphs()), 24)
            # We want to evaluate all graphs concurrently. Kick them all.
            # Evaluate multiple times to skip 2 serial frames and increase chances for a race condition.
            for _ in range(10):
                await omni.kit.app.get_app().next_update_async()
            for (key, test_instance) in instance_setup.items():
                _test_verify_scene(self, og.Controller(), test_run, test_info, f"omni.graph.tutorials.RoleData User test case #{i+1}, instance{key}")

    async def test_data_access(self):
        from omni.graph.tutorials.ogn.OgnTutorialRoleDataDatabase import OgnTutorialRoleDataDatabase
        test_file_name = "OgnTutorialRoleDataTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_omni_graph_tutorials_RoleData")
        database = OgnTutorialRoleDataDatabase(test_node)
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
            test_type = "USD Load" if usd_test else "Database Access"
            return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"

        self.assertTrue(test_node.get_attribute_exists("inputs:a_color3d"))
        attribute = test_node.get_attribute("inputs:a_color3d")
        db_value = database.inputs.a_color3d
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_color3f"))
        attribute = test_node.get_attribute("inputs:a_color3f")
        db_value = database.inputs.a_color3f
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_color3h"))
        attribute = test_node.get_attribute("inputs:a_color3h")
        db_value = database.inputs.a_color3h
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_color4d"))
        attribute = test_node.get_attribute("inputs:a_color4d")
        db_value = database.inputs.a_color4d
        expected_value = [0.0, 0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_color4f"))
        attribute = test_node.get_attribute("inputs:a_color4f")
        db_value = database.inputs.a_color4f
        expected_value = [0.0, 0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_color4h"))
        attribute = test_node.get_attribute("inputs:a_color4h")
        db_value = database.inputs.a_color4h
        expected_value = [0.0, 0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_frame"))
        attribute = test_node.get_attribute("inputs:a_frame")
        db_value = database.inputs.a_frame
        expected_value = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix2d"))
        attribute = test_node.get_attribute("inputs:a_matrix2d")
        db_value = database.inputs.a_matrix2d
        expected_value = [[1.0, 0.0], [0.0, 1.0]]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix3d"))
        attribute = test_node.get_attribute("inputs:a_matrix3d")
        db_value = database.inputs.a_matrix3d
        expected_value = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix4d"))
        attribute = test_node.get_attribute("inputs:a_matrix4d")
        db_value = database.inputs.a_matrix4d
        expected_value = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_normal3d"))
        attribute = test_node.get_attribute("inputs:a_normal3d")
        db_value = database.inputs.a_normal3d
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_normal3f"))
        attribute = test_node.get_attribute("inputs:a_normal3f")
        db_value = database.inputs.a_normal3f
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_normal3h"))
        attribute = test_node.get_attribute("inputs:a_normal3h")
        db_value = database.inputs.a_normal3h
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_point3d"))
        attribute = test_node.get_attribute("inputs:a_point3d")
        db_value = database.inputs.a_point3d
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_point3f"))
        attribute = test_node.get_attribute("inputs:a_point3f")
        db_value = database.inputs.a_point3f
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_point3h"))
        attribute = test_node.get_attribute("inputs:a_point3h")
        db_value = database.inputs.a_point3h
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_quatd"))
        attribute = test_node.get_attribute("inputs:a_quatd")
        db_value = database.inputs.a_quatd
        expected_value = [0.0, 0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_quatf"))
        attribute = test_node.get_attribute("inputs:a_quatf")
        db_value = database.inputs.a_quatf
        expected_value = [0.0, 0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_quath"))
        attribute = test_node.get_attribute("inputs:a_quath")
        db_value = database.inputs.a_quath
        expected_value = [0.0, 0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord2d"))
        attribute = test_node.get_attribute("inputs:a_texcoord2d")
        db_value = database.inputs.a_texcoord2d
        expected_value = [0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord2f"))
        attribute = test_node.get_attribute("inputs:a_texcoord2f")
        db_value = database.inputs.a_texcoord2f
        expected_value = [0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord2h"))
        attribute = test_node.get_attribute("inputs:a_texcoord2h")
        db_value = database.inputs.a_texcoord2h
        expected_value = [0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord3d"))
        attribute = test_node.get_attribute("inputs:a_texcoord3d")
        db_value = database.inputs.a_texcoord3d
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord3f"))
        attribute = test_node.get_attribute("inputs:a_texcoord3f")
        db_value = database.inputs.a_texcoord3f
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_texcoord3h"))
        attribute = test_node.get_attribute("inputs:a_texcoord3h")
        db_value = database.inputs.a_texcoord3h
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_timecode"))
        attribute = test_node.get_attribute("inputs:a_timecode")
        db_value = database.inputs.a_timecode
        expected_value = 1.0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_vector3d"))
        attribute = test_node.get_attribute("inputs:a_vector3d")
        db_value = database.inputs.a_vector3d
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_vector3f"))
        attribute = test_node.get_attribute("inputs:a_vector3f")
        db_value = database.inputs.a_vector3f
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_vector3h"))
        attribute = test_node.get_attribute("inputs:a_vector3h")
        db_value = database.inputs.a_vector3h
        expected_value = [0.0, 0.0, 0.0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("outputs:a_color3d"))
        attribute = test_node.get_attribute("outputs:a_color3d")
        db_value = database.outputs.a_color3d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_color3f"))
        attribute = test_node.get_attribute("outputs:a_color3f")
        db_value = database.outputs.a_color3f
        self.assertTrue(test_node.get_attribute_exists("outputs:a_color3h"))
        attribute = test_node.get_attribute("outputs:a_color3h")
        db_value = database.outputs.a_color3h
        self.assertTrue(test_node.get_attribute_exists("outputs:a_color4d"))
        attribute = test_node.get_attribute("outputs:a_color4d")
        db_value = database.outputs.a_color4d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_color4f"))
        attribute = test_node.get_attribute("outputs:a_color4f")
        db_value = database.outputs.a_color4f
        self.assertTrue(test_node.get_attribute_exists("outputs:a_color4h"))
        attribute = test_node.get_attribute("outputs:a_color4h")
        db_value = database.outputs.a_color4h
        self.assertTrue(test_node.get_attribute_exists("outputs:a_frame"))
        attribute = test_node.get_attribute("outputs:a_frame")
        db_value = database.outputs.a_frame
        self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix2d"))
        attribute = test_node.get_attribute("outputs:a_matrix2d")
        db_value = database.outputs.a_matrix2d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix3d"))
        attribute = test_node.get_attribute("outputs:a_matrix3d")
        db_value = database.outputs.a_matrix3d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix4d"))
        attribute = test_node.get_attribute("outputs:a_matrix4d")
        db_value = database.outputs.a_matrix4d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_normal3d"))
        attribute = test_node.get_attribute("outputs:a_normal3d")
        db_value = database.outputs.a_normal3d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_normal3f"))
        attribute = test_node.get_attribute("outputs:a_normal3f")
        db_value = database.outputs.a_normal3f
        self.assertTrue(test_node.get_attribute_exists("outputs:a_normal3h"))
        attribute = test_node.get_attribute("outputs:a_normal3h")
        db_value = database.outputs.a_normal3h
        self.assertTrue(test_node.get_attribute_exists("outputs:a_point3d"))
        attribute = test_node.get_attribute("outputs:a_point3d")
        db_value = database.outputs.a_point3d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_point3f"))
        attribute = test_node.get_attribute("outputs:a_point3f")
        db_value = database.outputs.a_point3f
        self.assertTrue(test_node.get_attribute_exists("outputs:a_point3h"))
        attribute = test_node.get_attribute("outputs:a_point3h")
        db_value = database.outputs.a_point3h
        self.assertTrue(test_node.get_attribute_exists("outputs:a_quatd"))
        attribute = test_node.get_attribute("outputs:a_quatd")
        db_value = database.outputs.a_quatd
        self.assertTrue(test_node.get_attribute_exists("outputs:a_quatf"))
        attribute = test_node.get_attribute("outputs:a_quatf")
        db_value = database.outputs.a_quatf
        self.assertTrue(test_node.get_attribute_exists("outputs:a_quath"))
        attribute = test_node.get_attribute("outputs:a_quath")
        db_value = database.outputs.a_quath
        self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord2d"))
        attribute = test_node.get_attribute("outputs:a_texcoord2d")
        db_value = database.outputs.a_texcoord2d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord2f"))
        attribute = test_node.get_attribute("outputs:a_texcoord2f")
        db_value = database.outputs.a_texcoord2f
        self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord2h"))
        attribute = test_node.get_attribute("outputs:a_texcoord2h")
        db_value = database.outputs.a_texcoord2h
        self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord3d"))
        attribute = test_node.get_attribute("outputs:a_texcoord3d")
        db_value = database.outputs.a_texcoord3d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord3f"))
        attribute = test_node.get_attribute("outputs:a_texcoord3f")
        db_value = database.outputs.a_texcoord3f
        self.assertTrue(test_node.get_attribute_exists("outputs:a_texcoord3h"))
        attribute = test_node.get_attribute("outputs:a_texcoord3h")
        db_value = database.outputs.a_texcoord3h
        self.assertTrue(test_node.get_attribute_exists("outputs:a_timecode"))
        attribute = test_node.get_attribute("outputs:a_timecode")
        db_value = database.outputs.a_timecode
        self.assertTrue(test_node.get_attribute_exists("outputs:a_vector3d"))
        attribute = test_node.get_attribute("outputs:a_vector3d")
        db_value = database.outputs.a_vector3d
        self.assertTrue(test_node.get_attribute_exists("outputs:a_vector3f"))
        attribute = test_node.get_attribute("outputs:a_vector3f")
        db_value = database.outputs.a_vector3f
        self.assertTrue(test_node.get_attribute_exists("outputs:a_vector3h"))
        attribute = test_node.get_attribute("outputs:a_vector3h")
        db_value = database.outputs.a_vector3h
size: 25,764 | lang: Python | avg_line_length: 52.123711 | max_line_length: 197 | alphanum_fraction: 0.662669
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/ogn/tests/TestOgnTutorialDefaults.py
import os
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
from omni.graph.core.tests.omnigraph_test_utils import _TestGraphAndNode
from omni.graph.core.tests.omnigraph_test_utils import _test_clear_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_setup_scene
from omni.graph.core.tests.omnigraph_test_utils import _test_verify_scene


class TestOgn(ogts.OmniGraphTestCase):

    TEST_DATA = [
        {
            'outputs': [
                ['outputs:a_bool', False, False],
                ['outputs:a_double', 0.0, False],
                ['outputs:a_float', 0.0, False],
                ['outputs:a_half', 0.0, False],
                ['outputs:a_int', 0, False],
                ['outputs:a_int64', 0, False],
                ['outputs:a_string', "", False],
                ['outputs:a_token', "", False],
                ['outputs:a_uchar', 0, False],
                ['outputs:a_uint', 0, False],
                ['outputs:a_uint64', 0, False],
                ['outputs:a_int2', [0, 0], False],
                ['outputs:a_matrix', [1.0, 0.0, 0.0, 1.0], False],
                ['outputs:a_array', [], False],
            ],
        },
    ]

    async def test_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_Defaults", "omni.graph.tutorials.Defaults", test_run, test_info)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.Defaults User test case #{i+1}")

    async def test_vectorized_generated(self):
        test_info = _TestGraphAndNode()
        controller = og.Controller()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            test_info = await _test_setup_scene(self, controller, "/TestGraph", "TestNode_omni_graph_tutorials_Defaults", "omni.graph.tutorials.Defaults", test_run, test_info, 16)
            await controller.evaluate(test_info.graph)
            _test_verify_scene(self, controller, test_run, test_info, f"omni.graph.tutorials.Defaults User test case #{i+1}", 16)

    async def test_thread_safety(self):
        import omni.kit

        # Generate multiple instances of the test setup to run them concurrently
        instance_setup = dict()
        for n in range(24):
            instance_setup[f"/TestGraph_{n}"] = _TestGraphAndNode()
        for i, test_run in enumerate(self.TEST_DATA):
            await _test_clear_scene(self, test_run)
            for (key, test_info) in instance_setup.items():
                test_info = await _test_setup_scene(self, og.Controller(allow_exists_prim=True), key, "TestNode_omni_graph_tutorials_Defaults", "omni.graph.tutorials.Defaults", test_run, test_info)
            self.assertEqual(len(og.get_all_graphs()), 24)
            # We want to evaluate all graphs concurrently. Kick them all.
            # Evaluate multiple times to skip 2 serial frames and increase chances for a race condition.
            for _ in range(10):
                await omni.kit.app.get_app().next_update_async()
            for (key, test_instance) in instance_setup.items():
                _test_verify_scene(self, og.Controller(), test_run, test_info, f"omni.graph.tutorials.Defaults User test case #{i+1}, instance{key}")

    async def test_data_access(self):
        from omni.graph.tutorials.ogn.OgnTutorialDefaultsDatabase import OgnTutorialDefaultsDatabase
        test_file_name = "OgnTutorialDefaultsTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_omni_graph_tutorials_Defaults")
        database = OgnTutorialDefaultsDatabase(test_node)
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
            test_type = "USD Load" if usd_test else "Database Access"
            return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"

        self.assertTrue(test_node.get_attribute_exists("inputs:a_array"))
        attribute = test_node.get_attribute("inputs:a_array")
        db_value = database.inputs.a_array
        expected_value = []
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_bool"))
        attribute = test_node.get_attribute("inputs:a_bool")
        db_value = database.inputs.a_bool
        expected_value = False
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_double"))
        attribute = test_node.get_attribute("inputs:a_double")
        db_value = database.inputs.a_double
        expected_value = 0.0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_float"))
        attribute = test_node.get_attribute("inputs:a_float")
        db_value = database.inputs.a_float
        expected_value = 0.0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_half"))
        attribute = test_node.get_attribute("inputs:a_half")
        db_value = database.inputs.a_half
        expected_value = 0.0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_int"))
        attribute = test_node.get_attribute("inputs:a_int")
        db_value = database.inputs.a_int
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_int2"))
        attribute = test_node.get_attribute("inputs:a_int2")
        db_value = database.inputs.a_int2
        expected_value = [0, 0]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_int64"))
        attribute = test_node.get_attribute("inputs:a_int64")
        db_value = database.inputs.a_int64
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_matrix"))
        attribute = test_node.get_attribute("inputs:a_matrix")
        db_value = database.inputs.a_matrix
        expected_value = [[1.0, 0.0], [0.0, 1.0]]
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_string"))
        attribute = test_node.get_attribute("inputs:a_string")
        db_value = database.inputs.a_string
        expected_value = ""
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_token"))
        attribute = test_node.get_attribute("inputs:a_token")
        db_value = database.inputs.a_token
        expected_value = ""
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_uchar"))
        attribute = test_node.get_attribute("inputs:a_uchar")
        db_value = database.inputs.a_uchar
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_uint"))
        attribute = test_node.get_attribute("inputs:a_uint")
        db_value = database.inputs.a_uint
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("inputs:a_uint64"))
        attribute = test_node.get_attribute("inputs:a_uint64")
        db_value = database.inputs.a_uint64
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
        self.assertTrue(test_node.get_attribute_exists("outputs:a_array"))
        attribute = test_node.get_attribute("outputs:a_array")
        db_value = database.outputs.a_array
        self.assertTrue(test_node.get_attribute_exists("outputs:a_bool"))
        attribute = test_node.get_attribute("outputs:a_bool")
        db_value = database.outputs.a_bool
        self.assertTrue(test_node.get_attribute_exists("outputs:a_double"))
        attribute = test_node.get_attribute("outputs:a_double")
        db_value = database.outputs.a_double
        self.assertTrue(test_node.get_attribute_exists("outputs:a_float"))
        attribute = test_node.get_attribute("outputs:a_float")
        db_value = database.outputs.a_float
        self.assertTrue(test_node.get_attribute_exists("outputs:a_half"))
        attribute = test_node.get_attribute("outputs:a_half")
        db_value = database.outputs.a_half
        self.assertTrue(test_node.get_attribute_exists("outputs:a_int"))
        attribute = test_node.get_attribute("outputs:a_int")
        db_value = database.outputs.a_int
        self.assertTrue(test_node.get_attribute_exists("outputs:a_int2"))
        attribute = test_node.get_attribute("outputs:a_int2")
        db_value = database.outputs.a_int2
        self.assertTrue(test_node.get_attribute_exists("outputs:a_int64"))
        attribute = test_node.get_attribute("outputs:a_int64")
        db_value = database.outputs.a_int64
        self.assertTrue(test_node.get_attribute_exists("outputs:a_matrix"))
        attribute = test_node.get_attribute("outputs:a_matrix")
        db_value = database.outputs.a_matrix
        self.assertTrue(test_node.get_attribute_exists("outputs:a_string"))
        attribute = test_node.get_attribute("outputs:a_string")
        db_value = database.outputs.a_string
        self.assertTrue(test_node.get_attribute_exists("outputs:a_token"))
        attribute = test_node.get_attribute("outputs:a_token")
        db_value = database.outputs.a_token
        self.assertTrue(test_node.get_attribute_exists("outputs:a_uchar"))
        attribute = test_node.get_attribute("outputs:a_uchar")
        db_value = database.outputs.a_uchar
        self.assertTrue(test_node.get_attribute_exists("outputs:a_uint"))
        attribute = test_node.get_attribute("outputs:a_uint")
        db_value = database.outputs.a_uint
        self.assertTrue(test_node.get_attribute_exists("outputs:a_uint64"))
        attribute = test_node.get_attribute("outputs:a_uint64")
        db_value = database.outputs.a_uint64
size: 12,650 | lang: Python | avg_line_length: 47.84556 | max_line_length: 197 | alphanum_fraction: 0.686087
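As a quick cross-check of the defaults listed in TEST_DATA above, here is a hand-written sketch (graph path and helper name are illustrative, not from the dataset) that evaluates omni.graph.tutorials.Defaults with no inputs set and inspects two of the zero/empty outputs:

import omni.graph.core as og

async def check_defaults_by_hand():
    # One unconnected node; all outputs should take their default values
    (graph, [node], _, _) = og.Controller.edit(
        "/DefaultsSketch",
        {og.Controller.Keys.CREATE_NODES: ("Defaults", "omni.graph.tutorials.Defaults")},
    )
    await og.Controller.evaluate(graph)
    assert og.Controller.get(("outputs:a_int", node)) == 0   # default int
    assert og.Controller.get(("outputs:a_token", node)) == ""  # default token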
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_python_interface.py
"""Basic tests of the Python interface to C++ nodes generated from a .ogn file""" import omni.graph.core as og import omni.graph.core.tests as ogts from omni.graph.tutorials.ogn.OgnTutorialSimpleDataDatabase import OgnTutorialSimpleDataDatabase # ====================================================================== class TestOmniGraphPythonInterface(ogts.OmniGraphTestCase): """Run a simple unit test that exercises generated Python interface functionality""" # ---------------------------------------------------------------------- async def test_setting_in_ogn_python_api(self): """Test ability to set inputs on a node and retrieve them""" (_, [simple_node], _, _) = og.Controller.edit( "/TestGraph", { og.Controller.Keys.CREATE_NODES: ("PyTest_SimpleNode", "omni.graph.tutorials.SimpleData"), }, ) await og.Controller.evaluate() interface = OgnTutorialSimpleDataDatabase(simple_node) # Set input values through the database interface interface.inputs.a_bool = False interface.inputs.a_half = 2.0 interface.inputs.a_int = 3 interface.inputs.a_int64 = 4 interface.inputs.a_float = 5.0 interface.inputs.a_double = 6.0 # interface.inputs.a_token = "hello" interface.inputs.unsigned_a_uchar = 7 interface.inputs.unsigned_a_uint = 8 interface.inputs.unsigned_a_uint64 = 9 # Run the node's compute method await og.Controller.evaluate() # Retrieve output values from the database interface and verify against expected values self.assertEqual(interface.outputs.a_bool, True) self.assertAlmostEqual(interface.outputs.a_half, 3.0) self.assertEqual(interface.outputs.a_int, 4) self.assertEqual(interface.outputs.a_int64, 5) self.assertAlmostEqual(interface.outputs.a_float, 6.0) self.assertAlmostEqual(interface.outputs.a_double, 7.0) # self.assertEqual(interface.outputs.a_token, "world") self.assertEqual(interface.outputs.unsigned_a_uchar, 8) self.assertEqual(interface.outputs.unsigned_a_uint, 9) self.assertEqual(interface.outputs.unsigned_a_uint64, 10) # ---------------------------------------------------------------------- async def test_check_has_state(self): """Test ability to correctly determine if a node has marked itself as having internal state""" stateless_node_type = og.get_node_type("omni.graph.tutorials.SimpleData") stateful_node_type = og.get_node_type("omni.graph.tutorials.StatePy") self.assertFalse(stateless_node_type.has_state()) self.assertTrue(stateful_node_type.has_state())
size: 2,753 | lang: Python | avg_line_length: 46.482758 | max_line_length: 106 | alphanum_fraction: 0.625499
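The database-interface pattern in the file above condenses to: wrap the node in its generated *Database class, assign to .inputs, evaluate, then read .outputs. A minimal sketch under the same assumptions (the +1 behavior comes from the assertions in that file; the graph and node names here are illustrative):

import omni.graph.core as og
from omni.graph.tutorials.ogn.OgnTutorialSimpleDataDatabase import OgnTutorialSimpleDataDatabase

async def add_one_through_database():
    (_, [node], _, _) = og.Controller.edit(
        "/SketchGraph",
        {og.Controller.Keys.CREATE_NODES: ("Simple", "omni.graph.tutorials.SimpleData")},
    )
    db = OgnTutorialSimpleDataDatabase(node)
    db.inputs.a_int = 41          # set through the generated accessor
    await og.Controller.evaluate()
    assert db.outputs.a_int == 42  # SimpleData adds 1 to each numeric input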
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_api.py
"""Testing the stability of the API in this module""" import omni.graph.core.tests as ogts import omni.graph.tutorials as ogtu from omni.graph.tools.tests.internal_utils import _check_module_api_consistency, _check_public_api_contents # ====================================================================== class _TestOmniGraphTutorialsApi(ogts.OmniGraphTestCase): _UNPUBLISHED = ["bindings", "ogn", "tests"] async def test_api(self): _check_module_api_consistency(ogtu, self._UNPUBLISHED) # noqa: PLW0212 _check_module_api_consistency(ogtu.tests, is_test_module=True) # noqa: PLW0212 async def test_api_features(self): """Test that the known public API features continue to exist""" _check_public_api_contents(ogtu, [], self._UNPUBLISHED, only_expected_allowed=True) # noqa: PLW0212 _check_public_api_contents(ogtu.tests, [], [], only_expected_allowed=True) # noqa: PLW0212
936
Python
48.315787
108
0.65812
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_tutorial_extended_types.py
""" Tests for the omni.graph.tutorials.ExtendedTypes and omni.graph.tutorials.ExtendedTypesPy nodes """ import omni.graph.core as og import omni.graph.core.tests as ogts class TestTutorialExtendedTypes(ogts.OmniGraphTestCase): """Extended attribute type tests require multiple nodes, not supported in the .ogn test framework""" # ---------------------------------------------------------------------- async def _test_tutorial_extended_attributes_node(self, test_node_type_name: str): """Test basic operation of the tutorial node containing extended attributes""" # Set up a graph that creates full type resolution for simple and array types of the extended attribute node # # SimpleIn ----=> Extended1 ----=> SimpleOut # \ / \ / # X X # / \ / \ # ArrayIn ----=> Extended2 ----=> ArrayOut # keys = og.Controller.Keys (_, [simple_node, _, extended_node_1, extended_node_2, array_node, _], _, _) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("SimpleIn", "omni.graph.tutorials.SimpleData"), ("SimpleOut", "omni.graph.tutorials.SimpleData"), ("Extended1", test_node_type_name), ("Extended2", test_node_type_name), ("ArrayIn", "omni.graph.tutorials.ArrayData"), ("ArrayOut", "omni.graph.tutorials.ArrayData"), ], keys.CONNECT: [ ("SimpleIn.outputs:a_float", "Extended1.inputs:floatOrToken"), ("Extended1.outputs:doubledResult", "SimpleOut.inputs:a_float"), ("ArrayIn.outputs:result", "Extended1.inputs:toNegate"), ("Extended1.outputs:negatedResult", "ArrayOut.inputs:original"), ("SimpleIn.outputs:a_token", "Extended2.inputs:floatOrToken"), ("Extended2.outputs:doubledResult", "SimpleOut.inputs:a_token"), ("ArrayIn.outputs:negativeValues", "Extended2.inputs:toNegate"), ("Extended2.outputs:negatedResult", "ArrayOut.inputs:gates"), ], keys.SET_VALUES: [ ("SimpleIn.inputs:a_float", 5.0), ("SimpleIn.inputs:a_token", "hello"), ("ArrayIn.inputs:multiplier", 2.0), ("ArrayIn.inputs:original", [5.0, -6.0]), ("ArrayIn.inputs:gates", [False, True]), ], }, ) await og.Controller.evaluate() # Check that the inputs into the extended type nodes are correct self.assertEqual(6.0, og.Controller.get(("outputs:a_float", simple_node))) self.assertEqual("world", og.Controller.get(("outputs:a_token", simple_node))) self.assertCountEqual([5.0, -12.0], og.Controller.get(("outputs:result", array_node))) self.assertCountEqual([False, True], og.Controller.get(("outputs:negativeValues", array_node))) # Check the extended simple value outputs self.assertEqual(12.0, og.Controller.get(("outputs:doubledResult", extended_node_1))) self.assertEqual("worldworld", og.Controller.get(("outputs:doubledResult", extended_node_2))) # Check the extended array value outputs self.assertCountEqual([-5.0, 12.0], og.Controller.get(("outputs:negatedResult", extended_node_1))) self.assertCountEqual([True, False], og.Controller.get(("outputs:negatedResult", extended_node_2))) # ---------------------------------------------------------------------- async def test_tutorial_extended_attributes_node_cpp(self): """Test basic operation of the C++ tutorial node containing extended attributes.""" await self._test_tutorial_extended_attributes_node("omni.graph.tutorials.ExtendedTypes") # ---------------------------------------------------------------------- async def test_tutorial_extended_attributes_node_python(self): """Test basic operation of the Python tutorial node containing extended attributes.""" await self._test_tutorial_extended_attributes_node("omni.graph.tutorials.ExtendedTypesPy") # ---------------------------------------------------------------------- 
async def _test_tutorial_extended_attributes_tuples(self, test_node_type_name: str): """Test basic operation of the tutorial node containing extended attributes on its tuple-accepting attributes""" # Set up a graph that creates full type resolution for the tuple types of the extended attribute node with # two different resolved types. # keys = og.Controller.Keys ( _, [tuple_node, _, extended_node_1, extended_node_2, tuple_array_node, _, simple_node, _], _, _, ) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("TupleIn", "omni.tutorials.TupleData"), ("TupleOut", "omni.tutorials.TupleData"), ("Extended1", test_node_type_name), ("Extended2", test_node_type_name), ("TupleArrayIn", "omni.graph.tutorials.TupleArrays"), ("TupleArrayOut", "omni.graph.tutorials.TupleArrays"), ("SimpleIn", "omni.graph.tutorials.SimpleData"), ("SimpleOut", "omni.graph.tutorials.SimpleData"), ], keys.CONNECT: [ ("TupleIn.outputs:a_int2", "Extended1.inputs:tuple"), ("Extended1.outputs:tuple", "TupleOut.inputs:a_int2"), ("TupleArrayIn.inputs:a", "Extended1.inputs:flexible"), ("Extended1.outputs:flexible", "TupleArrayOut.inputs:a"), ("TupleIn.outputs:a_float3", "Extended2.inputs:tuple"), ("Extended2.outputs:tuple", "TupleOut.inputs:a_float3"), ("SimpleIn.outputs:a_token", "Extended2.inputs:flexible"), ("Extended2.outputs:flexible", "SimpleOut.inputs:a_token"), ], keys.SET_VALUES: [ ("TupleIn.inputs:a_int2", [4, 2]), ("TupleIn.inputs:a_float3", (4.0, 10.0, 2.0)), ("TupleArrayIn.inputs:a", [[2.0, 3.0, 7.0], [21.0, 14.0, 6.0]]), ("TupleArrayIn.inputs:b", [[21.0, 14.0, 6.0], [2.0, 3.0, 7.0]]), ("SimpleIn.inputs:a_token", "hello"), ], }, ) await og.Controller.evaluate() # Check that the inputs into the extended type nodes are correct self.assertEqual("world", og.Controller.get(("outputs:a_token", simple_node))) self.assertCountEqual([5, 3], og.Controller.get(("outputs:a_int2", tuple_node))) self.assertCountEqual([5.0, 11.0, 3.0], og.Controller.get(("outputs:a_float3", tuple_node))) self.assertCountEqual([126.0, 126.0], og.Controller.get(("outputs:result", tuple_array_node))) # Check the resulting values from resolving the "any" type to tuples self.assertCountEqual([-5, -3], og.Controller.get(("outputs:tuple", extended_node_1))) self.assertCountEqual([-5.0, -11.0, -3.0], og.Controller.get(("outputs:tuple", extended_node_2))) # Check the resulting values from resolving the flexible type as both of its types self.assertEqual("dlrow", og.Controller.get(("outputs:flexible", extended_node_2))) list_expected = [[-2.0, -3.0, -7.0], [-21.0, -14.0, -6.0]] list_computed = og.Controller.get(("outputs:flexible", extended_node_1)) for expected, computed in zip(list_expected, list_computed): self.assertCountEqual(expected, computed) # ---------------------------------------------------------------------- async def test_tutorial_extended_attributes_tuples_cpp(self): """Test basic operation of the C++ tutorial node containing extended attributes.""" await self._test_tutorial_extended_attributes_tuples("omni.graph.tutorials.ExtendedTypes") # ---------------------------------------------------------------------- async def test_tutorial_extended_attributes_tuples_python(self): """Test basic operation of the Python tutorial node containing extended attributes.""" await self._test_tutorial_extended_attributes_tuples("omni.graph.tutorials.ExtendedTypesPy")
8,633
Python
55.431372
120
0.554269
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_deletion.py
"""OmniGraph deletion tests""" import omni.graph.core as og import omni.graph.core.tests as ogts import omni.kit.commands import omni.kit.test import omni.usd class TestOmniGraphDeletion(ogts.OmniGraphTestCase): # In these tests, we test various aspects of deleting things from OG async def test_omnigraph_usd_deletion(self): # In this test we set up 2 very similar looking nodes: # /new_node and /new_node_01. We then delete /new_node # The idea is that this looks very similar to cases like # /parent/path/stuff/mynode where we delete /parent/path # In that case we want to delete mynode, but in our case # we do not want to delete /new_node_01 when /new_node is # deleted. keys = og.Controller.Keys (graph, [new_node, new_node_01], _, _) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("new_node", "omni.graph.tutorials.Empty"), ("new_node_01", "omni.graph.tutorials.Empty"), ] }, ) await og.Controller.evaluate() self.assertIsNotNone(new_node_01) self.assertTrue(new_node_01.is_valid()) self.assertIsNotNone(new_node) self.assertTrue(new_node.is_valid()) og.Controller.delete_node(new_node) new_node_01 = graph.get_node("/TestGraph/new_node_01") self.assertIsNotNone(new_node_01) self.assertTrue(new_node_01.is_valid()) new_node = graph.get_node("/TestGraph/new_node") self.assertTrue(not new_node.is_valid()) omni.kit.undo.undo() new_node_01 = graph.get_node("/TestGraph/new_node_01") self.assertIsNotNone(new_node_01) self.assertTrue(new_node_01.is_valid()) new_node = graph.get_node("/TestGraph/new_node") self.assertIsNotNone(new_node) self.assertTrue(new_node.is_valid()) # -------------------------------------------------------------------------------------------------------------- async def test_fabric_dangling_connections(self): # In this test we create two nodes, connect them together. The output of the first node drives the input # of the second node. When we break the connection, we need to verify that the output of the second node # now takes its input from its own value, rather than the connected value (ie. the connection is actually # broken in the fabric). keys = og.Controller.Keys (graph, [node_a, node_b], _, _) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("node_a", "omni.graph.tutorials.SimpleData"), ("node_b", "omni.graph.tutorials.SimpleData"), ] }, ) await og.Controller.evaluate() node_a = graph.get_node("/TestGraph/node_a") self.assertIsNotNone(node_a) self.assertTrue(node_a.is_valid()) node_b = graph.get_node("/TestGraph/node_b") self.assertIsNotNone(node_b) self.assertTrue(node_b.is_valid()) upstream_attr = node_a.get_attribute("outputs:a_int") downstream_attr = node_b.get_attribute("inputs:a_int") og.Controller.connect(upstream_attr, downstream_attr) await og.Controller.evaluate() # This node "omni.graph.tutorials.SimpleData" add 1 to the input. The default value of a_int is 0, so the output # of the first node is 1. When this is used as the input to the second node, we expect the value to be 2: value = og.Controller.get(node_b.get_attribute("outputs:a_int")) self.assertEqual(value, 2) og.Controller.disconnect(upstream_attr, downstream_attr) await og.Controller.evaluate() # Now that the connection is broken, the value should now be 1. However, if it didn't actually break the # connection in the fabric, the graph would still "think" it's connected and output 2. value = og.Controller.get(node_b.get_attribute("outputs:a_int")) self.assertEqual(value, 1)
4,140
Python
42.135416
120
0.602657
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_tutorial_state.py
""" Tests for the omnigraph.tutorial.state node """ import omni.graph.core as og import omni.graph.core.tests as ogts class TestTutorialState(ogts.OmniGraphTestCase): """.ogn tests only run once while state tests require multiple evaluations, handled here""" # ---------------------------------------------------------------------- async def test_tutorial_state_node(self): """Test basic operation of the tutorial node containing internal node state""" self.assertTrue(og.get_node_type("omni.graph.tutorials.State").has_state(), "Tutorial state node has state") # Create a set of state nodes, updating after each one so that the state information is properly initialized. # If that requirement weren't there this would just be done in one call. state_nodes = [] for index in range(5): (graph, new_nodes, _, _) = og.Controller.edit( "/StateGraph", { og.Controller.Keys.CREATE_NODES: [ (f"State{index}", "omni.graph.tutorials.State"), ] }, ) state_nodes.append(new_nodes[0]) await og.Controller.evaluate(graph) output_attrs = [state_node.get_attribute("outputs:monotonic") for state_node in state_nodes] output_values = [og.Controller(output_attr).get() for output_attr in output_attrs] for i in range(len(output_values) - 1): self.assertLess(output_values[i], output_values[i + 1], "Comparing state of other nodes") await og.Controller.evaluate(graph) new_values = [og.Controller(output_attr).get() for output_attr in output_attrs] for i in range(len(new_values) - 1): self.assertLess(new_values[i], new_values[i + 1], "Comparing second state of other nodes") for i, new_value in enumerate(new_values): self.assertLess(output_values[i], new_value, "Comparing node state updates")
1,998
Python
45.488371
117
0.605105
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_tutorial_state_attributes_py.py
""" Tests for the omnigraph.tutorial.stateAttributesPy node """ import omni.graph.core as og import omni.graph.core.tests as ogts class TestTutorialStateAttributesPy(ogts.OmniGraphTestCase): """.ogn tests only run once while state tests require multiple evaluations, handled here""" # ---------------------------------------------------------------------- async def test_tutorial_state_attributes_py_node(self): """Test basic operation of the Python tutorial node containing state attributes""" keys = og.Controller.Keys (_, [state_node], _, _) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: ("StateNode", "omni.graph.tutorials.StateAttributesPy"), keys.SET_VALUES: ("StateNode.state:reset", True), }, ) await og.Controller.evaluate() reset_attr = og.Controller.attribute("state:reset", state_node) monotonic_attr = og.Controller.attribute("state:monotonic", state_node) self.assertEqual(og.Controller.get(reset_attr), False, "Reset attribute set back to False") self.assertEqual(og.Controller.get(monotonic_attr), 0, "Monotonic attribute reset to start") await og.Controller.evaluate() self.assertEqual(og.Controller.get(reset_attr), False, "Reset attribute still False") self.assertEqual(og.Controller.get(monotonic_attr), 1, "Monotonic attribute incremented once") await og.Controller.evaluate() self.assertEqual(og.Controller.get(monotonic_attr), 2, "Monotonic attribute incremented twice") og.Controller.set(reset_attr, True) await og.Controller.evaluate() self.assertEqual(og.Controller.get(reset_attr), False, "Reset again set back to False") self.assertEqual(og.Controller.get(monotonic_attr), 0, "Monotonic attribute again reset to start")
1,890
Python
46.274999
106
0.655026
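The sequencing this test checks (setting state:reset forces state:monotonic back to zero and clears itself; every other evaluation increments) implies a compute along the lines of the sketch below. This is a hedged reconstruction from the test's assertions, not the actual OgnTutorialStateAttributesPy source.

class OgnTutorialStateAttributesPySketch:
    @staticmethod
    def compute(db) -> bool:
        # Unlike outputs, state attributes persist between evaluations
        if db.state.reset:
            db.state.monotonic = 0
            db.state.reset = False
        else:
            db.state.monotonic = db.state.monotonic + 1
        return True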
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_bundles.py
""" Tests for attribute bundles """ import unittest from contextlib import suppress from typing import Any, List, Tuple import omni.graph.core as og import omni.graph.core.tests as ogts import omni.graph.nodes.tests as ognts import omni.graph.tools as ogt from omni.kit.test.teamcity import is_running_in_teamcity from pxr import Gf # ============================================================================================================== def multiply_elements(multiplier, original: Any) -> Any: """Take in a numeric value, tuple, list, tuple of tuples, etc. and multiply every element by a constant""" if isinstance(original, tuple): return tuple(multiply_elements(multiplier, element) for element in original) if isinstance(original, list): return tuple(multiply_elements(multiplier, element) for element in original) return multiplier * original # ============================================================================================================== def as_tuple(value: Any) -> Tuple: """Return the value, interpreted as a tuple (except simple values, which are returned as themselves)""" if isinstance(value, tuple): return value if isinstance(value, list): return tuple(value) if isinstance(value, (Gf.Quatd, Gf.Quatf, Gf.Quath)): return (*value.GetImaginary(), value.GetReal()) if isinstance(value, Gf.Matrix4d): return tuple(tuple(element for element in list(row)) for row in value) with suppress(TypeError): if len(value) > 1: return tuple(value) return value # ============================================================================================================== class TestOmniGraphBundles(ogts.OmniGraphTestCase): """Attribute bundles do not yet have the ability to log tests directly in a .ogn file so it's done here""" # -------------------------------------------------------------------------------------------------------------- def _compare_results(self, expected_raw: Any, actual_raw: Any, test_info: str): """Loose comparison of two types of compatible values Args: expected_raw: Expected results. Can be simple values, tuples, lists, or pxr.Gf types actual_raw: Actual results. 
Can be simple values, tuples, lists, or pxr.Gf types test_info: String to accompany error messages Returns: Error encountered when mismatched values found, None if everything matched """ expected = as_tuple(expected_raw) actual = as_tuple(actual_raw) self.assertEqual( type(expected), type(actual), f"{test_info} Mismatched types - expected {expected_raw}, got {actual_raw}" ) if isinstance(expected, tuple): for expected_element, actual_element in zip(expected, actual): self._compare_results(expected_element, actual_element, test_info) else: self.assertEqual(expected, actual, f"{test_info} Expected {expected}, got {actual}") # ---------------------------------------------------------------------- async def _test_bundle_contents(self, node_type_to_test: str): """Test access to bundle attribute manipulation for C++ and Python implementations""" keys = og.Controller.Keys (graph, [bundle_node, inspector_node, _, _], [_, filtered_prim], _) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("BundleManipulator", node_type_to_test), ("Inspector", "omni.graph.nodes.BundleInspector"), ], keys.CREATE_PRIMS: [ ("FullPrim", {"fullInt": ("int", 2), "floatArray": ("float[]", [3.0, 4.0, 5.0])}), ( "FilteredPrim", { "int_1": ("int", 6), "uint_1": ("uint", 7), "int64_x": ("int64", 8), "uint64_1": ("uint64", 9), "int_3": ("int[3]", (10, 11, 12)), "int_array": ("int[]", [13, 14, 15]), "float_1": ("float", 3.0), "double_x": ("double", 4.0), "big_int": ("int[]", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]), }, ), ], keys.EXPOSE_PRIMS: [ (og.Controller.PrimExposureType.AS_BUNDLE, "/FullPrim", "FullPrimExtract"), (og.Controller.PrimExposureType.AS_BUNDLE, "/FilteredPrim", "FilteredPrimExtract"), ], keys.CONNECT: [ ("FullPrimExtract.outputs_primBundle", "BundleManipulator.inputs:fullBundle"), ("FilteredPrimExtract.outputs_primBundle", "BundleManipulator.inputs:filteredBundle"), ("BundleManipulator.outputs_combinedBundle", "Inspector.inputs:bundle"), ], }, ) _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node) await og.Controller.evaluate() expected_results = { "big_int": ("int", 1, 1, "none", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]), "int_1": ("int", 1, 0, "none", 6), "uint_1": ("uint", 1, 0, "none", 7), "int64_x": ("int64", 1, 0, "none", 8), "uint64_1": ("uint64", 1, 0, "none", 9), "int_3": ("int", 3, 0, "none", (10, 11, 12)), "int_array": ("int", 1, 1, "none", [13, 14, 15]), "float_1": ("float", 1, 0, "none", 3.0), "double_x": ("double", 1, 0, "none", 4.0), "fullInt": ("int", 1, 0, "none", 2), "floatArray": ("float", 1, 1, "none", [3.0, 4.0, 5.0]), "sourcePrimPath": ("token", 1, 0, "none", str(filtered_prim.GetPrimPath())), "sourcePrimType": ("token", 1, 0, "none", str(filtered_prim.GetTypeName())), } def __result_subset(elements_removed: List[str]): """Return the set of results with the index list of elements removed""" return {key: value for key, value in expected_results.items() if key not in elements_removed} # Test data consists of a list of individual test configurations with the filters to set and the # expected contents of the output bundle using those filters. 
test_data = [ ([], expected_results), (["x"], __result_subset(["int64_x", "double_x"])), (["int"], __result_subset(["big_int", "int_1", "uint_1", "int64_x", "uint64_1", "int_3", "int_array"])), (["big"], __result_subset(["big_int"])), ( ["x", "big", "int"], __result_subset( ["big_int", "int_1", "uint_1", "int64_x", "uint64_1", "int_3", "int_array", "double_x"] ), ), ] for (filters, expected_results) in test_data: og.Controller.edit( graph, { keys.SET_VALUES: (("inputs:filters", bundle_node), filters), }, ) await og.Controller.evaluate() try: ognts.verify_bundles_are_equal( ognts.filter_bundle_inspector_results( ognts.bundle_inspector_results(inspector_node), [], filter_for_inclusion=False ), ognts.filter_bundle_inspector_results( (len(expected_results), expected_results), [], filter_for_inclusion=False ), ) except ValueError as error: self.assertTrue(False, error) # ---------------------------------------------------------------------- async def test_bundle_contents_cpp(self): """Test access to bundle attribute manipulation on the C++ implemented node""" await self._test_bundle_contents("omni.graph.tutorials.BundleManipulation") # ---------------------------------------------------------------------- async def test_bundle_contents_py(self): """Test bundle attribute manipulation on the Python implemented node""" await self._test_bundle_contents("omni.graph.tutorials.BundleManipulationPy") # ---------------------------------------------------------------------- async def _test_simple_bundled_data(self, node_type_to_test: str): """Test access to attributes with simple data types within bundles for both C++ and Python implementations""" controller = og.Controller() keys = og.Controller.Keys prim_definition = ognts.prim_with_everything_definition() (_, [_, inspector_node, _], _, _) = controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("BundledDataModifier", node_type_to_test), ("Inspector", "omni.graph.nodes.BundleInspector"), ], keys.CREATE_PRIMS: ("TestPrim", prim_definition), keys.EXPOSE_PRIMS: (og.Controller.PrimExposureType.AS_BUNDLE, "TestPrim", "TestBundle"), keys.CONNECT: [ ("TestBundle.outputs_primBundle", "BundledDataModifier.inputs:bundle"), ("BundledDataModifier.outputs_bundle", "Inspector.inputs:bundle"), ], }, ) _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node) await controller.evaluate() (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node) # Prim will not have the "sourcePrimXX" attributes so it is smaller by 2 self.assertEqual(len(prim_definition) + 2, bundled_count) for name, (ogn_type, array_depth, tuple_count, role, actual_output) in bundled_results.items(): if name in ["sourcePrimType", "sourcePrimPath"]: continue if not node_type_to_test.endswith("Py") and ogn_type not in ["int", "int64", "uint", "uint64"]: # The C++ node only handles integer types, in the interest of brevity continue test_info = "Bundled" if tuple_count > 1: test_info += f" tuple[{tuple_count}]" if array_depth > 0: test_info += " array" test_info += f" attribute {name} of type {ogn_type}" if role != "none": test_info += f" (role {role})" if ogn_type == "token" or role == "text": expected_output = prim_definition[name][1] if isinstance(expected_output, str): expected_output += expected_output else: expected_output = [f"{element}{element}" for element in expected_output] self._compare_results(expected_output, actual_output, test_info) else: if ogn_type in ["bool", "bool[]"]: expected_output = as_tuple(prim_definition[name][1]) else: expected_output = 
multiply_elements(2, as_tuple(prim_definition[name][1])) self._compare_results(expected_output, actual_output, test_info) # ---------------------------------------------------------------------- async def test_simple_bundled_data_cpp(self): """Test access to attributes with simple data types within bundles on the C++ implemented node""" await self._test_simple_bundled_data("omni.graph.tutorials.BundleData") # ---------------------------------------------------------------------- async def test_simple_bundled_data_py(self): """Test access to attributes with simple data types within bundles on the Python implemented node""" await self._test_simple_bundled_data("omni.graph.tutorials.BundleDataPy") # ---------------------------------------------------------------------- async def _test_add_attributes_to_bundle(self, node_type_to_test: str): """Test basic operation of the tutorial node that adds new attributes to a bundle""" keys = og.Controller.Keys (graph, [add_node, inspector_node], _, _) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("AddAttributes", node_type_to_test), ("Inspector", "omni.graph.nodes.BundleInspector"), ], keys.CONNECT: ("AddAttributes.outputs_bundle", "Inspector.inputs:bundle"), }, ) await og.Controller.evaluate() types_attribute = og.Controller.attribute("inputs:typesToAdd", add_node) added_names_attribute = og.Controller.attribute("inputs:addedAttributeNames", add_node) removed_names_attribute = og.Controller.attribute("inputs:removedAttributeNames", add_node) _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node) # List of test data configurations to run. The individual test entries consist of: # - list of the types for the attributes to be added # - list of the base types corresponding to the main types # - list of names for the attributes to be added # - list of values expected for the new bundle as (count, roles, arrayDepths, tupleCounts, values) test_data = [ [ ["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"], ["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"], [f"output{n}" for n in range(9)], (9, ["none"] * 9, [0] * 9, [1] * 9, [0.0, 0.0, 0, 0, 0, 0, 0, False, ""]), ], [ ["float[]", "double[]", "int[]", "int64[]", "uchar[]", "uint[]", "uint64[]", "token[]"], ["float", "double", "int", "int64", "uchar", "uint", "uint64", "token"], [f"output{n}" for n in range(8)], (8, ["none"] * 8, [1] * 8, [1] * 8, [[], [], [], [], [], [], [], []]), ], [ ["float[3]", "double[2]", "int[4]"], ["float", "double", "int"], [f"output{n}" for n in range(3)], (3, ["none"] * 3, [0] * 3, [3, 2, 4], [[0.0, 0.0, 0.0], [0.0, 0.0], [0, 0, 0, 0]]), ], [ ["float[3][]", "double[2][]", "int[4][]"], ["float", "double", "int"], [f"output{n}" for n in range(3)], (3, ["none"] * 3, [1] * 3, [3, 2, 4], [[], [], []]), ], [ ["any"], ["token"], ["output0"], (1, ["none"], [0], [1], [""]), ], [ ["colord[3]", "colorf[4]", "colorh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["color"] * 3, [0] * 3, [3, 4, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["normald[3]", "normalf[3]", "normalh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["normal"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["pointd[3]", "pointf[3]", "pointh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["point"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["quatd[4]", "quatf[4]", 
"quath[4]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], ( 3, ["quat"] * 3, [0] * 3, [4, 4, 4], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], ), ], [ ["texcoordd[3]", "texcoordf[2]", "texcoordh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["texcoord"] * 3, [0] * 3, [3, 2, 3], [[0.0, 0.0, 0.0], [0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["vectord[3]", "vectorf[3]", "vectorh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["vector"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["timecode"], ["double"], ["output0"], (1, ["timecode"], [0], [1], [0.0]), ], ] for (types, base_types, names, results) in test_data: og.Controller.edit( graph, { keys.SET_VALUES: [ (types_attribute, types), (added_names_attribute, names), ] }, ) await og.Controller.evaluate() (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node) self.assertEqual(bundled_count, results[0]) self.assertCountEqual(list(bundled_results.keys()), names) for index, name in enumerate(names): error = f"Checking {{}} for attribute {name}" named_results = bundled_results[name] self.assertEqual( named_results[ognts.BundleResultKeys.TYPE_IDX], base_types[index], error.format("type") ) self.assertEqual( named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX], results[2][index], error.format("array depth"), ) self.assertEqual( named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX], results[3][index], error.format("tuple count"), ) self.assertEqual( named_results[ognts.BundleResultKeys.ROLE_IDX], results[1][index], error.format("role") ) # One final test run that also includes a remove of the 2nd, 4th, and 7th attributes in the first test list (types, base_types, names, results) = test_data[0] og.Controller.edit( graph, { keys.SET_VALUES: [ (types_attribute, types), (added_names_attribute, names), (removed_names_attribute, [names[2], names[4], names[7]]), ] }, ) await og.Controller.evaluate() def pruned_list(original: List) -> List: """Remove the elements 2, 4, 7 from the list and return the result""" return [item for index, item in enumerate(original) if index not in [2, 4, 7]] (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node) # Modify the expected results to account for the removed attributes self.assertEqual(bundled_count, results[0] - 3) names_expected = pruned_list(names) base_types_expected = pruned_list(base_types) array_depths_expected = pruned_list(results[2]) tuple_counts_expected = pruned_list(results[3]) roles_expected = pruned_list(results[1]) self.assertCountEqual(list(bundled_results.keys()), names_expected) for index, name in enumerate(names_expected): error = f"Checking {{}} for attribute {name}" named_results = bundled_results[name] self.assertEqual( named_results[ognts.BundleResultKeys.TYPE_IDX], base_types_expected[index], error.format("type") ) self.assertEqual( named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX], array_depths_expected[index], error.format("array depth"), ) self.assertEqual( named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX], tuple_counts_expected[index], error.format("tuple count"), ) self.assertEqual( named_results[ognts.BundleResultKeys.ROLE_IDX], roles_expected[index], error.format("role") ) # ---------------------------------------------------------------------- async def test_add_attributes_to_bundle_cpp(self): """Test adding attributes to bundles on the C++ implemented node""" await 
self._test_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributes") # ---------------------------------------------------------------------- async def test_add_attributes_to_bundle_py(self): """Test adding attributes to bundles on the Python implemented node""" await self._test_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributesPy") # ---------------------------------------------------------------------- async def _test_batched_add_attributes_to_bundle(self, node_type_to_test: str): """Test basic operation of the tutorial node that adds new attributes to a bundle""" keys = og.Controller.Keys (graph, [add_node, inspector_node], _, _) = og.Controller.edit( "/TestGraph", { keys.CREATE_NODES: [ ("AddAttributes", node_type_to_test), ("Inspector", "omni.graph.nodes.BundleInspector"), ], keys.CONNECT: ("AddAttributes.outputs_bundle", "Inspector.inputs:bundle"), }, ) await og.Controller.evaluate() types_attribute = og.Controller.attribute("inputs:typesToAdd", add_node) added_names_attribute = og.Controller.attribute("inputs:addedAttributeNames", add_node) removed_names_attribute = og.Controller.attribute("inputs:removedAttributeNames", add_node) batched_api_attribute = og.Controller.attribute("inputs:useBatchedAPI", add_node) _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node) # List of test data configurations to run. The individual test entries consist of: # - list of the types for the attributes to be added # - list of the base types corresponding to the main types # - list of names for the attributes to be added # - list of values expected for the new bundle as (count, roles, arrayDepths, tupleCounts, values) test_data = [ [ ["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"], ["float", "double", "int", "int64", "uchar", "uint", "uint64", "bool", "token"], [f"output{n}" for n in range(9)], (9, ["none"] * 9, [0] * 9, [1] * 9, [0.0, 0.0, 0, 0, 0, 0, 0, False, ""]), ], [ ["float[]", "double[]", "int[]", "int64[]", "uchar[]", "uint[]", "uint64[]", "token[]"], ["float", "double", "int", "int64", "uchar", "uint", "uint64", "token"], [f"output{n}" for n in range(8)], (8, ["none"] * 8, [1] * 8, [1] * 8, [[], [], [], [], [], [], [], []]), ], [ ["float[3]", "double[2]", "int[4]"], ["float", "double", "int"], [f"output{n}" for n in range(3)], (3, ["none"] * 3, [0] * 3, [3, 2, 4], [[0.0, 0.0, 0.0], [0.0, 0.0], [0, 0, 0, 0]]), ], [ ["float[3][]", "double[2][]", "int[4][]"], ["float", "double", "int"], [f"output{n}" for n in range(3)], (3, ["none"] * 3, [1] * 3, [3, 2, 4], [[], [], []]), ], [ ["any"], ["token"], ["output0"], (1, ["none"], [0], [1], [""]), ], [ ["colord[3]", "colorf[4]", "colorh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["color"] * 3, [0] * 3, [3, 4, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["normald[3]", "normalf[3]", "normalh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["normal"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["pointd[3]", "pointf[3]", "pointh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["point"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["quatd[4]", "quatf[4]", "quath[4]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], ( 3, ["quat"] * 3, [0] * 3, [4, 4, 4], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], ), ], [ ["texcoordd[3]", "texcoordf[2]", "texcoordh[3]"], ["double", "float", 
"half"], [f"output{n}" for n in range(3)], (3, ["texcoord"] * 3, [0] * 3, [3, 2, 3], [[0.0, 0.0, 0.0], [0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["vectord[3]", "vectorf[3]", "vectorh[3]"], ["double", "float", "half"], [f"output{n}" for n in range(3)], (3, ["vector"] * 3, [0] * 3, [3, 3, 3], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), ], [ ["timecode"], ["double"], ["output0"], (1, ["timecode"], [0], [1], [0.0]), ], ] for (types, base_types, names, results) in test_data: og.Controller.edit( graph, { keys.SET_VALUES: [ (types_attribute, types), (added_names_attribute, names), (batched_api_attribute, True), ] }, ) await og.Controller.evaluate() (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node) self.assertEqual(bundled_count, results[0]) self.assertCountEqual(list(bundled_results.keys()), names) for index, name in enumerate(names): error = f"Checking {{}} for attribute {name}" named_results = bundled_results[name] self.assertEqual( named_results[ognts.BundleResultKeys.TYPE_IDX], base_types[index], error.format("type") ) self.assertEqual( named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX], results[2][index], error.format("array depth"), ) self.assertEqual( named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX], results[3][index], error.format("tuple count"), ) self.assertEqual( named_results[ognts.BundleResultKeys.ROLE_IDX], results[1][index], error.format("role") ) # One final test run that also includes a remove of the 2nd, 4th, and 7th attributes in the first test list (types, base_types, names, results) = test_data[0] og.Controller.edit( graph, { keys.SET_VALUES: [ (types_attribute, types), (added_names_attribute, names), (removed_names_attribute, [names[2], names[4], names[7]]), ] }, ) await og.Controller.evaluate() def pruned_list(original: List) -> List: """Remove the elements 2, 4, 7 from the list and return the result""" return [item for index, item in enumerate(original) if index not in [2, 4, 7]] await og.Controller.evaluate() (bundled_count, bundled_results) = ognts.bundle_inspector_results(inspector_node) self.assertEqual(bundled_count, results[0] - 3) # Modify the expected results to account for the removed attributes names_expected = pruned_list(names) base_types_expected = pruned_list(base_types) array_depths_expected = pruned_list(results[2]) tuple_counts_expected = pruned_list(results[3]) roles_expected = pruned_list(results[1]) self.assertCountEqual(list(bundled_results.keys()), names_expected) for index, name in enumerate(names_expected): error = f"Checking {{}} for attribute {name}" named_results = bundled_results[name] self.assertEqual( named_results[ognts.BundleResultKeys.TYPE_IDX], base_types_expected[index], error.format("type") ) self.assertEqual( named_results[ognts.BundleResultKeys.ARRAY_DEPTH_IDX], array_depths_expected[index], error.format("array depth"), ) self.assertEqual( named_results[ognts.BundleResultKeys.TUPLE_COUNT_IDX], tuple_counts_expected[index], error.format("tuple count"), ) self.assertEqual( named_results[ognts.BundleResultKeys.ROLE_IDX], roles_expected[index], error.format("role") ) # ---------------------------------------------------------------------- async def test_batched_add_attributes_to_bundle_cpp(self): """Test adding attributes to bundles on the C++ implemented node""" await self._test_batched_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributes") # ---------------------------------------------------------------------- async def test_batched_add_attributes_to_bundle_py(self): """Test 
adding attributes to bundles on the Python implemented node""" await self._test_batched_add_attributes_to_bundle("omni.graph.tutorials.BundleAddAttributesPy") # ---------------------------------------------------------------------- async def _test_bundle_gpu(self, node_type_to_test: str): """Test basic operation of the tutorial node that accesses bundle data on the GPU""" controller = og.Controller() keys = og.Controller.Keys (graph, (bundle_node, inspector_node), _, _) = controller.edit( "/TestGraph", { keys.CREATE_PRIMS: [ ("Prim1", {"points": ("pointf[3][]", [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])}), ("Prim2", {"points": ("pointf[3][]", [[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])}), ], keys.CREATE_NODES: [ ("GpuBundleNode", node_type_to_test), ("Inspector", "omni.graph.nodes.BundleInspector"), ], }, ) controller.edit( graph, { keys.EXPOSE_PRIMS: [ (og.GraphController.PrimExposureType.AS_BUNDLE, "Prim1", "Prim1Extract"), (og.GraphController.PrimExposureType.AS_BUNDLE, "Prim2", "Prim2Extract"), ], keys.CONNECT: [ ("Prim1Extract.outputs_primBundle", "GpuBundleNode.inputs:cpuBundle"), ("Prim2Extract.outputs_primBundle", "GpuBundleNode.inputs:gpuBundle"), ("GpuBundleNode.outputs_cpuGpuBundle", "Inspector.inputs:bundle"), ], }, ) _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node) await controller.evaluate(graph) for gpu in [False, True]: controller.attribute("inputs:gpu", bundle_node, graph).set(gpu) await controller.evaluate() (_, bundled_values) = ognts.bundle_inspector_results(inspector_node) self.assertEqual(bundled_values["dotProducts"][ognts.BundleResultKeys.VALUE_IDX], [32.0, 122.0]) # ---------------------------------------------------------------------- async def test_bundle_gpu_cpp(self): """Test access to bundle contents on GPU on the C++ implemented node""" await self._test_bundle_gpu("omni.graph.tutorials.CpuGpuBundles") # ---------------------------------------------------------------------- async def test_bundle_gpu_py(self): """Test bundle contents on GPU on the Python implemented node""" await self._test_bundle_gpu("omni.graph.tutorials.CpuGpuBundlesPy") # ---------------------------------------------------------------------- @unittest.skipIf(is_running_in_teamcity(), "This is a manual test so it only needs to run locally") async def test_cuda_pointers_py(self): """Run a simple test on a node that extracts CUDA pointers from the GPU. Inspect the output for information""" controller = og.Controller() keys = og.Controller.Keys (graph, (_, inspector_node), _, _) = controller.edit( "/TestGraph", { keys.CREATE_PRIMS: [ ("Prim1", {"points": ("float[3][]", [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])}), ], keys.CREATE_NODES: [ ("GpuBundleNode", "omni.graph.tutorials.CudaCpuArraysPy"), ("Inspector", "omni.graph.nodes.BundleInspector"), ], }, ) controller.edit( graph, { keys.EXPOSE_PRIMS: [ (og.GraphController.PrimExposureType.AS_ATTRIBUTES, "Prim1", "Prim1Extract"), ], }, ) _ = ogt.OGN_DEBUG and ognts.enable_debugging(inspector_node) await controller.evaluate(graph) controller.edit( graph, { keys.CONNECT: [ ("Prim1Extract.outputs:points", "GpuBundleNode.inputs:points"), ("GpuBundleNode.outputs_outBundle", "Inspector.inputs:bundle"), ], }, )
34,927
Python
45.632844
118
0.474361
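The two recursive module helpers at the top of the file above are easiest to understand with concrete values; a quick illustrative check (the specific inputs are made up for demonstration):

# multiply_elements recurses through nested tuples/lists, returning tuples throughout
assert multiply_elements(2, [1, (2, 3)]) == (2, (4, 6))
# as_tuple leaves simple values alone but converts lists (and pxr.Gf types) to tuples
assert as_tuple([1.0, 2.0]) == (1.0, 2.0)
assert as_tuple(5) == 5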
omniverse-code/kit/exts/omni.graph.tutorials/omni/graph/tutorials/tests/test_omnigraph_metadata.py
"""Basic tests of the metadata bindings found in OmniGraphBindingsPython.cpp""" import omni.graph.core as og import omni.graph.core.tests as ogts import omni.graph.tools.ogn as ogn # ====================================================================== class TestOmniGraphMetadata(ogts.OmniGraphTestCase): """Encapsulate simple tests that exercise metadata access""" # ---------------------------------------------------------------------- def __node_type_metadata_test(self, node_type: og.NodeType): """Test a node type object for metadata access""" original_count = node_type.get_metadata_count() key = "_node_type_metadata" value = "_test_value" node_type.set_metadata(key, value) # The new metadata key should now have the new value self.assertEqual(value, node_type.get_metadata(key)) new_metadata = node_type.get_all_metadata() # The new key/value pair should now be part of the entire metadata listt self.assertTrue(key in new_metadata) self.assertEqual(value, new_metadata[key]) # Since a unique key was chosen there should be one extra metadata value self.assertEqual(original_count + 1, node_type.get_metadata_count()) # Setting a value to None should remove the metadata from the node type. # Do this last so that the test can run multiple times successfully. node_type.set_metadata(key, None) self.assertEqual(original_count, node_type.get_metadata_count()) # Test silent success of trying to set the metadata to illegal data node_type.set_metadata(None, value) # Verify the hardcoded metadata type names self.assertTrue(ogn.MetadataKeys.UI_NAME in new_metadata) self.assertTrue(ogn.MetadataKeys.TAGS in new_metadata) self.assertEqual("Tutorial Node: Tuple Attributes", new_metadata[ogn.MetadataKeys.UI_NAME]) self.assertEqual("tuple,tutorial,internal", new_metadata[ogn.MetadataKeys.TAGS]) # ---------------------------------------------------------------------- async def test_node_type_metadata(self): """Test metadata features in the NodeType class""" # The TupleData tutorial node happens to have special metadata types defined node_type = og.get_node_type("omni.tutorials.TupleData") self.assertIsNotNone(node_type, "Empty node type to be used for test was not registered") self.__node_type_metadata_test(node_type) # ---------------------------------------------------------------------- async def test_node_metadata(self): """Test metadata access through the node class""" # The TupleData tutorial node happens to have special metadata types defined (_, [test_node], _, _) = og.Controller.edit( "/TestGraph", { og.Controller.Keys.CREATE_NODES: ("TupleNode", "omni.tutorials.TupleData"), }, ) await og.Controller.evaluate() self.__node_type_metadata_test(test_node.get_node_type()) # ---------------------------------------------------------------------- async def test_attribute_metadata(self): """Test metadata features in the NodeType class""" # Any node type will do for metadata tests so use a simple one (_, [test_node], _, _) = og.Controller.edit( "/TestGraph", { og.Controller.Keys.CREATE_NODES: ("SimpleNode", "omni.graph.tutorials.SimpleData"), }, ) await og.Controller.evaluate() self.assertIsNotNone(test_node, "Simple data node type to be used for test was not registered") test_attribute = test_node.get_attribute("inputs:a_bool") self.assertIsNotNone(test_attribute, "Boolean input on simple data node type not found") original_count = test_attribute.get_metadata_count() key = "_test_attribute_metadata" value = "_test_value" test_attribute.set_metadata(key, value) # The new metadata key should now have the new value 
self.assertEqual(value, test_attribute.get_metadata(key)) new_metadata = test_attribute.get_all_metadata() # The new key/value pair should now be part of the entire metadata listt self.assertTrue(key in new_metadata) self.assertEqual(value, new_metadata[key]) # Since a unique key was chosen there should be one extra metadata value self.assertEqual(original_count + 1, test_attribute.get_metadata_count()) # Setting a value to None should remove the metadata from the node type. # Do this last so that the test can run multiple times successfully. test_attribute.set_metadata(key, None) self.assertEqual(original_count, test_attribute.get_metadata_count()) # Test silent success of trying to set the metadata to illegal data test_attribute.set_metadata(None, value) # ObjectId types get special metadata hardcoded for attribute_name in ["inputs:a_objectId", "outputs:a_objectId"]: object_id_attribute = test_node.get_attribute(attribute_name) self.assertIsNotNone(object_id_attribute, f"ObjectId {attribute_name} on simple data node type not found") object_id_metadata = object_id_attribute.get_all_metadata() self.assertTrue(ogn.MetadataKeys.OBJECT_ID in object_id_metadata) # Check on the constant attribute constant_attribute = test_node.get_attribute("inputs:a_constant_input") self.assertEqual("1", constant_attribute.get_metadata(ogn.MetadataKeys.OUTPUT_ONLY))
5,658
Python
49.079646
118
0.627253
omniverse-code/kit/exts/omni.graph.tutorials/docs/CHANGELOG.md
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [1.3.3] - 2022-08-31
### Fixed
- Refactored out use of a deprecated class

## [1.3.2] - 2022-08-30
### Fixed
- Doc error referencing a Python node in a C++ node docs

## [1.3.1] - 2022-08-09
### Fixed
- Applied formatting to all of the Python files

## [1.3.0] - 2022-07-07
### Changed
- Refactored imports from omni.graph.tools to get the new locations
### Added
- Test for public API consistency

## [1.2.0] - 2022-05-06
### Changed
- Updated the CPU to GPU test to show the actual pointer contents

## [1.1.3] - 2022-04-29
### Changed
- Made tests derive from OmniGraphTestCase

## [1.1.2] - 2022-03-07
### Changed
- Substituted old tutorial 1 node icons with new version

## [1.1.1] - 2022-03-01
### Fixed
- Made bundle tests use the Controller

## [1.0.0] - 2021-03-01
### Initial Version
- Started changelog with initial released version of the OmniGraph core
1,138
Markdown
23.760869
87
0.692443
omniverse-code/kit/exts/omni.graph.tutorials/docs/README.md
# OmniGraph Tutorials [omni.graph.tutorials]

Sets of nodes that provide tutorials on the construction of OmniGraph nodes, with an emphasis on the use of the
OmniGraph node description files (*.ogn).
200
Markdown
39.199992
107
0.795
omniverse-code/kit/exts/omni.graph.tutorials/docs/index.rst
.. _ogn_tutorial_nodes:

OmniGraph Walkthrough Tutorials
###############################

.. tabularcolumns:: |L|R|

.. csv-table::
    :width: 100%

    **Extension**: omni.graph.tutorials,**Documentation Generated**: |today|

.. toctree::
    :maxdepth: 1

    CHANGELOG

In the source tree are several tutorials. Enclosed in these tutorial files are curated nodes that implement a
representative portion of the available features in an OmniGraph Node interface. By working through these tutorials
you can gradually build up a knowledge of the concepts used to effectively write OmniGraph Nodes.

.. note::

    In the tutorial files irrelevant details such as the copyright notice are omitted for clarity

.. toctree::
    :maxdepth: 1
    :glob:

    ../tutorials/conversionTutorial/*
    ../tutorials/extensionTutorial/*
    ../tutorials/tutorial1/*
    ../tutorials/tutorial2/*
    ../tutorials/tutorial3/*
    ../tutorials/tutorial4/*
    ../tutorials/tutorial5/*
    ../tutorials/tutorial6/*
    ../tutorials/tutorial7/*
    ../tutorials/tutorial8/*
    ../tutorials/tutorial9/*
    ../tutorials/tutorial10/*
    ../tutorials/tutorial11/*
    ../tutorials/tutorial12/*
    ../tutorials/tutorial13/*
    ../tutorials/tutorial14/*
    ../tutorials/tutorial15/*
    ../tutorials/tutorial16/*
    ../tutorials/tutorial17/*
    ../tutorials/tutorial18/*
    ../tutorials/tutorial19/*
    ../tutorials/tutorial20/*
    ../tutorials/tutorial21/*
    ../tutorials/tutorial22/*
    ../tutorials/tutorial23/*
    ../tutorials/tutorial24/*
    ../tutorials/tutorial25/*
    ../tutorials/tutorial26/*
    ../tutorials/tutorial27/*
1,588
reStructuredText
26.396551
115
0.684509
omniverse-code/kit/exts/omni.audiorecorder/config/extension.toml
[package]
title = "Kit Audio Recorder"
category = "Audio"
feature = true
version = "0.1.0"
description = "An audio recorder API which is available from python and C++"
detailedDescription = """This is a simple interface that can be used to record audio
to file or send recorded audio to a callback for generic audio recording tasks.
This recorder can achieve a relatively low latency (e.g. 20ms).

This API is available in python and C++.

See omni.kit.window.audiorecorder for an example use of this API.
"""
authors = ["NVIDIA"]
keywords = ["audio", "capture", "recording"]

[dependencies]
"carb.audio" = {}
"omni.usd.libs" = {}
"omni.usd.schema.semantics" = {}
"omni.usd.schema.audio" = {}
"omni.kit.audiodeviceenum" = {}

[[native.plugin]]
path = "bin/*.plugin"

[[python.module]]
name = "omni.audiorecorder"

[[test]]
args = [
    "--/audio/deviceBackend=null"
]
dependencies = [
    "omni.usd",
    "omni.kit.test_helpers_gfx",
]
stdoutFailPatterns.exclude = [
    "*failed to create an encoder state*",
    "*failed to initialize the output stream*",
    "*recording is already in progress*",
    "*no supported conversion path from ePcmFloat (4) to eRaw (10)*",
]
# uncomment and re-open OM-50069 if it breaks again
#pythonTests.unreliable = [
#    "*test_callback_recording", # OM-50069
#]
1,302
TOML
23.584905
78
0.690476
omniverse-code/kit/exts/omni.audiorecorder/omni/audiorecorder/__init__.py
""" This module contains bindings to the C++ omni::audio::IAudioRecorder interface. This provides functionality for simple audio recording to file or using a callback. Recording can be done in 16 bit, 32 bit or float PCM if a callback is required. Recording can be done in any format when only recording to a file. Recording to a file while simultaneously receiving data callbacks is also possible. """ # recorder bindings depend on some types from carb.audio import carb.audio from ._audiorecorder import *
550
Python
41.384612
91
0.732727
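Going by the docstring above and the calls exercised in the test file later in this dump, a minimal capture session looks roughly like the sketch below. The callback body, frame counts, and output file name are placeholders, not values mandated by the API.

import carb.audio
import omni.audiorecorder

recorder = omni.audiorecorder.create_audio_recorder()

def on_audio(data):
    # receives a chunk of float PCM samples once per period
    print("received", len(data), "samples")

# record mono float PCM, forwarding chunks to the callback and writing a file at the same time
if recorder.begin_recording_float(
    callback=on_audio,
    channels=1,
    frame_rate=48000,
    buffer_length=4800 * 16,
    period=4800,
    length_type=carb.audio.UnitType.FRAMES,
    filename="capture.oga",  # placeholder output path
):
    # ... capture for as long as needed, then:
    recorder.stop_recording()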
omniverse-code/kit/exts/omni.audiorecorder/omni/audiorecorder/tests/__init__.py
from .test_audio_recorder import * # pragma: no cover
56
Python
17.999994
54
0.714286
omniverse-code/kit/exts/omni.audiorecorder/omni/audiorecorder/tests/test_audio_recorder.py
import pathlib
import time
import os
import math

from PIL import Image

import omni.kit.test_helpers_gfx.compare_utils
import carb.tokens
import carb.audio
import omni.audiorecorder
import omni.usd.audio
import omni.kit.audiodeviceenum
import omni.kit.test
import omni.log

OUTPUTS_DIR = pathlib.Path(omni.kit.test.get_test_output_path())


# boilerplate to work around python lacking references
class IntReference:  # pragma: no cover
    def __init__(self, v):
        self._v = v

    def _get_value(self):
        return self._v

    def _set_value(self, v):
        self._v = v

    value = property(_get_value, _set_value)


def wait_for_data(received: IntReference, ms: int, expected_data: int):  # pragma: no cover
    for i in range(ms):
        if received.value > expected_data:
            # log this so we can see timing margins on teamcity
            omni.log.warn("finished waiting after " + str(i / 1000.0) + " seconds")
            break
        if i > 0 and i % 1000 == 0:
            omni.log.warn("waited " + str(i // 1000) + " seconds")
        if i == ms - 1:
            # raise rather than assert, since this free function has no test case to assert on
            raise Exception("timeout of " + str(ms) + "ms waiting for " + str(expected_data) + " frames")
        time.sleep(0.001)


class TestAudioRecorder(omni.kit.test.AsyncTestCase):  # pragma: no cover
    def setUp(self):
        self._recorder = omni.audiorecorder.create_audio_recorder()
        self.assertIsNotNone(self._recorder)
        extension_path = carb.tokens.get_tokens_interface().resolve("${omni.audiorecorder}")
        self._test_path = pathlib.Path(extension_path).joinpath("data").joinpath("tests").absolute()
        self._golden_path = self._test_path.joinpath("golden")

    def tearDown(self):
        self._recorder = None

    def test_callback_recording(self):
        valid = []
        for i in range(1, 16):
            valid.append(4800 * i)
        received = IntReference(0)

        def read_validation(data):
            nonlocal received
            received.value += len(data)
            self.assertTrue(len(data) in valid)

        def float_read_callback(data):
            read_validation(data)
            self.assertTrue(isinstance(data[0], float))

        def int_read_callback(data):
            read_validation(data)
            self.assertTrue(isinstance(data[0], int))

        # if there are no devices, this won't work
        if omni.kit.audiodeviceenum.acquire_audio_device_enum_interface().get_device_count(omni.kit.audiodeviceenum.Direction.CAPTURE) == 0:
            self.assertFalse(self._recorder.begin_recording_float(
                callback = float_read_callback,
            ))
            return

        # not open => should be some valid default format
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertGreaterEqual(fmt.channels, carb.audio.MIN_CHANNELS)
        self.assertLessEqual(fmt.channels, carb.audio.MAX_CHANNELS)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)

        self.assertTrue(self._recorder.begin_recording_float(
            callback = float_read_callback,
            buffer_length = 4800 * 16,  # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))

        # try again and it will fail because a recording is already in progress
        self.assertFalse(self._recorder.begin_recording_float(
            callback = float_read_callback,
            buffer_length = 4800 * 16,  # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))

        # not much we can test here aside from it being valid
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertEqual(fmt.channels, 1)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM_FLOAT)

        # this timeout seems absurd, but anything's possible with teamcity's timing
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()

        # try again with int16
        received.value = 0
        self.assertTrue(self._recorder.begin_recording_int16(
            callback = int_read_callback,
            buffer_length = 4800 * 16,  # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))

        # not much we can test here aside from it being valid
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertEqual(fmt.channels, 1)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM16)

        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()

        # try again with int32
        received.value = 0
        self.assertTrue(self._recorder.begin_recording_int32(
            callback = int_read_callback,
            buffer_length = 4800 * 16,  # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES,
            channels = 1
        ))

        # not much we can test here aside from it being valid
        fmt = self._recorder.get_format()
        self.assertIsNotNone(fmt)
        self.assertEqual(fmt.channels, 1)
        self.assertGreaterEqual(fmt.frame_rate, carb.audio.MIN_FRAMERATE)
        self.assertLessEqual(fmt.frame_rate, carb.audio.MAX_FRAMERATE)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM32)

        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()

        # test that the format is set as specified
        self.assertTrue(self._recorder.begin_recording_float(
            callback = float_read_callback,
            channels = 1,  # 1 channel because that's all we can guarantee
                           # when we upgrade IAudioCapture, this should no longer
                           # be an issue
            frame_rate = 43217,
            buffer_length = 4800 * 16,  # enormous buffer for test reliability
            period = 4800,
            length_type = carb.audio.UnitType.FRAMES
        ))
        fmt = self._recorder.get_format()
        self.assertEqual(fmt.channels, 1)
        self.assertEqual(fmt.frame_rate, 43217)
        self.assertEqual(fmt.format, carb.audio.SampleFormat.PCM_FLOAT)
        self._recorder.stop_recording()

    def _validate_sound(self, path):
        # to validate that this is a working sound, load it into a sound prim
        # and check if the sound asset loaded successfully
        context = omni.usd.get_context()
        self.assertIsNotNone(context)
        context.new_stage()
        stage = context.get_stage()
        self.assertIsNotNone(stage)
        prim = stage.DefinePrim("/sound", "OmniSound")
        self.assertIsNotNone(prim)
        prim.GetAttribute("filePath").Set(path)
        audio = omni.usd.audio.get_stage_audio_interface()
        self.assertIsNotNone(audio)
        i = 0
        while audio.get_sound_asset_status(prim) == omni.usd.audio.AssetLoadStatus.IN_PROGRESS:
            time.sleep(0.001)
            if i > 5000:
                raise Exception("asset load timed out")
            i += 1
        self.assertEqual(audio.get_sound_asset_status(prim), omni.usd.audio.AssetLoadStatus.DONE)

    def test_recording_to_file(self):
        received = IntReference(0)

        def read_callback(data):
            nonlocal received
            received.value += len(data)

        if omni.kit.audiodeviceenum.acquire_audio_device_enum_interface().get_device_count(omni.kit.audiodeviceenum.Direction.CAPTURE) == 0:
            self.assertFalse(self._recorder.begin_recording_float(
                callback = read_callback,
                filename = str(OUTPUTS_DIR.joinpath("test.oga"))
            ))
            self.assertFalse(self._recorder.begin_recording_to_file(
                filename = str(OUTPUTS_DIR.joinpath("test.oga"))
            ))
            return

        self.assertTrue(self._recorder.begin_recording_float(
            callback = read_callback,
            buffer_length = 4800 * 16,  # enormous buffer for test reliability
            period = 4800,
            channels = 1,
            frame_rate = 48000,
            length_type = carb.audio.UnitType.FRAMES,
            output_format = carb.audio.SampleFormat.OPUS,
            filename = str(OUTPUTS_DIR.joinpath("test.opus.oga"))
        ))
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.opus.oga")))

        # try again with a default output format
        self.assertTrue(self._recorder.begin_recording_float(
            callback = read_callback,
            buffer_length = 4800 * 16,  # enormous buffer for test reliability
            period = 4800,
            channels = 1,
            frame_rate = 48000,
            length_type = carb.audio.UnitType.FRAMES,
            filename = str(OUTPUTS_DIR.joinpath("test.default.0.wav"))
        ))
        wait_for_data(received, 8000, 4800)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.default.0.wav")))

        self.assertTrue(self._recorder.begin_recording_to_file(
            frame_rate = 73172,
            channels = 1,
            filename = str(OUTPUTS_DIR.joinpath("test.vorbis.oga")),
            output_format = carb.audio.SampleFormat.VORBIS
        ))
        time.sleep(2.0)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.vorbis.oga")))

        # try again with a default output format
        self.assertTrue(self._recorder.begin_recording_to_file(
            frame_rate = 73172,
            channels = 1,
            filename = str(OUTPUTS_DIR.joinpath("test.default.1.wav")),
        ))
        time.sleep(2.0)
        self._recorder.stop_recording()
        self._validate_sound(str(OUTPUTS_DIR.joinpath("test.default.1.wav")))

    def test_bad_parameters(self):
        def read_callback(data):
            pass

        # MP3 is not supported
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            output_format = carb.audio.SampleFormat.MP3,
            filename = str(OUTPUTS_DIR.joinpath("test.mp3"))
        ))

        # bad format
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            output_format = carb.audio.SampleFormat.RAW,
            filename = str(OUTPUTS_DIR.joinpath("test.mp3"))
        ))

        # bad file name
        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            output_format = carb.audio.SampleFormat.OPUS,
            filename = "a/b/c/d/e/f/g/h/i/j.oga"
        ))

        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            channels = carb.audio.MAX_CHANNELS + 1
        ))

        self.assertFalse(self._recorder.begin_recording_float(
            callback = read_callback,
            frame_rate = carb.audio.MAX_FRAMERATE + 1
        ))

    def test_draw_waveform(self):
        # toggle in case you want to regenerate waveforms
        GENERATE_GOLDEN_IMAGES = False

        samples_int16 = []
        samples_int32 = []
        samples_float = []
        for i in range(4800):
            samples_float.append(math.sin(i / 48.0))
            samples_int16.append(int(samples_float[-1] * (2 ** 15 - 1)))
            samples_int32.append(int(samples_float[-1] * (2 ** 31 - 1)))

        W = 256
        H = 256

        raw = omni.audiorecorder.draw_waveform_from_blob_float(samples_float, 1, W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
        self.assertEqual(len(raw), W * H * 4)
        with Image.frombytes("RGBX", (W, H), bytes(raw), 'raw') as img:
            img.convert("RGB").save(str(pathlib.Path(OUTPUTS_DIR).joinpath("waveform.float.png")))
        if not GENERATE_GOLDEN_IMAGES:
            self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.float.png"),
                self._golden_path.joinpath("waveform.png"),
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.float.png.diff.png")), 0.1)

        raw = omni.audiorecorder.draw_waveform_from_blob_int32(samples_int32, 1, W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
        self.assertEqual(len(raw), W * H * 4)
        with Image.frombytes("RGBX", (W, H), bytes(raw), 'raw') as img:
            img.convert("RGB").save(str(pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int32.png")))
        if not GENERATE_GOLDEN_IMAGES:
            self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int32.png"),
                self._golden_path.joinpath("waveform.png"),
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int32.png.diff.png")), 0.1)

        raw = omni.audiorecorder.draw_waveform_from_blob_int16(samples_int16, 1, W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
        self.assertEqual(len(raw), W * H * 4)
        with Image.frombytes("RGBX", (W, H), bytes(raw), 'raw') as img:
            img.convert("RGB").save(str(pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int16.png")))
        if not GENERATE_GOLDEN_IMAGES:
            # 1 pixel of difference was 756.0 difference
            self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int16.png"),
                self._golden_path.joinpath("waveform.png"),
                pathlib.Path(OUTPUTS_DIR).joinpath("waveform.int16.png.diff.png")), 1024.0)
14,313
Python
36.276042
140
0.604066
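A minimal sketch of the recording pattern the test above exercises, using only the omni.audiorecorder and carb.audio calls that appear in the file; the callback name is illustrative, and the waiting step (the test's wait_for_data polling) is elided.

.. code-block:: python

    import carb.audio
    import omni.audiorecorder

    def on_frames(data):
        # data is a list of captured samples delivered once per period
        print(f"received {len(data)} samples")

    recorder = omni.audiorecorder.create_audio_recorder()
    # begin_recording_float() returns False when no capture device is available,
    # so check the result before assuming frames will arrive
    if recorder.begin_recording_float(
        callback=on_frames,
        buffer_length=4800 * 16,
        period=4800,
        length_type=carb.audio.UnitType.FRAMES,
        channels=1,
    ):
        # ... wait until enough frames have been received ...
        recorder.stop_recording()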
omniverse-code/kit/exts/omni.graph.tools/config/extension.toml
[package]
title = "OmniGraph Tools"
# Note that for this extension the semantic versioning is interpreted slightly differently as it has to be concerned
# with the public Python API, the content of the generated code, and the .ogn format. Ordinarily the versioning of the
# generated code would be handled by a dependency on a particular version of omni.graph/omni.graph.core, however as
# this extension cannot see them the dependency has to be managed manually.
#
# MAJOR VERSION: Changes are made to the generated code, OGN format, or the public API that are not backward compatible.
#                i.e. the user has to change some of their code to make it work with the new version
#
# MINOR VERSION: 1. A change is made to the Python API that is backward compatible with no user changes
#                2. A change is made to the .ogn format that is backward compatible with no user changes
#                3. A change is made that would require regeneration of the Python node database at runtime in order
#                   to ensure backward compatibility, but with no user changes required to the .ogn or node files.
#                   e.g. extra runtime information is added to the database
#
# PATCH VERSION: 1. A change is made to the implementation of the Python API that does not change functionality
#                2. A change is made to the .ogn parsing that does not change the generated code
#
version = "1.17.2"
category = "Graph"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
description = "Contains the implementation of the Omniverse Graph node generator scripts."
repository = ""
keywords = ["kit", "omnigraph", "tools"]
authors = ["NVIDIA"]

# The tools are used for the build and so have no dependencies
[dependencies]
"omni.usd.libs" = {}
"omni.kit.pip_archive" = {}

# For AutoNode - to be removed after the deprecation support is removed from omni.graph.tools/python/_impl/v1_2_0/autograph
[python.pipapi]
requirements = [
    "numpy",  # SWIPAT filed under: http://nvbugs/3193231
    "toml",  # SWIPAT filed under: http://nvbugs/3060676
]

# Main python module this extension provides, it will be publicly available as "import omni.graph.tools".
[[python.module]]
name = "omni.graph.tools"

[documentation]
deps = [
    ["kit-sdk", "_build/docs/kit-sdk/latest"],  # WAR to include omni.graph.tutorial/etc refs until that workflow is moved
]
pages = [
    "docs/Overview.md",
    "docs/node_architects_guide.rst",
    "docs/ogn_user_guide.rst",
    "docs/ogn_reference_guide.rst",
    "docs/attribute_types.rst",
    "docs/ogn_code_samples_cpp.rst",
    "docs/ogn_code_samples_python.rst",
    "docs/ogn_generation_script.rst",
    "docs/CHANGELOG.md",
]
2,726
TOML
44.449999
123
0.70653
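The versioning policy above lives in comments rather than machine-readable fields, so tooling that needs the actual version simply parses the file; a small sketch using the toml package (which this extension pulls in via pipapi) with an illustrative path:

.. code-block:: python

    import toml

    config = toml.load("config/extension.toml")  # path is illustrative
    print(config["package"]["version"])  # e.g. "1.17.2"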
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/ensure_node_types_in_toml.py
""" Given a collection of node metadata, ensure that all nodes appear in the [omnigraph.node_types] section of the extension.toml used by the owning extension to register its information. Invoke this script by passing the location of the .json file containing the metadata for all node types and the location of the extension.toml file to which the metadata should be populated. python ensure_node_types_in_toml.py --nodeInfo $BUILD/exts/omni.my.extension/ogn/nodes.json --toml $SRC/extensions/omni.my.extension/config/extension.toml The extension.toml file will be modified to included a generated section with this format: # === GENERATED BY ensure_node_types_in_toml.py -- DO NOT MODIFY === [omnigraph.node_types] "omni.my.extension.MyNodeType" = 1 "omni.my.extension.MyOtherNodeType" = 1 # === END OF GENERATED CODE === or if the "--allData" flag is set then this more verbose format will be used: # === GENERATED BY ensure_node_types_in_toml.py -- DO NOT MODIFY === [omnigraph.node_types."omni.my.extension.MyNodeType"] version = 1 language = "C++" description = "This is my node" [omnigraph.node_types."omni.my.extension.MyOtherNodeType"] version = 1 language = "C++" description = "This is my other node" # === END OF GENERATED CODE === Note that this script explicitly does not handle the case of multiple versions of the same node type in the same extension as that is also not handled by OmniGraph proper. You might also want to use an intermediate directory, which will create an explicit tag when the .toml is regenerated so that you can safely handle the case of regeneration after a direct edit of the .toml file itself. This will ensure that a user cannot accidentally delete a node definition from the automatically generated section; python ensure_node_types_in_toml.py --nodeInfo $BUILD/exts/omni.my.extension/ogn/nodes.json --toml $SRC/extensions/omni.my.extension/config/extension.toml --intermediate $TOP/_build/intermediate Lastly, the support for the toml package is only available through repo_man in the build, not in the standard path. You can pass in the root directory of the repo_man module if it is needed to find the toml package. 
python ensure_node_types_in_toml.py --nodeInfo $BUILD/exts/omni.my.extension/ogn/nodes.json --toml $SRC/extensions/omni.my.extension/config/extension.toml --intermediate $TOP/_build/intermediate --repoMan $TOP/_repo/deps/repo_man """ import argparse import json import logging import os import sys from pathlib import Path from node_generator.utils import WritableDir # Create a logger and selectively turn on logging if the OGN debugging environment variable is set logger = logging.getLogger("add_nodes_to_toml") logging_handler = logging.StreamHandler(sys.stdout) logging_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s")) logger.addHandler(logging_handler) logger.setLevel(logging.INFO if os.getenv("OGN_PARSE_DEBUG") else logging.WARN) START_MARKER = "# === GENERATED BY ensure_node_types_in_toml.py -- DO NOT MODIFY ===" """Special marker text identifying the start of the generated node type identifier code""" SECTION_NAME = "omnigraph.node_types" """Name of the generated section of the .toml file""" END_MARKER = "# === END OF GENERATED CODE ===" """Special marker text identifying the end of the generated node type identifier code""" # ====================================================================== class TomlModifier: """Encapsulates the reading, modifying, and writing of the .toml file with the node information Attributes: changes_made: True iff processing the .toml file resulted in changes to it toml: The toml module root - a member of the class because it may need to be imported from an alternate location Internal Attributes: __all_data: If True then the .toml will include language and description in addition to type name and version __existing_types: Dictionary of node_type_name:version_number for node types found in the original .toml file __node_info: Dictionary of node_type_name:node_type_info generated by the build for this extension __node_info_path: Path pointing to the generated file containing the node type information for this extension __tag_path: Path pointing to a file to use to tag the operation as complete (so that edits to the .toml can trigger regeneration) __toml_path: Path pointing to the .toml file to be modified """ # -------------------------------------------------------------------------------------------------------------- def __init__(self): """Set up the information required for the operations - do nothing just yet""" # Construct the parsing information. Run the script with "--help" to see the usage. 
parser = argparse.ArgumentParser( description="Ensure that the supplied .toml file contains metadata for all of the nodes defined" " in the extension", formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument( "-a", "--allData", action="store_true", help="Dump all metadata to the .toml file instead of just the version and node type name", ) parser.add_argument( "-in", "--intermediate", action=WritableDir, const=None, type=Path, metavar="INTERMEDIATE_DIRECTORY", help="Directory into which temporary build information is stored", ) parser.add_argument( "-ni", "--nodeInfoFile", type=Path, metavar="NODE_INFO_FILE.json", help=".json file generated by the build that contains the registration information for all of the nodes", ) parser.add_argument( "-rm", "--repoMan", type=Path, metavar="REPO_MAN_DIR", help="Path to the repo_man support directory", ) parser.add_argument( "-t", "--tomlFile", type=Path, metavar="TOML_FILE", help=".toml file that will contain the node information", ) parser.add_argument("-v", "--verbose", action="store_true", help="Output the steps the script is performing") args = parser.parse_args() # If the script steps are to be echoed enable the logger and dump the script arguments as a first step if args.verbose: logger.setLevel(logging.DEBUG) logger.info("Processing the arguments") logger.info(" Args = %s", args) # Set up the internal attributes. All path files are resolved to absolute paths for convenience. self.__all_data = args.allData self.__toml_path = args.tomlFile.resolve() self.__node_info_path = args.nodeInfoFile.resolve() self.__tag_path = args.intermediate / "extension.toml.built" if args.intermediate is not None else None if self.__tag_path is not None: self.__tag_path = self.__tag_path.resolve() self.changes_made = False self.__in_team_city = False self.__existing_types = {} self.__node_info = {} # The toml package is installed as part of repo_man, not directly available in the build, so it may not be # available here and may have to be found through a location supplied by the script arguments. try: import toml self.toml = toml except ModuleNotFoundError: self.toml = None # There is some information to get from repoMan when running through a build so that we can successfully # determine when a failure is fatal and when it is just part of the normal generation process. if args.repoMan is not None: try: python_path = args.repoMan.resolve().as_posix() sys.path.append(python_path) import omni.repo.man if self.toml is None: toml = omni.repo.man.get_toml_module() self.toml = toml self.__in_team_city = omni.repo.man.is_running_in_teamcity() except (ModuleNotFoundError, AttributeError): # If repoMan is inaccessible then issue a warning but continue on to avoid spurious failures if self.toml is None: logger.warning( "toml module could not be found natively or at path '%s', parsing cannot happen", python_path ) else: logger.warning( "Not able to determine if running in TeamCity without module at '%s', assuming not.", python_path, ) logger.info(" Team City run = %s", self.__in_team_city) # -------------------------------------------------------------------------------------------------------------- @property def needs_generation(self) -> bool: """Returns True iff the extension.toml is older than either the generator script itself or the nodes.json file. This is done here after several unsuccessful attempts to get the build structure to recognize when the file needs rebuilding. 
At worst it means running this script when it isn't necessary, but then returning immediately after checking the file modification times so hopefully no big deal. """ # If the toml cannot be parsed no generation can happen if self.toml is None: return False this_file = Path(__file__) # If the nodes.json does not exist then no generation is needed because there are no nodes if not self.__node_info_path.exists(): logger.info("Skipping generation - no nodes.json file") return False # If the .toml file does not exist it definitely needs generation if not self.__toml_path.exists(): logger.info("Forcing generation - no .toml file") return True # If the tag file does not exist but should then generation has never been done so it needs to be done now if self.__tag_path is not None and not self.__tag_path.exists(): logger.info("Forcing generation - missing tag file") return True # All four files exist. Regeneration is only needed if the .tag file is not the newest one this_file_mtime = this_file.stat().st_mtime node_info_mtime = self.__node_info_path.stat().st_mtime toml_mtime = self.__toml_path.stat().st_mtime tag_mtime = self.__tag_path.stat().st_mtime if self.__tag_path is not None else 0 if tag_mtime < toml_mtime: logger.info("Forcing generation - .toml is newer %s than the tag file %s", toml_mtime, tag_mtime) return True if tag_mtime < node_info_mtime: logger.info("Forcing generation - tag file is older than nodes.json") return True if tag_mtime < this_file_mtime: logger.info("Forcing generation - tag file is older than the generation script") return True # No good reason was found to regenerate, so don't logger.info("Skipping generation - the .toml file is up to date") return False # -------------------------------------------------------------------------------------------------------------- def read_files(self): """Read in the contents of the .toml and .json files and parse them for modification""" logger.info("Reading the extension's .toml file") contents = self.toml.load(self.__toml_path) try: sections = SECTION_NAME.split(".") self.__existing_types = contents for section in sections: self.__existing_types = self.__existing_types[section] except KeyError: self.__existing_types = {} logger.info("Reading the extension's .json node information file") try: with open(self.__node_info_path, "r", encoding="utf-8") as json_fd: self.__node_info = json.load(json_fd)["nodes"] except (IOError, json.JSONDecodeError): self.__node_info = {} # -------------------------------------------------------------------------------------------------------------- def __existing_version_matches(self, node_type_name: str, version: int) -> bool: """Returns True iff the node type name has a version number in the .toml file matching the one passed in""" if node_type_name not in self.__existing_types: return False node_type_info = self.__existing_types[node_type_name] # If the abbreviated version of the metadata was used the version number is all there is if isinstance(node_type_info, int): return version == node_type_info # Otherwise extract the version number from the metadata dictionary try: return version == node_type_info["version"] except KeyError: return False # -------------------------------------------------------------------------------------------------------------- def add_nodes(self): """Ensure the nodes that were passed in are present in the .toml file""" logger.info("Ensuring the node types are in the file") for node_type_name, node_type_info in self.__node_info.items(): version = 
int(node_type_info["version"]) if not self.__existing_version_matches(node_type_name, version): self.changes_made = True if self.__all_data: new_item = { "version": version, "language": node_type_info["language"], "description": node_type_info["description"], } else: new_item = version logger.info(" Found an unregistered type - %s = %s", node_type_name, new_item) self.__existing_types[node_type_name] = new_item # -------------------------------------------------------------------------------------------------------------- def write_file(self): """Write the new contents of the .toml file back to the original location. The toml library dump() method cannot be used here as it would lose information like comments and formatting so instead it narrows its focus to the [[omnigraph]] section, surrounding it with fixed markers so that it can be easily identified and replaced using a text-based edit. """ logger.info("Writing the file") if self.__in_team_city: raise AttributeError( f"The file {self.__toml_path} was not up to date in the merge request. Rebuild and add it." ) with open(self.__toml_path, "r", encoding="utf-8") as toml_fd: raw_contents = toml_fd.readlines() raw_line_count = len(raw_contents) # Convert the node type list to a .toml format logger.info(" Inserting new section %s", self.__existing_types) # Build the structure from the bottom up to ensure the .toml has the correct nesting section_dict = self.__existing_types sections = SECTION_NAME.split(".") sections.reverse() for section in sections: section_dict = {section: dict(sorted(section_dict.items()))} inserted_section = self.toml.dumps(section_dict) # Scan the file to see if/where the generated section currently resides in_section = False section_start_index = -1 section_end_index = -1 for line_index, line in enumerate(raw_contents): if in_section and line.rstrip() == END_MARKER: in_section = False section_end_index = line_index if line.rstrip() == START_MARKER: in_section = True section_start_index = line_index logger.info(" Existing section location was %s, %s", section_start_index, section_end_index) if section_start_index >= 0 and section_end_index == -1: raise ValueError( f"The .toml file '{self.__toml_path}' was illegal - it had a start marker but no end marker" ) if section_start_index < 0: section_start_index = raw_line_count section_end_index = section_start_index # Write the modified contents with the new generated section try: with open(self.__toml_path, "w", encoding="utf-8") as toml_fd: toml_fd.writelines(raw_contents[0:section_start_index]) # If inserting at the end of the file then insert a blank line for readability if section_start_index == raw_line_count: toml_fd.write("\n") toml_fd.write(f"{START_MARKER}\n") toml_fd.write(inserted_section) toml_fd.write(f"{END_MARKER}\n") toml_fd.writelines(raw_contents[section_end_index + 1 : raw_line_count]) toml_fd.flush() # Required to ensure the mtime is earlier than the tag file's except IOError as error: raise IOError(f"Failed to write back the .toml file '{self.__toml_path}'") from error # -------------------------------------------------------------------------------------------------------------- def touch_tag_file(self): """Forces update of the tag file mtime.""" try: # Tag the conversion as being complete so that a build process can properly manage dependencies. 
# This has to happen last to avoid a false positive where the tag file is older than the .toml logger.info("Touching the tag file %s", self.__tag_path) if self.__tag_path is not None: with open(self.__tag_path, "w", newline="\n", encoding="utf-8") as tag_fd: tag_fd.write("This file tags the last time its .toml file was processed with node metadata") except IOError as error: raise IOError(f"Failed to write back the tag file '{self.__tag_path}'") from error # ============================================================================================================== def main_update_extension_toml(): """Walk through the steps required to parse, modify, and write the .toml file.""" modifier = TomlModifier() if modifier.needs_generation: modifier.read_files() modifier.add_nodes() if modifier.changes_made: modifier.write_file() # The tag file needs updating even if changes were not made so that it doesn't repeatedly try and fail to # regenerate every time the script runs. modifier.touch_tag_file() # ============================================================================================================== if __name__ == "__main__": main_update_extension_toml()
19,319
Python
48.035533
120
0.591076
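The trickiest step in the script above is rebuilding the dotted [omnigraph.node_types] section from the innermost level out before dumping it; here is a standalone sketch of just that nesting step, using the script's own SECTION_NAME convention and a hypothetical node entry:

.. code-block:: python

    import toml

    SECTION_NAME = "omnigraph.node_types"
    node_types = {"omni.my.extension.MyNodeType": 1}  # hypothetical node data

    # Wrap the data bottom-up so toml.dumps() produces the correctly nested section
    section_dict = node_types
    for section in reversed(SECTION_NAME.split(".")):
        section_dict = {section: dict(sorted(section_dict.items()))}

    print(toml.dumps(section_dict))
    # [omnigraph.node_types]
    # "omni.my.extension.MyNodeType" = 1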
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_internal.py
"""Imports for support code that will used internally by OmniGraph but should not be used elsewhere. These are all subject to change without supported deprecation paths. """ __all__ = [ "cache_location", "Compatibility", "ExtensionContentsBase", "ExtensionContentsStandalone", "ExtensionContentsV118", "ExtensionContentsV119", "ExtensionVersion_t", "extension_contents_factory", "FileType", "find_ogn_build_directory", "full_cache_path", "GENERATED_FILE_CONFIG_NAMES", "GenerationVersions", "get_generator_extension_version", "get_module_path", "get_ogn_file_name", "get_ogn_type_and_node", "get_target_extension_version", "import_tests_in_directory", "load_module_from_file", "LOG", "NodeTypeDefinition", "OmniGraphExtensionError", "set_registration_logging", "Settings", "TemporaryCacheLocation", "TemporaryLogLocation", "VersionProperties", "walk_with_excludes", ] from ._impl.internal.cache_utils import TemporaryCacheLocation, cache_location, full_cache_path from ._impl.internal.extension_contents_1_18 import ExtensionContentsV118 from ._impl.internal.extension_contents_1_19 import ExtensionContentsV119 from ._impl.internal.extension_contents_base import ExtensionContentsBase from ._impl.internal.extension_contents_factory import extension_contents_factory from ._impl.internal.extension_contents_standalone import ExtensionContentsStandalone from ._impl.internal.file_utils import ( GENERATED_FILE_CONFIG_NAMES, FileType, find_ogn_build_directory, get_module_path, get_ogn_file_name, get_ogn_type_and_node, load_module_from_file, walk_with_excludes, ) from ._impl.internal.logging_utils import LOG, OmniGraphExtensionError, TemporaryLogLocation, set_registration_logging from ._impl.internal.node_type_definition import NodeTypeDefinition from ._impl.internal.versions import ( Compatibility, ExtensionVersion_t, GenerationVersions, VersionProperties, get_generator_extension_version, get_target_extension_version, ) from ._impl.node_generator.generate_test_imports import import_tests_in_directory from ._impl.node_generator.utils import Settings
2,234
Python
33.384615
118
0.744405
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/__init__.py
"""Tools that support all of OmniGraph in general, and the .ogn format in particular. General tools can be imported directly with the top level import: .. code-block:: python import omni.graph.tools as ogt help(ogt.deprecated_function) This module also supports a submodule just for the .ogn handling. .. code-block:: python # Support for the parsing and creation of the .ogn format import omni.graph.tools.ogn as ogn """ from . import ogn from ._impl.debugging import destroy_property, function_trace from ._impl.deprecate import ( DeprecatedClass, DeprecatedDictConstant, DeprecatedImport, DeprecatedStringConstant, DeprecateMessage, DeprecationError, DeprecationLevel, RenamedClass, deprecated_constant_object, deprecated_function, ) from ._impl.extension import _PublicExtension # noqa: F401 from ._impl.node_generator.utils import IndentedOutput, shorten_string_lines_to # ============================================================================================================== __all__ = [ "dbg_gc", "dbg_ui", "dbg", "deprecated_constant_object", "deprecated_function", "DeprecatedClass", "DeprecatedDictConstant", "DeprecatedImport", "DeprecatedStringConstant", "DeprecateMessage", "DeprecationError", "DeprecationLevel", "destroy_property", "function_trace", "import_tests_in_directory", "IndentedOutput", "OGN_DEBUG", "RenamedClass", "shorten_string_lines_to", "supported_attribute_type_names", ] # ============================================================================================================== # Soft-deprecated imports. Kept around for backward compatibility for one version. # _____ ______ _____ _____ ______ _____ _______ ______ _____ # | __ \ | ____|| __ \ | __ \ | ____|/ ____| /\ |__ __|| ____|| __ \ # | | | || |__ | |__) || |__) || |__ | | / \ | | | |__ | | | | # | | | || __| | ___/ | _ / | __| | | / /\ \ | | | __| | | | | # | |__| || |____ | | | | \ \ | |____| |____ / ____ \ | | | |____ | |__| | # |_____/ |______||_| |_| \_\|______|\_____|/_/ \_\|_| |______||_____/ # from ._impl.debugging import OGN_DEBUG, dbg, dbg_gc, dbg_ui from ._impl.node_generator.attributes.management import supported_attribute_type_names as _moved_to_ogn from ._impl.node_generator.generate_test_imports import import_tests_in_directory @deprecated_function("supported_attribute_type_names() has moved to omni.graph.tools.ogn") def supported_attribute_type_names(*args, **kwargs): return _moved_to_ogn(*args, **kwargs)
2,683
Python
34.315789
112
0.530004
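The deprecated_function decorator used at the bottom of the file above is the standard migration shim in this package; a short sketch of applying it to your own renamed helper (the helper names here are hypothetical):

.. code-block:: python

    import omni.graph.tools as ogt

    def new_helper(value):
        return value * 2

    @ogt.deprecated_function("new_helper() replaces old_helper()")
    def old_helper(*args, **kwargs):
        # Emits the deprecation message, then forwards to the replacement
        return new_helper(*args, **kwargs)

    old_helper(21)  # logs the deprecation message and returns 42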
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_1_11.py
"""Backward compatible module for omni.graph.tools version 1.11 and earlier. This module contains everything that was formerly visible by default but will no longer be part of the Python API for omni.graph.tools. If there is something here you rely on contact the OmniGraph team and let them know. Currently the module is in pre-deprecation, meaning you can still access everything here from the main module with .. code-block:: python import omni.graph.tools as ogt ogt.pre_deprecated_but_still_visible() Once the soft deprecation is enabled you will only be able to access the deprecated function with an explicit import: .. code-block:: python import omni.graph.tools as ogt import omni.graph.tools._1_11 as ogt1_11 if i_want_v1_11: ogt1_11.soft_deprecated_but_still_accessible() else: ogt.current_function() When hard deprecation is in place all functionality will be removed and import of this module will fail: .. code-block:: python import omni.graph.tools._1_11 as ot1_11 # Raises DeprecationError """ from ._impl.debugging import OGN_DEBUG, OGN_EVAL_DEBUG, OGN_GC_DEBUG, OGN_UI_DEBUG, dbg, dbg_eval, dbg_gc, dbg_ui from ._impl.node_generator.attributes.management import ATTRIBUTE_MANAGERS from ._impl.node_generator.attributes.parsing import sdf_type_name from ._impl.node_generator.generate_test_imports import import_tests_in_directory # Code that should be retired from ._impl.node_generator.keys import GraphSetupKeys_V1 from ._impl.node_generator.type_definitions import apply_type_definitions # Code that should be refactored and moved to an appropriate location from ._impl.node_generator.utils import ( OGN_PARSE_DEBUG, OGN_REG_DEBUG, Settings, dbg_parse, dbg_reg, is_unwritable, shorten_string_lines_to, ) # Code that is entirely internal to the node description editor and should be made local from ._impl.ogn_types import _OGN_TO_SDF_BASE_NAME as OGN_TO_SDF_BASE_NAME from ._impl.ogn_types import _SDF_BASE_NAME_TO_OGN as SDF_BASE_NAME_TO_OGN from ._impl.ogn_types import _SDF_TO_OGN as SDF_TO_OGN __all__ = [ "apply_type_definitions", "ATTRIBUTE_MANAGERS", "dbg_eval", "dbg_gc", "dbg_parse", "dbg_reg", "dbg_ui", "dbg", "GraphSetupKeys_V1", "import_tests_in_directory", "is_unwritable", "OGN_DEBUG", "OGN_EVAL_DEBUG", "OGN_GC_DEBUG", "OGN_PARSE_DEBUG", "OGN_REG_DEBUG", "OGN_TO_SDF_BASE_NAME", "OGN_UI_DEBUG", "SDF_BASE_NAME_TO_OGN", "SDF_TO_OGN", "sdf_type_name", "Settings", "shorten_string_lines_to", ]
2,611
Python
30.853658
117
0.711605
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/generate_node.py
"""Command line script to run the node generator scripts Mainly a separate script to create a package for the node generator scripts so that the files can use shorter names and relative imports. See node_generator/README.md for the usage information. """ from _impl.node_generator import main main.main()
307
Python
33.222219
115
0.791531
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/ogn.py
"""Tools that support interacting with the .ogn format, including parsing and creation. General tools can be imported directly with the top level import: .. code-block:: python import omni.graph.tools.ogn as ogn help(ogn) """ from ._impl.node_generator.attributes.AttributeManager import AttributeManager from ._impl.node_generator.attributes.management import ( ALL_ATTRIBUTE_TYPES, ATTRIBUTE_UNION_GROUPS, expand_attribute_union_groups, get_attribute_manager, get_attribute_manager_type, split_attribute_type_name, supported_attribute_type_names, ) from ._impl.node_generator.code_generation import code_generation from ._impl.node_generator.generate_cpp import generate_cpp from ._impl.node_generator.generate_documentation import generate_documentation from ._impl.node_generator.generate_python import generate_python from ._impl.node_generator.generate_template import generate_template from ._impl.node_generator.generate_test_imports import generate_test_imports from ._impl.node_generator.generate_tests import generate_tests from ._impl.node_generator.generate_usd import generate_usd from ._impl.node_generator.keys import ( AttributeKeys, CategoryTypeValues, CudaPointerValues, ExclusionTypeValues, GraphSetupKeys, IconKeys, LanguageTypeValues, MemoryTypeValues, MetadataKeys, NodeTypeKeys, TestKeys, ) from ._impl.node_generator.nodes import NodeGenerationError from ._impl.node_generator.parse_scheduling import SchedulingHints from ._impl.node_generator.utils import ( CarbLogError, DebugError, ParseError, UnimplementedError, to_cpp_comment, to_python_comment, to_usd_comment, to_usd_docs, ) from ._impl.ogn_types import ogn_to_sdf, sdf_to_ogn __all__ = [ "ALL_ATTRIBUTE_TYPES", "ATTRIBUTE_UNION_GROUPS", "AttributeKeys", "AttributeManager", "CarbLogError", "CategoryTypeValues", "code_generation", "CudaPointerValues", "DebugError", "ExclusionTypeValues", "expand_attribute_union_groups", "generate_cpp", "generate_documentation", "generate_python", "generate_template", "generate_test_imports", "generate_tests", "generate_usd", "get_attribute_manager_type", "get_attribute_manager", "GraphSetupKeys", "IconKeys", "LanguageTypeValues", "MemoryTypeValues", "MetadataKeys", "NodeGenerationError", "NodeTypeKeys", "ogn_to_sdf", "ParseError", "SchedulingHints", "sdf_to_ogn", "split_attribute_type_name", "supported_attribute_type_names", "TestKeys", "to_cpp_comment", "to_python_comment", "to_usd_comment", "to_usd_docs", "UnimplementedError", ] # ============================================================================================================== # These are symbols that should technically be prefaced with an underscore because they are used internally but # not part of the public API but that would cause a lot of refactoring work so for now they are just added to the # module contents but not the module exports. # _ _ _____ _____ _____ ______ _ _ # | | | |_ _| __ \| __ \| ____| \ | | # | |__| | | | | | | | | | | |__ | \| | # | __ | | | | | | | | | | __| | . 
` | # | | | |_| |_| |__| | |__| | |____| |\ | # |_| |_|_____|_____/|_____/|______|_| \_| # from ._impl.node_generator.attributes.management import validate_attribute_type_name # noqa: F401 from ._impl.node_generator.attributes.naming import ATTR_NAME_REQUIREMENT # noqa: F401 from ._impl.node_generator.attributes.naming import ATTR_UI_NAME_REQUIREMENT # noqa: F401 from ._impl.node_generator.attributes.naming import INPUT_GROUP # noqa: F401 from ._impl.node_generator.attributes.naming import INPUT_NS # noqa: F401 from ._impl.node_generator.attributes.naming import OUTPUT_GROUP # noqa: F401 from ._impl.node_generator.attributes.naming import OUTPUT_NS # noqa: F401 from ._impl.node_generator.attributes.naming import STATE_GROUP # noqa: F401 from ._impl.node_generator.attributes.naming import STATE_NS # noqa: F401 from ._impl.node_generator.attributes.naming import assemble_attribute_type_name # noqa: F401 from ._impl.node_generator.attributes.naming import attribute_name_as_python_property # noqa: F401 from ._impl.node_generator.attributes.naming import attribute_name_in_namespace # noqa: F401 from ._impl.node_generator.attributes.naming import attribute_name_without_port # noqa: F401 from ._impl.node_generator.attributes.naming import check_attribute_name # noqa: F401 from ._impl.node_generator.attributes.naming import check_attribute_ui_name # noqa: F401 from ._impl.node_generator.attributes.naming import is_input_name # noqa: F401 from ._impl.node_generator.attributes.naming import is_output_name # noqa: F401 from ._impl.node_generator.attributes.naming import is_state_name # noqa: F401 from ._impl.node_generator.attributes.naming import namespace_of_group # noqa: F401 from ._impl.node_generator.attributes.NumericAttributeManager import NumericAttributeManager # noqa: F401 from ._impl.node_generator.attributes.parsing import attributes_as_usd # noqa: F401 from ._impl.node_generator.attributes.parsing import separate_ogn_role_and_type # noqa: F401 from ._impl.node_generator.attributes.parsing import usd_type_name # noqa: F401 from ._impl.node_generator.generate_test_imports import import_file_contents # noqa: F401 from ._impl.node_generator.nodes import NODE_NAME_REQUIREMENT # noqa: F401 from ._impl.node_generator.nodes import NODE_UI_NAME_REQUIREMENT # noqa: F401 from ._impl.node_generator.nodes import NodeInterface # noqa: F401 from ._impl.node_generator.nodes import NodeInterfaceWrapper # noqa: F401 from ._impl.node_generator.nodes import check_node_language # noqa: F401 from ._impl.node_generator.nodes import check_node_name # noqa: F401 from ._impl.node_generator.nodes import check_node_ui_name # noqa: F401 from ._impl.node_generator.OmniGraphExtension import OmniGraphExtension # noqa: F401 from ._impl.node_generator.utils import _EXTENDED_TYPE_ANY as EXTENDED_TYPE_ANY # noqa: F401 from ._impl.node_generator.utils import _EXTENDED_TYPE_REGULAR as EXTENDED_TYPE_REGULAR # noqa: F401 from ._impl.node_generator.utils import _EXTENDED_TYPE_UNION as EXTENDED_TYPE_UNION # noqa: F401 from ._impl.node_generator.utils import OGN_PARSE_DEBUG # noqa: F401 from ._impl.node_generator.utils import GeneratorConfiguration # noqa: F401 from ._impl.node_generator.utils import check_memory_type # noqa: F401 # By placing this in an internal list and exporting the list the backward compatibility code can make use of it # to allow access to the now-internal objects in a way that looks like they are still published. 
_HIDDEN = [ "assemble_attribute_type_name", "ATTR_NAME_REQUIREMENT", "ATTR_UI_NAME_REQUIREMENT", "attribute_name_as_python_property", "attribute_name_in_namespace", "attribute_name_without_port", "attributes_as_usd", "check_attribute_name", "check_attribute_ui_name", "check_memory_type", "check_node_language", "check_node_name", "check_node_ui_name", "EXTENDED_TYPE_ANY", "EXTENDED_TYPE_REGULAR", "EXTENDED_TYPE_UNION", "GeneratorConfiguration", "import_file_contents", "INPUT_GROUP", "INPUT_NS", "is_input_name", "is_output_name", "is_state_name", "namespace_of_group", "NODE_NAME_REQUIREMENT", "NODE_UI_NAME_REQUIREMENT", "NodeInterface", "NodeInterfaceWrapper", "NumericAttributeManager", "OGN_PARSE_DEBUG", "OmniGraphExtension", "OUTPUT_GROUP", "OUTPUT_NS", "separate_ogn_role_and_type", "STATE_GROUP", "STATE_NS", "usd_type_name", "validate_attribute_type_name", ]
7,821
Python
40.386243
113
0.70234
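The _HIDDEN list at the end of the file above exists so that compatibility code can keep resolving formerly public names. A generic sketch of how such a list can drive a module-level __getattr__ (PEP 562) is shown below; the module layout is hypothetical and this is not the actual omni.graph.tools mechanism:

.. code-block:: python

    # compat.py - hypothetical shim module illustrating the pattern
    import importlib

    _internal_home = importlib.import_module("mypackage._internal")  # where the names now live
    _HIDDEN = ["old_name_one", "old_name_two"]

    def __getattr__(name):
        # Resolve formerly-public names against their new internal home
        if name in _HIDDEN:
            return getattr(_internal_home, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")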
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/make_docs_toc.py
""" Create a table of contents file in index.rst that references all of the OmniGraph node generated documentation files that live in that directory. This processing is highly tied to the formatting of the OGN generated documentation files so if they change this has to as well. The table of contents will be in two sections. A table consisting of columns with [node name, node version, link to node doc file, link to node appendix entry] An appendix with headers consisting of the node name and body consisting of the node's description """ from _impl.node_generator import main_docs main_docs.main_docs()
619
Python
37.749998
116
0.781906
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/parse_scheduling.py
from omni.graph.tools._impl.node_generator.parse_scheduling import * # noqa: F401,PLW0401,PLW0614
99
Python
48.999976
98
0.787879
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_python.py
from omni.graph.tools._impl.node_generator.generate_python import * # noqa: F401,PLW0401,PLW0614
98
Python
48.499976
97
0.785714
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/main_docs.py
from omni.graph.tools._impl.node_generator.main_docs import * # noqa: F401,PLW0401,PLW0614
92
Python
45.499977
91
0.771739
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_node_info.py
from omni.graph.tools._impl.node_generator.generate_node_info import * # noqa: F401,PLW0401,PLW0614
101
Python
49.999975
100
0.782178
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/main.py
from omni.graph.tools._impl.node_generator.main import * # noqa: F401,PLW0401,PLW0614
87
Python
42.999979
86
0.770115
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_template.py
from omni.graph.tools._impl.node_generator.generate_template import * # noqa: F401,PLW0401,PLW0614
100
Python
49.499975
99
0.79
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/type_definitions.py
from omni.graph.tools._impl.node_generator.type_definitions import * # noqa: F401,PLW0401,PLW0614
99
Python
48.999976
98
0.787879
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_test_imports.py
from omni.graph.tools._impl.node_generator.generate_test_imports import * # noqa: F401,PLW0401,PLW0614
104
Python
51.499974
103
0.788462
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/__init__.py
import traceback

from carb import log_warn

_trace = "".join(traceback.format_stack())
log_warn(f"The OmniGraph Node Generator has moved. Use 'import omni.graph.tools.ogn as ogn' to access it.\n{_trace}")
206
Python
28.571424
117
0.742718
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_cpp.py
from omni.graph.tools._impl.node_generator.generate_cpp import * # noqa: F401,PLW0401,PLW0614
95
Python
46.999977
94
0.778947
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/register_ogn_nodes.py
from omni.graph.tools._impl.node_generator.register_ogn_nodes import * # noqa: F401,PLW0401,PLW0614
101
Python
49.999975
100
0.782178
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_tests.py
from omni.graph.tools._impl.node_generator.generate_tests import * # noqa: F401,PLW0401,PLW0614
97
Python
47.999976
96
0.783505
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_usd.py
from omni.graph.tools._impl.node_generator.generate_usd import * # noqa: F401,PLW0401,PLW0614
95
Python
46.999977
94
0.778947
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/utils.py
from omni.graph.tools._impl.node_generator.utils import * # noqa: F401,PLW0401,PLW0614
88
Python
43.499978
87
0.772727
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/ThreadsafeOpen.py
from omni.graph.tools._impl.node_generator.ThreadsafeOpen import * # noqa: F401,PLW0401,PLW0614
97
Python
47.999976
96
0.793814
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/keys.py
from omni.graph.tools._impl.node_generator.keys import * # noqa: F401,PLW0401,PLW0614
87
Python
42.999979
86
0.770115
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_documentation.py
from omni.graph.tools._impl.node_generator.generate_documentation import * # noqa: F401,PLW0401,PLW0614
105
Python
51.999974
104
0.8
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/nodes.py
from omni.graph.tools._impl.node_generator.nodes import * # noqa: F401,PLW0401,PLW0614
88
Python
43.499978
87
0.772727
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/OmniGraphExtension.py
from omni.graph.tools._impl.node_generator.OmniGraphExtension import * # noqa: F401,PLW0401,PLW0614
101
Python
49.999975
100
0.80198
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/code_generation.py
from omni.graph.tools._impl.node_generator.code_generation import * # noqa: F401,PLW0401,PLW0614
98
Python
48.499976
97
0.785714
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/generate_icon.py
from omni.graph.tools._impl.node_generator.generate_icon import * # noqa: F401,PLW0401,PLW0614
96
Python
47.499976
95
0.78125
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/category_definitions.py
from omni.graph.tools._impl.node_generator.category_definitions import * # noqa: F401,PLW0401,PLW0614
103
Python
50.999975
102
0.796116
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/FloatAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.FloatAttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TokenAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.TokenAttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/StringAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.StringAttributeManager import * # noqa: F401,PLW0401,PLW0614
116
Python
57.499971
115
0.818966
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TimeCodeAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.TimeCodeAttributeManager import * # noqa: F401,PLW0401,PLW0614
118
Python
58.499971
117
0.822034
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/parsing.py
from omni.graph.tools._impl.node_generator.attributes.parsing import * # noqa: F401,PLW0401,PLW0614
101
Python
49.999975
100
0.792079
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ObjectIdAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.ObjectIdAttributeManager import * # noqa: F401,PLW0401,PLW0614
118
Python
58.499971
117
0.822034
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/naming.py
from omni.graph.tools._impl.node_generator.attributes.naming import * # noqa: F401,PLW0401,PLW0614
100
Python
49.499975
99
0.79
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/DoubleAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.DoubleAttributeManager import * # noqa: F401,PLW0401,PLW0614
116
Python
57.499971
115
0.818966
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/NumericAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.NumericAttributeManager import * # noqa: F401,PLW0401,PLW0614
117
Python
57.999971
116
0.820513
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/AnyAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.AnyAttributeManager import * # noqa: F401,PLW0401,PLW0614
113
Python
55.999972
112
0.814159
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/FrameAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.FrameAttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/NormalAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.NormalAttributeManager import * # noqa: F401,PLW0401,PLW0614
116
Python
57.499971
115
0.818966
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/VectorAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.VectorAttributeManager import * # noqa: F401,PLW0401,PLW0614
116
Python
57.499971
115
0.818966
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/Int64AttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.Int64AttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UIntAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UIntAttributeManager import * # noqa: F401,PLW0401,PLW0614
114
Python
56.499972
113
0.815789
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ExecutionAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.ExecutionAttributeManager import * # noqa: F401,PLW0401,PLW0614
119
Python
58.999971
118
0.823529
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/BundleAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.BundleAttributeManager import * # noqa: F401,PLW0401,PLW0614
116
Python
57.499971
115
0.818966
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/RoleAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.RoleAttributeManager import * # noqa: F401,PLW0401,PLW0614
114
Python
56.499972
113
0.815789
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/management.py
from omni.graph.tools._impl.node_generator.attributes.management import * # noqa: F401,PLW0401,PLW0614
104
Python
51.499974
103
0.798077
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/ColorAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.ColorAttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/BoolAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.BoolAttributeManager import * # noqa: F401,PLW0401,PLW0614
114
Python
56.499972
113
0.815789
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/PathAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.PathAttributeManager import * # noqa: F401,PLW0401,PLW0614
114
Python
56.499972
113
0.815789
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UInt64AttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UInt64AttributeManager import * # noqa: F401,PLW0401,PLW0614
116
Python
57.499971
115
0.818966
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UnionAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UnionAttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/UCharAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.UCharAttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/MatrixAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.MatrixAttributeManager import * # noqa: F401,PLW0401,PLW0614
116
Python
57.499971
115
0.818966
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/IntAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.IntAttributeManager import * # noqa: F401,PLW0401,PLW0614
113
Python
55.999972
112
0.814159
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/PointAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.PointAttributeManager import * # noqa: F401,PLW0401,PLW0614
115
Python
56.999972
114
0.817391
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/HalfAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.HalfAttributeManager import * # noqa: F401,PLW0401,PLW0614
114
Python
56.499972
113
0.815789
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/AttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.AttributeManager import * # noqa: F401,PLW0401,PLW0614
110
Python
54.499973
109
0.809091
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/node_generator/attributes/TexCoordAttributeManager.py
from omni.graph.tools._impl.node_generator.attributes.TexCoordAttributeManager import * # noqa: F401,PLW0401,PLW0614
118
Python
58.499971
117
0.822034
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/debugging.py
""" Collection of tools to help with debugging the operation of scripts. Mainly lives here so that all OGN-related files can access it, though the tools are pretty general. """ import os import weakref from contextlib import suppress from functools import partial, wraps from typing import Dict, List from omni.ext import get_dangling_references __all__ = [] # ====================================================================== # Environment variable gating display and execution of debugging information # - The value "1" sets OGN_DEBUG for general debugging # - Any string containing "eval" sets OGN_EVAL_DEBUG # - Either "1" or a string containing "gc" sets OGN_GC_DEBUG # - Either "1" or a string containing "ui" sets OGN_UI_DEBUG # e.g. you could enable UI and GC by setting it to "gc, ui" _ogn_debug_env_var = os.getenv("OGN_DEBUG") has_debugging = _ogn_debug_env_var is not None OGN_DEBUG = _ogn_debug_env_var == "1" OGN_EVAL_DEBUG = has_debugging and (_ogn_debug_env_var.lower().find("eval") >= 0) OGN_GC_DEBUG = has_debugging and (_ogn_debug_env_var == "1" or _ogn_debug_env_var.lower().find("gc") >= 0) OGN_UI_DEBUG = has_debugging and (_ogn_debug_env_var == "1" or _ogn_debug_env_var.lower().find("ui") >= 0) # ====================================================================== def __dbg(gate_variable: bool, message: str, *args, **kwargs): """ Print out a debugging message if the gate_variable is enabled, additional args will be passed to format the given message. """ if gate_variable: if args or kwargs: print("DBG: " + message.format(*args, **kwargs), flush=True) else: print(f"DBG: {message}", flush=True) # Define a few helper functions that provide debugging for some standard environment variables. # Even more efficient use pattern is "OGN_DEBUG and dbg(X)" to prevent side effects. dbg = partial(__dbg, OGN_DEBUG) dbg_eval = partial(__dbg, OGN_EVAL_DEBUG) dbg_gc = partial(__dbg, OGN_GC_DEBUG) dbg_ui = partial(__dbg, OGN_UI_DEBUG) # ====================================================================== # String used for indenting debugging information, so that nested function calls are visually distinct INDENT = "" # ====================================================================== def function_trace(env_var=None): """ Debugging decorator that adds function call tracing, potentially gated by an environment variable. Use as a normal function decorator: .. code-block:: python @function_trace() def my_function(value: str) -> str: return value + value Calling my_function("X") with debugging enabled will print this: Calling my_function('X') 'my_function' returned 'XX' The extra parameter lets you selectively disable it based on environment variables: .. 
code-block:: python

        @function_trace("OGN_DEBUG")
        def my_function(value: str) -> str:
            return value + value

    This version only enables debugging if the environment variable "OGN_DEBUG" is set
    """

    def inner_decorator(func):
        """Having an inner decorator allows parameters to be passed to the outer one"""

        @wraps(func)
        def wrapper_debug(*args, **kwargs):
            """Wrapper function to add debugging information before and after forwarding calls"""
            if env_var is None or os.getenv(env_var) is not None:
                global INDENT
                args_repr = [repr(a) for a in args]
                kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
                signature = ", ".join(args_repr + kwargs_repr)
                print(f"{INDENT}Calling {func.__name__}({signature})")
                INDENT += "  "
                value = func(*args, **kwargs)
                print(f"{INDENT}{func.__name__!r} returned {value!r}")
                INDENT = INDENT[:-2]
                return value
            return func(*args, **kwargs)

        return wrapper_debug

    return inner_decorator


# ======================================================================
def __validate_property_destruction(weak_property, name: str):
    """Check that the weak reference to a property value references a destroyed value"""
    # Check to see if the property value is still being referenced
    if OGN_GC_DEBUG:
        with suppress(AttributeError, TypeError):
            if weak_property() is not None:
                print(f"Property {name} destroy failed: {get_dangling_references(weak_property())}", flush=True)


# ----------------------------------------------------------------------
def __destroy_property_member(obj_property, name: str):
    """Try to call destroy for the obj_property - returns a weak reference to it for later use"""
    dbg(f"Destroying member {name} on {obj_property}")
    try:
        # Use a weak reference to perform a simple test for "real" destruction
        weak_property = weakref.ref(obj_property)
        obj_property.destroy()
    except AttributeError:
        dbg_gc(f"...obj_property {name} has no destroy method")
        weak_property = None
    except TypeError:
        dbg_gc(f"...obj_property {name} cannot be weak referenced")
        weak_property = None
    return weak_property


# ----------------------------------------------------------------------
def __destroy_property_list(property_list: List, base_name: str):
    """Walk a list of properties, recursively destroying them"""
    dbg_gc(f"Destroying list {property_list} as {base_name}")
    index = 0
    # The non-standard loop is to make sure this execution frame does not retain references to the objects
    while property_list:
        property_member = property_list.pop(0)
        debug_name = f"{base_name}[{index}]"
        index += 1
        dbg_gc(f"...destroying member {debug_name}")
        if isinstance(property_member, list):
            dbg_gc("...(as list)")
            __destroy_property_list(property_member, debug_name)
        elif isinstance(property_member, dict):
            dbg_gc("...(as dictionary)")
            __destroy_property_dict(property_member, debug_name)
        else:
            dbg_gc("...(as object)")
            weak_property = __destroy_property_member(property_member, debug_name)
            property_member = None
            __validate_property_destruction(weak_property, debug_name)


# ----------------------------------------------------------------------
def __destroy_property_dict(property_dict: Dict, base_name: str):
    """Walk a dictionary of properties, recursively destroying them"""
    dbg_gc(f"Destroying dictionary {property_dict} as {base_name}")
    # The non-standard loop is to make sure this execution frame does not retain references to the objects
    while property_dict:
        property_key, property_member = property_dict.popitem()
        debug_name = f"{base_name}[{property_key}]"
        dbg_gc(f"...destroying member {debug_name}")
        if isinstance(property_member, list):
            dbg_gc("...(as list)")
            __destroy_property_list(property_member, debug_name)
        elif isinstance(property_member, dict):
            dbg_gc("...(as dictionary)")
            __destroy_property_dict(property_member, debug_name)
        else:
            dbg_gc("...(as object)")
            weak_property = __destroy_property_member(property_member, debug_name)
            property_member = None
            __validate_property_destruction(weak_property, debug_name)


# ----------------------------------------------------------------------
def destroy_property(self, property_name: str):
    """Call the destroy method on a property and set it to None - helps with garbage collection

    In a class's destroy() or __del__ method you can call this to generically handle member destruction
    when such things do not happen automatically (e.g. when you cross into the C++-bindings, or the objects
    have circular references)

        def destroy(self):
            destroy_property(self, "_widget")

    If the property is a list then the list members are individually destroyed.
    If the property is a dictionary then the values of the dictionary are individually destroyed.

    NOTE: Only call this if you are the owner of the property, otherwise just set it to None.

    Args:
        self: The object owning the property to be destroyed (can be anything with a destroy() method)
        property_name: Name of the property to be destroyed
    """
    debug_name = f"{type(self).__name__}.{property_name}"
    # If the property name uses the double-underscore convention for "internal" data then the name must
    # be embellished with the class name to allow access, since this function is not part of the class.
    property_to_access = property_name if property_name[0:2] != "__" else f"_{type(self).__name__}{property_name}"
    obj_property = getattr(self, property_to_access, None)
    if obj_property is None:
        dbg_gc(f"Destroyed None member {debug_name} {self} {property_to_access}")
        return
    dbg_gc(f"Destroy property {debug_name}")
    if isinstance(obj_property, list):
        dbg_gc("(as list)")
        __destroy_property_list(obj_property, debug_name)
        setattr(self, property_to_access, [])
    elif isinstance(obj_property, dict):
        dbg_gc("(as dictionary)")
        __destroy_property_dict(obj_property, debug_name)
        setattr(self, property_to_access, {})
    else:
        dbg_gc("(as object)")
        weak_property = __destroy_property_member(obj_property, debug_name)
        setattr(self, property_to_access, None)
        obj_property = None
        __validate_property_destruction(weak_property, debug_name)
9,727
Python
41.854625
114
0.607793
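A minimal usage sketch for the tracing and destruction helpers above, assuming a Kit environment where omni.graph.tools exposes them at the top module level (the import path, class, and member names here are illustrative only):

import omni.graph.tools as ogt


@ogt.function_trace("OGN_DEBUG")  # assumption: function_trace is re-exported at this path
def double_up(value: str) -> str:
    """Prints indented call/return lines only while the OGN_DEBUG environment variable is set"""
    return value + value


class Panel:
    """Hypothetical owner of a member that needs explicit cleanup"""

    def __init__(self, widget):
        self.__widget = widget  # double-underscore names are name-mangled; destroy_property handles that

    def destroy(self):
        # Recursively destroys the member (lists and dictionaries are walked) and resets it,
        # helping the garbage collector release objects with C++ bindings or circular references
        ogt.destroy_property(self, "__widget")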
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/deprecate.py
"""Manage deprecation for Python features for common use All deprecation functions can be accessed from the top module level. The :py:class:`omni.graph.tools.DeprecateMessage` class provides a simple way of logging a message that will only show up once per session. The :py:class:`omni.graph.tools.DeprecatedClass` decorator provides a method to emit a deprecation message when the deprecated class is accessed. The :py:class:`omni.graph.tools.RenamedClass` decorator is a slightly more sophisticated method of deprecating a class when the deprecation is simply a name change. The :py:function:`omni.graph.tools.deprecated_function` decorator provides a method to emit a deprecation message when the old function is called. The :py:function:`omni.graph.tools.DeprecatedImport` decorator provides a method to emit a deprecation message when an entire deprecated file is imported for use. This should not be used for imports that will be included in the API for backward compatibility, nor should these files be moved as they must continue to exist at the same import location in order to remain compatible. """ import functools import inspect import re import traceback from typing import Optional, Set from carb import log_warn, settings __all__ = [] # ============================================================================================================== class DeprecationError(Exception): """Exception to raise when a hard-deprecated import, class, or function is attempted to be used. Exists to provide a last bit of information to users who have been ignoring previous deprecation errors. """ # ============================================================================================================== # begin-deprecate-message class DeprecateMessage: """Manager for deprecation messages, to make it efficient to prevent multiple logging of the same deprecation messages. The default settings for output is usually enough to help you find where deprecated code is referenced. If more information is desired these per-class variables can be set to reduce the filtering being done. The message should contains an action item for the user to upgrade from the deprecated functionality: .. code-block:: python DeprecateMessage.deprecated("Install the latest version instead") # Although it's not usually necessary the class can be tuned using these class variable SILENCE_LOG = False # When set the output does not go to the console log; useful to disable for testing SHOW_STACK = True # Report stack trace in the deprecation message - can be turned off if it is too verbose MAX_STACK_LEVELS = 3 # Maximum number of stack levels to report, after filtering RE_IGNORE = re.compile("deprecate.py|bindings-python|importlib") # Ignore stack levels matching these patterns You can use some Python features to handle simple deprecation cases directly such as: .. code-block:: python # Rename constant from A to B A = (DeprecateMessage("A has been renamed to B") and False) or B # Constant A will be removed A = (DeprecateMessage("A will be removed, use B instead) and False) or B """ # end-deprecate-message MESSAGES_LOGGED = set() SILENCE_LOG = False SHOW_STACK = True MAX_STACK_LEVELS = 3 RE_IGNORE = re.compile("deprecate.py|bindings-python|importlib") class NoLogging: """Context manager class to let you import a bunch of known deprecated functions without logging warnings. Typical use would be in providing backward compatibility in a module where submodules have moved. 
with DeprecateMessage.NoLogging(): import .v1_0.my_old_function as my_old_function """ def __init__(self, *args, **kwargs): self.__original_logging = None def __enter__(self): """Disable logging for the duration of the context""" self.__original_logging = DeprecateMessage.SILENCE_LOG DeprecateMessage.SILENCE_LOG = True def __exit__(self, exit_type, value, exit_traceback): """Restore the original logging state""" DeprecateMessage.SILENCE_LOG = self.__original_logging # -------------------------------------------------------------------------------------------------------------- @classmethod def messages_logged(cls) -> Set[str]: """Returns the set of messages that have been logged so far""" return cls.MESSAGES_LOGGED # -------------------------------------------------------------------------------------------------------------- @classmethod def clear_messages(cls): """Clear the logged messages so that they can be logged again""" cls.MESSAGES_LOGGED = set() # -------------------------------------------------------------------------------------------------------------- @classmethod def deprecations_are_errors(cls) -> bool: """Returns True if deprecations are currently being treated as errors""" return settings.get_settings().get("/persistent/omnigraph/deprecationsAreErrors") @classmethod def set_deprecations_are_errors(cls, make_errors: bool): """Enable or disable treating deprecations as errors instead of warnings""" settings.get_settings().set("/persistent/omnigraph/deprecationsAreErrors", make_errors) # -------------------------------------------------------------------------------------------------------------- @classmethod def deprecated(cls, message: str): """Log the deprecation message if it has not yet been logged, otherwise do nothing Args: message: Message to display; only displays once even if this is called many times Adds stack trace information if the class member SHOW_STACK is True. Skips the Carbonite logging if the class member SILENCE_LOG is True (mostly useful for testing when a warning is the expected result). """ if message in cls.MESSAGES_LOGGED: return stack = "" try: try: full_stack = traceback.format_stack() if cls.SHOW_STACK else [] except SyntaxError as error: full_stack = [f"Error encountered when retrieving call stack - {error}"] if full_stack: filtered_stack = filter(lambda stack: not cls.RE_IGNORE.search(stack), full_stack) stack = "\n" + "".join(list(filtered_stack)[-cls.MAX_STACK_LEVELS :]) except SyntaxError as error: stack = f"Stack trace not accessible - {error}" if cls.deprecations_are_errors(): raise DeprecationError(f"{message}{stack}") _ = cls.SILENCE_LOG or log_warn(f"{message}{stack}") cls.MESSAGES_LOGGED.add(message) # ============================================================================================================== # begin-deprecated-class class DeprecatedClass: """Decorator to deprecate a class. Takes one argument that is a string to describe the action the user is to take to avoid the deprecated class. A deprecation message will be shown once, the first time the deprecated class is instantiated. .. code-block:: python @DeprecatedClass("After version 1.5.0 use og.NewerClass instead") class OlderClass: pass """ # end-deprecated-class def __init__(self, deprecation_message: str): """Remember the message and only report it on initialization Args: deprecation_message: A description of the action the user is to take to avoid the deprecated class. 
""" self.__deprecation_message = deprecation_message def message(self, deprecated_cls, deprecated_member: Optional[str] = None): """Emit a deprecation message with useful information attached""" try: old_name = deprecated_cls.__old_name__ except AttributeError: old_name = deprecated_cls.__name__ what_is_deprecated = old_name if deprecated_member is None else f"{old_name}.{deprecated_member}" DeprecateMessage.deprecated(f"{what_is_deprecated} is deprecated: {self.__deprecation_message}") def __call__(self, deprecated_cls): """Report the deprecation message if it hasn't already been reported""" def wrapper(*args, **kwargs): """Redirect function calls to the real class""" self.message(deprecated_cls) result = deprecated_cls(*args, **kwargs) return result # Do some magic here by copying any static methods on the class to the wrapper function object. # This handles the case where a deprecated class has static or class methods. for member_name in dir(deprecated_cls): if isinstance(inspect.getattr_static(deprecated_cls, member_name), staticmethod): def static_function(cls, method, *sf_args, **sf_kwargs): """Wrapper that will give deprecation messages for calling static methods too""" self.message(cls, method) return getattr(cls, method)(*sf_args, **sf_kwargs) setattr(wrapper, member_name, functools.partial(static_function, deprecated_cls, member_name)) elif isinstance(inspect.getattr_static(deprecated_cls, member_name), classmethod): def class_function(cls, method, *cl_args, **cl_kwargs): """Wrapper that will give deprecation messages for calling class methods too""" self.message(cls, method) return getattr(cls, method)(*cl_args, **cl_kwargs) setattr(wrapper, member_name, functools.partial(class_function, deprecated_cls, member_name)) return wrapper # ============================================================================================================== # begin-renamed-class def RenamedClass(cls, old_class_name: str, rename_message: Optional[str] = None) -> object: # noqa: N802 """Syntactic sugar to provide a class deprecation that is a simple renaming, where all of the functions in the old class are still present in backwards compatible form in the new class. Args: old_class_name: The name of the class that was renamed rename_message: If not None, what to use instead of the old class. If None then assume the new class is used. Usage: .. code-block:: python MyDeprecatedClass = RenamedClass(MyNewClass, "MyDeprecatedClass") """ # end-renamed-class @DeprecatedClass(f"Use {cls.__name__ if rename_message is None else rename_message} instead") class _RenamedClass(cls): __old_name__ = old_class_name return _RenamedClass # ============================================================================================================== # begin-deprecated-function def deprecated_function(deprecation_message: str, is_property: bool = False): """Decorator to deprecate a function. Args: deprecation_message: A description of the action the user is to take to avoid the deprecated function. is_property: Set this True if the function is a property getter or setter. A deprecation message will only be shown once, the first time the deprecated function is called. .. code-block:: python @deprecated_function("After version 1.5.0 use og.newer_function() instead") def older_function(): pass For property getters/setters use this decorator *after* the property decorator. .. 
code-block:: python @property @deprecated_function("use 'your_prop' instead.", is_property=True) def my_prop(self): return self.your_prop @my_prop.setter @deprecated_function("use 'your_prop' instead.", is_property=True) def my_prop(self, value): self.your_prop = value """ # end-deprecated-function def decorator_deprecated(func): """Remember the message""" # The functools internal decorator lets the help functions drill down into the actual function when asked, # rather that @functools.wraps(func) def wrapper_deprecated(*args, **kwargs): func_str = f"'{func.__name__}'" if is_property else f"{func.__name__}()" DeprecateMessage.deprecated(f"{func_str} is deprecated: {deprecation_message}") return func(*args, **kwargs) return wrapper_deprecated return decorator_deprecated # ============================================================================================================== # begin-deprecated-import def DeprecatedImport(deprecation_message: str): # noqa: N802 """Decorator to deprecate a specific file or module import. Usually the functionality has been deprecated and moved to a different file. Args: deprecation_message: String with the action the user is to perform to avoid the deprecated import Usage: .. code-block:: python '''This is the top line of the imported file''' import omni.graph.tools as og og.DeprecatedImport("Import 'omni.graph.tools as og' and use og.new_function() instead") # The rest of the file can be left as-is for best backward compatibility, or import non-deprecated versions # of objects from their new location to avoid duplication. """ # end-deprecated-import this_module = inspect.currentframe().f_back.f_locals["__name__"] DeprecateMessage.deprecated(f"{this_module} is deprecated: {deprecation_message}")
13,789
Python
43.340836
119
0.618174
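A short usage sketch for the deprecation helpers above. Per the module docstring they are accessible from the top module level, so the sketch assumes a Kit environment where omni.graph.tools is importable; the class and function names below are illustrative:

import omni.graph.tools as ogt


class NewThing:
    """Replacement class under its new name"""

    def value(self) -> int:
        return 42


# The old class name keeps working but logs a single deprecation warning per session
OldThing = ogt.RenamedClass(NewThing, "OldThing")


@ogt.deprecated_function("Use NewThing().value() instead")
def old_value() -> int:
    return NewThing().value()


# Known-deprecated calls can be made without spamming the log, e.g. in compatibility shims
with ogt.DeprecateMessage.NoLogging():
    _ = OldThing()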
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/extension.py
"""Extension management support""" import omni.ext __all__ = [] class _PublicExtension(omni.ext.IExt): """Dummy extension class that just serves to register and deregister the extension""" def on_startup(self): """Callback when the extension is starting up""" def on_shutdown(self): """Callback when the extension is shutting down"""
367
Python
23.533332
89
0.675749
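For comparison, a concrete extension follows the same omni.ext.IExt pattern; a minimal sketch with hypothetical behavior (the extension class and its logging are illustrative, not part of the file above):

import carb
import omni.ext


class MyExtension(omni.ext.IExt):
    """Hypothetical extension that logs its lifecycle"""

    def on_startup(self, ext_id: str):
        # Kit passes the extension id, e.g. "my.extension-1.0.0"
        carb.log_info(f"[{ext_id}] startup")

    def on_shutdown(self):
        carb.log_info("shutdown")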
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/ogn_types.py
"""Helper which contains utilities and data for converting between type representations""" from typing import Optional from pxr import Sdf __all__ = [] # Maping of pxr.Sdf.ValueTypeNames to corresponding OGN types (not including the Array/[] suffixes) _SDF_BASE_NAME_TO_OGN = { "Bool": "bool", "Color3d": "colord[3]", "Color3f": "colorf[3]", "Color3h": "colorh[3]", "Color4d": "colord[4]", "Color4f": "colorf[4]", "Color4h": "colorh[4]", "Double": "double", "Double2": "double[2]", "Double3": "double[3]", "Double4": "double[4]", "Float": "float", "Float2": "float[2]", "Float3": "float[3]", "Float4": "float[4]", "Frame4d": "framed[4]", "Half": "half", "Half2": "half[2]", "Half3": "half[3]", "Half4": "half[4]", "Int": "int", "Int2": "int[2]", "Int3": "int[3]", "Int4": "int[4]", "Int64": "int64", "Matrix2d": "matrixd[2]", "Matrix3d": "matrixd[3]", "Matrix4d": "matrixd[4]", "Normal3d": "normald[3]", "Normal3f": "normalf[3]", "Normal3h": "normalh[3]", "Point3d": "pointd[3]", "Point3f": "pointf[3]", "Point3h": "pointh[3]", "Quatd": "quatd[4]", "Quatf": "quatf[4]", "Quath": "quath[4]", "String": "string", "TexCoord2d": "texcoordd[2]", "TexCoord2f": "texcoordf[2]", "TexCoord2h": "texcoordh[2]", "TexCoord3d": "texcoordd[3]", "TexCoord3f": "texcoordf[3]", "TexCoord3h": "texcoordh[3]", "TimeCode": "timecode", "Token": "token", "UChar": "uchar", "UInt": "uint", "UInt64": "uint64", "Vector3d": "vectord[3]", "Vector3f": "vectorf[3]", "Vector3h": "vectorh[3]", } # Mapping of OGN types to SDF - not all OGN types can be translated directly _OGN_TO_SDF_BASE_NAME = {value: key for key, value in _SDF_BASE_NAME_TO_OGN.items()} # As the Sdf.ValueTypeNames are static Boost objects create a mapping of them back to OGN to avoid linear lookup _SDF_TO_OGN = {getattr(Sdf.ValueTypeNames, key): value for key, value in _SDF_BASE_NAME_TO_OGN.items()} _SDF_TO_OGN.update( {getattr(Sdf.ValueTypeNames, f"{key}Array"): f"{value}[]" for key, value in _SDF_BASE_NAME_TO_OGN.items()} ) # ================================================================================ def ogn_to_sdf(ogn_type: str) -> Optional[Sdf.ValueTypeNames]: """Convert an OGN type string to the equivalent SDF value type name Args: ogn_type: String representation of the OGN type as described in its documentation Return: Equivalent pxr.Sdf.ValueTypeNames value, or None if there is no equivalent """ is_array = False if ogn_type[-2:] == "[]": is_array = True ogn_type = ogn_type[:-2] try: sdf_type_name = _OGN_TO_SDF_BASE_NAME[ogn_type] if is_array: sdf_type_name += "Array" sdf_type = getattr(Sdf.ValueTypeNames, sdf_type_name, None) except KeyError: sdf_type = None return sdf_type # ================================================================================ def sdf_to_ogn(sdf_type: Sdf.ValueTypeName) -> Optional[str]: """Convert an SDF type to the equivalent OGN type name Args: sdf_type: String representation of the SDF type as described in its documentation Return: Equivalent OGN string name value, or None if there is no equivalent """ is_array = False if str(sdf_type)[-5:] == "Array": is_array = True try: ogn_type_name = _SDF_TO_OGN[sdf_type] if is_array: ogn_type_name += "[]" except KeyError: ogn_type_name = None return ogn_type_name
3,657
Python
28.983606
112
0.563577
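A round-trip sketch for the two converters above. It requires a USD-enabled Python (such as Kit's) and assumes the _impl import path shown, which is illustrative; the array case reflects the single-lookup fix noted in the code:

from pxr import Sdf

from omni.graph.tools._impl.ogn_types import ogn_to_sdf, sdf_to_ogn

# Base and array forms map symmetrically
assert ogn_to_sdf("float[3]") == Sdf.ValueTypeNames.Float3
assert ogn_to_sdf("float[3][]") == Sdf.ValueTypeNames.Float3Array
assert sdf_to_ogn(Sdf.ValueTypeNames.Float3Array) == "float[3][]"

# Types without an equivalent come back as None rather than raising
assert ogn_to_sdf("no-such-type") is None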
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/parse_scheduling.py
"""Support for the parsing and interpretation of scheduling hints in the .ogn file""" from __future__ import annotations # For the forward class reference type in compare() import re from enum import Enum from typing import List, Optional, Union from .utils import IndentedOutput, ParseError # ====================================================================== class _AccessType(Enum): """Access type for a given scheduling flag ALL = The data will be both read and written to READ = The data will only be read WRITE = The data will only be written """ ALL = "ReadWrite" READ = "ReadOnly" WRITE = "WriteOnly" @classmethod def flag_access_type(cls, flag_name: str): """Returns the type of access the flag name implies""" if flag_name.endswith("-read"): return cls.READ if flag_name.endswith("-write"): return cls.WRITE return cls.ALL @classmethod def as_cpp_enum(cls, access_type: _AccessType) -> str: """Returns the C++ enum value corresponding to the access type string taken from the class data values""" if access_type == cls.READ: return "eAccessType::eRead" if access_type == cls.WRITE: return "eAccessType::eWrite" return "eAccessType::eReadWrite" @classmethod def as_python_enum(cls, access_type: _AccessType) -> str: """Returns the Python enum value corresponding to the access type string taken from the class data values""" if access_type == cls.READ: return "og.eAccessType.E_READ" if access_type == cls.WRITE: return "og.eAccessType.E_WRITE" return "og.eAccessType.E_READ_WRITE" # ====================================================================== class _ComputeRule(Enum): """Compute Rule for the scheduling flag DEFAULT = Evaluator default rule ON_REQUEST = Compute skipped until INode::onRequest """ DEFAULT = "compute-default" ON_REQUEST = "compute-on-request" @classmethod def flag_compute_rule(cls, flag_name: str): """Returns the type of compute-rule the flag name implies""" if flag_name == cls.ON_REQUEST.value: return cls.ON_REQUEST return cls.DEFAULT @classmethod def as_cpp_enum(cls, compute_rule: _ComputeRule) -> str: """Returns the C++ enum value corresponding to the string taken from the class data values""" if compute_rule == cls.ON_REQUEST: return "eComputeRule::eOnRequest" return "eComputeRule::eDefault" @classmethod def as_python_enum(cls, compute_rule: _ComputeRule) -> str: """Returns the Python enum value corresponding to the access type string taken from the class data values""" if compute_rule == cls.ON_REQUEST: return "og.eComputeRule.E_ON_REQUEST" return "og.eComputeRule.E_DEFAULT" # ====================================================================== # begin-scheduling-hints class SchedulingHints: """Class managing the scheduling hints. The keywords are case-independent during parsing, specified in lower case here for easy checking. When there is a -read and -write variant only one of them should be specified at a time: no suffix: The item in question is accessed for both read and write -read suffix: The item in question is accessed only for reading -write suffix: The item in question is accessed only for writing These class static values list the possible values for the "scheduling" lists in the .ogn file. # Set when the node accesses other global data, i.e. data stored outside of the node, including the data # on other nodes. GLOBAL_DATA = "global" GLOBAL_DATA_READ = "global-read" GLOBAL_DATA_WRITE = "global-write" # Set when a node accesses static data, i.e. 
data shared among all nodes of the same type STATIC_DATA = "static" STATIC_DATA_READ = "static-read" STATIC_DATA_WRITE = "static-write" # Set when the node is a threadsafe function, i.e. it can be scheduled in parallel with any other nodes, including # nodes of the same type. This flag is not allowed to coexist with any of the other types since they all denote # unsafe threaded data access. THREADSAFE = "threadsafe" # Set when the node accesses the graph topology, e.g. connections, attributes, or nodes TOPOLOGY = "topology" TOPOLOGY_READ = "topology-read" TOPOLOGY_WRITE = "topology-write" # Set when the node accesses the USD stage data (for read-only, write-only, or both read and write) USD = "usd" USD_READ = "usd-read" USD_WRITE = "usd-write" # Set when the scheduling of the node compute may be modified from the evaluator default. COMPUTERULE_DEFAULT = "compute-default" COMPUTERULE_ON_REQUEST = "compute-on-request" """ # end-scheduling-hints GLOBAL_DATA = "global" GLOBAL_DATA_READ = "global-read" GLOBAL_DATA_WRITE = "global-write" STATIC_DATA = "static" STATIC_DATA_READ = "static-read" STATIC_DATA_WRITE = "static-write" THREADSAFE = "threadsafe" TOPOLOGY = "topology" TOPOLOGY_READ = "topology-read" TOPOLOGY_WRITE = "topology-write" USD = "usd" USD_READ = "usd-read" USD_WRITE = "usd-write" COMPUTERULE_DEFAULT = "compute-default" COMPUTERULE_ON_REQUEST = "compute-on-request" def __init__(self, scheduling_hints: Union[List[str], str]): """Initialize the scheduling hints from the .ogn description""" self.global_data = None self.static_data = None self.threadsafe = None self.topology = None self.usd = None self.compute_rule = None self._allowed_tokens = [ getattr(self, token_name) for token_name in dir(SchedulingHints) if token_name.isupper() ] if not isinstance(scheduling_hints, list) and not isinstance(scheduling_hints, str): raise ParseError("Scheduling hints must be a comma-separated string or a list of strings") if isinstance(scheduling_hints, str): # This trick allows lists to be delimited by arbitrary combinations of commas and spaces, so that the # user doesn't have to remember which one to use scheduling_hints = [element for element in re.split(" |, |,", scheduling_hints) if element] for hints in scheduling_hints: self.set_flag(hints) # -------------------------------------------------------------------------------------------------------------- def __str__(self) -> str: """Returns a string with the set of flags currently set""" result = [] result.append(f"GLOBAL={None if self.global_data is None else self.global_data.value}") result.append(f"STATIC={None if self.static_data is None else self.static_data.value}") result.append(f"THREADSAFE={False if self.threadsafe is None else self.threadsafe}") result.append(f"TOPOLOGY={None if self.topology is None else self.topology.value}") result.append(f"USD={None if self.usd is None else self.usd.value}") result.append(f'COMPUTE_RULE="{None if self.compute_rule is None else self.compute_rule.value}"') return ", ".join(result) # -------------------------------------------------------------------------------------------------------------- def parse_error(self, message: str): """Raises a parse error with common information attached to the given message""" raise ParseError(f"{message} - [{self}]") # -------------------------------------------------------------------------------------------------------------- def set_flag(self, flag_to_set: str): """Tries to enable the named flag. 
Raises ParseError if the flag is not legal or not compatible with current flags""" flag_to_set = flag_to_set.lower() if flag_to_set not in self._allowed_tokens: self.parse_error(f"Scheduling flag '{flag_to_set}' not in allowed list {self._allowed_tokens}") if flag_to_set == self.THREADSAFE: if [self.usd, self.global_data, self.static_data, self.topology] != [None, None, None, None]: self.parse_error(f"'{flag_to_set}' scheduling type not compatible with any data modification flags") self.threadsafe = True elif flag_to_set in [self.USD, self.USD_READ, self.USD_WRITE]: if self.usd is not None: self.parse_error(f"{flag_to_set} must be the only USD flag set") if self.threadsafe and flag_to_set != self.USD_READ: self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag") self.usd = _AccessType.flag_access_type(flag_to_set) elif flag_to_set in [self.STATIC_DATA, self.STATIC_DATA_READ, self.STATIC_DATA_WRITE]: if self.static_data is not None: self.parse_error(f"{flag_to_set} must be the only static_data flag set") if self.threadsafe and flag_to_set != self.STATIC_DATA_READ: self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag") self.static_data = _AccessType.flag_access_type(flag_to_set) elif flag_to_set in [self.GLOBAL_DATA, self.GLOBAL_DATA_READ, self.GLOBAL_DATA_WRITE]: if self.global_data is not None: self.parse_error(f"{flag_to_set} must be the only global data flag set") if self.threadsafe and flag_to_set != self.GLOBAL_DATA_READ: self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag") self.global_data = _AccessType.flag_access_type(flag_to_set) elif flag_to_set in [self.TOPOLOGY, self.TOPOLOGY_READ, self.TOPOLOGY_WRITE]: if self.topology is not None: self.parse_error(f"{flag_to_set} must be the only topology flag set") if self.threadsafe and flag_to_set != self.TOPOLOGY_READ: self.parse_error(f"{flag_to_set} not compatible with {self.THREADSAFE} flag") self.topology = _AccessType.flag_access_type(flag_to_set) elif flag_to_set in [self.COMPUTERULE_DEFAULT, self.COMPUTERULE_ON_REQUEST]: if self.compute_rule is not None: self.parse_error(f"{flag_to_set} must be the only compute-rule flag set") self.compute_rule = _ComputeRule.flag_compute_rule(flag_to_set) # -------------------------------------------------------------------------------------------------------------- def compare(self, other: SchedulingHints) -> List[str]: """Compare this object against another of the same type to see if their flag configurations match. 
If they don't match then a list of differences is returned, otherwise an empty list """ errors = [] if self.usd != other.usd: errors.append(f"usd flag mismatch '{self.usd}' != '{other.usd}'") if self.global_data != other.global_data: errors.append(f"global_data flag mismatch '{self.global_data}' != '{other.global_data}'") if self.topology != other.topology: errors.append(f"topology flag mismatch '{self.topology}' != '{other.topology}'") if self.static_data != other.static_data: errors.append(f"static_data flag mismatch '{self.static_data}' != '{other.static_data}'") if self.threadsafe != other.threadsafe: errors.append(f"threadsafe flag mismatch '{self.threadsafe}' != '{other.threadsafe}'") if self.compute_rule != other.compute_rule: errors.append(f"compute-rule flag mismatch '{self.compute_rule}' != '{other.compute_rule}'") return errors # -------------------------------------------------------------------------------------------------------------- def has_values_set(self) -> bool: """Returns True if any of the scheduling hints values have been set""" return [self.threadsafe, self.global_data, self.static_data, self.topology, self.usd, self.compute_rule] != [ None ] * 6 # -------------------------------------------------------------------------------------------------------------- def cpp_includes_required(self) -> List[str]: """Returns a list of files required to be included for the generated C++ code to work""" return ["#include <omni/graph/core/ISchedulingHints.h>"] if self.has_values_set() else [] # -------------------------------------------------------------------------------------------------------------- def emit_cpp(self, out: IndentedOutput) -> bool: """Write the C++ initialization code to the given output stream, writing nothing if no flags were set. Assumes there is a local variable called nodeTypeObj that contains the NodeTypeObj definition. Returns True if anything was written. """ if not self.has_values_set(): return False out.write("auto __schedulingInfo = nodeTypeObj.iNodeType->getSchedulingHints(nodeTypeObj);") out.write('CARB_ASSERT(__schedulingInfo, "Could not acquire the scheduling hints");') out.write("if (__schedulingInfo)") if out.indent("{"): if self.threadsafe: out.write("__schedulingInfo->setThreadSafety(eThreadSafety::eSafe);") elif self.threadsafe is not None: out.write("__schedulingInfo->setThreadSafety(eThreadSafety::eUnsafe);") if self.global_data is not None: out.write( "__schedulingInfo->setDataAccess(eAccessLocation::eGlobal," f" {_AccessType.as_cpp_enum(self.global_data)});" ) if self.static_data is not None: out.write( "__schedulingInfo->setDataAccess(eAccessLocation::eStatic," f" {_AccessType.as_cpp_enum(self.static_data)});" ) if self.topology is not None: out.write( "__schedulingInfo->setDataAccess(eAccessLocation::eTopology," f" {_AccessType.as_cpp_enum(self.topology)});" ) if self.usd is not None: out.write( f"__schedulingInfo->setDataAccess(eAccessLocation::eUsd, {_AccessType.as_cpp_enum(self.usd)});" ) if self.compute_rule is not None: out.write(f"__schedulingInfo->setComputeRule({_ComputeRule.as_cpp_enum(self.compute_rule)});") out.exdent("}") return True # -------------------------------------------------------------------------------------------------------------- def emit_python(self, out: IndentedOutput) -> bool: """Write the Python initialization code to the given output stream, writing nothing if no flags were set. Assumes there is a local variable called node_type that contains the Py_NodeType definition. 
Returns True if anything was written. """ if not self.has_values_set(): return False out.write("__hints = node_type.get_scheduling_hints()") if out.indent("if __hints is not None:"): if self.threadsafe: out.write("__hints.thread_safety = og.eThreadSafety.E_SAFE") elif self.threadsafe is not None: out.write("__hints.thread_safety = og.eThreadSafety.E_UNSAFE") if self.global_data is not None: out.write( "__hints.set_data_access(og.eAccessLocation.E_GLOBAL," f" {_AccessType.as_python_enum(self.global_data)})" ) if self.static_data is not None: out.write( "__hints.set_data_access(og.eAccessLocation.E_STATIC," f" {_AccessType.as_python_enum(self.static_data)})" ) if self.topology is not None: out.write( "__hints.set_data_access(og.eAccessLocation.E_TOPOLOGY," f" {_AccessType.as_python_enum(self.topology)})" ) if self.usd is not None: out.write(f"__hints.set_data_access(og.eAccessLocation.E_USD, {_AccessType.as_python_enum(self.usd)})") if self.compute_rule is not None: out.write(f"__hints.compute_rule = {_ComputeRule.as_python_enum(self.compute_rule)}") out.exdent() return True # -------------------------------------------------------------------------------------------------------------- @classmethod def illegal_configurations(cls) -> List[str]: """Returns a list of illegal parsing configurations for testing purposes. Keeps the data local""" return [ '{"not": "a list or string"}', '["foo"]', # List with bad values '"usd, bar"', # String with bad values '["usd", "usd-read"]', # Lists with incompatible values '["global-write", "global-read"]', '["topology", "topology-write"]', '["static", "static-read"]', '"threadsafe, static"', # String with incompatible values '["compute-default", "compute-on-request"]', ] # -------------------------------------------------------------------------------------------------------------- @classmethod def legal_configurations(cls) -> List[str]: """Returns a list of legal parsing configurations and expected results for testing purposes. The data is a list of pairs where the first element is the flags to be set on the scheduling hints in the .ogn file (possibly with extra information as needed) and the second element is a SchedulingHints object configured with the expected results. 
It has a compare operation so the test will use that to confirm results """ def from_flags( global_data: Optional[_AccessType] = None, threadsafe: Optional[bool] = None, static_data: Optional[_AccessType] = None, topology: Optional[_AccessType] = None, usd: Optional[_AccessType] = None, compute_rule: Optional[_ComputeRule] = None, ) -> SchedulingHints: """Returns a SchedulingHints object whose flags are set to the ones passed in""" scheduling = SchedulingHints([]) scheduling.global_data = global_data scheduling.threadsafe = threadsafe scheduling.static_data = static_data scheduling.topology = topology scheduling.usd = usd scheduling.compute_rule = compute_rule return scheduling return [ ('"global"', from_flags(global_data=_AccessType.ALL)), ('"threadsafe"', from_flags(threadsafe=True)), ('"static-read"', from_flags(static_data=_AccessType.READ)), ('"topology-write"', from_flags(topology=_AccessType.WRITE)), ( '"usd,global-write,topology-read"', from_flags(usd=_AccessType.ALL, global_data=_AccessType.WRITE, topology=_AccessType.READ), ), ( '["usd", "global-read", "topology-write"]', from_flags(usd=_AccessType.ALL, global_data=_AccessType.READ, topology=_AccessType.WRITE), ), ('"compute-on-request"', from_flags(compute_rule=_ComputeRule.ON_REQUEST)), ('"compute-default"', from_flags(compute_rule=_ComputeRule.DEFAULT)), ]
19,726
Python
48.3175
119
0.57812
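A parsing sketch for the scheduling-hints grammar above. The _impl import path is an assumption for illustration; in practice the node generator drives this class from the "scheduling" entry of a .ogn file rather than it being called directly:

from omni.graph.tools._impl.node_generator.parse_scheduling import SchedulingHints

# Comma- and/or space-delimited strings and lists are both accepted
hints = SchedulingHints("usd-read topology-write")
print(hints)  # GLOBAL=None, STATIC=None, THREADSAFE=False, TOPOLOGY=WriteOnly, USD=ReadOnly, ...

# Incompatible combinations raise a ParseError, e.g. threadsafe plus any write access
try:
    SchedulingHints(["threadsafe", "global-write"])
except Exception as error:  # ParseError from .utils
    print(f"rejected: {error}")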
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_python.py
"""Support for generating a pythonic interface class for OmniGraph Nodes. Exports: generate_python: Create a NODE.ogn.py file containing a pythonic interface for the node data """ import json import re from contextlib import suppress from typing import List, Optional from .attributes.AttributeManager import AttributeManager from .attributes.naming import INPUT_NS, OUTPUT_NS, PORT_NAMES, STATE_NS from .keys import CudaPointerValues, LanguageTypeValues, MemoryTypeValues, MetadataKeyOutput, MetadataKeys from .nodes import NodeInterfaceGenerator from .utils import GeneratorConfiguration, logger, shorten_string_lines_to __all__ = ["generate_python"] class NodePythonGenerator(NodeInterfaceGenerator): """Manage the functions required to generate a Python interface for a node""" def __init__(self, configuration: GeneratorConfiguration): # noqa: PLW0246 """Set up the generator and output the Python interface code for the node Just passes the initialization on to the parent class. See the argument and exception descriptions there. """ super().__init__(configuration) # ---------------------------------------------------------------------- def interface_file_name(self) -> str: """Return the path to the name of the Python file""" return self.base_name + "Database.py" # ---------------------------------------------------------------------- def database_class_name(self) -> str: """Return the name of the generated database class, which is what will be passed to the compute method""" return f"{self.base_name}Database" # ---------------------------------------------------------------------- def _value_class_name(self, namespace: str) -> str: """Return the name of the internal class that holds attributes in the given namespace""" return f"ValuesFor{namespace.capitalize()}" # ---------------------------------------------------------------------- def _pre_class_spacing(self): """Writes out spacing before class names - follows Flake8 in verbose mode, nothing otherwise""" if self.verbose: self.out.write() self.out.write() # ---------------------------------------------------------------------- def _pre_function_spacing(self): """Writes out spacing before function definitions - follows Flake8 in verbose mode, nothing otherwise""" if self.verbose: self.out.write() # ---------------------------------------------------------------------- def _filter_out_batched_attributes(self, attribute_list: List[AttributeManager], namespace: str): """ Args: attribute_list: List of attributes belonging to the generated class namespace: Namespace of attributes in the list. Assumption is that all attributes have the same answer. Returns: Two lists of attributes: batched attributes and the filtered list without batched attributes""" if namespace == STATE_NS: return [], attribute_list batched_attribute_list = [] filtered_attribute_list = [] for attribute in attribute_list: # batching of attributes is not supported for runtime types # batching of array attributes wouldn't be the most efficient. 
best is to acquire the right size # numpy.array once and work with it directly currently only limited to CPU memory if ( attribute.ogn_base_type() not in ["bundle", "any", "union"] and attribute.array_depth == 0 and attribute.memory_storage() == MemoryTypeValues.CPU ): batched_attribute_list.append(attribute) else: filtered_attribute_list.append(attribute) return batched_attribute_list, filtered_attribute_list # ---------------------------------------------------------------------- def _generate_attribute_class(self, attribute_list: List[AttributeManager], namespace: str) -> Optional[str]: """Output a nested class that provides database access for the node's input or output attributes. Args: attribute_list: List of attributes belonging to the generated class namespace: Namespace of attributes in the list. Assumption is that all attributes have the same answer. Passed explicitly to allow for the possibility of an empty list. Returns: The name of the class that was generated (None if not generated) The attribute classes have two members per attribute: attr_PROPERTY: Holds a reference to the node's Attribute member for this attribute PROPERTY: A property through which the attribute values are accessed """ # This method is called with all attributes in the same namespace so it's safe to use the first one # to extract the common definition. attribute_class = self._value_class_name(namespace) is_read_only = namespace == INPUT_NS # For correct syntax the namespace name must be singular namespace_for_comment = namespace[:-1] if namespace.endswith("s") else namespace self._pre_function_spacing() if self.out.indent(f"class {attribute_class}(og.DynamicAttributeAccess):"): batched_attribute_list, filtered_attribute_list = self._filter_out_batched_attributes( attribute_list, namespace ) has_batched_attributes = len(batched_attribute_list) > 0 if has_batched_attributes: local_property_list = [attribute.python_property_name() for attribute in batched_attribute_list] if namespace == INPUT_NS: local_property_list += ["_setting_locked", "_batchedReadAttributes", "_batchedReadValues"] elif namespace == OUTPUT_NS: local_property_list += ["_batchedWriteValues"] batched_str = "{" + ", ".join(f'"{attribute}"' for attribute in local_property_list) + "}" self.out.write(f"LOCAL_PROPERTY_NAMES = {batched_str}") elif namespace != STATE_NS: self.out.write("LOCAL_PROPERTY_NAMES = { }") self.out.write( f'"""Helper class that creates natural hierarchical access to {namespace_for_comment} attributes"""' ) if self.out.indent( "def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):" ): self.out.write('"""Initialize simplified access for the attribute data"""') self.out.write("context = node.get_graph().get_default_graph_context()") self.out.write("super().__init__(context, node, attributes, dynamic_attributes)") has_bundles = False gpu_bundles = [] gpu_ptr_kinds = {} for attribute in attribute_list: if attribute.ogn_base_type() == "bundle": has_bundles = True if attribute.memory_type != MemoryTypeValues.CPU: gpu_bundles.append(attribute.usd_name()) with suppress(KeyError): gpu_ptr_kinds[attribute.usd_name()] = CudaPointerValues.PYTHON[ self.node_interface.cuda_pointer_type ] if has_bundles: gpu_ptr_str = "{" + ",".join(f'"{key}": {value}' for key, value in gpu_ptr_kinds.items()) + "}" self.out.write( f"self.__bundles = og.BundleContainer(context, node, attributes, {gpu_bundles}," f" read_only={is_read_only}, gpu_ptr_kinds={gpu_ptr_str})" ) # Output arrays will need a size since that 
has to be set when the user gets their values. # This puts the onus on the caller to set the size before calling get(). For safety, the sizes # are initialized to None so that failure to set values can generate a sensible error message. if not is_read_only: for attribute in attribute_list: if attribute.fabric_needs_counter(): default_size = "None" if attribute.default is None else len(attribute.default) self.out.write(f"self.{attribute.python_property_name()}_size = {default_size}") # Initialize storage for batched values if namespace == INPUT_NS: batched_str = ( "[" + ", ".join(f"self.{attribute.python_attribute_name()}" for attribute in batched_attribute_list) + "]" ) self.out.write(f"self._batchedReadAttributes = {batched_str}") batched_str = ( "[" + ", ".join(f"{attribute.python_default_value()}" for attribute in batched_attribute_list) + "]" ) self.out.write(f"self._batchedReadValues = {batched_str}") elif namespace == OUTPUT_NS: self.out.write("self._batchedWriteValues = { }") self.out.exdent() for attribute in filtered_attribute_list: # Emit the getters and setters for the attributes. attribute.generate_python_property_code(self.out) for index, attribute in enumerate(batched_attribute_list): # Emit the getters and setters for batched read or write attribute.generate_python_batched_property_code(index, self.out) if has_batched_attributes: # Override any dynamic getters and setters for batched attributes to remove the overhead self.out.write() if self.out.indent("def __getattr__(self, item: str):"): if self.out.indent("if item in self.LOCAL_PROPERTY_NAMES:"): self.out.write("return object.__getattribute__(self, item)") self.out.exdent() if self.out.indent("else:"): self.out.write("return super().__getattr__(item)") self.out.exdent() self.out.exdent() self.out.write() if self.out.indent("def __setattr__(self, item: str, new_value):"): if self.out.indent("if item in self.LOCAL_PROPERTY_NAMES:"): self.out.write("object.__setattr__(self, item, new_value)") self.out.exdent() if self.out.indent("else:"): self.out.write("super().__setattr__(item, new_value)") self.out.exdent() self.out.exdent() if namespace == INPUT_NS: self.out.write() if self.out.indent("def _prefetch(self):"): self.out.write("readAttributes = self._batchedReadAttributes") self.out.write("newValues = _og._prefetch_input_attributes_data(readAttributes)") if self.out.indent("if len(readAttributes) == len(newValues):"): self.out.write("self._batchedReadValues = newValues") self.out.exdent() self.out.exdent() elif namespace == OUTPUT_NS: self.out.write() if self.out.indent("def _commit(self):"): self.out.write("_og._commit_output_attributes_data(self._batchedWriteValues)") self.out.write("self._batchedWriteValues = { }") self.out.exdent() self.out.exdent() return attribute_class # ---------------------------------------------------------------------- def _generate_shared_node_type_initialize(self): """ Output the code to set up any shared node type information, like adding attributes and setting metadata. 
Assumes this is part of a method where the variable "node_type" contains the node type object to initialize """ # Set the metadata for this node type self.out.write(f"node_type.set_metadata(ogn.MetadataKeys.EXTENSION, {json.dumps(self.extension)})") for key, value in self.node_interface.metadata.items(): python_key = MetadataKeyOutput.python_name_from_key(key) if python_key is None: python_key = json.dumps(key) # Handle lists as a comma-separated string if isinstance(value, list): value = '"' + ",".join([x.replace('"', '\\"') for x in value]) + '"' else: value = json.dumps(value) self.out.write(f"node_type.set_metadata({python_key}, {value})") if self.node_interface.memory_type != MemoryTypeValues.CPU: self.out.write(f'node_type.set_metadata(ogn.MetadataKeys.MEMORY_TYPE, "{self.node_interface.memory_type}")') # The icon path is relative to the extension path, which is only known at runtime, so build it up then. # To the user it will appear as an absolute path, which they can modify if they wish to. if self.node_interface.icon_path is not None: icon_path = json.dumps(self.node_interface.icon_path) self.out.write(f'icon_path = carb.tokens.get_tokens_interface().resolve("${{{self.extension}}}")') # Using os.path.join here causes problems due to the backslash path separator on Windows. The components # both have forward slashes by design so just insert the missing one. self.out.write(f"icon_path = icon_path + '/' + {icon_path}") self.out.write("node_type.set_metadata(ogn.MetadataKeys.ICON_PATH, icon_path)") # If any of the scheduling hints flags have been defined then set them here if self.node_interface.scheduling_hints is not None: self.node_interface.scheduling_hints.emit_python(self.out) # Generate the initialization of attributes, including setting defaults and adding them to the node type if self.node_interface.has_attributes(): self.out.write(f"{self.database_class_name()}.INTERFACE.add_to_node_type(node_type)") if self.node_interface.all_state_attributes() or self.node_interface.has_state: self.out.write("node_type.set_has_state(True)") # ---------------------------------------------------------------------- def _generate_node_registration(self): """ Output the definition of the node type's registration support method By having the node type class object be a static class member a circular import can be avoided. The node implementation will call OgnTheNodeDatabase.register(OgnTheNode) to handle registration and the automatic override of any ABI methods that OgnTheNode might implement. 
""" db_class_name = self.database_class_name() self._pre_function_spacing() self.out.write("NODE_TYPE_CLASS = None") # Find the version of this extension in use so that it can be imprinted into the generated file self.out.write(f"GENERATOR_VERSION = {self.generator_version}") self.out.write(f"TARGET_VERSION = {self.target_version}") self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def register(node_type_class):"): self.out.write(f"{db_class_name}.NODE_TYPE_CLASS = node_type_class") self.out.write(f"og.register_node_type({db_class_name}.abi, {self.node_interface.version})") self.out.exdent() self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def deregister():"): self.out.write(f'og.deregister_node_type("{self.node_interface.name}")') self.out.exdent() # ---------------------------------------------------------------------- def _generate_abi_get_node_type(self): """Output the abi implementation of the get_node_type method""" db_class_name = self.database_class_name() self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def get_node_type():"): self.out.write(f"get_node_type_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'get_node_type', None)") if self.out.indent("if callable(get_node_type_function):"): self.out.write("return get_node_type_function()") self.out.exdent(f"return '{self.node_interface.name}'") self.out.exdent() # ---------------------------------------------------------------------- def _generate_abi_compute(self): """Output the abi implementation of the compute method""" db_class_name = self.database_class_name() def __generate_attribute_validate(attribute_list: List[AttributeManager]): """Write out any code that verifies the validity of attributes before trying to compute""" for attribute in attribute_list: attribute.generate_python_validation(self.out) self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def compute(context, node):"): # Construct the database that accesses the Fabric data in a Pythonic way if self.out.indent("try:"): self.out.write(f"per_node_data = {db_class_name}.PER_NODE_DATA[node.node_id()]") self.out.write("db = per_node_data.get('_db')") if self.out.indent("if db is None:"): self.out.write(f"db = {db_class_name}(node)") self.out.write("per_node_data['_db'] = db") self.out.exdent() self.out.exdent() # Currently with hot reload we are not getting PER_NODE_DATA initialized. Just generate the db on the fly. if self.out.indent("except:"): self.out.write(f"db = {db_class_name}(node)") self.out.exdent() self.out.write() if self.out.indent("try:"): __generate_attribute_validate(self.node_interface.all_input_attributes()) __generate_attribute_validate(self.node_interface.all_output_attributes()) __generate_attribute_validate(self.node_interface.all_state_attributes()) # The ABI compute method has the same name as the generated compute method to be called, so use # the fact that the ABI method has more parameters to figure out which one the node has defined. 
self.out.write(f"compute_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'compute', None)") if self.out.indent("if callable(compute_function) and compute_function.__code__.co_argcount > 1:"): self.out.write("return compute_function(context, node)") self.out.exdent() self.out.write() # Fetch input attributes registered for batch read self.out.write("db.inputs._prefetch()") # Special flag that prevents inputs from being modified inside a compute method, which avoids # synchronization problems. In C++ this is enforced by returning const values; this is equivalent. # Suppress the error that occurs if no inputs were generated. self.out.write("db.inputs._setting_locked = True") # If the node attempted to write a const value the compute will throw AttributeError saying why if self.out.indent("with og.in_compute():"): self.out.write(f"return {db_class_name}.NODE_TYPE_CLASS.compute(db)") self.out.exdent() self.out.exdent() # For this error only the name of the attribute is returned, to minimize duplication of strings if self.out.indent("except Exception as error:"): self.out.write('stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next))') self.out.write( "db.log_error(f'Assertion raised in compute - {error}\\n{stack_trace}', add_context=False)" ) self.out.exdent() if self.out.indent("finally:"): self.out.write("db.inputs._setting_locked = False") # Commit output attributes registered for batch write self.out.write("db.outputs._commit()") self.out.exdent() self.out.write("return False") self.out.exdent() # ---------------------------------------------------------------------- def _generate_abi_initialize(self): """Output the abi implementation of the initialize method""" db_class_name = self.database_class_name() self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def initialize(context, node):"): # Give the database a chance to cache away any node-specific data that will not change each evaluation self.out.write(f"{db_class_name}._initialize_per_node_data(node)") self.out.write(f"initialize_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'initialize', None)") if self.out.indent("if callable(initialize_function):"): self.out.write("initialize_function(context, node)") self.out.exdent() self.out.exdent() # ---------------------------------------------------------------------- def _generate_abi_release(self): """Output the abi implementation of the release method""" db_class_name = self.database_class_name() self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def release(node):"): self.out.write(f"release_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'release', None)") if self.out.indent("if callable(release_function):"): self.out.write("release_function(node)") self.out.exdent() # Release any node-specific data that was cached during the initialize function self.out.write(f"{db_class_name}._release_per_node_data(node)") self.out.exdent() # ---------------------------------------------------------------------- def _generate_abi_update_node_version(self): """Output the abi implementation of the update_node_version method""" db_class_name = self.database_class_name() self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def update_node_version(context, node, old_version, new_version):"): self.out.write( f"update_node_version_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'update_node_version', None)" ) if self.out.indent("if callable(update_node_version_function):"): 
self.out.write("return update_node_version_function(context, node, old_version, new_version)") self.out.exdent("return False") self.out.exdent() # ---------------------------------------------------------------------- def _generate_abi_initialize_type(self): """Output the abi implementation of the intialize_type method""" db_class_name = self.database_class_name() self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def initialize_type(node_type):"): self.out.write( f"initialize_type_function = getattr({db_class_name}.NODE_TYPE_CLASS, 'initialize_type', None)" ) self.out.write("needs_initializing = True") if self.out.indent("if callable(initialize_type_function):"): self.out.write("needs_initializing = initialize_type_function(node_type)") self.out.exdent() # By returning a bool the initialize_type override can request attribute additions from the parent # rather than a full override. if self.out.indent("if needs_initializing:"): self._generate_shared_node_type_initialize() self.out.exdent() self.out.exdent() # ---------------------------------------------------------------------- def _generate_abi_on_connection_type_resolve(self): """Output the abi implementation of the on_connection_type_resolve method""" db_class_name = self.database_class_name() self._pre_function_spacing() self.out.write("@staticmethod") if self.out.indent("def on_connection_type_resolve(node):"): self.out.write( "on_connection_type_resolve_function = " f"getattr({db_class_name}.NODE_TYPE_CLASS, 'on_connection_type_resolve', None)" ) if self.out.indent("if callable(on_connection_type_resolve_function):"): self.out.write("on_connection_type_resolve_function(node)") self.out.exdent() self.out.exdent() # ---------------------------------------------------------------------- def _generate_database_abi(self): """Output a registration method and subclass that handles ABI access for the Python node""" self._pre_function_spacing() if self.out.indent("class abi:"): self.out.write('"""Class defining the ABI interface for the node type"""') self._generate_abi_get_node_type() self._generate_abi_compute() self._generate_abi_initialize() self._generate_abi_release() self._generate_abi_update_node_version() self._generate_abi_initialize_type() self._generate_abi_on_connection_type_resolve() self.out.exdent() # ---------------------------------------------------------------------- def _generate_token_help(self): """Generate the help information showing how to access any hardcoded tokens in the file""" if not self.node_interface.tokens: return self.out.write() if self.out.indent("Predefined Tokens:"): for token_name, _ in self.node_interface.tokens.items(): self.out.write(f"tokens.{token_name}") self.out.exdent() # ---------------------------------------------------------------------- def _generate_tokens(self): """Generate the code required to define and initialize any hardcoded tokens in the file""" if not self.node_interface.tokens: return self._pre_function_spacing() if self.out.indent("class tokens:"): for token_name, token_value in self.node_interface.tokens.items(): value = json.dumps(token_value) self.out.write(f"{token_name} = {value}") self.out.exdent() # ---------------------------------------------------------------------- def _generate_attribute_definitions(self): """Output the database class member that describes unchanging attribute data""" self._pre_function_spacing() self.out.write("# This is an internal object that provides per-class storage of a per-node data dictionary") 
self.out.write("PER_NODE_DATA = {}") all_attributes = self.node_interface.all_attributes() self._pre_function_spacing() self.out.write("# This is an internal object that describes unchanging attributes in a generic way") self.out.write("# The values in this list are in no particular order, as a per-attribute tuple") self.out.write("# Name, Type, ExtendedTypeIndex, UiName, Description, Metadata,") self.out.write("# Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg") self.out.write("# You should not need to access any of this data directly, use the defined database interfaces") if self.out.indent("INTERFACE = og.Database._get_interface(["): empty_list = [None, None, None, None, None, None, None, None, None, None] for attribute in all_attributes: attribute_data = empty_list[:] attribute_data[0] = attribute.name (extended_type, type_info) = attribute.python_extended_type() attribute_data[1] = attribute.create_type_name() if type_info is None else type_info attribute_data[2] = extended_type with suppress(KeyError): attribute_data[3] = attribute.metadata[MetadataKeys.UI_NAME] with suppress(KeyError): attribute_data[4] = attribute.metadata[MetadataKeys.DESCRIPTION] metadata = {} for key, value in attribute.metadata.items(): if key not in [MetadataKeys.UI_NAME, MetadataKeys.DESCRIPTION]: python_key = MetadataKeyOutput.python_name_from_key(key) if python_key is None: python_key = key metadata[python_key] = value attribute_data[5] = metadata attribute_data[6] = attribute.is_required attribute_data[7] = attribute.default attribute_data[8] = attribute.is_deprecated attribute_data[9] = attribute.deprecation_msg if attribute.is_deprecated else "" raw_output = f"{tuple(attribute_data)}," # ogn.MetadataKeys is an object name so make sure it is not quoted raw_output = re.sub(r'"(ogn.MetadataKeys[^"]*)"', r"\1", raw_output) raw_output = re.sub(r"'(ogn.MetadataKeys[^']*)'", r"\1", raw_output) self.out.write(raw_output) self.out.exdent("])") # ---------------------------------------------------------------------- def _generate_role_definition_method(self): """Output the method responsible for initialize the role-based data, if any attributes have roles to set""" # Find attributes with non-default roles for output. 
        # Dictionary is {NAMESPACED_ATTRIBUTE: ROLE_NAME}
        roles_to_output = {}
        for attribute in self.node_interface.all_attributes():
            role = attribute.python_role_name()
            if role:
                roles_to_output[f"{attribute.namespace}.{attribute.python_property_name()}"] = role
        # Rely on the base class method if no roles were found
        if not roles_to_output:
            return

        self._pre_function_spacing()
        self.out.write("@classmethod")
        if self.out.indent("def _populate_role_data(cls):"):
            self.out.write('"""Populate a role structure with the non-default roles on this node type"""')
            self.out.write("role_data = super()._populate_role_data()")
            for attribute_name, role in roles_to_output.items():
                self.out.write(f"role_data.{attribute_name} = {role}")
            self.out.write("return role_data")
            self.out.exdent()

    # ----------------------------------------------------------------------
    def _generate_attribute_access_help(self):
        """Output the help information describing attribute properties available on this node type"""
        if not self.node_interface.has_attributes():
            return

        def __generate_attribute_access_help(attribute_list: List[AttributeManager]):
            """Output the documentation for a single section of attributes (input/output/state)"""
            if not attribute_list:
                return
            # All attributes are in the same namespace so use the first one to extract its name
            if self.out.indent(f"{attribute_list[0].namespace.capitalize()}:"):
                for attribute in attribute_list:
                    self.out.write(f"{attribute.namespace}.{attribute.python_property_name()}")
                self.out.exdent()

        self.out.write()
        if self.out.indent("Attribute Value Properties:"):
            __generate_attribute_access_help(self.node_interface.all_input_attributes())
            __generate_attribute_access_help(self.node_interface.all_output_attributes())
            __generate_attribute_access_help(self.node_interface.all_state_attributes())
            self.out.exdent()

    # ----------------------------------------------------------------------
    def _generate_database_class(self):
        """Output a class that provides database access for the node's compute method.

The class has nested class members called "inputs", "outputs", and "state" that make access to attribute values more natural: inputValue = Node.inputs.InputAttribute Node.outputs.OutputAttribute = inputValue * 2 """ db_class_name = self.database_class_name() self._pre_class_spacing() if self.out.indent(f"class {db_class_name}(og.Database):"): self.out.write( f'"""Helper class providing simplified access to data on nodes of type {self.node_interface.name}' ) self.out.write() if self.out.indent("Class Members:"): self.out.write("node: Node being evaluated") self.out.exdent() self._generate_attribute_access_help() self._generate_token_help() self.out.write('"""') self._generate_attribute_definitions() self._generate_tokens() self._generate_role_definition_method() input_class_name = self._generate_attribute_class( self.node_interface.all_input_attributes(), namespace=INPUT_NS ) output_class_name = self._generate_attribute_class( self.node_interface.all_output_attributes(), namespace=OUTPUT_NS ) state_class_name = self._generate_attribute_class( self.node_interface.all_state_attributes(), namespace=STATE_NS ) self._pre_function_spacing() if self.out.indent("def __init__(self, node):"): self.out.write("super().__init__(node)") for (value_class_name, namespace) in [ (input_class_name, INPUT_NS), (output_class_name, OUTPUT_NS), (state_class_name, STATE_NS), ]: if value_class_name is not None: self.out.write( f"dynamic_attributes = self.dynamic_attribute_data(node, {PORT_NAMES[namespace]})" ) self.out.write( f"self.{namespace} = {db_class_name}.{value_class_name}" f"(node, self.attributes.{namespace}, dynamic_attributes)" ) self.out.exdent() # When the node is written in Python there are some helper methods to add if self.node_interface.language == LanguageTypeValues.PYTHON: self._generate_database_abi() # By having the node type class object be a static class member a circular import can be avoided. # The node implementation will call OgnTheNodeDatabase.register(OgnTheNode) to handle registration and the # automatic override of any ABI methods that OgnTheNode might implement. 
self._generate_node_registration() # ---------------------------------------------------------------------- def generate_node_interface(self): """Output a Python script containing interface and database support for an OmniGraph node Raises: NodeGenerationError: When there is a failure in the generation of the Python class """ self.out.write(f'"""Support for simplified access to data on nodes of type {self.node_interface.name}') self.out.write() for line in shorten_string_lines_to(self.node_interface.description, 120): self.out.write(line) self.out.write('"""') self.out.write() self.out.write("import omni.graph.core as og") self.out.write("import omni.graph.core._omni_graph_core as _og") self.out.write("import omni.graph.tools.ogn as ogn") imports = [] # Icon path resolution requires more imports if self.node_interface.icon_path is not None: imports.append("import carb") # Python-implemented nodes need access to stack information for compute error reporting if self.node_interface.language == LanguageTypeValues.PYTHON: imports.append("import sys") imports.append("import traceback") # Imports required by the attributes for attribute in self.node_interface.all_attributes(): imports += attribute.python_imports() for import_statement in set(imports): self.out.write(import_statement) # Both Python and C++ nodes benefit from the use of the Pythonic database class self._generate_database_class() # ====================================================================== def generate_python(configuration: GeneratorConfiguration) -> Optional[str]: """Create support files for the pythonic interface to a node Args: configuration: Information defining how and where the documentation will be generated Returns: String containing the generated Python database definition or None if its generation was not enabled Raises: NodeGenerationError: When there is a failure in the generation of the Python database """ if not configuration.node_interface.can_generate("python"): return None logger.info("Generating Python Database") generator = NodePythonGenerator(configuration) generator.generate_interface() return str(generator.out)
38,485
Python
52.011019
120
0.571417
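The generated database pattern above is easiest to see from the consumer side. Below is a minimal sketch of a user-written Python node as it would sit on top of a database class emitted by this generator; the node name OgnDoubleIt and its attributes are invented for illustration, while the compute signature, the locked-inputs behaviour, and the db.log_error error path mirror the generated abi.compute wrapper shown above.

class OgnDoubleIt:
    """Hypothetical node implementation: doubles inputs.value into outputs.result"""

    @staticmethod
    def compute(db) -> bool:
        # Reads go through db.inputs.*. The generated abi.compute sets
        # db.inputs._setting_locked = True before calling this method, so
        # assigning to an input here raises, mirroring C++ const inputs.
        db.outputs.result = db.inputs.value * 2
        # Any exception raised here is caught by the generated wrapper,
        # reported through db.log_error with a stack trace, and turned
        # into a failed compute.
        return True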
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/main_docs.py
""" Create a table off contents file in index.rst that references all of the OmniGraph node generated documentation files that live in that directory. This processing is highly tied to the formatting of the OGN generated documentation files so if they change this has to as well. The table of contents will be in two sections. A table consisting of columns with [node name, node version, link to node doc file, link to node appendix entry] An appendix with headers consisting of the node name and body consisting of the node's description """ import argparse import logging import os from pathlib import Path from typing import List, Optional from .generate_documentation import ( RE_OGN_BODY_MARKER, RE_OGN_DESCRIPTION_TITLE, RE_OGN_DOC_FILENAME, RE_OGN_INPUTS_TITLE, RE_OGN_NAME_INFO, ) from .utils import WritableDir, logger, rst_table, rst_title # If True then perform more aggressive directory checks, not safe in a multi-threaded environment SAFE_DIRECTORY_CREATION = False # Name of the generated index file INDEX_FILENAME = "index.rst" # Selectively turn on logging if the OGN debugging environment variable is set logger.setLevel(logging.DEBUG if os.getenv("OGN_DEBUG") else logging.WARN) # ====================================================================== def construct_parser() -> argparse.ArgumentParser: """Construct and return the parser for the script arguments""" # If no output directory is specified generated files will end up in the current directory default_output_dir = Path.cwd() # This helps format the usage information in a nicer way os.putenv("COLUMNS", "120") # Construct the parsing information. Run the script with "--help" to see the usage. parser = argparse.ArgumentParser( description="Read a directory of OGN documentation files and create an index for them", formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument( "-od", "--ognDirectory", action=WritableDir, const=default_output_dir, type=Path, metavar="DIR", help="directory containing the OGN documentation files, where the index will be generated", ) parser.add_argument("-v", "--verbose", action="store_true", help="output the steps the script is performing") return parser # ====================================================================== class OgnIndexCreator: """Handler to analyze OGN documentation files and generate a Table of Contents for them Attributes: documentation_files: List of documentation files found in the directory index_file_path: Full path to the generated index file ogn_directory: Path where the table of contents will be generated """ def __init__(self, ogn_directory: Path): """Read the contents of the docs directory and prepare it for generation""" logger.info("Scanning documentation directory %s", ogn_directory) self.ogn_directory = ogn_directory self.index_file_path = self.ogn_directory / INDEX_FILENAME self.documentation_files = [] for path_object in self.ogn_directory.glob("**/*"): if path_object.is_file() and RE_OGN_DOC_FILENAME.search(str(path_object)): self.documentation_files.append(path_object) # ---------------------------------------------------------------------- def extract_node_information(self, ogn_doc_path: str): """Read the OGN documentation file and extract the information to use for the index Patterns assumed, in order: One line matching RE_OGN_NAME_INFO with the node name One line matching RE_OGN_DESCRIPTION_TITLE, followed immediately by... 
                ...one line with the RST title underline (probably dashes)
            An undefined number of lines containing the node description
            A blank line (to be omitted)
            One line matching RE_OGN_INPUTS_TITLE, marking the end of the description
            The rest will be ignored as there is no more relevant information

        Args:
            ogn_doc_path: Path to the node's documentation file

        Returns:
            (Marker, Name, Description) tuple with the node's information
        """
        in_body = False
        name = None
        description = []
        marker = None
        found_description = False  # True after the description title was found
        in_description = False  # True after the description body is entered
        try:
            with open(ogn_doc_path, "r", encoding="utf-8") as doc_fd:
                for line in doc_fd:
                    if not in_body:
                        body_marker_match = RE_OGN_BODY_MARKER.match(line)
                        if body_marker_match:
                            in_body = True
                            marker = body_marker_match.group(1)
                    elif name is None:
                        name_match = RE_OGN_NAME_INFO.match(line)
                        if name_match:
                            name = name_match.group(1)
                    elif found_description:
                        found_description = False
                        in_description = True
                    elif in_description:
                        if RE_OGN_INPUTS_TITLE.search(line):
                            in_description = False
                            break
                        description.append(line[:-1])
                    elif RE_OGN_DESCRIPTION_TITLE.search(line):
                        found_description = True
            # If attributes were not found then an extra blank line is needed to separate sections
            if in_description:
                description.append("\n")
            if marker is None:
                logger.error("Marker not found in %s", ogn_doc_path)
            if name is None:
                logger.error("Name not found in %s", ogn_doc_path)
            if not description:
                logger.error("Description not found in %s", ogn_doc_path)
        except Exception as error:  # noqa: PLW0703
            # Report the failure but continue processing
            logger.error("Error processing %s: %s", ogn_doc_path, error)
        # Guard the rstrip so a missing name is reported by the logged error above rather than an AttributeError
        return (marker, name.rstrip() if name is not None else name, description)

    # ----------------------------------------------------------------------
    def index_is_out_of_date(self) -> bool:
        """Returns True if the index file is older than any of the other files in the directory"""
        if not self.index_file_path.is_file():
            return True
        index_modified_time = self.index_file_path.lstat().st_mtime
        return any(
            index_modified_time < documentation_file.lstat().st_mtime
            for documentation_file in self.documentation_files
        )

    # ----------------------------------------------------------------------
    def construct_index(self):
        """Construct the table of contents in an index file"""
        if not self.index_is_out_of_date():
            logger.info("Documentation is up to date. Index generation skipped")
            return
        # Dictionary containing the information needed to generate the index file
        # Key = Node File, Value = [Marker, Name, Description]
        node_information = {}
        for ogn_doc_file in self.documentation_files:
            logger.info("Processing %s", ogn_doc_file)
            node_information[ogn_doc_file] = self.extract_node_information(ogn_doc_file)
        sorted_keys = sorted(node_information.keys(), key=lambda key: node_information[key][1])

        rows = [["Node", "Detailed Documentation"]]
        if not node_information:
            # Avoid a table with no contents, as that will generate a syntax error
            rows.append(["", ""])
        else:
            for ogn_doc_file in sorted_keys:
                (marker, node_name, _) = node_information[ogn_doc_file]
                rows.append([f"`{node_name}`_", f":ref:`{marker}`"])

        try:
            with open(self.index_file_path, "w", newline="\n", encoding="utf-8") as index_file:
                index_file.write(rst_title("OGN Node List", 0))
                index_file.write("\n\n.. tabularcolumns:: |l|l|\n\n")
                index_file.write(rst_table(rows))
                index_file.write("\n")
                index_file.write(rst_title("Node Descriptions", 1))
                index_file.write("\n")
                for ogn_doc_file in sorted_keys:
                    (_, node_name, node_documentation) = node_information[ogn_doc_file]
                    index_file.write(f"{rst_title(node_name, 1)}\n")
                    index_file.write("\n".join(node_documentation))
        except Exception as error:  # noqa: PLW0703
            logger.error("Cannot write to index file %s : %s", self.index_file_path, error)


# ======================================================================
def main_docs(args_to_parse: Optional[List] = None):
    """Parse the contents of sys.argv and perform the requested function."""
    parser = construct_parser()
    args = parser.parse_args(args_to_parse)

    # If the script steps are to be echoed enable the logger and dump the script arguments as a first step
    logger.setLevel(logging.DEBUG if args.verbose else logging.WARN)
    logger.info("ognDirectory = %s", args.ognDirectory)

    index_handler = OgnIndexCreator(args.ognDirectory)
    index_handler.construct_index()


# ======================================================================
if __name__ == "__main__":
    main_docs()
9,594
Python
42.416289
120
0.58818
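For orientation, main_docs above can be driven programmatically as well as from the command line. A minimal sketch, assuming the internal module path as it appears in this tree and a hypothetical docs/ogn directory of generated .rst files:

from omni.graph.tools._impl.node_generator.main_docs import main_docs

# Scan docs/ogn for OGN documentation files and (re)write docs/ogn/index.rst,
# echoing each step because of --verbose.
main_docs(["--ognDirectory", "docs/ogn", "--verbose"])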
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_node_info.py
""" Support for updating the node information file for OmniGraph Nodes. Exported Methods: generate_node_info """ import json from json.decoder import JSONDecodeError from typing import Optional from .nodes import NodeInterfaceGenerator from .utils import GeneratorConfiguration, ParseError, ensure_writable_directory, logger __all__ = [ "generate_node_info", ] # ====================================================================== class NodeInfoGenerator(NodeInterfaceGenerator): """Manage the functions required to generate a C++ interface for a node""" def __init__(self, configuration: GeneratorConfiguration): """Set up the generator and output the meta-information for the node Just passes the initialization on to the parent class. See the argument and exception descriptions there. """ logger.info("Creating NodeInfoGenerator") try: ensure_writable_directory(configuration.destination_directory) except Exception as error: raise ParseError("Unable to create node information directory") from error super().__init__(configuration) try: with open(self.output_path, "r", encoding="utf-8") as output_fd: self.node_information = json.load(output_fd) except (FileNotFoundError, KeyError, JSONDecodeError): self.node_information = {"nodes": {}} # ---------------------------------------------------------------------- def interface_file_name(self) -> str: """Return the path to the name of the node information file, relative to the configured directory""" return "nodes.json" # ---------------------------------------------------------------------- def generate_node_interface(self): """Generate the node information for the node""" logger.info("Generating node information for node %s", self.node_interface.name) this_nodes_information = { self.node_interface.name: { "description": self.node_interface.description, "version": self.node_interface.version, "extension": self.extension, "language": self.node_interface.language, } } self.node_information["nodes"].update(this_nodes_information) node_info_as_json = json.dumps(self.node_information, indent=4) self.out.write(node_info_as_json) # ====================================================================== def generate_node_info(configuration: GeneratorConfiguration) -> Optional[str]: """Create or modify the extension's node information file Args: configuration: Information defining how and where the node information file will be generated Returns: String containing the generated/updated node information Raises: NodeGenerationError: When there is a failure in the generation of the node information file """ logger.info("Generating node information") generator = NodeInfoGenerator(configuration) generator.generate_interface() return str(generator.out)
3,126
Python
37.604938
113
0.615803
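Based on generate_node_interface above, the nodes.json file written by NodeInfoGenerator accumulates one entry per node under a top-level "nodes" key. An illustrative shape with placeholder values (the real description, version, extension, and language come from the parsed .ogn file; the node name here is made up):

# Illustrative shape of the accumulated node information, as a Python literal
node_information = {
    "nodes": {
        "omni.example.MyNode": {
            "description": "What the node does",
            "version": 1,
            "extension": "omni.example",
            "language": "<LanguageTypeValues entry>",
        }
    }
}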
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/main.py
# noqa: E501,PLW1203 """Generate code and documentation for an OmniGraph Node description file. Takes a JSON file containing information describing the configuration of an OmniGraph node and generates a header file implementing a simplified interface to the graph ABI. Run this script with the arg "--help" to see available functions in this form, followed by the current list of supported attribute types: usage: generate_node.py [-h] [-cd DIR] [-c [DIR]] [-d [DIR]] [-e EXTENSION_NAME] [-i [DIR]] [-in [INTERMEDIATE_DIRECTORY]] [-m [PYTHON_IMPORT_MODULE]] [-n [FILE.ogn]] [-p [DIR]] [-s SETTING_NAME] [-t [DIR]] [-td FILE.json] [-tp [DIR]] [-u] [-usd [DIR]] [-uw [DIR]] [-v] Parse a node interface description file and generate code or documentation optional arguments: -h, --help show this help message and exit -cd DIR, --configDirectory DIR the directory containing the code generator configuration files (default is current) -c [DIR], --cpp [DIR] generate the C++ interface class into the specified directory (default is current) -d [DIR], --docs [DIR] generate the node documentation into the specified directory (default is current) -e EXTENSION_NAME, --extension EXTENSION_NAME name of the extension requesting the generation -i [DIR], --icons [DIR] directory into which to install the icon, if one is found -in [INTERMEDIATE_DIRECTORY], --intermediate [INTERMEDIATE_DIRECTORY] directory into which temporary build information is stored -m [PYTHON_IMPORT_MODULE], --module [PYTHON_IMPORT_MODULE] Python module where the Python node files live -n [FILE.ogn], --nodeFile [FILE.ogn] file containing the node description (use stdin if file name is omitted) -p [DIR], --python [DIR] generate the Python interface class into the specified directory (default is current) -s SETTING_NAME, --settings SETTING_NAME define one or more build-specific settings that can be used to change the generated code at runtime -t [DIR], --tests [DIR] generate a file containing basic operational tests for this node -td FILE.json, --typeDefinitions FILE.json file name containing the mapping to use from OGN type names to generated code types -tp [DIR], --template [DIR] generate an annotated template for the C++ node class into the specified directory (default is current) -u, --unitTests run the unit tests on this file -usd [DIR], --usdPath [DIR] generate a file containing a USD template for nodes of this type -uw [DIR], --unwritable [DIR] mark the generated directory as unwritable at runtime -v, --verbose output the steps the script is performing as it performs them """ import argparse import logging import os import sys from pathlib import Path from typing import List, Optional from .attributes.management import formatted_supported_attribute_type_names from .category_definitions import get_category_definitions from .generate_cpp import generate_cpp from .generate_documentation import generate_documentation from .generate_icon import generate_icon from .generate_node_info import generate_node_info from .generate_python import generate_python from .generate_template import generate_template from .generate_tests import generate_tests from .generate_usd import generate_usd from .keys import LanguageTypeValues from .nodes import NodeInterfaceWrapper from .type_definitions import apply_type_definitions from .utils import ( UNWRITABLE_TAG_FILE, GeneratorConfiguration, ParseError, Settings, UnimplementedError, ensure_writable_directory, logger, ) __all__ = ["main"] # ====================================================================== def 
construct_parser() -> argparse.ArgumentParser:
    """Construct and return the parser for the script arguments"""

    class ReadableDir(argparse.Action):
        """Helper class for the parser to check for a readable directory"""

        def __call__(self, parser, namespace, values, option_string=None):
            """Function called by the arg parser to verify that a directory exists and is readable

            Args:
                parser: argparser required argument, ignored
                namespace: argparser required argument, ignored
                values: The path to the directory being checked for readability
                option_string: argparser required argument, ignored

            Raises:
                argparse.ArgumentTypeError if the requested directory cannot be found or created in readable mode
            """
            prospective_dir = values
            try:
                # If the directory can't be read then listdir will raise an exception
                if os.listdir(prospective_dir):
                    setattr(namespace, self.dest, prospective_dir)
            except Exception as error:
                raise argparse.ArgumentTypeError(str(error))

    class WritableDir(argparse.Action):
        """Helper class for the parser to check for a writable directory"""

        def __call__(self, parser, namespace, values, option_string=None):
            """Function called by the arg parser to verify that a directory exists and is writable

            Args:
                parser: argparser required argument, ignored
                namespace: argparser required argument, ignored
                values: The path to the directory being checked for writability
                option_string: argparser required argument, ignored

            Raises:
                argparse.ArgumentTypeError if the requested directory cannot be found or created in writable mode
            """
            prospective_dir = values
            try:
                ensure_writable_directory(prospective_dir)
                setattr(namespace, self.dest, prospective_dir)
            except Exception as error:
                raise argparse.ArgumentTypeError(str(error))

    # If no output directory is specified generated files will end up in the current directory
    default_output_dir = os.path.realpath(os.getcwd())

    # This helps format the usage information in a nicer way
    os.putenv("COLUMNS", "120")

    # Generate a message enumerating the set of attribute types currently supported
    available_attribute_types = formatted_supported_attribute_type_names()
    formatted_types = "\n\t".join(available_attribute_types)
    epilog = "Available attribute types:\n\t" + formatted_types
    available_settings = Settings().all()
    if available_settings:
        epilog += "\nAvailable settings:\n\t" + "\n\t".join(
            [f"{name}: {description}" for name, (_, description) in available_settings.items()]
        )

    # Construct the parsing information. Run the script with "--help" to see the usage.
parser = argparse.ArgumentParser( description="Parse a node interface description file and generate code or documentation", formatter_class=argparse.RawTextHelpFormatter, epilog=epilog, ) parser.add_argument( "-cd", "--configDirectory", action=ReadableDir, const=default_output_dir, metavar="DIR", help="the directory containing the code generator configuration files (default is current)", ) parser.add_argument( "-c", "--cpp", action=WritableDir, nargs="?", const=default_output_dir, metavar="DIR", help="generate the C++ interface class into the specified directory (default is current)", ) parser.add_argument( "-d", "--docs", action=WritableDir, nargs="?", const=default_output_dir, metavar="DIR", help="generate the node documentation into the specified directory (default is current)", ) parser.add_argument( "-e", "--extension", action="store", metavar="EXTENSION_NAME", default=None, help="name of the extension requesting the generation", ) # Notice how, unlike other directory names, this one is not a "WritableDir" as the directory should only # be created if the node happens to have an icon, which isn't discovered until parse time. parser.add_argument( "-i", "--icons", action="store", nargs="?", const=default_output_dir, metavar="DIR", help="directory into which to install the icon, if one is found", ) parser.add_argument( "-in", "--intermediate", action=WritableDir, nargs="?", const=default_output_dir, metavar="INTERMEDIATE_DIRECTORY", help="directory into which temporary build information is stored", ) parser.add_argument( "-m", "--module", nargs="?", action="store", metavar="PYTHON_IMPORT_MODULE", help="Python module where the Python node files live", ) parser.add_argument( "-n", "--nodeFile", nargs="?", type=argparse.FileType("r"), const=sys.stdin, help="file containing the node description (use stdin if file name is omitted)", metavar="FILE.ogn", ) parser.add_argument( "-p", "--python", action=WritableDir, nargs="?", const=default_output_dir, metavar="DIR", help="generate the Python interface class into the specified directory (default is current)", ) parser.add_argument( "-s", "--settings", type=str, action="append", metavar="SETTING_NAME", help="define one or more build-specific settings that can be used to change the generated code at runtime", ) parser.add_argument( "-t", "--tests", action=WritableDir, nargs="?", const=default_output_dir, metavar="DIR", help="generate a file containing basic operational tests for this node", ) parser.add_argument( "-td", "--typeDefinitions", action="store", default=None, help="file name containing the mapping to use from OGN type names to generated code types", metavar="FILE.json", ) parser.add_argument( "-tp", "--template", action=WritableDir, nargs="?", const=default_output_dir, metavar="DIR", help="generate an annotated template for the C++ node class into the specified directory (default is current)", ) parser.add_argument("-u", "--unitTests", action="store_true", help="run the unit tests on this file") parser.add_argument( "-usd", "--usdPath", action=WritableDir, nargs="?", const=default_output_dir, metavar="DIR", help="generate a file containing a USD template for nodes of this type", ) parser.add_argument( "-uw", "--unwritable", action=WritableDir, nargs="?", const=default_output_dir, metavar="DIR", help="mark the generated directory as unwritable at runtime", ) parser.add_argument( "-v", "--verbose", action="store_true", help="output the steps the script is performing as it performs them" ) return parser # 
====================================================================== def main(args_to_parse: Optional[List] = None): """Parse the contents of the argument list and perform the requested function. Uses sys.argv if None.""" parser = construct_parser() args = parser.parse_args(args_to_parse) # If the script steps are to be echoed enable the logger and dump the script arguments as a first step logger.setLevel(logging.DEBUG if args.verbose else logging.WARN) logger.info("cpp == %s", args.cpp) logger.info("configDirectory == %s", args.configDirectory) logger.info("template == %s", args.template) logger.info("docs == %s", args.docs) logger.info("extension == %s", args.extension) logger.info("icons == %s", args.icons) logger.info("intermediate == %s", args.intermediate) logger.info("module == %s", args.module) logger.info("nodeFile == %s", args.nodeFile) logger.info("python == %s", args.python) logger.info("settings == %s", args.settings) logger.info("tests == %s", args.tests) logger.info("typeDefinitions == %s", args.typeDefinitions) logger.info("unitTests == %s", args.unitTests) logger.info("unwritable == %s", args.unwritable) logger.info("usdPath == %s", args.usdPath) logger.info("verbose == %s", args.verbose) if args.unitTests: logger.info("Running unit tests") from ..tests.test_node_generator import run_tests as run_tests_general # noqa: PLE0402 from ..tests.test_node_generator_attributes import run_tests as run_tests_attributes # noqa: PLE0402 from ..tests.test_node_generator_data_types import run_tests as run_tests_data_types # noqa: PLE0402 from ..tests.test_node_generator_illegal import run_tests as run_tests_illegal # noqa: PLE0402 run_tests_general() run_tests_data_types() run_tests_illegal() run_tests_attributes() # Create the settings object from the list of settings specified on the command line. # Every setting keyword is assumed to be a boolean, set to true when it is passed in. settings = Settings() if args.settings is not None: for setting in args.settings: try: setattr(settings, setting, True) except AttributeError as error: raise ParseError(f"{setting} is not in the known settings list [{settings}]") from error # If there is a node to parse then do so node_interface_wrapper = None if not args.nodeFile: if args.docs or args.cpp or args.template or args.python or args.tests: logger.error("Cannot generate code unless you specify a nodeFile") return try: # Read in the standard set of category definitions if it can be found categories_allowed = {} if args.configDirectory is not None: config_dir_type_path = Path(args.configDirectory, "CategoryConfiguration.json") if config_dir_type_path.is_file(): categories_allowed = get_category_definitions(config_dir_type_path) base_name, node_ext = os.path.splitext(os.path.basename(args.nodeFile.name)) if node_ext != ".ogn": logger.error("Node files must have the .ogn extension") return if (args.python or args.docs or args.tests) and not args.module: logger.error("When generating Python code or documentation you must include the 'module' argument") return node_interface_wrapper = NodeInterfaceWrapper( args.nodeFile, extension=args.extension, config_directory=args.configDirectory, categories_allowed=categories_allowed, ) logger.info("Parsed interface for %s", node_interface_wrapper.node_interface.name) try: all_supported = True node_interface_wrapper.check_support() except UnimplementedError as error: all_supported = False logger.warning("Some attributes are not supported. 
Only documentation will be generated.\n\t%s", error)

        # Applying the type definitions makes them take immediate effect, which means adding/modifying members of
        # the AttributeManager class hierarchy.
        if args.typeDefinitions is not None:
            type_definition_path = Path(args.typeDefinitions)
            if type_definition_path.is_file():
                apply_type_definitions(args.typeDefinitions)
            elif not type_definition_path.is_absolute():
                config_dir_type_path = Path(args.configDirectory, args.typeDefinitions)
                if config_dir_type_path.is_file():
                    apply_type_definitions(config_dir_type_path)
                else:
                    raise ParseError(
                        f"Type definitions '{args.typeDefinitions}' not found in"
                        f" config directory '{args.configDirectory}'"
                    )
            else:
                raise ParseError(f"Absolute type definition path '{args.typeDefinitions}' not found")

        # Sanity check to see if there is a Python file of the same name as the .ogn file but the language was
        # not specified as Python.
        if node_interface_wrapper.node_interface.language != LanguageTypeValues.PYTHON:
            python_file_name = args.nodeFile.name.replace(".ogn", ".py")
            if os.path.isfile(python_file_name):
                raise ParseError(f"Python node file {python_file_name} exists but language was not set to Python")

        # If there is no generation happening then emit a message indicating the success of the parse.
        # (Failure of the parse would have already been indicated by a ParseError exception)
        if not args.docs and not args.cpp and not args.python:
            print(f"Node file {args.nodeFile.name} successfully validated")

        configuration = GeneratorConfiguration(
            args.nodeFile.name,
            node_interface_wrapper.node_interface,
            args.extension,
            args.module,
            base_name,
            None,
            args.verbose,
            settings,
        )

        # The node interface may have an override on the path - get rid of it if the icon isn't being generated
        configuration.destination_directory = args.icons
        node_interface_wrapper.node_interface.icon_path = generate_icon(configuration) if args.icons else None

        configuration.destination_directory = args.docs
        _ = generate_documentation(configuration) if args.docs else None
        configuration.destination_directory = str(Path(args.icons).parent)
        _ = generate_node_info(configuration) if args.docs and args.icons else None
        configuration.destination_directory = args.cpp
        _ = generate_cpp(configuration, all_supported) if args.cpp else None
        configuration.destination_directory = args.template
        _ = generate_template(configuration) if args.template else None
        configuration.destination_directory = args.python
        _ = generate_python(configuration) if args.python and all_supported else None
        configuration.destination_directory = args.tests
        _ = generate_tests(configuration) if args.tests and all_supported else None
        configuration.destination_directory = args.usdPath
        _ = generate_usd(configuration) if args.usdPath and all_supported else None

        # The intermediate directory contains a tag file per-node that can be used to determine if the code generator
        # has been run since the last time the .ogn file was modified. The cost is that deletion of generated files
        # will not trigger their rebuild, but as the information of which files are generated is only known after
        # processing that is an acceptable tradeoff. (The alternative would be a much more verbose system that creates
        # a separate tag per generated file with all of the extra build dependencies required to make that work.)
if args.intermediate: logger.info("Tagging the file as being built") intermediate_tag_path = os.path.join(args.intermediate, f"{os.path.basename(args.nodeFile.name)}.built") with open(intermediate_tag_path, "w", newline="\n", encoding="utf-8") as tag_fd: tag_fd.write("The presence of this file tags the last time its .ogn file was processed") if args.unwritable: logger.info("Tagging the generated directory as unwritable") unwritable_tag_path = os.path.join(args.unwritable, UNWRITABLE_TAG_FILE) with open(unwritable_tag_path, "w", newline="\n", encoding="utf-8") as tag_fd: tag_fd.write("The presence of this file ensures the directory will not regenerate at runtime") except Exception as error: raise ParseError(f"{os.path.basename(args.nodeFile.name)} failed") from error if __name__ == "__main__": main()
20,790
Python
43.61588
119
0.631746
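A minimal sketch of a typical build-time invocation of main above, with hypothetical paths; note that --module is required whenever --python, --docs, or --tests is requested, matching the check in main:

from omni.graph.tools._impl.node_generator.main import main

main([
    "--nodeFile", "nodes/OgnMyNode.ogn",   # parsed .ogn description (must exist)
    "--cpp", "generated/include",          # C++ database header
    "--python", "generated/python",        # Python database class
    "--tests", "generated/tests",          # generated unit tests
    "--module", "omni.example",            # import module for the Python files
    "--extension", "omni.example",         # extension requesting the generation
])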
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_template.py
"""Support for generating an annotated C++ template class for OmniGraph Nodes. Exports: generate_template: Create a NODE_template.cpp file containing sample uses of the generated interface """ from typing import List, Optional from .attributes.AttributeManager import AttributeManager from .attributes.naming import INPUT_GROUP, OUTPUT_GROUP, STATE_GROUP, namespace_of_group from .keys import LanguageTypeValues from .nodes import NodeInterfaceGenerator from .utils import GeneratorConfiguration, UnimplementedError, logger, to_comment, to_cpp_comment __all__ = ["generate_template"] class NodeTemplateGenerator(NodeInterfaceGenerator): """Manage the functions required to generate an annotated template class for a node""" def __init__(self, configuration: GeneratorConfiguration): """Set up the generator and output the annotated template class for the node Checks the language support. """ self.template_extension = None if configuration.node_interface.language == LanguageTypeValues.CPP: self.template_extension = "cpp" elif configuration.node_interface.language == LanguageTypeValues.PYTHON: self.template_extension = "py" else: language_name = "|".join(LanguageTypeValues.ALL[self.node_interface.language]) raise UnimplementedError(f"Template generation not supported for '{language_name}' files") # This needs the extension set to properly define the interface file name so do it after that super().__init__(configuration) # ---------------------------------------------------------------------- def interface_file_name(self) -> str: """Return the path to the name of the template file""" return self.base_name + "." + self.template_extension # ---------------------------------------------------------------------- def generate_cpp_attribute_info(self, attribute_list: List[AttributeManager], attribute_group: str): """Generate the comments explaining how to access the values of attributes in the list Args: attribute_list: List of attributes for which explanations are to be emitted attribute_group: Enum with the attribute's group (input, output, or state) """ namespace = namespace_of_group(attribute_group) for attribute in attribute_list: self.out.write() if attribute_group != INPUT_GROUP: if attribute.fabric_needs_counter(): self.out.write("// Before setting array outputs you must first set their size to allocate space") self.out.write(f"// db.{namespace}.{attribute.base_name}.size() = newOutputSize;") self.out.write(f"// auto& output{attribute.base_name} = db.{namespace}.{attribute.base_name}();") else: self.out.write(f"// const auto& input_value = db.{namespace}.{attribute.base_name}();") role = attribute.cpp_role_name() if role: self.out.write("// Roles for role-based attributes can be found by name using this member") self.out.write(f"// auto roleName = db.{namespace}.{attribute.base_name}.role();") # ---------------------------------------------------------------------- def generate_cpp_template(self): """Write out a template for a C++ node describing use of the current OGN configuration. 
        Raises:
            NodeGenerationError: When there is a failure in the generation of the C++ interface
        """
        # Rely on the formatter to insert the copyright here
        node_description = to_cpp_comment(self.node_interface.description)
        self.out.write(f"{node_description}")
        self.out.write(f"#include <{self.base_name}Database.h>")
        self.out.write(f"class {self.base_name}")
        self.out.write("{")
        if self.out.indent("public:"):
            self.out.write(f"static bool compute({self.base_name}Database& db)")
            if self.out.indent("{"):
                input_attributes = self.node_interface.all_input_attributes()
                if input_attributes:
                    self.out.write("// ======================================================================")
                    self.out.write("// Use these methods to access the input values")
                    self.out.write("// ======================================================================")
                    self.generate_cpp_attribute_info(self.node_interface.all_input_attributes(), INPUT_GROUP)
                    self.out.write()
                output_attributes = self.node_interface.all_output_attributes()
                if output_attributes:
                    self.out.write("// ======================================================================")
                    self.out.write("// Use these methods to set the output values")
                    self.out.write("// ======================================================================")
                    self.generate_cpp_attribute_info(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
                    self.out.write()
                state_attributes = self.node_interface.all_state_attributes()
                if state_attributes:
                    self.out.write("// ======================================================================")
                    self.out.write("// Use these methods to set the state values")
                    self.out.write("// ======================================================================")
                    self.generate_cpp_attribute_info(state_attributes, STATE_GROUP)
                    self.out.write()
                self.out.write("// ======================================================================")
                self.out.write("// If you have predefined any tokens you can access them by name like this")
                self.out.write("// ======================================================================")
                self.out.write("auto myColorToken = db.tokens.color;")
                self.out.write()
                self.out.write("return true;")
                self.out.exdent("}")
            self.out.exdent("};")

    # ----------------------------------------------------------------------
    def generate_python_attribute_info(self, attribute_list: List[AttributeManager], attribute_group: str):
        """Generate the comments explaining how to access the values of attributes in the list

        Args:
            attribute_list: List of attributes for which explanations are to be emitted
            attribute_group: Enum with the attribute's group (input, output, or state)
        """
        namespace = namespace_of_group(attribute_group)
        for attribute in attribute_list:
            self.out.write()
            if attribute_group != INPUT_GROUP:
                if attribute.fabric_needs_counter():
                    self.out.write("# Before setting array outputs you must first set their size to allocate space")
                    self.out.write(f"# db.{namespace}.{attribute.base_name}_size = new_output_size")
                self.out.write(f"# db.{namespace}.{attribute.base_name} = new_output_value")
            else:
                self.out.write(f"# input_value = db.{namespace}.{attribute.base_name}")
            role = attribute.python_role_name()
            if role:
                self.out.write("# Roles for role-based attributes can be found by name using this member")
                self.out.write(f"# role_name = db.role.{namespace}.{attribute.base_name}")

    # ----------------------------------------------------------------------
    def generate_python_template(self):
        """Write out the code associated with the node.

        Raises:
            NodeGenerationError: When there is a failure in the generation of the Python template
        """
        self.out.write('"""')
        self.out.write(f"This is the implementation of the OGN node defined in {self.base_name}.ogn")
        self.out.write('"""')
        self.out.write()
        self.out.write("# Array or tuple values are accessed as numpy arrays so you probably need this import")
        self.out.write("import numpy")
        self.out.write()
        self.out.write()
        if self.out.indent(f"class {self.base_name}:"):
            node_description = to_comment("", self.node_interface.description, 1)
            self.out.write('"""')
            self.out.write(node_description)
            self.out.write('"""')
            self.out.write("@staticmethod")
            if self.out.indent("def compute(db) -> bool:"):
                self.out.write('"""Compute the outputs from the current input"""\n')
                if self.out.indent("try:"):
                    self.out.write("# With the compute in a try block you can fail the compute by raising an exception")
                    input_attributes = self.node_interface.all_input_attributes()
                    if input_attributes:
                        self.out.write("# ======================================================================")
                        self.out.write("# Use these methods to access the input values")
                        self.out.write("# ======================================================================")
                        self.generate_python_attribute_info(self.node_interface.all_input_attributes(), INPUT_GROUP)
                        self.out.write()
                    output_attributes = self.node_interface.all_output_attributes()
                    if output_attributes:
                        self.out.write("# ======================================================================")
                        self.out.write("# Use these methods to set the output values")
                        self.out.write("# ======================================================================")
                        self.generate_python_attribute_info(self.node_interface.all_output_attributes(), OUTPUT_GROUP)
                        self.out.write()
                    state_attributes = self.node_interface.all_state_attributes()
                    if state_attributes:
                        self.out.write("# ======================================================================")
                        self.out.write("# Use these methods to set the state values")
                        self.out.write("# ======================================================================")
                        self.generate_python_attribute_info(state_attributes, STATE_GROUP)
                        self.out.write()
                    self.out.write("pass")
                    self.out.exdent()
                if self.out.indent("except Exception as error:"):
                    self.out.write("# If anything causes your compute to fail report the error and return False")
                    self.out.write("db.log_error(str(error))")
                    self.out.write("return False")
                    self.out.exdent()
                self.out.write()
                self.out.write("# Even if inputs were edge cases like empty arrays, correct outputs mean success")
                self.out.write("return True")
                self.out.exdent()
            self.out.exdent()

    # ----------------------------------------------------------------------
    def generate_node_interface(self):
        """Write out a template implementation of the node in the requested language.

        Raises:
            NodeGenerationError: When there is a failure in the generation of the template
        """
        if self.node_interface.language == LanguageTypeValues.CPP:
            self.generate_cpp_template()
        elif self.node_interface.language == LanguageTypeValues.PYTHON:
            self.generate_python_template()


# ======================================================================
def generate_template(configuration: GeneratorConfiguration) -> Optional[str]:
    """Create an annotated template implementation file for a node

    For now only a single template file is generated, though there will probably be multiple files
    generated in the future. For that reason this single point of contact was created for outside callers.

Args: configuration: Information defining how and where the template will be generated Returns: String containing the generated template class definition or None if its generation was not enabled Raises: NodeGenerationError: When there is a failure in the generation of the header UnimplementedError: When the language of the node does not support template generation """ if not configuration.node_interface.can_generate("template"): return None logger.info("Generating Template Node Implementation Class") generator = NodeTemplateGenerator(configuration) generator.generate_interface() return str(generator.out)
13,115
Python
54.812766
120
0.535951
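To make the generator logic above concrete, this is roughly the Python template that generate_python_template emits for a hypothetical OgnMyNode.ogn with one input and one output (banner comments trimmed, class docstring omitted); the attribute names are placeholders:

"""
This is the implementation of the OGN node defined in OgnMyNode.ogn
"""
# Array or tuple values are accessed as numpy arrays so you probably need this import
import numpy


class OgnMyNode:
    @staticmethod
    def compute(db) -> bool:
        """Compute the outputs from the current input"""
        try:
            # With the compute in a try block you can fail the compute by raising an exception
            # input_value = db.inputs.value
            # db.outputs.result = new_output_value
            pass
        except Exception as error:
            # If anything causes your compute to fail report the error and return False
            db.log_error(str(error))
            return False
        # Even if inputs were edge cases like empty arrays, correct outputs mean success
        return True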
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/type_definitions.py
"""Handle the mapping of OGN types onto the various generated code types""" import json from contextlib import suppress from io import TextIOWrapper from pathlib import Path from typing import IO, Dict, List, Tuple, Union from .attributes.AttributeManager import CppConfiguration from .attributes.management import get_attribute_manager_type from .keys import NodeTypeKeys from .utils import ParseError, is_comment class __TypeDefinitions: """Use the function apply_type_definitions instead of directly instantiating this class""" def __init__(self, type_definitions: Union[str, IO, Dict, Path, None]): """Initialize the type definition maps based on a JSON definition Internal: __definitions: Dictionary of type information read from the definition description """ try: self.__definitions = {} if type_definitions is None: pass elif isinstance(type_definitions, str): self.__definitions = json.loads(type_definitions)[NodeTypeKeys.TYPE_DEFINITIONS] elif isinstance(type_definitions, Dict): print("Using type definitions straight from a dictionary") self.__definitions = type_definitions[NodeTypeKeys.TYPE_DEFINITIONS] elif isinstance(type_definitions, TextIOWrapper): self.__definitions = json.load(type_definitions)[NodeTypeKeys.TYPE_DEFINITIONS] elif isinstance(type_definitions, Path): self.__definitions = json.load(type_definitions.open("r"))[NodeTypeKeys.TYPE_DEFINITIONS] else: raise ParseError(f"Type definition type not handled - {type_definitions}") except OSError as error: raise ParseError(f"File error when parsing type definitions {type_definitions} - {error}") from None except json.decoder.JSONDecodeError as error: raise ParseError(f"Invalid JSON formatting in file {type_definitions} - {error}") from None # -------------------------------------------------------------------------------------------------------------- def __apply_cpp_definitions(self, configuration_information: Dict[str, Tuple[str, List[str]]]): """Apply type definitions from the definition to the C++ types on the attribute managers Args: configuration_information: Dictionary whose keys are the names of attribute types and whose values are a tuple of the C++ data type name for that attribute type and a list of files to be included to use it """ for attribute_type_name, attribute_type_configuration in configuration_information.items(): # Empty configuration means leave it as-is if not attribute_type_configuration: continue if is_comment(attribute_type_name): continue # Take a single string to mean the type definition, with no extra includes required if isinstance(attribute_type_configuration, str): if attribute_type_configuration: attribute_type_configuration = [attribute_type_configuration] else: attribute_type_configuration = [] attribute_manager = get_attribute_manager_type(attribute_type_name) if attribute_manager is None: raise ParseError(f"Could not find attribute manager type for configuration of {attribute_type_name}") # If there is a change it will have a type and include file list, else skip this one with suppress(AttributeError, KeyError): cast_type = attribute_type_configuration[0] include_files = [] if len(attribute_type_configuration) < 2 else attribute_type_configuration[1] if not isinstance(cast_type, str): raise ParseError( f"Cast type for attribute type {attribute_type_name} must be a string, not {cast_type}" ) if not isinstance(include_files, list): raise ParseError( f"Include files for attribute type {attribute_type_name} must be a list, not {include_files}" ) attribute_manager.override_cpp_configuration(cast_type, 
include_files, cast_required=False) attribute_manager.CPP_CONFIGURATION[attribute_manager.tuple_count] = CppConfiguration( base_type_name=cast_type, include_files=include_files ) # -------------------------------------------------------------------------------------------------------------- def apply_definitions(self): """Apply any type definitions to the attribute manager to which they apply""" for language, configuration_information in self.__definitions.items(): if language == "c++": self.__apply_cpp_definitions(configuration_information) elif not is_comment(language): raise ParseError(f"Configuration for language '{language}' is not supported") # ============================================================================================================== def apply_type_definitions(type_definitions: Union[str, IO, Dict, Path, None]): definitions = __TypeDefinitions(type_definitions) definitions.apply_definitions()
5,428
Python
52.752475
117
0.607222
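A minimal sketch of feeding apply_type_definitions above a definition dictionary directly. The top-level key is assumed here to be "typeDefinitions" (mirroring the --typeDefinitions flag in main.py; the exact string lives in NodeTypeKeys.TYPE_DEFINITIONS), and "float[3]" / carb::Float3 are illustrative attribute and C++ type names rather than values taken from this source:

from omni.graph.tools._impl.node_generator.type_definitions import apply_type_definitions

apply_type_definitions(
    {
        "typeDefinitions": {
            "c++": {
                # attribute type name -> [C++ cast type, list of includes to emit]
                "float[3]": ["carb::Float3", ["<carb/Types.h>"]],
                # a bare string means "use this type, with no extra includes"
                "double": "double",
            }
        }
    }
)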
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_test_imports.py
"""Contains the function that will generate the __init__.py file for the ogn/tests directory.""" import hashlib import sys from importlib import import_module from pathlib import Path from typing import List, Optional from .nodes import NodeGenerationError from .ThreadsafeOpen import ThreadsafeOpen from .utils import logger # ============================================================================================================== def import_tests_in_directory(module_file: str, module_name: str): """Find all of the .ogn-generated tests in a module's directory and import them into that module. This will only be called from the generated test directory __init__.py file, generated below by the import_file_contents() function Args: module_file: Full path of the __init__.py file for the generated test directory (e.g. its __file__) module_name: Module name at which the generated test directory is imported (e.g. its __name__) """ this_dir = Path(module_file).parent this_module = sys.modules[module_name] test_module_files = this_dir.glob("Test*.py") for test_module_file in test_module_files: test_module_name = test_module_file.stem try: test_module = import_module(f".{test_module_name}", module_name) setattr(this_module, test_module_name, test_module.TestOgn) except Exception as error: # noqa: PLW0703 logger.warning( "Failed to import test %s in module %s (%s) - skipping.", test_module_name, module_name, error ) # ============================================================================================================== def import_file_contents(file_to_write: Optional[Path] = None) -> List[str]: """Returns the contents of the tests/__init__.py file that imports the given list of test classes. If a file_to_write is passed in then the contents are written there before returning. Not part of the interface, but not hidden with a dunder so that a test can be written against it.""" # To enable easier upgrades the test directory initialization mostly happens in a utility function file_contents = [ '"""====== GENERATED BY omni.graph.tools - DO NOT EDIT ======"""', "import omni.graph.tools as ogt", "ogt.import_tests_in_directory(__file__, __name__)", ] # Use the md5 to avoid overwriting the file multiple times, which could trigger extension reload expected_md5 = hashlib.md5(bytes("".join([f"{line}\n" for line in file_contents]), "utf-8")).hexdigest() if file_to_write is not None: import_directory = file_to_write.parent # First ensure that the directory in which the file lives exists if not import_directory.exists(): try: import_directory.mkdir(mode=0o777, parents=True, exist_ok=True) except Exception as error: raise NodeGenerationError(f"Cannot create test directory {import_directory}") from error # If the path is not a directory then there is a serious problem that cannot be fixed safely here. 
if not import_directory.is_dir(): raise NodeGenerationError(f"Cannot write __init__.py file to non-directory {import_directory}") if file_to_write.exists(): with open(file_to_write, "rb") as f: if hashlib.md5(f.read()).hexdigest() == expected_md5: return [] # ThreadsafeOpen will report a warning if there's a problem with ThreadsafeOpen(file_to_write, "w", newline="\n") as test_init_fd: test_init_fd.writelines([f"{line}\n" for line in file_contents]) return file_contents # ============================================================================================================== def ensure_test_is_imported(test_class_name: str, test_directory: Path): """Reads the tests __init__.py file and verifies that a test file is imported in it Args: test_class_name: Name of the test class, which is also the name of the file it lives in test_directory: Directory in which the test file lives and where the import will be added if necessary Raises: NodeGenerationError if there was a problem adding the test import """ try: test_init_file = test_directory / "__init__.py" needs_regeneration = True if test_init_file.exists(): # For backward compatibility - check to see if the file in place is the latest version. # If it is then just leave it, otherwise flag it for regeneration. with open(test_init_file, "r", encoding="utf-8") as init_fd: needs_regeneration = init_fd.readline().find("GENERATED") < 0 # If anything is out of date the file has to be rewritten if needs_regeneration: import_file_contents(test_init_file) except Exception as error: # noqa: PLW0703 logger.error("Failed to create test import for %s = %s", test_class_name, error) # ============================================================================================================== def generate_test_imports(test_directory: Path, write_file: bool = False) -> List[str]: """Generates a set of imports for the ogn test modules contained in a directory. Args: test_directory: Directory containing all of the ogn test scripts. No subdirectories are checked. write_file: If True then write the __init__.py file into the named directory, otherwise just return the contents Returns: List of lines of Python code that comprise the test import code required in the __init__.py file """ # Get the statements to perform safe import of the test files test_init_file = test_directory / "__init__.py" if write_file else None file_contents = import_file_contents(test_init_file) return file_contents
5,936
Python
48.066115
120
0.616914
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_cpp.py
# noqa: PLC0302
"""Support for generating C++ interface code for OmniGraph Nodes."""
import json
from itertools import zip_longest
from typing import List, Optional

from .attributes.AttributeManager import AttributeManager
from .attributes.management import list_without_runtime_attributes, split_attribute_list
from .attributes.naming import INPUT_GROUP, INPUT_NS, OUTPUT_GROUP, OUTPUT_NS, STATE_GROUP, STATE_NS, namespace_of_group
from .keys import CudaPointerValues, MemoryTypeValues
from .nodes import NodeInterfaceGenerator
from .utils import (
    OMNI_GRAPH_CORE_EXTENSION,
    GeneratorConfiguration,
    MetadataKeyOutput,
    NameManager,
    ParseError,
    logger,
    to_cpp_comment,
    to_cpp_str,
)

__all__ = ["generate_cpp"]


# ======================================================================
def grouper(iterable, max_size: int):
    """Returns the iterable decomposed into iterables of size "max_size", filling any excess with None"""
    args = [iter(iterable)] * max_size
    return zip_longest(*args, fillvalue=None)


# ======================================================================
class NodeCppGenerator(NodeInterfaceGenerator):
    """Manage the functions required to generate a C++ interface for a node

    Attributes:
        all_supported: True if all attributes in the node are of a supported type
        declarations: List of declarations that need to appear after include files but before code
        __name_manager: Manager for unique name shortening for unimportant internal generated names
        preamble_code: List of include and using lines that need to appear at the top of the header file
    """

    def __init__(self, configuration: GeneratorConfiguration, all_supported: bool):
        """Set up the generator and output the C++ interface code for the node

        Args:
            configuration: Information used to configure the output
            all_supported: True if the node's attributes are all of supported types
        """
        super().__init__(configuration)
        self.all_supported = all_supported
        self.declarations = []
        self.preamble_code = []
        self.__name_manager = NameManager()
        self.__needs_initialize = None
        self.__has_deprecated_attributes = None

    # ----------------------------------------------------------------------
    def __calculate_per_attribute_flags(self):
        """Calculate the flags used to determine whether any attributes meet certain criteria.

        Only call this after all parsing has completed.
""" if self.__needs_initialize is None: all_attributes = self.node_interface.all_input_attributes() all_attributes += self.node_interface.all_output_attributes() all_attributes += self.node_interface.all_state_attributes() self.__needs_initialize = False self.__has_deprecated_attributes = False for attribute in all_attributes: if attribute.is_deprecated: self.__has_deprecated_attributes = True if attribute.metadata or not attribute.is_required or attribute.is_deprecated: self.__needs_initialize = True @property def needs_initialize(self) -> bool: self.__calculate_per_attribute_flags() return self.__needs_initialize @property def has_deprecated_attributes(self) -> bool: self.__calculate_per_attribute_flags() return self.__has_deprecated_attributes # ---------------------------------------------------------------------- def nm(self, variable_name: str) -> str: """Returns unique, possibly shortened version of the unique variableName""" return self.__name_manager.name(variable_name) # ---------------------------------------------------------------------- def database_class_name(self): """Returns the name of the generated database class""" return f"{self.base_name}Database" # ---------------------------------------------------------------------- def state_manager_name(self): """Returns the name of the static object that will be the state manager for this node type""" return f"sm_stateManager{self.base_name}" # ---------------------------------------------------------------------- def generator_version_name(self): """Returns the name of the static object that will hold the code generator version used for this node type""" return f"sm_generatorVersion{self.base_name}" # ---------------------------------------------------------------------- def target_version_name(self): """Returns the name of the static object that will hold the code target version used for this node type. The code target version is the version of omni.graph.core for which the code was generated. 
""" return f"sm_targetVersion{self.base_name}" # ---------------------------------------------------------------------- def interface_file_name(self) -> str: """Return the path to the name of the header file""" return f"{self.database_class_name()}.h" # ---------------------------------------------------------------------- def pre_interface_generation(self): """Create the header information independent of the node itself""" self.preamble_code.append("#include <carb/InterfaceUtils.h>") self.preamble_code.append("#include <omni/graph/core/NodeTypeRegistrar.h>") self.preamble_code.append("#include <omni/graph/core/iComputeGraph.h>") self.preamble_code.append("#include <omni/graph/core/CppWrappers.h>") self.preamble_code.append("#include <carb/flatcache/Enums.h>") self.preamble_code.append("using carb::flatcache::PtrToPtrKind;") if self.extension == OMNI_GRAPH_CORE_EXTENSION: self.preamble_code.append('#include "Token.h"') self.preamble_code.append("#include <map>") self.preamble_code.append("#include <vector>") self.preamble_code.append("#include <tuple>") self.preamble_code.append("#include <omni/graph/core/OgnHelpers.h>") if self.node_interface.icon_path: self.preamble_code.append("#include <carb/tokens/TokensUtils.h>") self.add_attribute_type_setup() # ---------------------------------------------------------------------- def get_file_inclusions(self) -> str: """Return code with the discovered include files in an order that makes sense""" pxr_includes = [] regular_includes = [] if self.node_interface.scheduling_hints is not None: regular_includes += self.node_interface.scheduling_hints.cpp_includes_required() if self.has_deprecated_attributes: regular_includes.append("#include <omni/graph/core/IInternal.h>") # Partition the files into those included from USD and those not. By doing this the # warnings that including USD files can be harmlessly silenced. for include_file in self.preamble_code: if include_file.find("/ogn/UsdTypes.h") < 0: regular_includes.append(include_file) else: pxr_includes.append(include_file) # Poor include practices make it necessary to include the pxr information used by Graph.h just # to include the definition of the direct IToken interface, needed since accessing it indirectly # causes complaints from carb::Framework::tryAcquireInterface(). if self.extension == OMNI_GRAPH_CORE_EXTENSION: pxr_includes.append("#include <omni/graph/core/PreUsdInclude.h>") pxr_includes.append("#include <pxr/usd/sdf/path.h>") pxr_includes.append("#include <pxr/usd/usd/stage.h>") pxr_includes.append("#include <pxr/usd/usd/prim.h>") pxr_includes.append("#include <omni/graph/core/PostUsdInclude.h>") if pxr_includes: # The namespace makes USD access easier, and it's used everywhere already pxr_includes.append("using namespace pxr;\n") return "\n".join(pxr_includes + regular_includes) # ---------------------------------------------------------------------- def post_interface_generation(self): """Insert the file header information, now that it is known""" header = "#pragma once\n\n" # Protect the CPU and CUDA code so that the include file only gets one set of definitions. # Doing it this way rather than generating two files keeps the include rules simple. 
if self.node_interface.has_cuda_attributes: header += "#ifndef __CUDACC__\n" header += self.get_file_inclusions() header += "\n" header += "\n".join(self.declarations) header += "\n" self.out.prepend(header) if self.node_interface.has_cuda_attributes: self.out.write("#else") self.generate_cuda_code() self.out.write("#endif") # ---------------------------------------------------------------------- def generate_registration_macro(self): """Generate the macro that will be called after the node definition to create the registration manager. This has to be done in two steps like this since the macro will reference code that performs a template-based introspection on the node class, which can only happen after it has been defined. This relies on instantiation of the OgnHelpers.h macros "DECLARE_OGN_NODES()" and "INITIALIZE_OGN_NODES()" in the proper spots. """ template_args = ", ".join([self.base_name, self.database_class_name()]) constructor_args = ", ".join( [f'"{self.node_interface.name}"', f"{self.node_interface.version}", f'"{self.extension}"'] ) self.out.write("#define REGISTER_OGN_NODE() \\") if self.out.indent("namespace { \\"): self.out.write(f"ogn::NodeTypeBootstrapImpl<{template_args}> s_registration({constructor_args}); \\") self.out.exdent() self.out.write("}") # ---------------------------------------------------------------------- def add_attribute_type_setup(self): """Write out the code to generate the include files used by all attributes. Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ # Required include files and declarations have to be first includes = [] declarations = [] for input_attribute in self.node_interface.all_input_attributes(): includes += input_attribute.cpp_includes() declarations += input_attribute.cpp_declarations() for output_attribute in self.node_interface.all_output_attributes(): includes += output_attribute.cpp_includes() declarations += output_attribute.cpp_declarations() for state_attribute in self.node_interface.all_state_attributes(): includes += state_attribute.cpp_includes() declarations += state_attribute.cpp_declarations() self.preamble_code += [f"#include <{include_file}>" for include_file in sorted(set(includes))] if declarations: self.preamble_code += sorted(set(declarations)) # ---------------------------------------------------------------------- def generate_registration(self): """Write out the code to register the node. 
Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ self.out.write( f'REGISTER_NODE_TYPE({self.base_name}, "{self.node_interface.name}", {self.node_interface.version})' ) # ---------------------------------------------------------------------- def generate_attribute_static_data(self): """Generate the code required to create the static data structures for unchanging parts of the attributes.""" # Namespace it to create file-local objects with easy access self.out.write(f"namespace {self.base_name}Attributes") self.out.write("{") def generate_static_attributes(attribute_list: List[AttributeManager], attribute_group: str): """Helper function that generates the static attribute support classes for all attributes in the list Args: attribute_list: List of attributes to generate attribute_group: Enum with the attribute's group (input, output, or state) """ namespace = namespace_of_group(attribute_group) self.out.write(f"namespace {namespace}") self.out.write("{") for attribute in attribute_list: self.out.write(attribute.cpp_typedef_definitions()) (_initializer_name, initializer_declaration) = attribute.cpp_initializer() self.out.write(initializer_declaration) self.out.write("}") generate_static_attributes(self.node_interface.all_input_attributes(), attribute_group=INPUT_GROUP) generate_static_attributes(self.node_interface.all_output_attributes(), attribute_group=OUTPUT_GROUP) generate_static_attributes(self.node_interface.all_state_attributes(), attribute_group=STATE_GROUP) self.out.write("}") self.out.write(f"using namespace {self.base_name}Attributes;") # ---------------------------------------------------------------------- def generate_attribute_accessors(self, attribute_list: List[AttributeManager], attribute_group: str): """Write out the code to create the declarations of the attribute accessor pointers. Args: attribute_list: List of attributes on the node of that type attribute_group: Enum with the attribute's group (input, output, or state) Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ # If no attributes of this type then the type will not be referenced and does not need generating if not attribute_list: return namespace = namespace_of_group(attribute_group) self.out.write() if self.out.indent("struct {"): # Write out the declaration for the internal pointers that point into Fabric. has_local_declarations = False for attribute in attribute_list: declaration = attribute.datamodel_local_variables() if declaration: self.out.write(declaration) has_local_declarations = True if has_local_declarations: self.out.write() # The accessible structures are the wrappers through which the data will be accessed. They will # all have an operator() to access the data type directly, and may have other convenience methods # for managing things like iterating, resizing, copying, etc. 
for attribute in attribute_list: self.out.write(attribute.datamodel_accessor_declaration()) self.out.exdent(f"}} {namespace};") # ---------------------------------------------------------------------- def write_n_per_line(self, list_to_write: List[str], stride: int): """Write out a comma-separated list with "stride" elements per line There is no filling of extra entries, or quoting of the provided strings, so writing out a list of the string names for [1, 10] by fours would be: one, two, three, four, five, six, seven, eight, nine, ten Args: list_to_write: List of strings to split out into smaller sublists per line stride: Number of elements per line """ list_size = len(list_to_write) suffix = "," for i in range(0, list_size, stride): if i + stride >= list_size: # Avoid the trailing comma on the last line suffix = "" self.out.write(f"{', '.join(list_to_write[i:i + stride])}{suffix}") # ---------------------------------------------------------------------- def get_attributes_by_memory_type(self, attribute_group: str): """Return a trio of attribute lists, partitioned by the type of memory their data occupies. Args: attribute_group: Enum with the attribute's group (input, output, or state) Returns: (cpu_attributes, : List of attributes exclusively living on the CPU cuda_attributes, : List of attributes exclusively living on the GPU in CUDA format any_attributes) : List of attributes that may live in either location """ if attribute_group == INPUT_GROUP: attributes = self.node_interface.all_input_attributes() elif attribute_group == OUTPUT_GROUP: attributes = self.node_interface.all_output_attributes() else: attributes = self.node_interface.all_state_attributes() cpu_attributes = [attribute for attribute in attributes if attribute.memory_storage() == MemoryTypeValues.CPU] cuda_attributes = [attribute for attribute in attributes if attribute.memory_storage() == MemoryTypeValues.CUDA] any_attributes = [attribute for attribute in attributes if attribute.memory_storage() == MemoryTypeValues.ANY] return (cpu_attributes, cuda_attributes, any_attributes) # ---------------------------------------------------------------------- # Enum for the types of handles for which to generate extraction code HANDLE_INPUT = 0 HANDLE_INPUT_BUNDLE = 1 HANDLE_OUTPUT = 2 HANDLE_OUTPUT_BUNDLE = 3 HANDLE_STATE = 4 HANDLE_STATE_BUNDLE = 5 # List of (get_method, handle_type, handle_object, namespace) for each type of method. 
    HANDLE_TABLE = {
        HANDLE_INPUT: ("getAttributesR", "ConstAttributeDataHandle", "inputDataHandles", INPUT_NS),
        HANDLE_INPUT_BUNDLE: ("getAttributesR", "ConstAttributeDataHandle", "inputDataBundleHandles", INPUT_NS),
        HANDLE_OUTPUT: ("getAttributesW", "AttributeDataHandle", "outputDataHandles", OUTPUT_NS),
        HANDLE_OUTPUT_BUNDLE: ("getAttributesW", "AttributeDataHandle", "outputBundleDataHandles", OUTPUT_NS),
        HANDLE_STATE: ("getAttributesW", "AttributeDataHandle", "stateDataHandles", STATE_NS),
        HANDLE_STATE_BUNDLE: ("getAttributesW", "AttributeDataHandle", "stateBundleDataHandles", STATE_NS),
    }
    # Runtime attribute type correspondences
    # (accessor_method, handle_type, namespace, requires_const_cast to reset)
    RUNTIME_TABLE = {
        HANDLE_INPUT: ("getConstAttributeDataHandle", "ConstAttributeDataHandle", INPUT_NS, True),
        HANDLE_OUTPUT: ("getAttributeDataHandle", "AttributeDataHandle", OUTPUT_NS, False),
        HANDLE_STATE: ("getAttributeDataHandle", "AttributeDataHandle", STATE_NS, False),
    }

    # ----------------------------------------------------------------------
    def write_attrib_metadata(self, attribute: AttributeManager):
        """Write attribute metadata"""
        for key_raw, value_raw in attribute.metadata.items():
            cpp_key = MetadataKeyOutput.cpp_name_from_key(key_raw)
            if cpp_key is None:
                # Run it through json to handle escaping the quotes
                cpp_key = json.dumps(key_raw)
            # Handle lists of strings or just strings
            if isinstance(value_raw, list):
                value = '"' + ",".join([x.replace('"', '\\"') for x in value_raw]) + '"'
            else:
                value = json.dumps(value_raw)
            self.out.write(f"iAttribute->setMetadata(attributeObj, {cpp_key}, {value});")

    # ----------------------------------------------------------------------
    def __generate_data_handles(
        self, attributes: list, group_index, handle_type: int, check_if_handle_required: bool = False
    ):
        """Shared code to initialize data handles for all types of attributes

        Args:
            attributes: List of attributes for which to generate the handles
            group_index: Index of this handle group among all others with the same types
            handle_type: Enumerated value used to switch the methods and data members generated
            check_if_handle_required: If True do a more expensive check to see if the handle needs to be generated
        """
        if not attributes:
            return
        handle_data = self.HANDLE_TABLE[handle_type]
        # The grouping operation might have left Nones in the list - prune them out to make later code more clear
        actual_attributes = [attribute for attribute in attributes if attribute is not None]
        if not actual_attributes:
            return
        # If the handle won't be checked in later code then don't get it now - it will only generate unused code
        if check_if_handle_required and not any(
            attribute.cpp_set_handle_at_runtime() for attribute in actual_attributes
        ):
            return
        # Example of line generated here for two input attributes:
        #     auto inputDataHandles = getAttributesR<
        #         ConstAttributeDataHandle, ConstAttributeDataHandle
        #         >(contextObj, nodeHandle, std::make_tuple(
        #             inputs::attribute1.m_token, inputs::attribute2.m_token
        #         )
        #     );
        self.out.indent(f"auto {handle_data[2]}{group_index} = {handle_data[0]}<")
        # The template arguments require one handle type per attribute in the tuple (written 4 per line for clarity)
        self.write_n_per_line([handle_data[1]] * len(actual_attributes), 4)
        self.out.indent(">(contextObj, nodeHandle, std::make_tuple(")
        # The token arguments are the names of the attribute token declared earlier
        tokens = [f"{handle_data[3]}::{attribute.cpp_variable_name()}.m_token" for attribute in actual_attributes]
self.write_n_per_line(tokens, 4) self.out.exdent(")") self.out.exdent(");") # ---------------------------------------------------------------------- def generate_input_array_extraction(self, input_attributes: list, group_index, handle_type: int): """Generate the code to extract the information of the input array attributes' data in Fabric. This presumes all handles were stored locally in the tuple named in the handle table entry. Args: input_attributes: List of input attributes group_index: Which of the subgroupings of attributes is this? handle_type: HANDLE_* index indicating which type of attribute is being generated """ if not input_attributes: return actual_attributes = list_without_runtime_attributes(input_attributes) if not actual_attributes: return (_, _, handle_name, namespace) = self.HANDLE_TABLE[handle_type] # Attributes have been grouped by memory type so the value can be extracted from any attribute in the list memory_type = actual_attributes[0].memory_storage() # "Any" and regular types are not extracted here, they are extracted on demand # This is just added for bundles, for which we need the bundle handle if memory_type != MemoryTypeValues.ANY and handle_type == self.HANDLE_INPUT_BUNDLE: # Example line generated here for two attributes: # std::tie( # outputs.m_attribute1.m_ptrToData, outputs.m_attribute2.m_ptrToData # ) = getDataR< # const float*, const double* # >(contextObj, {handle__name}); extra_args = "" get_function = "getDataR" if memory_type != MemoryTypeValues.CPU: get_function = "getDataRGpuAt" if self.node_interface.cuda_pointer_type is not None: cuda_location = CudaPointerValues.CPP[self.node_interface.cuda_pointer_type] else: cuda_location = CudaPointerValues.CPP[CudaPointerValues.CUDA] extra_args = f", {cuda_location}" self.out.indent("std::tie(") pointer_names = [f"{attribute.fabric_pointer()}" for attribute in actual_attributes] self.write_n_per_line(pointer_names, 4) self.out.exdent(f") = {get_function}<") self.out.indent() data_types = [] for attribute in actual_attributes: data_types.append(f"{attribute.fabric_raw_type()}*") self.write_n_per_line(data_types, 4) self.out.exdent(f">(contextObj, {handle_name}{group_index}{extra_args});") # All of the attributes that are array types need to manage a counter pointer since that's how Fabric # knows how big the data is. for (index, attribute) in enumerate(actual_attributes): accessor_name = f"{namespace}.{attribute.cpp_variable_name()}" if attribute.fabric_needs_counter() and memory_type == MemoryTypeValues.CPU: self.out.write(f"{accessor_name}.resetArrayWrapper();") if attribute.cpp_set_handle_at_runtime(): extract_handle = f"std::get<{index}>({handle_name}{group_index})" self.out.write(f"{accessor_name}.setHandle({extract_handle});") self.out.write(f"{accessor_name}.setContext(contextObj);") # ---------------------------------------------------------------------- def generate_writable_array_extraction(self, attributes: list, group_index: int, handle_type: int): """Generate the code to extract the information of the writable array attributes in Fabric. Args: attributes: List of writable attributes for which to extract pointers group_index: Which of the subgroupings of attributes is this? handle_type: HANDLE_* index indicating which type of attribute is being generated """ if not attributes: return # Writable attributes need a definite type to be extracted here. Others will be extracted on demand. 
        actual_attributes = list_without_runtime_attributes(attributes)
        if not actual_attributes:
            return
        (_, _, handle_name, namespace) = self.HANDLE_TABLE[handle_type]
        # All of the attributes that are array types need to manage a counter pointer since that's how Fabric
        # knows how big the data is. They also have to keep track of the context and handle to allow for resizing,
        # which would modify the Fabric memory locations.
        for (index, attribute) in enumerate(actual_attributes):
            accessor_name = f"{namespace}.{attribute.cpp_variable_name()}"
            extract_handle = f"std::get<{index}>({handle_name}{group_index})"
            if attribute.cpp_set_handle_at_runtime():
                self.out.write(f"{accessor_name}.setHandle({extract_handle});")
                self.out.write(f"{accessor_name}.setContext(contextObj);")

    # ----------------------------------------------------------------------
    def generate_writable_bundle_extraction(self, attributes: list, group_index: int, handle_type: int):
        """Generate the code to extract the pointers of bundle-type writable attributes' data in Fabric.

        Args:
            attributes: List of output attributes for which to extract pointers
            group_index: Which of the subgroupings of attributes is this?
            handle_type: HANDLE_* index indicating which type of attribute is being generated
        """
        if not attributes:
            return
        # The grouping operation might have left Nones in the list - prune them out to make later code more clear
        actual_attributes = [attribute for attribute in attributes if attribute is not None]
        if not actual_attributes:
            return
        (_, _, handle_name, namespace) = self.HANDLE_TABLE[handle_type]
        # Attributes have been grouped by memory type so the value can be extracted from any attribute in the list
        memory_type = actual_attributes[0].memory_storage()
        # "Any" types are not extracted here, they are extracted on demand
        if memory_type != MemoryTypeValues.ANY:
            # Example lines generated here for two attributes:
            #     {namespace}.m_d1_ = contextObj.iContext->getOutputPrim(contextObj, nodeHandle, {namespace}::d1.m_token);
            #     {namespace}.m_d2_ = contextObj.iContext->getOutputPrim(contextObj, nodeHandle, {namespace}::d2.m_token);
            # Unlike other attribute types the bundle types have to be extracted one at a time; there is no bulk method
            arg_prefix = f"contextObj.iContext->getOutputPrim(contextObj, nodeHandle, {namespace}::"
            for attribute in actual_attributes:
                bundle_prim_handle = f"{namespace}.{attribute.fabric_data_variable_name()}"
                self.out.write(f"*{bundle_prim_handle} = {arg_prefix}{attribute.cpp_variable_name()}.m_token);")
        # All of the attributes that are array types need to manage a counter pointer since that's how Fabric
        # knows how big the data is. They also have to keep track of the context and handle to allow for resizing,
        # which would modify the Fabric memory locations.
        for (index, attribute) in enumerate(actual_attributes):
            accessor_name = f"{namespace}.{attribute.cpp_variable_name()}"
            extract_handle = f"std::get<{index}>({handle_name}{group_index})"
            if attribute.cpp_set_handle_at_runtime():
                self.out.write(f"{accessor_name}.setHandle({extract_handle});")
                self.out.write(f"{accessor_name}.setContext(contextObj);")

    # ----------------------------------------------------------------------
    def generate_runtime_attribute_initialization(self, attributes: list, handle_type: int):
        """Generate the code that initializes runtime attributes' data wrappers from Fabric.

        Args:
            attributes: List of runtime attributes for which to initialize data wrappers
handle_type: HANDLE_* index indicating which type of attribute is being generated """ (handle_accessor, handle_type, namespace, requires_const_cast) = self.RUNTIME_TABLE[handle_type] # Example lines generated here for two attributes, sharing temporary attributes per attribute type in order # to avoid naming conflicts and support the different methods and classes different attribute types require. # { # AttributeObj __a; # ConstAttributeDataHandle __h; # __a = nodeObj.iNode->getAttributeByToken(nodeObj, inputs::floatOrToken.m_token); # __h = __a.iAttribute->getConstAttributeDataHandle(__a); # const_cast<ogn::RuntimeAttribute<ogn::kInput, ogn::kCpu>&>(inputs.m_floatOrToken).reset(contextObj, __h); # # __a = nodeObj.iNode->getAttributeByToken(nodeObj, inputs::boolOrFloat.m_token); # __h = __a.iAttribute->getConstAttributeDataHandle(__a); # const_cast<ogn::RuntimeAttribute<ogn::kInput, ogn::kCpu>&>(inputs.m_boolOrFloat).reset(contextObj, __h); # } declaration = [ f"{handle_type} __h;", "AttributeObj __a;", ] for attribute in attributes: if attribute is not None: # Only write the declaration if there is at least one attribute to process if declaration is not None: self.out.indent("{") self.out.write(declaration) declaration = None member = f"{namespace}::{attribute.cpp_variable_name()}" self.out.write(f"__a = nodeObj.iNode->getAttributeByToken(nodeObj, {member}.m_token);") self.out.write(f"__h = __a.iAttribute->{handle_accessor}(__a);") attr_object = f"{namespace}.{attribute.fabric_data_variable_name()}" if attribute.array_depth == 0: if requires_const_cast: accessor_type = ( f"ogn::RuntimeAttribute<{attribute.attribute_group}," f" {MemoryTypeValues.CPP[attribute.memory_type]}>" ) attr_object = f"const_cast<typename std::remove_const_t<{accessor_type}&>>({attr_object})" self.out.write(f"{attr_object}.reset(contextObj, __h, __a);") else: raise ParseError("Arrays not yet supported on runtime attributes") self.out.write() if declaration is None: self.out.exdent("}") # ---------------------------------------------------------------------- def generate_token_declarations(self): """Emit the code required to declare the tokens subclass in the database.""" if not self.node_interface.tokens: return self.out.write("struct TokenManager") if self.out.indent("{"): for token_name, _ in self.node_interface.tokens.items(): self.out.write(f"NameToken {token_name};") self.out.exdent("};") self.out.write("static TokenManager tokens;") # ---------------------------------------------------------------------- def generate_token_intialization(self): """Emit the code required to initialize the tokens subclass in the database.""" if not self.node_interface.tokens: return for token_name, token_value in self.node_interface.tokens.items(): value = json.dumps(token_value) self.out.write(f"{self.database_class_name()}::tokens.{token_name} = iToken.getHandle({value});") # ---------------------------------------------------------------------- def generate_database_constructor(self): """Write out the code that defines the database class constructor""" # Declare the constructor, taking the same parameters as the ABI compute() method self.out.write( f"{self.database_class_name()}(const GraphContextObj& contextObjParam, const NodeObj& nodeObjParam)" ) self.out.indent(": OmniGraphDatabase(contextObjParam, nodeObjParam) {") # Extract some common parts of the ABI that will be used later self.out.write("GraphContextObj const& contextObj = m_graphContext;") self.out.write("NodeObj const& nodeObj = m_nodeObj;") 
self.out.write("NodeContextHandle nodeHandle = nodeObj.nodeContextHandle;") # TODO: This could use some cleanup - it's a bit redundant and confused at the moment # Split the input and output attributes into groups by memory type - CPU, CUDA, and ANY. # These are grouped together to minimize the calls across the ABI. More consolidation could happen # by getting all of the handles in one shot but that would complicate the management so unless it # becomes a performance problem it won't be done cpu_inputs, cuda_inputs, any_inputs = self.get_attributes_by_memory_type(INPUT_GROUP) cpu_outputs, cuda_outputs, any_outputs = self.get_attributes_by_memory_type(OUTPUT_GROUP) cpu_state, cuda_state, any_state = self.get_attributes_by_memory_type(STATE_GROUP) for input_attributes, output_attributes, state_attributes in [ [cpu_inputs, cpu_outputs, cpu_state], [cuda_inputs, cuda_outputs, cuda_state], [any_inputs, any_outputs, any_state], ]: if not input_attributes and not output_attributes and not state_attributes: continue # Avoid the necessity to provide unique names within this block (input_attrs, input_bundles, input_runtime) = split_attribute_list(input_attributes) (output_attrs, output_bundles, output_runtime) = split_attribute_list(output_attributes) (state_attrs, state_bundles, state_runtime) = split_attribute_list(state_attributes) needs_nesting = ( input_attrs or input_bundles or output_attrs or output_bundles or state_attrs or state_bundles ) if needs_nesting and self.out.indent("{"): # Arbitrarily split up the list of attributes into groups of 60, in the unlikely event there are more # than that. Limiting the size of the processed groups avoids potential template recursion depth limits. # Making it a larger number minimizes the calls across the ABI into the DataModel. # They are cast to lists so that they can be used more than once (generators cannot be reused) # groups of 60 seems to be safe for both VC compiler and gcc. input_single_groups = list(grouper(input_attrs, 60)) input_bundle_groups = list(grouper(input_bundles, 60)) output_bundle_groups = list(grouper(output_bundles, 60)) output_single_groups = list(grouper(output_attrs, 60)) state_bundle_groups = list(grouper(state_bundles, 60)) state_single_groups = list(grouper(state_attrs, 60)) # Keep the generation of the attribute handles from the tokens independent from the rest as in future # this could be cached on the node and reused since it never changes from one evaluation to the next. for i, attribute_sublist in enumerate(input_single_groups): self.__generate_data_handles(attribute_sublist, i, self.HANDLE_INPUT) for i, attribute_sublist in enumerate(input_bundle_groups): self.__generate_data_handles(attribute_sublist, i, self.HANDLE_INPUT_BUNDLE) for i, attribute_sublist in enumerate(output_single_groups): self.__generate_data_handles(attribute_sublist, i, self.HANDLE_OUTPUT) for i, attribute_sublist in enumerate(output_bundle_groups): self.__generate_data_handles(attribute_sublist, i, self.HANDLE_OUTPUT_BUNDLE) for i, attribute_sublist in enumerate(state_single_groups): self.__generate_data_handles(attribute_sublist, i, self.HANDLE_STATE) for i, attribute_sublist in enumerate(state_bundle_groups): self.__generate_data_handles(attribute_sublist, i, self.HANDLE_STATE_BUNDLE) # Generate the code that extracts the Fabric pointers for each attribute. 
for i, attribute_sublist in enumerate(input_single_groups): self.generate_input_array_extraction(attribute_sublist, i, self.HANDLE_INPUT) for i, attribute_sublist in enumerate(input_bundle_groups): self.generate_input_array_extraction(attribute_sublist, i, self.HANDLE_INPUT_BUNDLE) for i, attribute_sublist in enumerate(output_single_groups): self.generate_writable_array_extraction(attribute_sublist, i, self.HANDLE_OUTPUT) for i, attribute_sublist in enumerate(output_bundle_groups): self.generate_writable_bundle_extraction(attribute_sublist, i, self.HANDLE_OUTPUT_BUNDLE) for i, attribute_sublist in enumerate(state_single_groups): self.generate_writable_array_extraction(attribute_sublist, i, self.HANDLE_STATE) for i, attribute_sublist in enumerate(state_bundle_groups): self.generate_writable_bundle_extraction(attribute_sublist, i, self.HANDLE_STATE_BUNDLE) self.out.exdent("}") # Runtime attributes are set up in one step since their handles are only required once, to set up # their data wrappers, which will access the data at runtime. self.generate_runtime_attribute_initialization(input_runtime, self.HANDLE_INPUT) self.generate_runtime_attribute_initialization(output_runtime, self.HANDLE_OUTPUT) self.generate_runtime_attribute_initialization(state_runtime, self.HANDLE_STATE) self.out.exdent("}") # ---------------------------------------------------------------------- def generate_release(self): """Write out the code that defines the database internal state accessor, if any. Since the existence of the internal state can only be determined at compile time, not code generation time, the method is always emitted, relying on the state manager to efficiently handle nodes with and without state. """ self.out.write("static void release(const NodeObj& nodeObj)") if self.out.indent("{"): self.out.write(f"{self.state_manager_name()}.removeState(nodeObj.nodeHandle);") self.out.exdent("}") # ---------------------------------------------------------------------- def generate_initialize(self): """Write out the code for the function that initializes a node of this type after creation. 
Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ all_attributes = self.node_interface.all_input_attributes() all_attributes += self.node_interface.all_output_attributes() all_attributes += self.node_interface.all_state_attributes() if not all_attributes: return self.out.write("static void initialize(const GraphContextObj&, const NodeObj& nodeObj)") if self.out.indent("{"): self.out.write("const INode* iNode = nodeObj.iNode;") if self.has_deprecated_attributes: self.out.write("const IInternal* iInternal = carb::getCachedInterface<omni::graph::core::IInternal>();") self.out.write("const IAttribute* iAttribute = carb::getCachedInterface<omni::graph::core::IAttribute>();") self.out.write("AttributeObj attributeObj;") for attribute in all_attributes: if not attribute.metadata and attribute.is_required and not attribute.is_deprecated: continue member = f"{attribute.namespace}::{attribute.cpp_variable_name()}" self.out.write(f"attributeObj = iNode->getAttributeByToken(nodeObj, {member}.token());") self.write_attrib_metadata(attribute) # Set the optional flag if it isn't the default if not attribute.is_required: self.out.write("iAttribute->setIsOptionalForCompute(attributeObj, true);") # Deprecate the attribute, if necessary if attribute.is_deprecated: self.out.write( f"iInternal->deprecateAttribute(attributeObj, {to_cpp_str(attribute.deprecation_msg)});" ) self.out.exdent("}") # ---------------------------------------------------------------------- def generate_initialize_type(self): """Write out the code for the function that initializes the node type. Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ input_attributes = self.node_interface.all_input_attributes() output_attributes = self.node_interface.all_output_attributes() state_attributes = self.node_interface.all_state_attributes() self.out.write("static void initializeType(const NodeTypeObj& nodeTypeObj)") self.out.indent("{") # Metadata always needs this, and attributes will as well if there are any self.out.write("const INodeType* iNodeType = nodeTypeObj.iNodeType;") # Avoid generation of an unused variable if input_attributes or output_attributes or state_attributes or self.node_interface.tokens: # Cannot use the token interface stored in the database class since it doesn't exist here. self.out.write("auto iTokenPtr = carb::getCachedInterface<carb::flatcache::IToken>();") if self.out.indent("if( ! iTokenPtr ) {"): self.out.write(f'CARB_LOG_ERROR("IToken not found when initializing {self.node_interface.name}");') self.out.write("return;") self.out.exdent("}") # The extra step of making a reference is taken so that future accesses to the interface are the # same no matter which extension you are in. self.out.write("auto& iToken{ *iTokenPtr };") self.generate_token_intialization() # Generate the initialization of attributes, including setting defaults and adding them to the node type def generate_attribute_initialize(attribute_list: List[AttributeManager], namespace: str): """Helper to initialize attributes of a given type. 
Prevents triplication of the loop""" self.out.write() for attribute in attribute_list: attribute.cpp_pre_initialization(self.out) for attribute in attribute_list: self.out.write( f"{namespace}::{attribute.cpp_variable_name()}.initialize(iToken, *iNodeType, nodeTypeObj);" ) for attribute in attribute_list: attribute.cpp_post_initialization(self.out) generate_attribute_initialize(input_attributes, INPUT_NS) generate_attribute_initialize(output_attributes, OUTPUT_NS) generate_attribute_initialize(state_attributes, STATE_NS) # Generate the initialization of the node metadata, including the hardcoded one holding the extension name self.out.write( f"iNodeType->setMetadata(nodeTypeObj, {MetadataKeyOutput.EXTENSION}, {json.dumps(self.extension)});" ) for key, value in self.node_interface.metadata.items(): cpp_key = MetadataKeyOutput.cpp_name_from_key(key) if cpp_key is None: # Run it through json to handle escaping the quotes cpp_key = json.dumps(key) self.out.write(f"iNodeType->setMetadata(nodeTypeObj, {cpp_key}, {json.dumps(value)});") # If any of the scheduling hints flags have been defined then set them here if self.node_interface.scheduling_hints is not None: self.node_interface.scheduling_hints.emit_cpp(self.out) # The icon path is relative to the extension path, which is only known at runtime, so build it up then. # To the user it will appear as an absolute path, which they can modify if they wish to. if self.node_interface.icon_path is not None: icon_path = json.dumps(self.node_interface.icon_path) self.out.write("auto iTokens = carb::getCachedInterface<carb::tokens::ITokens>();") if self.out.indent("if( ! iTokens ) {"): self.out.write( 'CARB_LOG_ERROR("Extension path not available - ITokens not found when initializing' f' {self.node_interface.name}");' ) self.out.exdent("}") self.out.write("else") if self.out.indent("{"): self.out.write(f'std::string iconPath = carb::tokens::resolveString(iTokens, "${{{self.extension}}}");') self.out.write('iconPath.append("/");') self.out.write(f"iconPath.append({icon_path});") self.out.write(f"iNodeType->setMetadata(nodeTypeObj, {MetadataKeyOutput.ICON_PATH}, iconPath.c_str());") self.out.exdent("}") # Set up the state variable, directly if there are state attributes if self.node_interface.has_state: self.out.write("iNodeType->setHasState(nodeTypeObj, true);") # Close off the method definition self.out.exdent("}") # ---------------------------------------------------------------------- def generate_validate(self): """Write out the code for the function that validates the attributes before compute. 
Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ self.out.indent("bool validate() {") attributes_to_walk = [ [INPUT_NS, self.node_interface.all_input_attributes()], [OUTPUT_NS, self.node_interface.all_output_attributes()], [STATE_NS, self.node_interface.all_state_attributes()], ] conditions = [] for attribute_prefix, attribute_list in attributes_to_walk: for attribute in attribute_list: if attribute.is_required: if attribute.has_fixed_type(): conditions.append(f"{attribute_prefix}.{attribute.cpp_variable_name()}.isValid()") elif attribute.do_validation: conditions.append(f"{attribute_prefix}.{attribute.cpp_variable_name()}().resolved()") if conditions: self.out.indent(f"return {conditions[0]}") for condition in conditions[1:]: self.out.write(f"&& {condition}") self.out.exdent(";") else: self.out.write("return true;") self.out.exdent("}") # ---------------------------------------------------------------------- def generate_database(self): """Write out the code to initialize the required attributes. Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ self.generate_attribute_static_data() # The namespace encapsulates the attribute access to make it easier to isolate it self.out.write(f"namespace I{self.base_name}") self.out.write("{") # Optional description information. Can be omitted if the generated code is never examined. node_description = to_cpp_comment(self.node_interface.description) self.out.write(f"{node_description}") # Generate the class wrapping all of the access to the DataModel, including the core ABI objects self.out.write(f"class {self.database_class_name()} : public omni::graph::core::ogn::OmniGraphDatabase") self.out.write("{") if self.out.indent("public:"): self.generate_token_declarations() self.out.write("template <typename StateInformation>") if self.out.indent("static StateInformation& sInternalState(const NodeObj& nodeObj) {"): self.out.write(f"return {self.state_manager_name()}.getState<StateInformation>(nodeObj.nodeHandle);") self.out.exdent("}") self.out.write("template <typename StateInformation>") if self.out.indent("StateInformation& internalState() {"): self.out.write("return sInternalState<StateInformation>(abi_node());") self.out.exdent("}") self.out.write(f"static ogn::StateManager {self.state_manager_name()};") self.out.write(f"static std::tuple<int, int, int>{self.generator_version_name()};") self.out.write(f"static std::tuple<int, int, int>{self.target_version_name()};") self.generate_attribute_accessors(self.node_interface.all_input_attributes(), INPUT_GROUP) self.generate_attribute_accessors(self.node_interface.all_output_attributes(), OUTPUT_GROUP) self.generate_attribute_accessors(self.node_interface.all_state_attributes(), STATE_GROUP) self.generate_database_constructor() # The ABI initializeType method is implemented as a member of this class to simplify registration self.generate_initialize_type() # The ABI initialize method is implemented as a member of this class to facilitate attribute # metadata and internal state data. if self.needs_initialize: self.generate_initialize() # The ABI initialize method is implemented as a member of this class to facilitate internal state data. 
self.generate_release() # Compute will require validation that all required attributes exist before running self.generate_validate() # Terminate the class definition self.out.exdent("};") # Initialize the static objects here, to avoid potential namespace clashes later class_name = self.database_class_name() self.out.write(f"ogn::StateManager {class_name}::{self.state_manager_name()};") # Remember the generator and code target version, in case it is needed later for backwards compatibility generator_version = ",".join([str(version) for version in self.generator_version]) self.out.write( f"std::tuple<int, int, int> {class_name}::{self.generator_version_name()}" f"{{std::make_tuple({generator_version})}};" ) target_version = ",".join([str(version) for version in self.target_version]) self.out.write( f"std::tuple<int, int, int> {class_name}::{self.target_version_name()}" f"{{std::make_tuple({target_version})}};" ) # If there are tokens declare the class member that implements them if self.node_interface.tokens: self.out.write(f"{class_name}::TokenManager {class_name}::tokens;") # Hide the namespace enclosure from the node code self.out.write("}") self.out.write(f"using namespace I{self.base_name};") # ---------------------------------------------------------------------- def generate_cuda_code(self): """Write out the code that will be used by the CUDA code. Here is some sample output for a node multiplying a vector by a constant namespace OgnFooCudaTypes { namespace inputs { using multiplier_t = const float&; using input_t = const float3&; } namespace outputs { using result_t = float; } } using namespace OgnFooCudaTypes; With this set of definitions and the ones in the CPU types you can easily define functions that cross the CPU/GPU boundary. This would be the signature in the .cpp file: extern "C" void scaleVector(inputs::input_t, inputs::multiplier_t, outputs::result_t); with this definition in the .cu file: extern "C" void scaleVector(inputs::input_t, inputs::multiplier_t, outputs::result_t); The pointer is necessary as the CPU cannot directly access GPU data so it has to pass through the extracted Fabric data as (undereferencable) pointers. It is made explicit rather than burying it in the type definition in order to make it obvious to the user that they are dealing with a pointer. Raises: NodeGenerationError: When there is a failure in the generation of the CUDA interface """ def generate_cuda_includes(attribute_list: List[AttributeManager]) -> List[str]: """Helper function that returns a list of all include statements needed by CUDA definitions""" includes = [] for attribute in attribute_list: includes += attribute.cuda_includes() return includes def generate_cuda_typedefs(attribute_list: List[AttributeManager], attribute_group: str): """Helper function that generates the CUDA typedefs all attributes in the list The types here are similar but not identical to those generated in the static attribute information definition class created in generate_attribute_static_data(). 
Args: attribute_list: List of attributes to generate attribute_group: Enum with the attribute's group (input, output, or state) """ namespace = namespace_of_group(attribute_group) self.out.write(f"namespace {namespace}") self.out.write("{") for attribute in attribute_list: attribute_data_type = attribute.cuda_type_name() # No type means the attribute type cannot be passed to CUDA code if attribute_data_type is not None: modifier = "const " if attribute.is_read_only() else "" initializer_name = attribute.cpp_variable_name() self.out.write(f"using {initializer_name}_t = {modifier}{attribute_data_type};") self.out.write("}") input_includes = generate_cuda_includes(self.node_interface.all_input_attributes()) output_includes = generate_cuda_includes(self.node_interface.all_output_attributes()) state_includes = generate_cuda_includes(self.node_interface.all_state_attributes()) for include_file in sorted(set(input_includes + output_includes + state_includes)): self.out.write(f"#include <{include_file}>") # Namespace it to create file-local objects with easy access self.out.write(f"namespace {self.base_name}CudaTypes") self.out.write("{") generate_cuda_typedefs(self.node_interface.all_input_attributes(), INPUT_GROUP) generate_cuda_typedefs(self.node_interface.all_output_attributes(), OUTPUT_GROUP) generate_cuda_typedefs(self.node_interface.all_state_attributes(), STATE_GROUP) self.out.write("}") self.out.write(f"using namespace {self.base_name}CudaTypes;") # ---------------------------------------------------------------------- def generate_node_interface(self): """Write out the code associated with the node. Raises: NodeGenerationError: When there is a failure in the generation of the C++ interface """ if self.all_supported: self.generate_database() self.generate_registration_macro() else: # self.generate_attribute_tokens() self.generate_registration() # ====================================================================== def generate_cpp(configuration: GeneratorConfiguration, all_supported: bool) -> Optional[str]: """Create support files for the C++ interface to a node For now only a header file is generated for the C++ interface, though there will probably be multiple files generated in the future. For that reason this single point of contact was created for outside callers. Args: configuration: Information defining how and where the header will be generated all_supported: True if all attributes in the file are of supported types. If False then no initialization code will be omitted, only registration. Returns: String containing the generated header code or None if its generation was not enabled Raises: NodeGenerationError: When there is a failure in the generation of the header """ if not configuration.node_interface.can_generate("c++"): return None logger.info("Generating C++ Database Definition") generator = NodeCppGenerator(configuration, all_supported) generator.generate_interface() return str(generator.out)
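

# ----------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; in practice the node generator
# pipeline constructs the GeneratorConfiguration, so "configuration" and the
# output file name here are hypothetical):
#
#     from pathlib import Path
#
#     header_text = generate_cpp(configuration, all_supported=True)
#     if header_text is not None:  # None means C++ generation was not enabled
#         Path("ogn/OgnMyNodeDatabase.h").write_text(header_text)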
59,658
Python
52.50583
120
0.60986
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/register_ogn_nodes.py
from omni.graph.tools import DeprecationError raise DeprecationError("register_ogn_nodes has moved from omni.graph.tools to omni.graph.core")
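
# Migration sketch (hedged - the exact replacement signature depends on the
# installed omni.graph.core version, and the re-exported name is assumed here):
#
#     from omni.graph.core import register_ogn_nodes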
143
Python
34.999991
95
0.825175
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_tests.py
"""Support for generating simple regression test code for OmniGraph Nodes. The tests do three things: 1. Do a test "import" of the Python interface to the node 2. Do a test load of the USDA template interface for the node 3. Run all of the "tests" cases specified in the node's .ogn file Exports: generate_tests: Create a TestNODE.py file containing standard unit tests of the node operation """ from pathlib import Path from typing import List, Optional from .attributes.management import list_without_runtime_attributes from .generate_test_imports import ensure_test_is_imported from .keys import MemoryTypeValues, TestKeys from .nodes import NodeInterfaceGenerator from .utils import GeneratorConfiguration, UnimplementedError, logger __all__ = ["generate_tests"] class NodeTestGenerator(NodeInterfaceGenerator): """Manage the functions required to generate basic test scripts for a node""" def __init__(self, configuration: GeneratorConfiguration): # noqa: PLW0246 """Set up the generator and output the test scripts for the node Just passes the initialization on to the parent class. See the argument and exception descriptions there. """ super().__init__(configuration) # ---------------------------------------------------------------------- def interface_file_name(self) -> str: """Return the path to the name of the Python test file""" return self.test_class_name() + ".py" # ---------------------------------------------------------------------- def test_class_name(self) -> str: """Returns the name to use for the test class and base name for the test file""" return f"Test{self.base_name}" # ---------------------------------------------------------------------- def generate_user_test_data(self, all_tests: List) -> bool: """Generate the section of the user test that creates the test data to be iterated over. The variable "test_data" is set up to be a list of test data, whose elements consist of four sub-lists - values for input attributes, set before the test starts - values for output attributes, checked after the test finishes - initial values for state attributes, set before the test starts - final values for state attributes, checked after the test finishes Each of the data elements consist of a 3-tuple of the name of the attribute, the value for it, and whether the attribute's data should be manipulated on the GPU. Simple example of one test where two inputs are set, one output is checked, one state attribute has an initial value set and a final value checked, and a node of type "MyNodeType" is created before the test runs: test_data = [ { "inputs": [ ["inputs:attr", INPUT_VALUE, False], ["inputs:attr2", INPUT_VALUE2, False], ], "outputs": [ ["outputs:attr", OUTPUT_VALUE, False], ], "state_set": [ ["state:attr", INITIAL_VALUE, False], ], "state_get": [ ["state:attr", FINAL_VALUE, False], ], "setup": { "create_nodes": [["MyNode", "MyNodeType"]] } } ] Args: all_tests: List of test dictionaries with the test information to be consolidated Returns: True if the node tests use the old V1 setup data, meant for the deprecated OmniGraphHelper """ test_data = [] uses_v1_setup = False for node_test_data in all_tests: # The outputs who decide on being on the GPU at runtime have to be marked as such in the test data. # That way the test knows where to look for the results. 
            gpu_outputs = node_test_data.gpu_outputs
            test_run = {}
            # Do all inputs first, for clarity
            if node_test_data.input_values:
                input_data = []
                for attribute, attribute_value in node_test_data.input_values.items():
                    if attribute.memory_type == MemoryTypeValues.ANY:
                        raise UnimplementedError(f"Input '{attribute.name}' has an unsupported CPU/GPU memory type")
                    on_gpu = attribute.memory_type != MemoryTypeValues.CPU
                    input_data.append([attribute.name, attribute_value, on_gpu])
                test_run[TestKeys.INPUTS] = input_data
            # ...then all outputs
            if node_test_data.expected_outputs:
                output_data = []
                for attribute, attribute_value in node_test_data.expected_outputs.items():
                    if attribute.memory_type == MemoryTypeValues.ANY:
                        on_gpu = attribute.name in gpu_outputs
                    else:
                        on_gpu = attribute.memory_type != MemoryTypeValues.CPU
                    output_data.append([attribute.name, attribute_value, on_gpu])
                test_run[TestKeys.OUTPUTS] = output_data
            # ...then all state initial values
            if node_test_data.state_initial_values:
                state_data = []
                for attribute, attribute_value in node_test_data.state_initial_values.items():
                    if attribute.memory_type == MemoryTypeValues.ANY:
                        on_gpu = attribute.name in gpu_outputs
                    else:
                        on_gpu = attribute.memory_type != MemoryTypeValues.CPU
                    state_data.append([attribute.name, attribute_value, on_gpu])
                test_run[TestKeys.STATE_SET] = state_data
            # ... all state final values
            if node_test_data.state_final_values:
                state_data = []
                for attribute, attribute_value in node_test_data.state_final_values.items():
                    if attribute.memory_type == MemoryTypeValues.ANY:
                        on_gpu = attribute.name in gpu_outputs
                    else:
                        on_gpu = attribute.memory_type != MemoryTypeValues.CPU
                    state_data.append([attribute.name, attribute_value, on_gpu])
                test_run[TestKeys.STATE_GET] = state_data
            # ... and graph setup (where None has a different meaning than the empty list)
            if node_test_data.graph_setup is not None:
                test_run[TestKeys.SETUP] = node_test_data.graph_setup
            uses_v1_setup |= node_test_data.uses_v1_setup
            test_data.append(test_run)
        self.out.write(f"test_data = {test_data}")
        return uses_v1_setup

    # ----------------------------------------------------------------------
    def generate_user_test_run(self, uses_v1_setup: bool = False):
        """Generate the section of the user test that iterates test runs over the test data"""
        node_type_name = self.node_interface.name
        # Emit the test loop, creating a new node each iteration to ensure that all unset values are their default
        self.out.write("test_node = None")
        self.out.write("test_graph = None")
        if self.out.indent("for i, test_run in enumerate(test_data):"):
            self.out.write(f"inputs = test_run.get('{TestKeys.INPUTS}', [])")
            self.out.write(f"outputs = test_run.get('{TestKeys.OUTPUTS}', [])")
            self.out.write(f"state_set = test_run.get('{TestKeys.STATE_SET}', [])")
            self.out.write(f"state_get = test_run.get('{TestKeys.STATE_GET}', [])")
            self.out.write(f"setup = test_run.get('{TestKeys.SETUP}', None)")
            # Clean out the stage for the next test, unless the setup is to be carried on
            if self.out.indent("if setup is None or setup:"):
                self.out.write("await omni.usd.get_context().new_stage_async()")
                self.out.write("test_graph = None")
                self.out.exdent()
            if self.out.indent("elif not setup:"):
                # An empty setup means "use the previous setup", which should have created a graph and a node
                self.out.write(
                    "self.assertTrue(test_graph is not None and test_graph.is_valid(), "
                    + '"Test is misconfigured - empty setup cannot be in the first test")'
                )
                self.out.exdent()
            # Emit the construction of the test node
            if self.out.indent("if setup:"):
                if uses_v1_setup:
self.out.write("await ogts.setup_test_environment()") self.out.write('test_nodes = og.OmniGraphHelper().edit_graph("/", setup)') self.out.write("test_graph = og.get_current_graph()") else: self.out.write('(test_graph, test_nodes, _, _) = og.Controller.edit("/TestGraph", setup)') self.out.write("self.assertTrue(test_nodes)") self.out.write("test_node = test_nodes[0]") self.out.exdent() if self.out.indent("elif setup is None:"): # A previous setup will have created a test_graph, otherwise create a default graph for testing. if self.out.indent("if test_graph is None:"): self.out.write('test_graph = og.Controller.create_graph("/TestGraph")') self.out.write("self.assertTrue(test_graph is not None and test_graph.is_valid())") self.out.exdent() if self.out.indent("test_node = og.Controller.create_node("): self.out.write(f'("TestNode_{self.safe_name()}", test_graph), "{node_type_name}"') self.out.exdent(")") self.out.exdent() self.out.write('self.assertTrue(test_graph is not None and test_graph.is_valid(), "Test graph invalid")') self.out.write('self.assertTrue(test_node is not None and test_node.is_valid(), "Test node invalid")') # First evaluation sets up the node and Fabric self.out.write("await og.Controller.evaluate(test_graph)") # Emit the code that sets the inputs of the node first self.out.write("values_to_set = inputs + state_set") if self.out.indent("if values_to_set:"): if self.out.indent("for attribute_name, attribute_value, _ in inputs + state_set:"): self.out.write("og.Controller((attribute_name, test_node)).set(attribute_value)") self.out.exdent() self.out.exdent() # Emit code to evaluate the node self.out.write("await og.Controller.evaluate(test_graph)") # Emit code to read the outputs and state and compare against the expected values if self.out.indent("for attribute_name, expected_value, _ in outputs + state_get:"): self.out.write("attribute = og.Controller.attribute(attribute_name, test_node)") self.out.write("actual_output = og.Controller.get(attribute)") self.out.write("expected_type = None") if self.out.indent("if isinstance(expected_value, dict):"): self.out.write('expected_type = expected_value["type"]') self.out.write('expected_value = expected_value["value"]') self.out.exdent() error_message = f"{node_type_name} User test case #{{i+1}}: {{attribute_name}} attribute value error" self.out.write(f'ogts.verify_values(expected_value, actual_output, f"{error_message}")') if self.out.indent("if expected_type:"): self.out.write("tp = og.AttributeType.type_from_ogn_type_name(expected_type)") self.out.write("actual_type = attribute.get_resolved_type()") if self.out.indent("if tp != actual_type:"): self.out.write( f'raise ValueError(f"{node_type_name} User tests - {{attribute_name}}: ' f'Expected {{expected_type}}, saw {{actual_type.get_ogn_type_name()}}")' ) self.out.exdent() self.out.exdent() self.out.exdent() self.out.exdent() self.out.exdent() # ---------------------------------------------------------------------- def generate_user_tests(self): """Generate the test method that exercises the tests specified in the .ogn file""" all_tests = self.node_interface.all_tests() # If the user did not specify any tests then do not generate this test function if not all_tests: return self.out.write("") if self.out.indent("async def test_generated(self):"): uses_v1_setup = self.generate_user_test_data(all_tests) self.generate_user_test_run(uses_v1_setup) # ---------------------------------------------------------------------- def generate_data_access_test(self): """Generate the test method 
for loading the generated USD file""" check_usd = "usd" not in self.node_interface.excluded_generators check_python = "python" not in self.node_interface.excluded_generators # There is not enough information to test data access if there's no Python database and no USD file if not check_usd and not check_python: return node_type_name = self.node_interface.name node_name = f"Template_{self.safe_name()}" # The node may have generated a .usda file with default values on it. If so then load it in and confirm # that the node exists after load and the inputs have the defaults. self.out.write("") self.out.write("async def test_data_access(self):") if self.out.indent(): if check_python: db_name = f"{self.base_name}Database" self.out.write(f"from {self.module}.ogn.{db_name} import {db_name}") # If USD testing is turned on then use that as a source of the test node if check_usd: self.out.write(f'test_file_name = "{self.base_name}Template.usda"') # The file is found in the usd/ subdirectory of this script's test directory. self.out.write('usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)') if self.out.indent("if not os.path.exists(usd_path):"): self.out.write('self.assertTrue(False, f"{usd_path} not found for loading test")') self.out.exdent() self.out.write("(result, error) = await ogts.load_test_file(usd_path)") self.out.write("self.assertTrue(result, f'{error} on {usd_path}')") # This node name is hardcoded into the USD generation self.out.write(f'test_node = og.Controller.node("/TestGraph/{node_name}")') # otherwise create the node directly else: self.out.write('(_, (test_node,), _, _) = og.Controller.edit("/TestGraph", {') self.out.write(f' og.Controller.Keys.CREATE_NODES: ("{node_name}", "{node_type_name}")') self.out.write("})") if check_python: self.out.write(f"database = {db_name}(test_node)") self.out.write("self.assertTrue(test_node.is_valid())") # Checking the version instead of the name allows for node type name aliases self.out.write("node_type_name = test_node.get_type_name()") version = self.node_interface.version self.out.write(f"self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), {version})") self.out.write("") if self.out.indent("def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:"): self.out.write('test_type = "USD Load" if usd_test else "Database Access"') self.out.write('return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"') self.out.exdent() self.out.write("") # Emit code to read the inputs and compare against the default values for attribute in list_without_runtime_attributes(self.node_interface.all_attributes()): # Optional attributes are not written to the template file if not attribute.is_required: continue name_to_check = attribute.usd_name() self.out.write("") self.out.write(f'self.assertTrue(test_node.get_attribute_exists("{name_to_check}"))') # Always get the value, so that the code is exercised self.out.write(f'attribute = test_node.get_attribute("{name_to_check}")') # Values aren't available for testing if the memory type is purely CUDA if check_python and attribute.memory_type != MemoryTypeValues.CUDA: attribute_accessor = f"{attribute.namespace}.{attribute.python_property_name()}" # If memory type is determined at runtime the property is an accessor, not a value if attribute.memory_type == MemoryTypeValues.ANY and not attribute.cpp_accessor_on_cpu(): attribute_accessor += ".cpu" self.out.write(f"db_value = database.{attribute_accessor}") # Only check input numbers since they 
should be well-defined as the defaults for all nodes if not attribute.is_read_only(): continue expected_value = attribute.python_default_value() if expected_value is not None: self.out.write(f"expected_value = {expected_value}") if check_usd: self.out.write("actual_value = og.Controller.get(attribute)") self.out.write("ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))") if check_python and attribute.memory_type != MemoryTypeValues.CUDA: self.out.write("ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))") self.out.exdent() # ---------------------------------------------------------------------- def generate_node_interface(self): """Generate the test method for the named node""" self.generate_user_tests() self.generate_data_access_test() if self.interface_directory is not None: ensure_test_is_imported(self.test_class_name(), Path(self.interface_directory)) # ---------------------------------------------------------------------- def pre_interface_generation(self): """Create the imports and common test framework used by tests for all nodes""" self.out.write("import omni.kit.test") self.out.write("import omni.graph.core as og") self.out.write("import omni.graph.core.tests as ogts") if "usd" not in self.node_interface.excluded_generators: self.out.write("import os") self.out.write("") self.out.write("") self.out.write("class TestOgn(ogts.OmniGraphTestCase):") self.out.indent() # ====================================================================== def generate_tests(configuration: GeneratorConfiguration) -> Optional[str]: """Create support files for the tests on the node Args: configuration: Information defining how and where the test files will be generated Returns: String containing the generated test script code or None if its generation was not enabled Raises: NodeGenerationError: When there is a failure in the generation of the tests """ if not configuration.node_interface.can_generate("tests"): return None logger.info("Generating Node Type Tests") generator = NodeTestGenerator(configuration) generator.generate_interface() return str(generator.out)
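

# ======================================================================
# Illustrative usage sketch (not part of the generator API): driving this module requires a
# parsed node interface from the .ogn loader. The "load_ogn_interface" helper and every
# argument value below are hypothetical placeholders.
#
#     from .utils import GeneratorConfiguration
#
#     node_interface = load_ogn_interface("OgnMyNode.ogn")  # hypothetical loader helper
#     configuration = GeneratorConfiguration(
#         node_file_path="OgnMyNode.ogn",
#         node_interface=node_interface,
#         extension="omni.my.extension",
#         module="omni.my.extension",
#         base_name="OgnMyNode",
#         destination_directory="ogn/tests",
#     )
#     test_script = generate_tests(configuration)  # returns None when "tests" generation is excluded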
20,359
Python
53.878706
120
0.56766
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_usd.py
"""Support for generating USD template files for OmniGraph Nodes. Exports: generate_usd: Create a NODETemplate.usda file containing a template for instantiation of the described node type """ from typing import List, Optional from .attributes.AttributeManager import AttributeManager from .nodes import NodeInterfaceGenerator from .utils import GeneratorConfiguration, logger, to_usd_docs def pluralize(count: int): """Return a string with the pluralization suffix for the given count ("s" for non-1, "" for 1)""" return "" if count == 1 else "s" class NodeUsdGenerator(NodeInterfaceGenerator): """Manage the functions required to generate a USD template file representing a node type""" def __init__(self, configuration: GeneratorConfiguration): # noqa: PLW0246 """Set up the generator and output the USD template for the node Just passes the initialization on to the parent class. See the argument and exception descriptions there. """ super().__init__(configuration) # ---------------------------------------------------------------------- def interface_file_name(self): """Return the path to the name of the USD file""" return self.base_name + "Template.usda" # ---------------------------------------------------------------------- def __prim_name(self) -> str: """Returns a string comprising the name of the prim representing this node in the USD file""" return f"Template_{self.safe_name()}" # ---------------------------------------------------------------------- def generate_attributes_usd(self, attributes: List[AttributeManager]): """Write out USD code corresponding to the node Args: attributes: List of attributes whose USD is to be generated Raises: NodeGenerationError: When there is a failure in the generation of the USD file """ for attribute in attributes: attribute.emit_usd_declaration(self.out) # ---------------------------------------------------------------------- def generate_node_interface(self): """Generate the USD code corresponding to the node_interface Raises: NodeGenerationError: When there is a failure in the generation of the USD file """ node_name = self.node_interface.name node_version = self.node_interface.version self.out.write("") if self.out.indent(f'def OmniGraphNode "{self.__prim_name()}" ('): self.out.write(to_usd_docs(self.node_interface.description)) self.out.exdent(")") self.out.write("{") self.out.indent() self.out.write(f'token node:type = "{node_name}"') self.out.write(f"int node:typeVersion = {node_version}") for attributes in [ self.node_interface.all_input_attributes(), self.node_interface.all_output_attributes(), self.node_interface.all_state_attributes(), ]: if attributes: attribute_count = len(attributes) self.out.write("") self.out.write(f"# {attribute_count} attribute{pluralize(attribute_count)}") self.generate_attributes_usd(attributes) self.out.exdent("}") # ---------------------------------------------------------------------- def pre_interface_generation(self): """Create the USD header information and the graph enclosing the node""" self.out.write("#usda 1.0") self.out.write("(") self.out.write(f' doc ="""Generated from node description file {self.base_name}.ogn') self.out.write('Contains templates for node types found in that file."""') self.out.write(")") self.out.write("") self.out.write('def OmniGraph "TestGraph"') if self.out.indent("{"): self.out.write('token evaluator:type = "push"') self.out.write("int2 fileFormatVersion = (1, 3)") self.out.write('token flatCacheBacking = "Shared"') self.out.write('token pipelineStage = "pipelineStageSimulation"') # 
---------------------------------------------------------------------- def post_interface_generation(self): """Close the graph definition""" self.out.exdent() self.out.write("}") # ====================================================================== def generate_usd(configuration: GeneratorConfiguration) -> Optional[str]: """Create support files for the USD template definition for a node Args: configuration: Information defining how and where the template will be generated Returns: String containing the generated USD or None if its generation was not enabled Raises: NodeGenerationError: When there is a failure in the generation of the USD file """ if not configuration.node_interface.can_generate("usd"): return None logger.info("Generating USD Template File") generator = NodeUsdGenerator(configuration) generator.generate_interface() return str(generator.out)
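

# ======================================================================
# For orientation, the emitted template file has roughly this shape (abbreviated sketch for a
# hypothetical node type "omni.example.MyNode" at version 1; real output declares every attribute):
#
#     #usda 1.0
#     (
#         doc ="""Generated from node description file OgnMyNode.ogn
#     Contains templates for node types found in that file."""
#     )
#
#     def OmniGraph "TestGraph"
#     {
#         token evaluator:type = "push"
#         int2 fileFormatVersion = (1, 3)
#         token flatCacheBacking = "Shared"
#         token pipelineStage = "pipelineStageSimulation"
#
#         def OmniGraphNode "Template_omni_example_MyNode" (
#             docs="""..."""
#         )
#         {
#             token node:type = "omni.example.MyNode"
#             int node:typeVersion = 1
#
#             # 2 attributes
#             ...
#         }
#     }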
5,136
Python
40.427419
116
0.584891
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/utils.py
# noqa: PLC0302
"""Common constants, methods, and classes used by the various sections of the node generator.

These were split out into a separate file to avoid circular inclusions.
"""
import argparse
import csv
import io
import locale
import logging
import os
import re
import subprocess
import sys
from functools import partial
from pathlib import Path
from typing import IO, Any, Dict, List, Optional, Tuple, Union

# Support for deprecated location of these types
from .keys import IconKeys  # noqa
from .keys import MemoryTypeValues  # noqa
from .keys import MetadataKeyOutput  # noqa
from .keys import MetadataKeys  # noqa

# ======================================================================
# Environment variable gating display and execution of parse debugging information
# The parsing debugging is turned on in any one of these situations:
#     OGN_DEBUG=1
#     OGN_DEBUG.contains("parse")
#     OGN_PARSE_DEBUG=1
env_var = os.getenv("OGN_DEBUG")
has_debugging = env_var is not None
OGN_PARSE_DEBUG = (
    has_debugging
    and (env_var == "1" or env_var.lower().find("parse") >= 0)
    or (os.getenv("OGN_PARSE_DEBUG") is not None)
)
OGN_REG_DEBUG = (
    has_debugging
    and (env_var == "1" or env_var.lower().find("reg") >= 0)
    or (os.getenv("OGN_REG_DEBUG") is not None)
)


# ======================================================================
def __dbg(gate: bool, message: str, *args, **kwargs):
    """
    Print out a debugging message if the gate_variable is enabled, additional args will be passed
    to format the given message.
    """
    if gate:
        if args or kwargs:
            print("DBG: " + message.format(*args, **kwargs), flush=True)
        else:
            print(f"DBG: {message}", flush=True)


dbg_parse = partial(__dbg, OGN_PARSE_DEBUG)
dbg_reg = partial(__dbg, OGN_REG_DEBUG)

# Color type that can be either a hex string or an RGBA tuple
ColorType = Union[str, Tuple[int, int, int, int]]

# Constant defining the name of the OmniGraph core extension, since some code needs to generate differently for it
OMNI_GRAPH_CORE_EXTENSION = "omni.graph.core"

# Special file inserted into a generated ogn/ directory to tag it as not requiring runtime regeneration
UNWRITABLE_TAG_FILE = "__ogn_files_prebuilt"
# Legacy file name that causes part of the packaging process to break down due to the leading dot
__OLD_UNWRITABLE_TAG_FILE = ".ogn_files_prebuilt"

# Deprecated - use keys.MemoryTypeValues instead
MEMORY_TYPE_CPU = MemoryTypeValues.CPU
MEMORY_TYPE_CUDA = MemoryTypeValues.CUDA
MEMORY_TYPE_ANY = MemoryTypeValues.ANY
ALL_MEMORY_TYPES = MemoryTypeValues.ALL
CPP_MEMORY_TYPES = MemoryTypeValues.CPP

# Pattern for legal token names
#    - starts with a letter or underscore
#    - then an arbitrary number of alphanumerics, underscores, or commas
#    - other special characters cause problems in the generated code and so are disallowed
RE_TOKEN_NAME = re.compile(r"^[A-Za-z_][A-Za-z0-9_,]*$")
TOKEN_NAME_REQUIREMENT = (
    "Token name '{}' should be CamelCase with letters, numbers, underscores."
    " Tokens with special characters should use a dictionary rather than a list, where the key is the name."
)

# Enum values corresponding to extended attribute types (to avoid relying on omni.graph.core)
_EXTENDED_TYPE_REGULAR = 0
_EXTENDED_TYPE_UNION = 1
_EXTENDED_TYPE_ANY = 2

# Global logger avoids multiple loggers clashing with each other or duplicating output
logger = None


# ======================================================================
def global_logger():
    """Global status logger for the node generator.
Delay initialization so that it can be set up from the main function as well as scripts that import this one. Returns: A logging.Logger instance that will be shared by all scripts used for node generation """ global logger if logger is None: logger = logging.getLogger("generate_node") logging_handler = logging.StreamHandler(sys.stdout) logging_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s")) logger.addHandler(logging_handler) logger.setLevel(logging.INFO if OGN_PARSE_DEBUG else logging.ERROR) return logger # Bootstrap initialization of the global variable global_logger() # ================================================================================ class CarbLogError(Exception): """Exception to raise when there is an error that requires logging""" # ================================================================================ class DebugError(Exception): """Exception to raise when there is an error that requires a debug message but no specific action""" # ====================================================================== class ParseError(Exception): """Exception to raise when there is an error in the parsing of the node interface description""" # ====================================================================== class UnimplementedError(Exception): """Custom exception to raise when attempting to access unimplemented functionality""" # ====================================================================== class Settings: """Manage the build settings that can be used for tuning the code generation. The settings are all available as properties on the class. Add any new settings with their default and description in the __init__ method. New settings should also be reflected in the omni.graph.core.Settings class. The only name not allowed is "all" as that is used to return the list of all available settings. Defining slots allows interception of attempts to get/set unknown settings with an AttributeError. """ __slots__ = ["__settings"] def __init__(self): """Initialize the list of available settings and their defaults.""" self.__settings = { "pyOptimize": (False, "When generating Python nodes use a more optimized approach"), } for setting_name in self.__settings: self.__slots__.append(setting_name) for setting_name, (default_value, description) in self.__settings.items(): def _get(self, _default=default_value) -> bool: return _default def _set(self, setting_value, _name=setting_name, _description=description): self.__settings[_name] = (setting_value, _description) setattr(Settings, setting_name, property(_get, _set)) def __str__(self) -> str: """Returns a string containing the list of allowed settings""" return ", ".join(list(self.__settings.keys())) def all(self) -> Dict[str, Tuple[Any, str]]: # noqa: A003 """Return a dictionary of all known settings mapped onto (DEFAULT, DESCRIPTION)""" return self.__settings # ====================================================================== class GeneratorConfiguration: """Storage class containing common information used by the generators. Mostly created to avoid passing long argument lists around. 
Properties: base_name: Name of the .ogn file with directory and extension stripped away destination_directory: Directory for the generated code extension: Name of the extension running the generation generator_version: Version identification for this extension to embed in generated code module: Python module in which the generated Python files will live needs_directory: Destination will be ensured to exist before running the generator node_file_path: Location of the .ogn file used to generate the code node_interface: Node interface class to be processed target_version: Identification for the version of the omni.graph.core extension for which code was generated verbose: True if extra debugging information is to be output settings: List of settings that were enabled as part of the build generator_version_override: Generator version to use instead of the one extracted from omni.graph.tools target_version_override: Target version to use instead of the one extracted from omni.graph.core """ def __init__( self, node_file_path: str, node_interface, extension: str, module: str, base_name: str, destination_directory: Optional[str], verbose: bool = False, settings: Optional[Settings] = None, generator_version_override: Optional[Tuple[int, int, int]] = None, target_version_override: Optional[Tuple[int, int, int]] = None, ): """Collect the data members into the structure""" self.node_file_path = node_file_path.replace("\\", "/") if node_file_path else None # Standardize the separator self.node_interface = node_interface self.extension = extension self.module = module self.base_name = base_name self.destination_directory = destination_directory self.needs_directory = True self.verbose = verbose self.settings = settings or Settings() # It would have been nicer to use the toml package here but it's not available to the build script, and # finding the version is trivial anyway. tools_extension_root = Path(__file__).parent.parent.parent.parent.parent.parent if generator_version_override is None: self.generator_version = (0, 0, 0) re_version = re.compile('version = "(.*)"') toml_path = tools_extension_root / "config" / "extension.toml" if not toml_path.is_file(): raise ParseError(f"Could not find generator file containing the version information '{toml_path}'") with open(toml_path, "r", encoding="utf-8") as toml_fd: for line in toml_fd: match = re_version.match(line) if match: self.generator_version = tuple( int(version) for version in f"{match.group(1)}.0.0.0".split(".")[0:3] ) break else: self.generator_version = generator_version_override # There is no dependency from tools to the OmniGraph core but for now they always appear in the same build # tree so rely on that fact to find the configuration information. 
        if target_version_override is None:
            self.target_version = (0, 0, 0)
            # The path may have extra information in it such as version, SHA1, or platform, so use a pattern
            core_toml_path = None
            for core_dir in tools_extension_root.parent.rglob("omni.graph.core*"):
                core_toml_path = core_dir / "config" / "extension.toml"
                # There should only be one, but break on the first one found anyway
                if core_toml_path.is_file():
                    break
            # Do not fail if a file wasn't found, but issue a warning and use the default values
            if core_toml_path is not None and core_toml_path.is_file():
                with open(core_toml_path, "r", encoding="utf-8") as toml_fd:
                    for line in toml_fd:
                        match = re_version.match(line)
                        if match:
                            self.target_version = tuple(
                                int(version) for version in f"{match.group(1)}.0.0.0".split(".")[0:3]
                            )
                            break
            else:
                pass
        else:
            self.target_version = target_version_override

    # --------------------------------------------------------------------------------------------------------------
    def __str__(self):
        """Convert the configuration to a string for debugging - one property per line"""
        return f"""generator_version = {self.generator_version}
node_file_path = {self.node_file_path}
node_interface = {self.node_interface}
extension = {self.extension}
module = {self.module}
base_name = {self.base_name}
directory = {self.destination_directory}
target_version = {self.target_version}
verbose = {self.verbose}"""


# ======================================================================
class IndentedOutput:
    """Helper class that provides output capabilities to messages with preserved indentation levels

    Properties:
        output: File type that receives the output
        indent_level: Number of indentation levels for the current output
        indent_string: String representing the current indentation level
    """

    def __init__(self, output: IO):
        """Initialize the indentation level and prepare for output

        Args:
            output: IO object to which this object will be sending its output
                    Both io.TextIOWrapper (output from "open()") and io.StringIO can be used
        """
        self.output = output
        self.indent_level = 0
        self.indent_string = ""

    # ----------------------------------------------------------------------
    def indent(self, message: str = None) -> bool:
        """Increase the indentation level for emitted code

        If a message is specified then emit that message immediately before indenting, allowing
        you to easily open sections like:
            out.indent("{")

        Returns True so that indented sections can be indented in the code:
            if output.indent("begin {"):
                output.exdent("}")
        """
        if message is not None:
            self.write(message)
        self.indent_level += 1
        self.indent_string = "    " * self.indent_level
        return True

    # ----------------------------------------------------------------------
    def exdent(self, message: str = None):
        """Decrease the indentation level for emitted code

        If a message is specified then emit that message immediately after exdenting, allowing
        you to easily close sections like:
            out.exdent("}")
        """
        assert self.indent_level > 0
        self.indent_level -= 1
        self.indent_string = "    " * self.indent_level
        if message is not None:
            self.write(message)

    # ----------------------------------------------------------------------
    def __str__(self):
        """Return the accumulated string saved when there is no file to write, or the file path if there was"""
        if isinstance(self.output, io.StringIO):
            return self.output.getvalue()
        return self.output.name

    # ----------------------------------------------------------------------
    def prepend(self, message: str):
        """Write the message line at the beginning of the output.
This rewrites the entire output so it is best to minimize its use, and stick with string implementations. The message is written as-is with no newlines or indenting """ if isinstance(self.output, io.StringIO): current_output = self.output.getvalue() self.output = io.StringIO() self.output.write(message) self.output.write(current_output) else: filename = self.output.name self.output = open(filename, "r+", newline="\n", encoding="utf-8") # noqa: SIM115,PLR1732 content = self.output.read() self.output.seek(0, 0) self.output.write(message + content) # ---------------------------------------------------------------------- def write(self, message: Union[List, str] = ""): """Output a single message line to the file. This assumes indentation will be used and a newline will be appended. Passing in a list will write each list member on its own line. Args: message: Line of text being emitted """ if not message: self.output.write("\n") elif isinstance(message, list): for line in message: self.write(line) else: self.output.write(f"{self.indent_string}{message}\n") # ---------------------------------------------------------------------- def write_as_is(self, message: Union[List, str]): """Output a string to the output file without indentation or added newline Passing in a list will write each list member on its own line. Args: message: Line of text being emitted """ if isinstance(message, list): for line in message: self.write_as_is(line) elif message: self.output.write(f"{message}") # ====================================================================== def is_comment(keyword: str) -> bool: """Returns True if the keyword matches the specially reserved pattern for comments, the leading '$'""" return keyword[0] == "$" if keyword else False # ====================================================================== def is_unwritable(generated_directory: str) -> bool: """Returns True if the OGN generated directory is tagged as unwritable (i.e. part of a build) or if it is physically unwritable. """ unwritable_tag = os.path.join(generated_directory, UNWRITABLE_TAG_FILE) old_unwritable_tag = os.path.join(generated_directory, __OLD_UNWRITABLE_TAG_FILE) if os.path.isfile(unwritable_tag) or os.path.isfile(old_unwritable_tag): return True # The directory is not tagged as unwritable, now check to see if it is physically unwritable. 
    try:
        # Attempting to write a temp file is the only reliable way to detect unwritable directories on Windows
        test_path = Path(generated_directory) / "__test_file__"
        with open(test_path, "w", encoding="utf-8"):
            pass
        test_path.unlink()
    except OSError:  # error.errno == errno.EACCES or error.errno == errno.EEXIST
        return True
    return False


# ======================================================================
def ensure_quoted(value: str) -> str:
    """Returns the value in quotes if it wasn't already quoted, or just itself if it was"""
    if len(value) > 1 and ((value[0] == "'" and value[-1] == "'") or (value[0] == '"' and value[-1] == '"')):
        return value
    value_escaped = value.replace('"', '\\"')
    return f'"{value_escaped}"'


# ======================================================================
def shorten_string_lines_to(full_string: str, suggested_limit: int) -> List[str]:
    """Convert a single long line into a list of shorter lines

    Args:
        full_string: Single line to be trimmed
        suggested_limit: Minimum length of line; line will extend to the next space past this limit
    """
    shortened_strings = []
    while len(full_string) > suggested_limit:
        next_space = full_string.find(" ", suggested_limit)
        if next_space > 0:
            shortened_strings.append(full_string[0:next_space])
            full_string = full_string[next_space + 1 :]
        else:
            break
    shortened_strings.append(full_string)
    return shortened_strings


# ======================================================================
def attrib_description_to_string(description):
    """Convert the description to a string if the input has a List type"""
    description_list = description if isinstance(description, List) else [description]
    return "\n".join(description_list)


# ======================================================================
def to_cpp_str(raw: Union[str, List[str]], separator: str = " "):
    """Convert a string or list of string into a string literal safe for writing to a .cpp file.

    Args:
        raw: The string or list of strings to be converted.
        separator: If a list of strings is supplied they will be concatenated together with this arg separating them.
    """
    if isinstance(raw, list):
        raw = separator.join(raw)
    t = "".maketrans({"\\": "\\\\", "\n": "\\n", "\r": "\\r", '"': '\\"'})
    return '"' + raw.translate(t) + '"'


# ======================================================================
def to_comment(comment_separator: str, multiline_string: str, indent_level: int = 0):
    """Convert a multiline string into a comment where each line begins with the comment_separator

    Args:
        comment_separator: Character that indicates a line of comments, usually language-specific
        multiline_string: String with potential newlines in it
        indent_level: Number of spaces the resulting comment should be indented

    Returns:
        String representing a comment with one line of the comment per one line of the input.
        Each line of the string is indented the given number of spaces.
""" # Convert the multiline string into a set of truncated strings that pack into comments nicely comment_lines = [] for single_line in multiline_string.splitlines(): shortened_lines = shorten_string_lines_to(single_line, 80) comment_lines += shortened_lines # Empty lines should not have a trailing space so embed that in the non-empty lines before joining them string_lines = [f"{comment_separator} {x}" if x else f"{comment_separator}" for x in comment_lines] if indent_level > 0: indent_string = " " * indent_level string_lines = [f"{indent_string}{line}" for line in string_lines] return "\n".join(string_lines) # ====================================================================== def to_cpp_comment(multiline_string: str, indent_level: int = 0): """Convert a multiline string into a C++ comment Args: multiline_string: String with potential newlines in it indent_level: Number of spaces the resulting comment should be indented Returns: String representing a C++ comment with one line of the comment per one line of the input. Each line of the string is indented the given number of spaces. """ return to_comment("//", multiline_string, indent_level) # ====================================================================== def to_python_comment(multiline_string: str, indent_level: int = 0): """Convert a multiline string into a Python comment Args: multiline_string: String with potential newlines in it indent_level: Number of spaces the resulting comment should be indented Returns: String representing a Python comment with one line of the comment per one line of the input. Each line of the string is indented the given number of spaces. """ return to_comment("#", multiline_string, indent_level) # ====================================================================== def to_usd_comment(multiline_string: str, indent_level: int = 0): """Convert a multiline string into a USD comment Args: multiline_string: String with potential newlines in it indent_level: Number of spaces the resulting comment should be indented Returns: String representing a USD comment with one line of the comment per one line of the input. Each line of the string is indented the given number of spaces. """ return to_comment("#", multiline_string, indent_level) # ====================================================================== def to_usd_docs(docs: Union[List, str]) -> List[str]: """Returns the USD documentation as a list of strings with the docs= included""" if not docs: return 'docs="""No documentation provided"""' if isinstance(docs, list): text = [f'docs="""{docs[0]}'] if len(docs) > 1: text += docs[1:] text[-1] += '"""' return text return f'docs="""{docs}"""' # ====================================================================== def value_as_usd(python_value: Union[None, Tuple, List, str, bool, int, float]) -> str: """Convert a Python data type into a USD structure equivalent Args: python_value: Python value to convert. Dictionaries and sets have no equivalent. Returns: Structure representing the USD version of the value passed in, for converting to a string """ if python_value is None: return None if isinstance(python_value, str): return ensure_quoted(python_value) if isinstance(python_value, bool): return "true" if python_value else "false" if isinstance(python_value, (int, float)): return python_value # Lists and tuples both appear as parenthesized values so convert them to that. There is also no # representation of an empty array so return None if the list or tuple is empty. 
    if isinstance(python_value, List):
        usd_list = [value_as_usd(value) for value in python_value]
        return tuple(usd_list) if usd_list else None
    if isinstance(python_value, Tuple):
        usd_list = [value_as_usd(value) for value in python_value]
        return tuple(usd_list) if usd_list else None
    return None


# ======================================================================
def rst_title(title: str, title_level: int = 0) -> str:
    """Returns a string implementing a title_level header formatting for the string"""
    title_char = ["=", "-", "~", "_", "+", "*", ":", "^"][title_level]
    return f"\n{title}\n{title_char * len(title)}"


# ======================================================================
def rst_table(table_to_format: List[List[str]]) -> str:
    """
    Utility to take a list of lists representing a text table and format it in reStructuredText format.
    This means equalizing all of the column widths, separating columns with " | ", separating the second
    and third rows with "+==+..==+" and putting "+--+..--+" between other rows, and at the top and bottom.

    e.g. this input [["Name", "Value"], ["Fred", "Flintstone"], ["Bamm-Bamm", "Rubble"]] yields this output:

        +-----------+------------+
        | Name      | Value      |
        +===========+============+
        | Fred      | Flintstone |
        +-----------+------------+
        | Bamm-Bamm | Rubble     |
        +-----------+------------+

    Note how the columns have been adjusted to have constant width, and the header has different padding characters.

    Args:
        table_to_format: List of columns to go in the table, where the first list is the header row and
                         subsequent lists must all be the same length.

    Returns:
        A string implementing the table of data, formatted as an RST aligned-table.

    Raises:
        ValueError: If the inner lists have different lengths
    """
    # Verify the list sizes before any work begins
    if not table_to_format:
        return ""
    if not table_to_format[0]:
        return ""
    list_size = len(table_to_format[0])
    for i in range(1, len(table_to_format)):
        if list_size != len(table_to_format[i]):
            raise ValueError(f"Table row {i} does not have the expected size {list_size}")

    # Compute the maximum column sizes, as they will have to all be padded to that amount + 1 space on each side
    max_widths = [0] * len(table_to_format[0])
    for row in table_to_format:
        for index, column in enumerate(row):
            if len(str(column)) > max_widths[index]:
                max_widths[index] = len(str(column))

    # Title and row separators are the same width as the columns, with different characters for padding
    title_separator = "+"
    row_separator = "+"
    for column_width in max_widths:
        title_separator += f"{'=' * (column_width + 2)}+"
        row_separator += f"{'-' * (column_width + 2)}+"
    title_separator += "\n"
    row_separator += "\n"

    table = row_separator
    # Walk each of the inner lists, adding spaces to each column as required, and column separators
    first_row = True
    for row in table_to_format:
        formatted_columns = []
        for index, column in enumerate(row):
            padding = max_widths[index] - len(str(column))
            formatted_columns.append(f"{column}{' ' * padding}")
        table += f"| {' | '.join(formatted_columns)} |\n"
        if first_row:
            first_row = False
            table += title_separator
        else:
            table += row_separator
    return table


# ======================================================================
def check_color(color: ColorType):
    """Check to see if the color has a legal specification.

    Args:
        color: Value to check using one of these two formats
            "#AABBGGRR"  Hex digits of color components in 0-255
            [R, G, B, A] Decimal values of color components in 0-255

    Returns:
        Hex string representing the RGBA values (to be used as metadata, using uppercase letters as #AABBGGRR)

    Raises:
        ParseError if the color specification was not legal
    """
    if isinstance(color, List):
        if len(color) != 4:
            raise ParseError(f"Color list '{color}' must have 4 elements - R, G, B, A")
        try:
            (red, green, blue, alpha) = [int(component) for component in color]
        except TypeError as error:
            raise ParseError(f"Color list '{color}' must have 4 integer elements in [0, 255] - R, G, B, A") from error
    elif isinstance(color, str):
        try:
            red = int(f"0x{color[7:9]}", 16)
            green = int(f"0x{color[5:7]}", 16)
            blue = int(f"0x{color[3:5]}", 16)
            alpha = int(f"0x{color[1:3]}", 16)
        except (TypeError, ValueError) as error:
            raise ParseError(f"Color string '{color}' must be in the hexadecimal format '#AABBGGRR'") from error
    if red < 0 or red > 255:
        raise ParseError(f"Red component '{red}' is out of the range [0, 255]")
    if green < 0 or green > 255:
        raise ParseError(f"Green component '{green}' is out of the range [0, 255]")
    if blue < 0 or blue > 255:
        raise ParseError(f"Blue component '{blue}' is out of the range [0, 255]")
    if alpha < 0 or alpha > 255:
        raise ParseError(f"Alpha component '{alpha}' is out of the range [0, 255]")
    return f"#{format(alpha, '02X')}{format(blue, '02X')}{format(green, '02X')}{format(red, '02X')}".upper()


# ======================================================================
def check_icon_information(icon_info: Union[str, Dict[str, ColorType]]):
    """Raises ParseError if the icon_path is not legal, otherwise returns the path

    Args:
        icon_info: If a string then it is the icon path relative to the .ogn file
                   If a dictionary then the dictionary contains extended icon information with these keywords

    Returns:
        (path, color, background_color, border_color) extracted from the icon information
        If any element was not specified then it will be None

    Raises:
        ParseError if any of the icon properties are illegal
    """
    path = None
    color = None
    background_color = None
    border_color = None
    # Simple spec - just the path
    if isinstance(icon_info, str):
        if icon_info.startswith("/") or icon_info.startswith("\\") or icon_info.find(":") >= 0:
            raise ParseError(f'Icon path "{icon_info}" must be a string path relative to the .ogn file location')
        path = icon_info
    # Extended spec - dictionary of properties
    elif isinstance(icon_info, dict):
        for key, value in icon_info.items():
            if key == IconKeys.PATH:
                if value.startswith("/") or value.startswith("\\") or value.find(":") >= 0:
                    raise ParseError(f'Icon path "{value}" must be a string path relative to the .ogn file location')
                path = value
            elif key == IconKeys.COLOR:
                color = check_color(value)
            elif key == IconKeys.BACKGROUND_COLOR:
                background_color = check_color(value)
            elif key == IconKeys.BORDER_COLOR:
                border_color = check_color(value)
            else:
                raise ParseError(f"Icon keyword '{key}' not in legal list of path, color, backgroundColor, borderColor")
    else:
        raise ParseError(f"Icon information not a string path or a dictionary of properties - `{icon_info}`")
    return (path, color, background_color, border_color)


# ======================================================================
def check_memory_type(memory_type: str):
    """Raises ParseError if the memory type is not legal, otherwise returns the memory type value"""
    if memory_type not in MemoryTypeValues.ALL:
        raise ParseError(f'Memory type "{memory_type}" not in allowed list of {MemoryTypeValues.ALL}')
    return memory_type


# ======================================================================
def check_token_name(token_name: str):
    """Raises a ParseError if the given node name has an illegal pattern, else returns the node name"""
    if not RE_TOKEN_NAME.match(token_name):
        raise ParseError(TOKEN_NAME_REQUIREMENT.format(token_name))
    return token_name


# ======================================================================
def get_metadata_dictionary(metadata):
    """Raises ParseError if the metadata is not legal, otherwise returns it as a dictionary with comments removed.

    This function only does generic checks, applicable to all types of metadata. More specific metadata
    checks, in particular for legal values and keywords, are done elsewhere
    """

    def to_string(value: Any) -> str:
        """Turn metadata values into strings, notably lists and tuples are comma-separated strings"""
        output = io.StringIO()
        # Metadata specified as a list is stored as a comma-separated string
        if isinstance(value, (tuple, list)):
            csv_data = list(value)
        # Metadata specified as a dictionary means the values might contain special characters that the generated code
        # cannot handle and the keys are safe names. For storing the actual metadata only the values are of interest.
        elif isinstance(value, dict):
            csv_data = list(value.values())
        # Simple elements are still run through CSV to quote any embedded commas
        else:
            csv_data = [value]
        writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL)
        writer.writerow(csv_data)
        return output.getvalue().rstrip()

    try:
        pruned_metadata = {key: to_string(value) for key, value in metadata.items() if key[0] != "$"}
        logger.info(" -> %s", pruned_metadata)
    except AttributeError as error:
        raise ParseError("Metadata must be a dictionary of strings") from error
    return pruned_metadata


# If True then perform more aggressive directory checks, not safe in a multi-threaded environment
SAFE_DIRECTORY_CREATION = False


# ======================================================================
def ensure_writable_directory(prospective_dir: Union[Path, str]):
    """Ensure a directory exists and is writable

    Args:
        prospective_dir: Full path to the directory to check or create

    Raises:
        ValueError: If the path could not be made into a writable directory for any reason
    """
    # There are race condition issues with the prospective directory checking so only do it
    # if it was explicitly requested. (It has to be hardcoded since this test happens during
    # argument parsing so you can't safely pass an argument to enable that.)
writable_dir = prospective_dir if isinstance(prospective_dir, Path) else Path(prospective_dir) try: if SAFE_DIRECTORY_CREATION: if writable_dir.is_file(): logger.warning('Directory "%s" existed as a file - removing', writable_dir) writable_dir.unlink() if not writable_dir.is_dir(): writable_dir.mkdir(mode=0o777, parents=True, exist_ok=True) if not writable_dir.is_dir(): raise Exception if not os.access(str(writable_dir), os.W_OK): raise Exception else: if not writable_dir.is_dir(): writable_dir.mkdir(mode=0o777, parents=True, exist_ok=True) except Exception as error: raise ValueError(f"writable_dir:{prospective_dir} could not be made into a writable directory") from error # ============================================================================================================== class WritableDir(argparse.Action): """Helper class for the argparser to check for a writable directory""" def __call__(self, parser, namespace, values, option_string=None): """Function called by the arg parser to verify that a directory exists and is writable Args: parser: argparser required argument, ignored namespace: argparser required argument, ignored values: The Path of the directory being checked for writability option_string: argparser required argument, ignored Raises: argparse.ArgumentTypeError if the requested directory cannot be found or created in writable mode """ try: ensure_writable_directory(values) setattr(namespace, self.dest, values) except Exception as error: raise argparse.ArgumentTypeError(error) # ====================================================================== # # Collection of functions to make a symbolic link - ends at the next separator with "=====" in it or EOF # def _find_junction_location(junction_path: str) -> str: """Returns the location to which the junction path points As with the os.symlink call the equivalent fsutil function can only be run with admin privileges, resulting in the necessity of this roundabout path to the same information. - use the /A:L functions to get the file type information in the parent directory - find the entry that matches junction_path - parse the link location from the remainder of the line """ # Normalizing the path ensures we don't end up at the target's parent instead of the link's parent with subprocess.Popen( ("dir", "/A:L", os.path.normpath(os.path.join(junction_path, os.pardir))), bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, ) as results: out, _ = results.communicate() out = out.decode(locale.getdefaultlocale()[1]) if results.returncode == 0: lines = out.splitlines() keys = ["<JUNCTION>", "<SYMLINKD>"] for line in lines: for key in keys: start = line.find(key) if start == -1: continue end = start + len(key) terms = line[end:].split("[") if len(terms) < 2: continue junction_name = os.path.normcase(terms[0].strip()) junction_target = terms[1].strip("]") junction_name_to_find = os.path.normcase(os.path.basename(junction_path)) if junction_name == junction_name_to_find: return junction_target raise OSError(f"Failed to get link target for '{junction_path}'") # ---------------------------------------------------------------------- def _find_linked_location(link_path: str) -> str: """Looks for the location to which the link_path points Args: link_path: Location of the link to check Returns: Location the link points to Raises: OSError: If the link doesn't exist, is the wrong type, or could not be read """ try: # First the easy way... 
        return os.readlink(link_path)
    except Exception as error:
        # Then the hard way on Windows...
        if os.name == "nt":
            try:
                return _find_junction_location(link_path)
            except Exception as secondary_error:
                raise OSError() from secondary_error
        raise OSError() from error


# ----------------------------------------------------------------------
def _try_os_symlink(existing_path: str, link_to_create: str):
    """Implementation of symbolic link that uses the Python os.symlink method

    Args:
        existing_path: Current location to which the link will point
        link_to_create: Location of the new link

    Raises:
        OSError: If the link could not be created
    """
    try:
        os.symlink(existing_path, link_to_create, target_is_directory=True)
    except FileExistsError as error:
        # Find the linked location
        target = _find_linked_location(link_to_create)
        # If the link is to a different location than the one requested that's bad
        if os.path.normcase(target) != os.path.normcase(existing_path):
            raise OSError("Link already exists, pointing to a different location") from error


# ----------------------------------------------------------------------
def _try_junction_link(existing_path: str, link_to_create: str):
    """Implementation of symbolic link that uses the native Windows linking capabilities.

    This is only necessary because Windows requires admin privileges to create symlinks and we may not have them

    Args:
        existing_path: Current location to which the link will point
        link_to_create: Location of the new link

    Raises:
        OSError: If the link could not be created
    """
    # Even though they do exactly the same thing, "mklink" can be done without admin privileges
    with subprocess.Popen(
        ("mklink", "/j", link_to_create, existing_path),
        bufsize=0,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
    ) as results:
        _, err = results.communicate()
        err = err.decode(locale.getdefaultlocale()[1])
        if results.returncode:
            if "file already exists" in err:
                target = _find_linked_location(link_to_create)
                # If the link exists but is to the same place as was requested that's good
                if os.path.normcase(target) == os.path.normcase(existing_path):
                    return
            raise OSError(f"{err.strip()} ({link_to_create} ==> {existing_path})")


# ----------------------------------------------------------------------
def create_symbolic_link(existing_path: str, link_to_create: str):
    """Create a symbolic link, if possible

    Args:
        existing_path: Current location to which the link will point
        link_to_create: Location of the new link

    Raises:
        OSError: If the link could not be created
    """
    try:
        _try_os_symlink(existing_path, link_to_create)
    except OSError as error:
        # On Windows there can be privilege errors that prevent the link from being made, but there is another way...
        if os.name == "nt" and "privilege not held" in str(error):
            _try_junction_link(existing_path, link_to_create)
        else:
            raise error
    except Exception as error:
        raise OSError(str(error)) from error


# ======================================================================
class NameManager:
    """Class that manages naming of generated code where the name is not important to the user

    Name uniqueness is only important within a single file generation, so different name managers should
    be used for different languages (e.g. one for C++, a different one for Python).

    Internal Properties:
        __current: Current unique index for the next name
        __shortened_names: Map of original name to shortened name.
    """

    # Control the naming algorithm through environment variables.
    SHORTEN_NAMES = os.getenv("DEBUG") or os.getenv("OGN_DEBUG")

    def __init__(self):
        """Initialize with an empty name map"""
        self.__shortened_names = {}
        self.__current = 0

    def name(self, original_name: str) -> str:
        """Returns a shortened unique name corresponding to original_name if shortening is enabled, otherwise the name.

        This is similar to tokenization, with the goal of minimizing the amount of code the compiler has to read
        when compiling/interpreting the generated code that's invisible to the user. For instance there might be
        a unique local variable called "attribute_inputs_myInput" that can be shortened to "__2"
        """
        if self.SHORTEN_NAMES:
            try:
                return self.__shortened_names[original_name]
            except KeyError:
                self.__shortened_names[original_name] = f"__{self.__current}"
                self.__current += 1
                return self.__shortened_names[original_name]
        return original_name
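

# ======================================================================
# Minimal usage sketch for IndentedOutput (illustrative only, based on the class defined above):
# indent()/exdent() bracket nested sections, and str() recovers the accumulated text when the
# target is an io.StringIO.
if __name__ == "__main__":
    demo_out = IndentedOutput(io.StringIO())
    if demo_out.indent("class Example:"):
        demo_out.write("value = 1")
        demo_out.exdent()
    # Prints the two generated lines, the second indented one level:
    #     class Example:
    #         value = 1
    print(str(demo_out))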
44,876
Python
41.29689
120
0.593458
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/ThreadsafeOpen.py
r""" Support for safe file writing from multiple threads. The build system is threaded, which introduces the possibility of multiple threads trying to access the same generated OGN files at the same time (e.g. the Python test initialization file ogn/tests/__init__.py) For that reason it is necessary to be able to atomically write to a file. You use this in the same way you would use open(). .. code-block:: python with ThreadsafeOpen("myFile.txt", "w") as my_fd: my_fd.write("Hello World\n) """ import os from io import TextIOWrapper from warnings import warn # ====================================================================== # Implement the right definitions of shared functions based on the OS try: # Posix based file locking (Linux, Ubuntu, MacOS, etc.) import fcntl def lock_file(file_to_lock: TextIOWrapper): """Lock a file for exclusive access""" fcntl.lockf(file_to_lock, fcntl.LOCK_EX) def unlock_file(file_to_unlock: TextIOWrapper): """Unlock exclusive access to a file""" fcntl.lockf(file_to_unlock, fcntl.LOCK_UN) except ModuleNotFoundError: # Windows file locking (triggered by failed import of fcntl) # Only the first byte is locked, but that is enough for our purposes. import msvcrt def lock_file(file_to_lock: TextIOWrapper): """Lock a file for exclusive access""" file_to_lock.seek(0) msvcrt.locking(file_to_lock.fileno(), msvcrt.LK_LOCK, 1) def unlock_file(file_to_unlock: TextIOWrapper): """Unlock exclusive access to a file""" file_to_unlock.seek(0) msvcrt.locking(file_to_unlock.fileno(), msvcrt.LK_UNLCK, 1) # ====================================================================== class ThreadsafeOpen: """ Class for ensuring that all file operations are atomic, treat initialization like a standard call to 'open' that happens to be atomic. This file opener *must* be used in a "with" block. """ def __init__(self, path, *args, **kwargs): """ Open the file with the given arguments. Then acquire a lock on that file object WARNING: Advisory locking """ self.file = open(path, *args, **kwargs) # noqa: SIM115,PLR1732,PLW1514 self.writing_to_file = "r" not in args try: lock_file(self.file) except IOError as error: warn(f"Could not lock {path}, may be out of sync - {error}") def __enter__(self, *args, **kwargs): """Yield the locked file descriptor on entry""" return self.file def __exit__(self, exc_type=None, exc_value=None, traceback=None): """Release the locked file descriptor and close the file on exit""" # Flush to make sure all buffered contents are written to file before unlocking. if self.writing_to_file: try: self.file.flush() os.fsync(self.file.fileno()) except OSError as error: warn(f"Error in sync of {self.file.name} - {error}") try: unlock_file(self.file) self.file.close() except PermissionError as error: warn(f"Could not unlock {self.file.name} - {error}") # By default any exceptions are raised to the user. return exc_type is None
3,338
Python
34.903225
106
0.61444
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/keys.py
"""Common location for all of the keyword definitions for the .ogn format""" # ============================================================================================================== class AttributeKeys: """Container for the text for all of the .ogn keywords used at the attribute definition level""" ALLOWED_TOKENS = "allowedTokens" DEFAULT = "default" DEPRECATED = "deprecated" DESCRIPTION = "description" MAXIMUM = "maximum" MEMORY_TYPE = "memoryType" METADATA = "metadata" MINIMUM = "minimum" OPTIONAL = "optional" TYPE = "type" UI_NAME = "uiName" UNVALIDATED = "unvalidated" # Attribute keywords required to exist for all attributes MANDATORY = [DESCRIPTION, TYPE] # Attribute keys that are always directly processed, not relying on AttributeManager derived classes to do it PROCESSED = MANDATORY + [DEFAULT, DEPRECATED, MEMORY_TYPE, METADATA, OPTIONAL, UI_NAME, UNVALIDATED] # ============================================================================================================== class CategoryTypeValues: """Container for the set of values allowed for defining node type categories""" ANIMATION = "animation" DEBUG = "debug" FUNCTION = "function" GENERIC = "generic" GEOMETRY = "geometry" INPUT = "input" IO = "io" MATERIAL = "material" MATH = "math" RENDERING = "rendering" SCENE_GRAPH = "scene_graph" TEXTURE = "texture" TIME = "time" UI = "ui" # ============================================================================================================== class CudaPointerValues: """Container for the set of values .ogn accepts for the cudaPointers node type""" CPU = "cpu" CUDA = "cuda" NA = "na" # These values are members of the enum class carb::flatcache::PtrToPtrKind, assuming to have a "using" declaration CPP = { CPU: "PtrToPtrKind::eCpuPtrToGpuPtr", CUDA: "PtrToPtrKind::eGpuPtrToGpuPtr", NA: "PtrToPtrKind::eNotApplicable", } PYTHON = { CPU: "og.PtrToPtrKind.CPU", CUDA: "og.PtrToPtrKind.CUDA", NA: "og.PtrToPtrKind.NA", } # ============================================================================================================== class ExclusionTypeValues: """Container for the .ogn keywords allowed for types of generated code that can be excluded""" CPP = "c++" DOCS = "docs" PYTHON = "python" TEMPLATE = "template" TESTS = "tests" USD = "usd" # ============================================================================================================== class GraphSetupKeys: """Container for keywords encapsulating the graph setup entries in the test dictionary, v2+.""" CONNECT = "connect" CREATE_NODES = "create_nodes" CREATE_PRIMS = "create_prims" CREATE_VARIABLES = "create_variables" DELETE_NODES = "delete_nodes" DISCONNECT = "disconnect" DISCONNECT_ALL = "disconnect_all" EXPOSE_PRIMS = "expose_prims" SET_VALUES = "set_values" ALL = [ CONNECT, CREATE_NODES, CREATE_PRIMS, CREATE_VARIABLES, DELETE_NODES, DISCONNECT, DISCONNECT_ALL, EXPOSE_PRIMS, SET_VALUES, ] # ============================================================================================================== class PrimExposureValues: """Options for importing a prim into OmniGraph""" READ_PRIM = "read" """Read the prim and create dynamic attributes to access each prim attribute""" READ_PRIM_BUNDLE = "readBundle" """Read the prim and create a single bundle with every prim attribute in it""" WRITE_PRIM = "write" """Create inputs for every attribute in the prim, writing them to the prim if they are connected""" # ====================================================================== class IconKeys: """Holder for the set of 
keywords that could appear in the icon dictionary""" BACKGROUND_COLOR = "backgroundColor" BORDER_COLOR = "borderColor" COLOR = "color" PATH = "path" # ====================================================================== class LanguageTypeValues: """Holder for the set of values that define a language specification""" CPP = "C++" PYTHON = "Python" ALL = {CPP: ["cpp", "c++", "C++"], PYTHON: ["py", "python", "Python"]} @staticmethod def key_from_text(language: str) -> str: """Gets the language name in a canonical form, or raises ValueError if it is not a recognized language""" if language in LanguageTypeValues.ALL[LanguageTypeValues.CPP]: return LanguageTypeValues.CPP if language in LanguageTypeValues.ALL[LanguageTypeValues.PYTHON]: return LanguageTypeValues.PYTHON raise ValueError(f"Unrecognized language '{language}' - should be one of {list(LanguageTypeValues.ALL.keys())}") # ====================================================================== class MemoryTypeValues: """Holder for the set of keywords identifying memory types""" CPU = "cpu" CUDA = "cuda" ANY = "any" ALL = [ANY, CPU, CUDA] # These values are members of the type omni::graph::core::ogn::eMemoryType CPP = {CUDA: "ogn::kCuda", CPU: "ogn::kCpu", ANY: "ogn::kAny"} # These values are members of the type omni.graph.core.MemoryType PYTHON = {CUDA: "og.MemoryType.CUDA", CPU: "og.MemoryType.CPU", ANY: "og.MemoryType.ANY"} # ====================================================================== class MetadataKeys: """Holder for common metadata information These should match the C++ constant values found in include/omni/graph/core/ogn/Database.h as well as the members of MetadataKeyOutput below. """ ALLOW_MULTI_INPUTS = "allowMultiInputs" ALLOWED_TOKENS = "allowedTokens" ALLOWED_TOKENS_RAW = "__allowedTokens" CATEGORIES = "__categories" CATEGORY_DESCRIPTIONS = "__categoryDescriptions" CUDA_POINTERS = "__cudaPointers" DEFAULT = "__default" DESCRIPTION = "__description" EXCLUSIONS = "__exclusions" EXTENSION = "__extension" HIDDEN = "hidden" ICON_BACKGROUND_COLOR = "__iconBackgroundColor" ICON_BORDER_COLOR = "__iconBorderColor" ICON_COLOR = "__iconColor" ICON_PATH = "__icon" INTERNAL = "internal" LANGUAGE = "__language" MEMORY_TYPE = "__memoryType" OBJECT_ID = "__objectId" OPTIONAL = "__optional" OUTPUT_ONLY = "outputOnly" LITERAL_ONLY = "literalOnly" SINGLETON = "__singleton" TAGS = "tags" TOKENS = "__tokens" UI_NAME = "uiName" UI_TYPE = "uiType" @classmethod def key_names(cls): key_names = [] for key, value in cls.__dict__.items(): if key == key.upper(): key_names.append(value) return key_names # ====================================================================== class MetadataKeyOutput: """Names of the C++ equivalent constants from MetadataKeys. 
These should match the C++ constant names found in include/omni/graph/core/ogn/Database.h as well as the members of MetadataKeys above """ ALLOW_MULTI_INPUTS = "kOgnMetadataAllowMultiInputs" ALLOWED_TOKENS = "kOgnMetadataAllowedTokens" ALLOWED_TOKENS_RAW = "kOgnMetadataAllowedTokensRaw" CATEGORIES = "kOgnMetadataCategories" CATEGORY_DESCRIPTIONS = "kOgnMetadataCategoryDescriptions" CUDA_POINTERS = "kOgnMetadataCudaPointers" DEFAULT = "kOgnMetadataDefault" DESCRIPTION = "kOgnMetadataDescription" EXCLUSIONS = "kOgnMetadataExclusions" EXTENSION = "kOgnMetadataExtension" HIDDEN = "kOgnMetadataHidden" ICON_BACKGROUND_COLOR = "kOgnMetadataIconBackgroundColor" ICON_BORDER_COLOR = "kOgnMetadataIconBorderColor" ICON_COLOR = "kOgnMetadataIconColor" ICON_PATH = "kOgnMetadataIconPath" INTERNAL = "kOgnMetadataInternal" LANGUAGE = "kOgnMetadataLanguage" MEMORY_TYPE = "kOgnMetadataMemoryType" OBJECT_ID = "kOgnMetadataObjectId" OPTIONAL = "kOgnMetadataOptional" OUTPUT_ONLY = "kOgnMetadataOutputOnly" LITERAL_ONLY = "kOgnMetadataLiteralOnly" SINGLETON = "kOgnSingletonName" TAGS = "kOgnMetadataTags" TOKENS = "kOgnMetadataTokens" UI_NAME = "kOgnMetadataUiName" UI_TYPE = "kOgnMetadataUiType" @classmethod def cpp_name_from_key(cls, metadata_key: str) -> str: """Returns the C++ constant name that defines the given metdata key string, the key itself if no match""" # If the key is already one of the constants use it directly if metadata_key in MetadataKeyOutput.__dict__.values(): return metadata_key # Find the key corresponding to the value name, if it exists for key, value in MetadataKeys.__dict__.items(): if value == metadata_key: return getattr(cls, key) # Use the string directly, but return None so that the caller knows to quote it return None @classmethod def python_name_from_key(cls, metadata_key: str) -> str: """Returns the Python constant name that defines the given metadata key string, the key itself if no match""" # If it's already a member variable use it directly if metadata_key.startswith("ogn.MetadataKeys"): return metadata_key # Find the key corresponding to the value name, if it exists for key, value in MetadataKeys.__dict__.items(): if value == metadata_key: return f"ogn.MetadataKeys.{key}" # Use the string directly, but return None so that the caller knows to quote it return None # ============================================================================================================== class NodeTypeKeys: """Container for the text for all of the .ogn keywords used at the node definition level""" CATEGORIES = "categories" CATEGORY_DEFINITIONS = "categoryDefinitions" CUDA_POINTERS = "cudaPointers" DESCRIPTION = "description" EXCLUDE = "exclude" ICON = "icon" INPUTS = "inputs" LANGUAGE = "language" MEMORY_TYPE = "memoryType" METADATA = "metadata" OUTPUTS = "outputs" SCHEDULING = "scheduling" SINGLETON = "singleton" STATE = "state" TAGS = "tags" TESTS = "tests" TOKENS = "tokens" TYPE_DEFINITIONS = "typeDefinitions" UI_NAME = "uiName" VERSION = "version" # Node type keywords required to exist for all attributes MANDATORY = [DESCRIPTION] # ============================================================================================================== class TestKeys: """Container for the text for all of the .ogn keywords used at the test definition level""" DESCRIPTION = "description" GPU_ATTRIBUTES = "gpu" INPUTS = "inputs" OUTPUTS = "outputs" SETUP = "setup" STATE = "state" STATE_GET = "state_get" STATE_SET = "state_set" # 
============================================================================================================== # _____ ______ _____ _____ ______ _____ _______ ______ _____ # | __ \ | ____|| __ \ | __ \ | ____|/ ____| /\ |__ __|| ____|| __ \ # | | | || |__ | |__) || |__) || |__ | | / \ | | | |__ | | | | # | | | || __| | ___/ | _ / | __| | | / /\ \ | | | __| | | | | # | |__| || |____ | | | | \ \ | |____| |____ / ____ \ | | | |____ | |__| | # |_____/ |______||_| |_| \_\|______|\_____|/_/ \_\|_| |______||_____/ # class GraphSetupKeys_V1: # noqa: N801 """Container for deprecated .ogn keywords for the graph setup section of tests - the subset from OmniGraphHelper.edit_graph for creation. Syntax of the contents are left to the helper when the test runs.""" CONNECTIONS = "connections" NODES = "nodes" PRIMS = "prims" VALUES = "values"
11,949
Python
35.882716
120
0.556699
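A minimal usage sketch of the keyword helpers above. The import path is inferred from the file location and is an assumption, as is the "myCustomKey" string used to show the no-match case:

# Sketch only: the import path below mirrors the file location and is not confirmed by the source
from omni.graph.tools._impl.node_generator.keys import (
    LanguageTypeValues,
    MetadataKeyOutput,
    MetadataKeys,
)

# Any alias listed in LanguageTypeValues.ALL canonicalizes to its dictionary key
assert LanguageTypeValues.key_from_text("py") == LanguageTypeValues.PYTHON
assert LanguageTypeValues.key_from_text("c++") == LanguageTypeValues.CPP

# A known metadata key string maps to the C++ constant name that defines it;
# unknown keys return None so the caller knows to quote the raw string instead
assert MetadataKeyOutput.cpp_name_from_key(MetadataKeys.UI_NAME) == "kOgnMetadataUiName"
assert MetadataKeyOutput.cpp_name_from_key("myCustomKey") is None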
omniverse-code/kit/exts/omni.graph.tools/omni/graph/tools/_impl/node_generator/generate_documentation.py
""" Support for generating documentation files for OmniGraph Nodes. The documentation is written in the reStructuredText format, consistent with API documentation. Note that the script ../make_docs_toc.py relies on parsing the files so if the format changes it must be updated. Exported Methods: generate_documention Exported Constants RE_OGN_DOC_FILENAME RE_OGN_NAME_INFO RE_OGN_DESCRIPTION_TITLE RE_OGN_INPUTS_TITLE RE_OGN_BODY_MARKER """ import re from contextlib import suppress from typing import List, Optional from .attributes.AttributeManager import AttributeManager from .nodes import NodeInterfaceGenerator from .utils import GeneratorConfiguration, MetadataKeys, UnimplementedError, logger, rst_table, rst_title __all__ = [ "generate_documentation", "RE_OGN_DOC_FILENAME", "RE_OGN_NAME_INFO", "RE_OGN_DESCRIPTION_TITLE", "RE_OGN_INPUTS_TITLE", "RE_OGN_BODY_MARKER", ] # Special restructuredText markers so that the parser can quickly find the start of any section RST_ID_DOC = "GENERATED - Documentation" # Pattern to recognize the name of OGN documentation files RE_OGN_DOC_FILENAME = re.compile("Ogn.*.rst$") # Pattern to recognize the name of the node in the file (must coordinate with the output of generate_documentation.py) RE_OGN_NAME_INFO = re.compile("OmniGraph Node ([^ ]+)$") # Patterns for title lines in the file RE_OGN_DESCRIPTION_TITLE = re.compile("Description$") RE_OGN_INPUTS_TITLE = re.compile("Inputs$") # Pattern marking the start of the node body RE_OGN_BODY_MARKER = re.compile(f"^.. _({RST_ID_DOC} .*):") # Pattern to recognize .ogn files that are part of the internal tutorials RE_TUTORIAL_FILE = re.compile(".*(omni.graph.core/tutorials.*)") # ====================================================================== class NodeDocumentationGenerator(NodeInterfaceGenerator): """Manage the functions required to generate a C++ interface for a node""" def __init__(self, configuration: GeneratorConfiguration): """Set up the generator and output the documentation for the node Just passes the initialization on to the parent class. See the argument and exception descriptions there. 
""" logger.info("Creating NodeDocumentationGenerator") super().__init__(configuration) # ---------------------------------------------------------------------- def interface_file_name(self) -> str: """Return the path to the name of the documentation file""" return self.base_name + ".rst" # ---------------------------------------------------------------------- def generate_attributes_documentation(self, attributes: List[AttributeManager]): """Write out documentation code corresponding to the node Args: attributes: List of attributes whose documentation is to be generated Raises: NodeGenerationError: When there is a failure in the generation of the documentation file """ logger.info("Generating documentation for %s attributes", len(attributes)) # RST tables are very particular about sizing so first find out how big the columns need to be attribute_table = [["Name", "Type", "Default", "Required?", "Descripton"]] for attribute in attributes: name = attribute.name try: type_name = attribute.ogn_type() except AttributeError: type_name = "[Unsupported]" try: default_value = attribute.default except UnimplementedError: default_value = "[Unsupported]" required = "**Y**" if attribute.is_required else "" description = attribute.description if isinstance(description, list): description = " ".join(description) description = description.replace("\n", " ") attribute_table.append([name, type_name, default_value, required, description]) # If there is any metadata add it in name/value pairs below the attribute definition for key, value in attribute.metadata.items(): if key != MetadataKeys.DESCRIPTION: attribute_table.append(["", key, value, "", ""]) self.out.write(rst_table(attribute_table)) # ---------------------------------------------------------------------- def generate_code(self, title: str, code_file_path: str, code_type: str, code_id: str): """Generate a code block with the given title containing the contents of the given file If the file does not exist then a message to that effect is emitted and no code block is generated """ logger.info("Generating code titled '%s' of type '%s'", title, code_type) self.out.write(rst_title(title, 0)) self.out.write() try: self.out.write(f".. _{code_id}:\n") self.out.write(f".. code:: {code_type}") self.out.write() self.out.indent() with open(code_file_path, "r", encoding="utf-8") as code_fd: for code_line in code_fd: self.out.write(code_line.rstrip()) self.out.exdent() self.out.write() except FileNotFoundError: relative_path = RE_TUTORIAL_FILE.match(code_file_path).group(1) self.out.write(f"File not found: {relative_path}") return # ---------------------------------------------------------------------- def pre_interface_generation(self): """Generate the documentation setup, which is just the link to the top of the generated documentation""" self.out.write(f".. 
_{RST_ID_DOC} _ogn{self.node_interface.name}:\n") # ---------------------------------------------------------------------- def generate_node_interface(self): """Generate the documentation for the node""" logger.info("Generating documentation for node %s", self.node_interface.name) node_name = self.node_interface.name self.out.write(rst_title(f"OmniGraph Node {node_name}", 0)) # Gather the node metadata for reporting node_metadata_table = [["Name", "Value"]] node_metadata_table.append(["Version", self.node_interface.version]) node_metadata_table.append(["Extension", self.extension]) if self.node_interface.icon_path is not None: node_metadata_table.append(["Icon", self.node_interface.icon_path]) node_metadata_table.append(["Has State?", self.node_interface.has_state]) node_metadata_table.append(["Implementation Language", self.node_interface.language]) node_metadata_table.append(["Default Memory Type", self.node_interface.memory_type]) excluded = self.node_interface.excluded_generators exclusions = ", ".join(excluded) if excluded else "None" node_metadata_table.append(["Generated Code Exclusions", exclusions]) for key, value in self.node_interface.metadata.items(): # Some metadata appears in other locations already so skip it here if key not in [MetadataKeys.EXTENSION, MetadataKeys.DESCRIPTION]: node_metadata_table.append([key, value]) node_metadata_table.append(["Generated Class Name", f"{self.base_name}Database"]) node_metadata_table.append(["Python Module", f"{self.module}"]) self.out.write(rst_title(f"{node_name} Properties", 1)) self.out.write(rst_table(node_metadata_table)) # The "node_name" here, and in attribute titles, is kind of redundant. It is mainly here to prevent a whole # bunch of duplicate tag errors in the documentation generator, which does not seem to have a way to turn # off automatic link generation for a given heading. self.out.write(rst_title(f"{node_name} Description", 1)) self.out.write(self.node_interface.description) # Files in the tutorials/ directory have more detailed documentation available elsewhere so link to it with suppress(TypeError): if RE_TUTORIAL_FILE.match(self.node_file_path): self.out.write() self.out.write(f"See the accompanying explanation and annotated code at :ref:`ogn{node_name}`") attributes = self.node_interface.all_input_attributes() if attributes: self.out.write(rst_title(f"{node_name} Inputs", 1)) self.generate_attributes_documentation(attributes) attributes = self.node_interface.all_output_attributes() if attributes: self.out.write(rst_title(f"{node_name} Outputs", 1)) self.generate_attributes_documentation(attributes) attributes = self.node_interface.all_state_attributes() if attributes: self.out.write(rst_title(f"{node_name} State", 1)) self.generate_attributes_documentation(attributes) # ====================================================================== def generate_documentation(configuration: GeneratorConfiguration) -> Optional[str]: """Create support files for the documentation of a node Args: configuration: Information defining how and where the documentation will be generated Returns: String containing the generated documentation or None if its generation was not enabled Raises: NodeGenerationError: When there is a failure in the generation of the documentation file """ if not configuration.node_interface.can_generate("docs"): return None logger.info("Generating documentation") generator = NodeDocumentationGenerator(configuration) generator.generate_interface() return str(generator.out)
9,749
Python
45.650717
118
0.630424
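A hedged sketch of consuming the exported patterns to locate a node's name in a generated .rst file. The import path is inferred from the file location, and node_name_from_doc is a hypothetical helper for illustration, not part of the module:

from typing import Optional

# Sketch only: import path mirrors the file location and is an assumption
from omni.graph.tools._impl.node_generator.generate_documentation import RE_OGN_NAME_INFO


def node_name_from_doc(rst_text: str) -> Optional[str]:
    """Return the node name from the title line of a generated document, or None if absent"""
    for line in rst_text.splitlines():
        match = RE_OGN_NAME_INFO.match(line)
        if match:
            return match.group(1)
    return None


# The title line written by generate_node_interface() has the form "OmniGraph Node <name>"
assert node_name_from_doc("OmniGraph Node omni.tutorials.TupleData\n====") == "omni.tutorials.TupleData"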