file_path
stringlengths
32
153
content
stringlengths
0
3.14M
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractManipulatorModel.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel \ "\n" \ "Bridge to data.\n" \ "Operates with double and int arrays.\n" \ "No strings.\n" \ "No tree, it's a flat list of items.\n" \ "Manipulator requires the model has specific items.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_getItem "Returns the items that represents the identifier.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_getAsFloats "Returns the Float values of the item.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_getAsInts "Returns the int values of the item.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_setFloats "Sets the Float values of the item.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_setInts "Sets the int values of the item.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_subscribe \ "Subscribe ManipulatorModelHelper to the changes of the model.\n" \ "We need to use regular pointers because we subscribe in the constructor of the widget and unsubscribe in the destructor. In constructor smart pointers are not available. 
We also don't allow copy and move of the widget.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_unsubscribe \ "Unsubscribe the ItemModelHelper widget from the changes of the model.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_addItemChangedFn \ "Adds the function that will be called every time the value changes.\n" \ "The id of the callback that is used to remove the callback.\n" #define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_removeItemChangedFn \ "Remove the callback by its id.\n" \ "\n" \ "\n" \ "### Arguments:\n" \ "\n" \ " `id :`\n" \ " The id that addValueChangedFn returns.\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindCameraModel.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "DocCameraModel.h" // clang-format off #define OMNIUI_PYBIND_INIT_CameraModel #define OMNIUI_PYBIND_KWARGS_DOC_CameraModel \ "\n `projection : `\n " \ OMNIUI_PYBIND_DOC_CameraModel_getProjection \ "\n `view : `\n " \ OMNIUI_PYBIND_DOC_CameraModel_getView // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/DocWidget.h
// Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_Widget \ "The shape that contains the omni.ui widgets. It automatically creates IAppWindow and transfers its content to the texture of the rectangle. It interacts with the mouse and sends the mouse events to the underlying window, so interacting with the UI on this rectangle is smooth for the user.\n" \ "\n" #define OMNIUI_PYBIND_DOC_Widget_getFrame "Return the main frame of the widget.\n" #define OMNIUI_PYBIND_DOC_Widget_invalidate \ "Rebuild and recapture the widgets at the next frame. If\n" \ "frame\n" \ "build_fn\n" #define OMNIUI_PYBIND_DOC_Widget_fillPolicy \ "Define what happens when the source image has a different size than the item.\n" #define OMNIUI_PYBIND_DOC_Widget_updatePolicy "Define when to redraw the widget.\n" #define OMNIUI_PYBIND_DOC_Widget_resolutionScale "The resolution scale of the widget.\n" #define OMNIUI_PYBIND_DOC_Widget_resolutionWidth "The resolution of the widget framebuffer.\n" #define OMNIUI_PYBIND_DOC_Widget_resolutionHeight "The resolution of the widget framebuffer.\n" #define OMNIUI_PYBIND_DOC_Widget_Widget "Created an empty image.\n"
omniverse-code/kit/include/omni/ui/scene/bind/DocLine.h
// Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_Line \ "\n" \ "\n" #define OMNIUI_PYBIND_DOC_Line_getGesturePayload "Contains all the information about the intersection.\n" #define OMNIUI_PYBIND_DOC_Line_getGesturePayload01 \ "Contains all the information about the intersection at the specific state.\n" #define OMNIUI_PYBIND_DOC_Line_getIntersectionDistance \ "The distance in pixels from mouse pointer to the shape for the intersection.\n" #define OMNIUI_PYBIND_DOC_Line_start "The start point of the line.\n" #define OMNIUI_PYBIND_DOC_Line_end "The end point of the line.\n" #define OMNIUI_PYBIND_DOC_Line_color "The line color.\n" #define OMNIUI_PYBIND_DOC_Line_thickness "The line thickness.\n" #define OMNIUI_PYBIND_DOC_Line_intersectionThickness "The thickness of the line for the intersection.\n" #define OMNIUI_PYBIND_DOC_Line_Line \ "A simple line.\n" \ "\n" \ "\n" \ "### Arguments:\n" \ "\n" \ " `start :`\n" \ " The start point of the line\n" \ "\n" \ " `end :`\n" \ " The end point of the line\n"
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractGesture.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_AbstractGesture \ "The base class for the gestures to provides a way to capture mouse events in 3d scene.\n" \ "\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_setManager "Set the Manager that controld this gesture.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_getManager "The Manager that controld this gesture.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_dispatchInput \ "Called by scene to process the mouse inputs and do intersections with shapes. It can be an entry point to simulate the mouse input.\n" \ "Todo\n" \ "We probably don't need projection-view here. We can get it from manager.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_preProcess \ "Called before the processing to determine the state of the gesture.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_process "Process the gesture and call callbacks if necessary.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_postProcess "Gestures are finished. Clean-up.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_getState "Get the internal state of the gesture.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_setState \ "Set the internal state of the gesture. It's the way to cancel, prevent, or restore the gesture.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_isStateChanged \ "Returns true if the gesture is just changed at the current frame. 
If the state is not changed,\n" \ "process()\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_getSender "Returns the relevant shape driving the gesture.\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_getGesturePayload \ "Shortcut for sender.get_gesturePayload.\n" \ "OMNIUI_SCENE_API const*\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_getGesturePayload01 \ "Shortcut for sender.get_gesturePayload.\n" \ "OMNIUI_SCENE_API const*\n" #define OMNIUI_PYBIND_DOC_AbstractGesture_name "The name of the object. It's used for debugging.\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindLine.h
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindAbstractShape.h" #include "DocLine.h" // clang-format off #define OMNIUI_PYBIND_INIT_Line \ OMNIUI_PYBIND_INIT_AbstractShape \ OMNIUI_PYBIND_INIT_CALL(start, setStart, pythonToVector3) \ OMNIUI_PYBIND_INIT_CALL(end, setEnd, pythonToVector3) \ OMNIUI_PYBIND_INIT_CALL(color, setColor, pythonToColor4) \ OMNIUI_PYBIND_INIT_CAST(thickness, setThickness, float) \ OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) #define OMNIUI_PYBIND_KWARGS_DOC_Line \ "\n `start : `\n " \ OMNIUI_PYBIND_DOC_Line_start \ "\n `end : `\n " \ OMNIUI_PYBIND_DOC_Line_end \ "\n `color : `\n " \ OMNIUI_PYBIND_DOC_Line_color \ "\n `thickness : `\n " \ OMNIUI_PYBIND_DOC_Line_thickness \ "\n `intersection_thickness : `\n " \ OMNIUI_PYBIND_DOC_Line_intersectionThickness \ OMNIUI_PYBIND_KWARGS_DOC_AbstractShape // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/BindScrollGesture.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindShapeGesture.h" #include "DocScrollGesture.h" #include <omni/ui/scene/ScrollGesture.h> OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::ScrollGesture, ScrollGesture); // clang-format off #define OMNIUI_PYBIND_INIT_PyScrollGesture \ OMNIUI_PYBIND_INIT_CAST(mouse_button, setMouseButton, uint32_t) \ OMNIUI_PYBIND_INIT_CAST(modifiers, setModifiers, uint32_t) \ OMNIUI_PYBIND_INIT_CALLBACK(on_ended_fn, setOnEndedFn, void(AbstractShape const*)) \ OMNIUI_PYBIND_INIT_ShapeGesture #define OMNIUI_PYBIND_KWARGS_DOC_ScrollGesture \ "\n `mouse_button : `\n " \ OMNIUI_PYBIND_DOC_ScrollGesture_mouseButton \ "\n `modifiers : `\n " \ OMNIUI_PYBIND_DOC_ScrollGesture_modifiers \ "\n `on_ended_fn : `\n " \ OMNIUI_PYBIND_DOC_ScrollGesture_OnEnded \ OMNIUI_PYBIND_KWARGS_DOC_ShapeGesture // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/BindRectangle.h
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindAbstractShape.h" #include "DocRectangle.h" // clang-format off #define OMNIUI_PYBIND_INIT_Rectangle \ OMNIUI_PYBIND_INIT_AbstractShape \ OMNIUI_PYBIND_INIT_CAST(width, setWidth, Float) \ OMNIUI_PYBIND_INIT_CAST(height, setHeight, Float) \ OMNIUI_PYBIND_INIT_CAST(thickness, setThickness, float) \ OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \ OMNIUI_PYBIND_INIT_CALL(color, setColor, pythonToColor4) \ OMNIUI_PYBIND_INIT_CAST(axis, setAxis, uint8_t) \ OMNIUI_PYBIND_INIT_CAST(wireframe, setWireframe, bool) #define OMNIUI_PYBIND_KWARGS_DOC_Rectangle \ "\n `width : `\n " \ OMNIUI_PYBIND_DOC_Rectangle_width \ "\n `height : `\n " \ OMNIUI_PYBIND_DOC_Rectangle_height \ "\n `thickness : `\n " \ OMNIUI_PYBIND_DOC_Rectangle_thickness \ "\n `intersection_thickness : `\n " \ OMNIUI_PYBIND_DOC_Rectangle_intersectionThickness \ "\n `color : `\n " \ OMNIUI_PYBIND_DOC_Rectangle_color \ "\n `axis : `\n " \ OMNIUI_PYBIND_DOC_Rectangle_axis \ "\n `wireframe : `\n " \ OMNIUI_PYBIND_DOC_Rectangle_wireframe \ OMNIUI_PYBIND_KWARGS_DOC_AbstractShape // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/DocDrawList.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_DrawList \ "The InvisibleButton widget provides a transparent command button.\n" \ "\n"
omniverse-code/kit/include/omni/ui/scene/bind/DocHoverGesture.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_HoverGesture \ "The gesture that provides a way to capture event when mouse enters/leaves the item.\n" \ "\n" #define OMNIUI_PYBIND_DOC_HoverGesture_preProcess "Called before processing to determine the state of the gesture.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_process "Process the gesture and call callbacks if necessary.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_onBegan "Called if the callback is not set and the mouse enters the item.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_onChanged \ "Called if the callback is not set and the mouse is hovering the item.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_onEnded "Called if the callback is not set and the mouse leaves the item.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_mouseButton "The mouse button this gesture is watching.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_modifiers "The modifier that should be pressed to trigger this gesture.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_triggerOnViewHover "Determines whether the gesture is triggered only when the SceneView is being hovered by the mouse.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_OnBegan "Called when the mouse enters the item.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_OnChanged "Called when the mouse is hovering the item.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_OnEnded "Called when the mouse leaves the item.\n" #define OMNIUI_PYBIND_DOC_HoverGesture_HoverGesture \ "Constructs an gesture to track when the user clicked the mouse.\n" \ "\n" \ "\n" \ "### Arguments:\n" \ "\n" \ " `onEnded :`\n" \ " 
Function that is called when the user clicked the mouse button.\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindAbstractItem.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "DocAbstractItem.h" // clang-format off #define OMNIUI_PYBIND_INIT_AbstractItem OMNIUI_PYBIND_INIT_CAST(visible, setVisible, bool) #define OMNIUI_PYBIND_KWARGS_DOC_AbstractItem \ "\n `visible : `\n " \ OMNIUI_PYBIND_DOC_AbstractItem_visible // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/BindPolygonMesh.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindAbstractShape.h" #include "DocPolygonMesh.h" // clang-format off #define OMNIUI_PYBIND_INIT_PolygonMesh \ OMNIUI_PYBIND_INIT_AbstractShape \ OMNIUI_PYBIND_INIT_CALL(positions, setPositions, pythonListToVector3) \ OMNIUI_PYBIND_INIT_CALL(colors, setColors, pythonListToVector4) \ OMNIUI_PYBIND_INIT_CAST(vertex_counts, setVertexCounts, std::vector<uint32_t>) \ OMNIUI_PYBIND_INIT_CAST(vertex_indices, setVertexIndices, std::vector<uint32_t>) \ OMNIUI_PYBIND_INIT_CAST(thicknesses, setThicknesses, std::vector<float>) \ OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \ OMNIUI_PYBIND_INIT_CAST(wireframe, setWireframe, bool) #define OMNIUI_PYBIND_KWARGS_DOC_PolygonMesh \ "\n `positions : `\n " \ OMNIUI_PYBIND_DOC_PolygonMesh_positions \ "\n `colors : `\n " \ OMNIUI_PYBIND_DOC_PolygonMesh_colors \ "\n `vertex_counts : `\n " \ OMNIUI_PYBIND_DOC_PolygonMesh_vertexCounts \ "\n `vertex_indices : `\n " \ OMNIUI_PYBIND_DOC_PolygonMesh_vertexIndices \ "\n `thicknesses : `\n " \ OMNIUI_PYBIND_DOC_PolygonMesh_thicknesses \ "\n `intersection_thickness : `\n " \ OMNIUI_PYBIND_DOC_PolygonMesh_intersectionThickness \ "\n `wireframe: `\n " \ OMNIUI_PYBIND_DOC_PolygonMesh_wireframe \ OMNIUI_PYBIND_KWARGS_DOC_AbstractShape // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/BindCurve.h
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindAbstractShape.h" #include "DocCurve.h" // clang-format off #define OMNIUI_PYBIND_INIT_Curve \ OMNIUI_PYBIND_INIT_AbstractShape \ OMNIUI_PYBIND_INIT_CALL(positions, setPositions, pythonListToVector3) \ OMNIUI_PYBIND_INIT_CALL(colors, setColors, pythonListToVector4) \ OMNIUI_PYBIND_INIT_CAST(thicknesses, setThicknesses, std::vector<float>) \ OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \ OMNIUI_PYBIND_INIT_CAST(curve_type, setCurveType, Curve::CurveType) \ OMNIUI_PYBIND_INIT_CAST(tessellation, setTessellation, uint16_t) #define OMNIUI_PYBIND_KWARGS_DOC_Curve \ "\n `positions : `\n " \ OMNIUI_PYBIND_DOC_Curve_positions \ "\n `colors : `\n " \ OMNIUI_PYBIND_DOC_Curve_colors \ "\n `thicknesses : `\n " \ OMNIUI_PYBIND_DOC_Curve_thicknesses \ "\n `intersection_thickness : `\n " \ OMNIUI_PYBIND_DOC_Curve_intersectionThickness \ "\n `curve_type : `\n " \ OMNIUI_PYBIND_DOC_Curve_curveType \ "\n `tessellation : `\n " \ OMNIUI_PYBIND_DOC_Curve_tessellation \ OMNIUI_PYBIND_KWARGS_DOC_AbstractShape // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/DocArc.h
// Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_Arc \ "\n" \ "\n" #define OMNIUI_PYBIND_DOC_Arc_getGesturePayload "Contains all the information about the intersection.\n" #define OMNIUI_PYBIND_DOC_Arc_getGesturePayload01 \ "Contains all the information about the intersection at the specific state.\n" #define OMNIUI_PYBIND_DOC_Arc_getIntersectionDistance \ "The distance in pixels from mouse pointer to the shape for the intersection.\n" #define OMNIUI_PYBIND_DOC_Arc_radius "The radius of the circle.\n" #define OMNIUI_PYBIND_DOC_Arc_begin \ "The start angle of the arc. " \ "Angle placement and directions are (0 to 90): Y to Z, Z to X, X to Y\n" #define OMNIUI_PYBIND_DOC_Arc_end \ "The end angle of the arc. " \ "Angle placement and directions are (0 to 90): Y to Z, Z to X, X to Y\n" #define OMNIUI_PYBIND_DOC_Arc_thickness "The thickness of the line.\n" #define OMNIUI_PYBIND_DOC_Arc_intersectionThickness "The thickness of the line for the intersection.\n" #define OMNIUI_PYBIND_DOC_Arc_color "The color of the line.\n" #define OMNIUI_PYBIND_DOC_Arc_tesselation "Number of points on the curve.\n" #define OMNIUI_PYBIND_DOC_Arc_wireframe "When true, it's a line. When false it's a mesh.\n" #define OMNIUI_PYBIND_DOC_Arc_sector "Draw two radii of the circle.\n" #define OMNIUI_PYBIND_DOC_Arc_axis "The axis the circle plane is perpendicular to.\n" #define OMNIUI_PYBIND_DOC_Arc_culling "Draw two radii of the circle.\n" #define OMNIUI_PYBIND_DOC_Arc_Arc "Constructs Arc.\n"
omniverse-code/kit/include/omni/ui/scene/bind/DocManipulatorModelHelper.h
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_ManipulatorModelHelper \ "The ManipulatorModelHelper class provides the basic model functionality.\n" #define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_onModelUpdated \ "Called by the model when the model value is changed. The class should react to the changes.\n" \ "\n" \ "\n" \ "### Arguments:\n" \ "\n" \ " `item :`\n" \ " The item in the model that is changed. If it's NULL, the root is changed.\n" #define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_getRayFromNdc "Convert NDC 2D [-1..1] coordinates to 3D ray.\n" #define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_setModel "Set the current model.\n" #define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_getModel "Returns the current model.\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindWidget.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindRectangle.h" #include "DocWidget.h" // clang-format off #define OMNIUI_PYBIND_INIT_Widget \ OMNIUI_PYBIND_INIT_Rectangle \ OMNIUI_PYBIND_INIT_CAST(fill_policy, setFillPolicy, Widget::FillPolicy) \ OMNIUI_PYBIND_INIT_CAST(update_policy, setUpdatePolicy, Widget::UpdatePolicy) \ OMNIUI_PYBIND_INIT_CAST(resolution_scale, setResolutionScale, float) \ OMNIUI_PYBIND_INIT_CAST(resolution_width, setResolutionWidth, uint32_t) \ OMNIUI_PYBIND_INIT_CAST(resolution_height, setResolutionHeight, uint32_t) #define OMNIUI_PYBIND_KWARGS_DOC_Widget \ "\n `fill_policy : `\n " \ OMNIUI_PYBIND_DOC_Widget_fillPolicy \ "\n `update_policy : `\n " \ OMNIUI_PYBIND_DOC_Widget_updatePolicy \ "\n `resolution_scale : `\n " \ OMNIUI_PYBIND_DOC_Widget_resolutionScale \ "\n `resolution_width : `\n " \ OMNIUI_PYBIND_DOC_Widget_resolutionWidth \ "\n `resolution_height : `\n " \ OMNIUI_PYBIND_DOC_Widget_resolutionHeight \ OMNIUI_PYBIND_KWARGS_DOC_Rectangle // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/BindHoverGesture.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindShapeGesture.h" #include "DocHoverGesture.h" #include <omni/ui/scene/HoverGesture.h> OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::HoverGesture, HoverGesture); // clang-format off #define OMNIUI_PYBIND_INIT_PyHoverGesture \ OMNIUI_PYBIND_INIT_CAST(mouse_button, setMouseButton, uint32_t) \ OMNIUI_PYBIND_INIT_CAST(modifiers, setModifiers, uint32_t) \ OMNIUI_PYBIND_INIT_CAST(trigger_on_view_hover, setTriggerOnViewHover, bool) \ OMNIUI_PYBIND_INIT_CALLBACK(on_began_fn, setOnBeganFn, void(AbstractShape const*)) \ OMNIUI_PYBIND_INIT_CALLBACK(on_changed_fn, setOnChangedFn, void(AbstractShape const*)) \ OMNIUI_PYBIND_INIT_CALLBACK(on_ended_fn, setOnEndedFn, void(AbstractShape const*)) \ OMNIUI_PYBIND_INIT_ShapeGesture #define OMNIUI_PYBIND_KWARGS_DOC_HoverGesture \ "\n `mouse_button : `\n " \ OMNIUI_PYBIND_DOC_HoverGesture_mouseButton \ "\n `modifiers : `\n " \ OMNIUI_PYBIND_DOC_HoverGesture_modifiers \ "\n `trigger_on_view_hover : `\n " \ OMNIUI_PYBIND_DOC_HoverGesture_triggerOnViewHover \ "\n `on_began_fn : `\n " \ OMNIUI_PYBIND_DOC_HoverGesture_onBegan \ "\n `on_changed_fn : `\n " \ OMNIUI_PYBIND_DOC_HoverGesture_onChanged \ "\n `on_ended_fn : `\n " \ OMNIUI_PYBIND_DOC_HoverGesture_onEnded \ OMNIUI_PYBIND_KWARGS_DOC_ShapeGesture // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/DocScene.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_Scene "Top level module string\n" \ "Represents the root of the scene and holds the shapes, gestures and managers.\n" \ "\n" #define OMNIUI_PYBIND_DOC_Scene_Scene "Constructor" #define OMNIUI_PYBIND_DOC_Scene_getDrawListBufferCount "Return the number of buffers used. Using for unit testing."
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractShape.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_AbstractShape \ "Base class for all the items that can be drawn and intersected with mouse pointer.\n" \ "\n" #define OMNIUI_PYBIND_DOC_AbstractShape_getGestures "All the gestures assigned to this shape.\n" #define OMNIUI_PYBIND_DOC_AbstractShape_setGestures "Replace the gestures of the shape.\n" #define OMNIUI_PYBIND_DOC_AbstractShape_addGesture "Add a single gesture to the shape.\n" #define OMNIUI_PYBIND_DOC_AbstractShape_getGesturePayload "Contains all the information about the intersection.\n" #define OMNIUI_PYBIND_DOC_AbstractShape_getGesturePayload01 \ "Contains all the information about the intersection at the specific state.\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindTexturedMesh.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindPolygonMesh.h" #include "DocTexturedMesh.h" #include "DocImageHelper.h" // clang-format off #define OMNIUI_PYBIND_INIT_TexturedMesh \ OMNIUI_PYBIND_INIT_PolygonMesh \ OMNIUI_PYBIND_INIT_CALL(uvs, setUvs, pythonListToVector2) \ OMNIUI_PYBIND_INIT_CAST(source_url, setSourceUrl, std::string) \ OMNIUI_PYBIND_INIT_CAST(image_provider, setImageProvider, std::shared_ptr<ImageProvider>) \ OMNIUI_PYBIND_INIT_CAST(image_width, setImageWidth, uint32_t) \ OMNIUI_PYBIND_INIT_CAST(image_height, setImageHeight, uint32_t) #define OMNIUI_PYBIND_KWARGS_DOC_TexturedMesh \ "\n `uvs : `\n " \ OMNIUI_PYBIND_DOC_TexturedMesh_uvs \ "\n `source_url : `\n " \ OMNIUI_PYBIND_DOC_TexturedMesh_sourceUrl \ "\n `image_provider : `\n " \ OMNIUI_PYBIND_DOC_TexturedMesh_imageProvider \ "\n `image_width : `\n " \ OMNIUI_PYBIND_DOC_ImageHelper_imageWidth \ "\n `image_height : `\n " \ OMNIUI_PYBIND_DOC_ImageHelper_imageHeight \ OMNIUI_PYBIND_KWARGS_DOC_PolygonMesh // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/BindArc.h
// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindAbstractShape.h" #include "DocArc.h" // clang-format off #define OMNIUI_PYBIND_INIT_Arc \ OMNIUI_PYBIND_INIT_AbstractShape \ OMNIUI_PYBIND_INIT_CAST(begin, setBegin, Float) \ OMNIUI_PYBIND_INIT_CAST(end, setEnd, Float) \ OMNIUI_PYBIND_INIT_CAST(thickness, setThickness, float) \ OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \ OMNIUI_PYBIND_INIT_CALL(color, setColor, pythonToColor4) \ OMNIUI_PYBIND_INIT_CAST(tesselation, setTesselation, uint16_t) \ OMNIUI_PYBIND_INIT_CAST(axis, setAxis, uint8_t) \ OMNIUI_PYBIND_INIT_CAST(sector, setSector, bool) \ OMNIUI_PYBIND_INIT_CAST(culling, setCulling, Culling) \ OMNIUI_PYBIND_INIT_CAST(wireframe, setWireframe, bool) #define OMNIUI_PYBIND_KWARGS_DOC_Arc \ "\n `begin : `\n " \ OMNIUI_PYBIND_DOC_Arc_begin \ "\n `end : `\n " \ OMNIUI_PYBIND_DOC_Arc_end \ "\n `thickness : `\n " \ OMNIUI_PYBIND_DOC_Arc_thickness \ "\n `intersection_thickness : `\n " \ OMNIUI_PYBIND_DOC_Arc_intersectionThickness \ "\n `color : `\n " \ OMNIUI_PYBIND_DOC_Arc_color \ "\n `tesselation : `\n " \ OMNIUI_PYBIND_DOC_Arc_tesselation \ "\n `axis : `\n " \ OMNIUI_PYBIND_DOC_Arc_axis \ "\n `sector : `\n " \ OMNIUI_PYBIND_DOC_Arc_sector \ "\n `culling : `\n " \ OMNIUI_PYBIND_DOC_Arc_culling \ "\n `wireframe : `\n " \ OMNIUI_PYBIND_DOC_Arc_wireframe \ OMNIUI_PYBIND_KWARGS_DOC_AbstractShape // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/DocGestureManager.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_GestureManager \ "The object that controls batch processing and preventing of gestures. Typically each scene has a default manager and if the user wants to have own prevention logic, he can reimplement it.\n" \ "\n" #define OMNIUI_PYBIND_DOC_GestureManager_GestureManager "Constructor.\n" #define OMNIUI_PYBIND_DOC_GestureManager_setView \ "Set the camera.\n" \ "Todo\n" \ "resolution\n" #define OMNIUI_PYBIND_DOC_GestureManager_preProcess "Process mouse inputs and do all the intersections.\n" #define OMNIUI_PYBIND_DOC_GestureManager_prevent "Process all the prevention logic and reduce the number of gestures.\n" #define OMNIUI_PYBIND_DOC_GestureManager_process "Process the gestures.\n" #define OMNIUI_PYBIND_DOC_GestureManager_postProcess "Clean-up caches, save states.\n" #define OMNIUI_PYBIND_DOC_GestureManager_canBePrevented \ "Called per gesture. Determines if the gesture can be prevented.\n" #define OMNIUI_PYBIND_DOC_GestureManager_shouldPrevent \ "Called per gesture. Determines if the gesture should be prevented with another gesture. Useful to resolve intersections.\n" #define OMNIUI_PYBIND_DOC_GestureManager_amendInput \ "Called once a frame. Should be overriden to inject own input to the gestures.\n"
omniverse-code/kit/include/omni/ui/scene/bind/DocPolygonMesh.h
// Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// Docstrings for the PolygonMesh pybind11 bindings. Each macro expands to a
// plain string literal attached to the class, property, or method by the
// binding code.

#define OMNIUI_PYBIND_DOC_PolygonMesh \
    "Encodes a mesh.\n" \
    "\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_getGesturePayload "Contains all the information about the intersection.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_getGesturePayload01 \
    "Contains all the information about the intersection at the specific state.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_getIntersectionDistance \
    "The distance in pixels from mouse pointer to the shape for the intersection.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_positions "The primary geometry attribute, describes points in local space.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_colors "Describes colors per vertex.\n"

// FIX: this string literal was previously split by a raw (unescaped) newline,
// which is ill-formed C++; the sentence is rejoined into a single literal.
#define OMNIUI_PYBIND_DOC_PolygonMesh_vertexCounts \
    "Provides the number of vertices in each face of the mesh, which is also the number of consecutive indices in vertex_indices that define the face. The length of this attribute is the number of faces in the mesh.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_vertexIndices \
    "Flat list of the index (into the points attribute) of each vertex of each face in the mesh.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_thicknesses "When wireframe is true, it defines the thicknesses of lines.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_intersectionThickness "The thickness of the line for the intersection.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_wireframe "When true, the mesh is drawn as lines.\n"

#define OMNIUI_PYBIND_DOC_PolygonMesh_PolygonMesh \
    "Construct a mesh with predefined properties.\n" \
    "\n" \
    "\n" \
    "### Arguments:\n" \
    "\n" \
    "    `positions :`\n" \
    "        Describes points in local space.\n" \
    "\n" \
    "    `colors :`\n" \
    "        Describes colors per vertex.\n" \
    "\n" \
    "    `vertexCounts :`\n" \
    "        The number of vertices in each face.\n" \
    "\n" \
    "    `vertexIndices :`\n" \
    "        The list of the index of each vertex of each face in the mesh.\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindGestureManager.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "DocGestureManager.h" #include <omni/ui/bind/BindUtils.h> OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::GestureManager, GestureManager); #define OMNIUI_PYBIND_INIT_PyGestureManager #define OMNIUI_PYBIND_KWARGS_DOC_GestureManager
omniverse-code/kit/include/omni/ui/scene/bind/DocRectangle.h
// Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_Rectangle \ "\n" \ "\n" #define OMNIUI_PYBIND_DOC_Rectangle_getGesturePayload "Contains all the information about the intersection.\n" #define OMNIUI_PYBIND_DOC_Rectangle_getGesturePayload01 \ "Contains all the information about the intersection at the specific state.\n" #define OMNIUI_PYBIND_DOC_Rectangle_getIntersectionDistance \ "The distance in pixels from mouse pointer to the shape for the intersection.\n" #define OMNIUI_PYBIND_DOC_Rectangle_width "The size of the rectangle.\n" #define OMNIUI_PYBIND_DOC_Rectangle_height "The size of the rectangle.\n" #define OMNIUI_PYBIND_DOC_Rectangle_thickness "The thickness of the line.\n" #define OMNIUI_PYBIND_DOC_Rectangle_intersectionThickness "The thickness of the line for the intersection.\n" #define OMNIUI_PYBIND_DOC_Rectangle_color "The color of the line.\n" #define OMNIUI_PYBIND_DOC_Rectangle_wireframe "When true, it's a line. When false it's a mesh.\n" #define OMNIUI_PYBIND_DOC_Rectangle_axis "The axis the rectangle is perpendicular to.\n" #define OMNIUI_PYBIND_DOC_Rectangle_Rectangle \ "Construct a rectangle with predefined size.\n" \ "\n" \ "\n" \ "### Arguments:\n" \ "\n" \ " `width :`\n" \ " The size of the rectangle\n" \ "\n" \ " `height :`\n" \ " The size of the rectangle\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindClickGesture.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BindShapeGesture.h" #include "DocClickGesture.h" #include <omni/ui/scene/ClickGesture.h> OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::ClickGesture, ClickGesture); // clang-format off #define OMNIUI_PYBIND_INIT_PyClickGesture \ OMNIUI_PYBIND_INIT_CAST(mouse_button, setMouseButton, uint32_t) \ OMNIUI_PYBIND_INIT_CAST(modifiers, setModifiers, uint32_t) \ OMNIUI_PYBIND_INIT_CALLBACK(on_ended_fn, setOnEndedFn, void(AbstractShape const*)) \ OMNIUI_PYBIND_INIT_ShapeGesture #define OMNIUI_PYBIND_KWARGS_DOC_ClickGesture \ "\n `mouse_button : `\n " \ OMNIUI_PYBIND_DOC_ClickGesture_mouseButton \ "\n `modifiers : `\n " \ OMNIUI_PYBIND_DOC_ClickGesture_modifiers \ "\n `on_ended_fn : `\n " \ OMNIUI_PYBIND_DOC_ClickGesture_OnEnded \ OMNIUI_PYBIND_KWARGS_DOC_ShapeGesture // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/BindMath.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "DocMatrix44.h" #include <omni/ui/scene/Math.h> #include <omni/ui/bind/Pybind.h> OMNIUI_SCENE_NAMESPACE_OPEN_SCOPE pybind11::object matrix4ToPython(const Matrix44& matrix); Matrix44 pythonToMatrix4(const pybind11::handle& obj); pybind11::object vector2ToPython(const Vector2& vec); Vector2 pythonToVector2(const pybind11::handle& obj); pybind11::object vector3ToPython(const Vector3& vec); Vector3 pythonToVector3(const pybind11::handle& obj); pybind11::object vector4ToPython(const Vector4& vec); Vector4 pythonToVector4(const pybind11::handle& obj); Color4 pythonToColor4(const pybind11::handle& obj); std::vector<Vector4> pythonListToVector4(const pybind11::handle& obj); std::vector<Vector3> pythonListToVector3(const pybind11::handle& obj); std::vector<Vector2> pythonListToVector2(const pybind11::handle& obj); pybind11::object vector4ToPythonList(const std::vector<Vector4>& vec); pybind11::object vector3ToPythonList(const std::vector<Vector3>& vec); pybind11::object vector2ToPythonList(const std::vector<Vector2>& vec); OMNIUI_SCENE_NAMESPACE_CLOSE_SCOPE
omniverse-code/kit/include/omni/ui/scene/bind/DocCallbackBase.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_CallbackBase \ "Base object for callback containers.\n" \ "\n"
omniverse-code/kit/include/omni/ui/scene/bind/BindSceneView.h
// Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "DocManipulatorModelHelper.h" #include "DocSceneView.h" #include <omni/ui/bind/BindWidget.h> // clang-format off #define OMNIUI_PYBIND_INIT_SceneView \ OMNIUI_PYBIND_INIT_CAST(aspect_ratio_policy, setAspectRatioPolicy, SceneView::AspectRatioPolicy) \ OMNIUI_PYBIND_INIT_CAST(screen_aspect_ratio, setScreenAspectRatio, float) \ OMNIUI_PYBIND_INIT_CAST(child_windows_input, setChildWindowsInput, bool) \ OMNIUI_PYBIND_INIT_CAST(scene, setScene, std::shared_ptr<Scene>) \ OMNIUI_PYBIND_INIT_CAST(model, setModel, std::shared_ptr<AbstractManipulatorModel>) \ OMNIUI_PYBIND_INIT_Widget #define OMNIUI_PYBIND_KWARGS_DOC_SceneView \ "\n `aspect_ratio_policy : `\n " \ OMNIUI_PYBIND_DOC_SceneView_aspectRatioPolicy \ "\n `model : `\n " \ OMNIUI_PYBIND_DOC_SceneView_getView \ "\n `screen_aspect_ratio : `\n " \ OMNIUI_PYBIND_DOC_SceneView_screenAspectRatio \ "\n `child_windows_input : `\n " \ OMNIUI_PYBIND_DOC_SceneView_childWindowsInput \ OMNIUI_PYBIND_KWARGS_DOC_Widget // clang-format on
omniverse-code/kit/include/omni/ui/scene/bind/DocCallbackHelper.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #define OMNIUI_PYBIND_DOC_CallbackHelper \ "Base class for the objects that should automatically clean up the callbacks. It collects all the callbacks created with OMNIUI_CALLBACK and is able to clean all of them. We use it to destroy the callbacks if the parent object is destroyed, and it helps to break circular references created by pybind11 because pybind11 can't use weak pointers.\n" \ "\n"
omniverse-code/kit/include/omni/ui/scene/bind/DocCurve.h
// Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// Docstrings for the Curve pybind11 bindings. Each macro expands to a plain
// string literal attached to the class, property, or method by the binding
// code.

#define OMNIUI_PYBIND_DOC_Curve \
    "Represents the curve.\n" \
    "\n"

#define OMNIUI_PYBIND_DOC_Curve_getGesturePayload "Contains all the information about the intersection.\n"

#define OMNIUI_PYBIND_DOC_Curve_getGesturePayload01 \
    "Contains all the information about the intersection at the specific state.\n"

#define OMNIUI_PYBIND_DOC_Curve_getIntersectionDistance \
    "The distance in pixels from mouse pointer to the shape for the intersection.\n"

// FIX: the sentence previously ended mid-phrase ("The curve has
// len(positions)-1"); completed with the implied noun "segments".
#define OMNIUI_PYBIND_DOC_Curve_positions \
    "The list of positions which defines the curve. It has at least two positions. The curve has len(positions)-1 segments.\n"

#define OMNIUI_PYBIND_DOC_Curve_colors \
    "The list of colors which defines color per vertex. It has the same length as positions.\n"

#define OMNIUI_PYBIND_DOC_Curve_thicknesses \
    "The list of thicknesses which defines thickness per vertex. It has the same length as positions.\n"

#define OMNIUI_PYBIND_DOC_Curve_intersectionThickness "The thickness of the line for the intersection.\n"

#define OMNIUI_PYBIND_DOC_Curve_curveType "The curve interpolation type.\n"

#define OMNIUI_PYBIND_DOC_Curve_tessellation "The number of points per curve segment. It can't be less than 2.\n"

#define OMNIUI_PYBIND_DOC_Curve_Curve \
    "Constructs Curve.\n" \
    "\n" \
    "\n" \
    "### Arguments:\n" \
    "\n" \
    "    `positions :`\n" \
    "        List of positions\n"
omniverse-code/kit/include/omni/experimental/job/IJob.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/OmniAttr.h>
#include <omni/core/Interface.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <utility>
#include <type_traits>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

/**
 * Interface for providing a CPU affinity mask to the plugin. Instances of this interface can be thought of as an array
 * of \c MaskType values, which allows for setting affinities on machines with more than 64 processors. Each affinity
 * mask this object contains is a bitmask that represents the associated CPUs.
 *
 * On Linux, this object is treated as one large bitset analogous to cpu_set_t. So \c get_affinity_mask(0) represents
 * CPUs 0-63, \c get_affinity_mask(1) represents CPUs 64-127, etc.
 *
 * On Windows, each affinity mask in this object applies to its own Processor Group, so \c get_affinity_mask(0) is for
 * Processor Group 0, \c get_affinity_mask(1) for Processor Group 1, etc.
 */
template <>
class omni::core::Generated<omni::experimental::job::IAffinityMask_abi>
    : public omni::experimental::job::IAffinityMask_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::experimental::job::IAffinityMask")

    /**
     * Gets the affinity mask at \c index.
     *
     * @note \c index must be less than \ref get_mask_count_abi()
     *
     * @param index Index to get affinity mask for.
     *
     * @return The affinity mask at the provided index.
     */
    omni::experimental::job::MaskType get_affinity_mask(size_t index) noexcept;

    /**
     * Sets the affinity \c mask at \c index.
     *
     * @note \c index must be less than \ref get_mask_count_abi()
     *
     * @param index Index to set affinity mask for.
     * @param mask Mask to set.
     */
    void set_affinity_mask(size_t index, omni::experimental::job::MaskType mask) noexcept;

    /**
     * Gets the current number of affinity masks stored by this object.
     *
     * @return The current number of affinity masks stored by this object.
     */
    size_t get_mask_count() noexcept;

    /**
     * Gets the default number of affinity masks stored by this object.
     *
     * @return The default number of affinity masks stored by this object.
     */
    size_t get_default_mask_count() noexcept;

    /**
     * Sets the number of affinity masks stored by this object to \c count.
     *
     * If \c count is greater than the current size, the appended affinity masks will be set to \c 0. If \c count
     * is less than the current size, then this object will only contain the first \c count elements after this call.
     *
     * @param count Number of affinity masks to set the size to.
     */
    void set_mask_count(size_t count) noexcept;
};

/**
 * Basic interface for launching jobs on a foreign job system.
 */
template <>
class omni::core::Generated<omni::experimental::job::IJob_abi> : public omni::experimental::job::IJob_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::experimental::job::IJob")

    /**
     * Adds a new job to be executed.
     *
     * @param job_fn User provided function to be executed by a worker.
     * @param job_data User provided data for the job, the memory must not be released until it is no longer needed by
     * the task.
     */
    void enqueue_job(omni::experimental::job::JobFunction job_fn, void* job_data) noexcept;
};

/**
 * Interface for managing the number of workers in the job system.
 */
template <>
class omni::core::Generated<omni::experimental::job::IJobWorker_abi> : public omni::experimental::job::IJobWorker_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::experimental::job::IJobWorker")

    /**
     * Returns default number of workers used for creation of a new Job system.
     *
     * @return The default number of workers.
     */
    size_t get_default_worker_count() noexcept;

    /**
     * Returns the number of worker threads in the job system.
     *
     * @returns The number of worker threads.
     */
    size_t get_worker_count() noexcept;

    /**
     * Sets the number of workers in the job system.
     *
     * This function may stop all current threads and reset any previously set thread affinity.
     *
     * @param count The new number of workers to set in the system. A value of 0 means to use the default value returned
     * by getDefaultWorkerCount()
     */
    void set_worker_count(size_t count) noexcept;
};

/**
 * Interface for setting CPU affinity for the job system.
 */
template <>
class omni::core::Generated<omni::experimental::job::IJobAffinity_abi> : public omni::experimental::job::IJobAffinity_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::experimental::job::IJobAffinity")

    /**
     * Gets the current affinity of a worker.
     *
     * @param worker_id The worker to set the affinity of. If this id is larger than the current number of workers,
     * a \c nullptr will be returned.
     *
     * @return The current affinity being used by the worker. The returned value may be \c nullptr if the worker's
     * affinity could not be determined.
     */
    omni::core::ObjectPtr<omni::experimental::job::IAffinityMask> get_affinity(size_t worker_id) noexcept;

    /**
     * Attempts to set the affinity for the specified worker.
     *
     * @note On Windows each thread can only belong to a single Processor Group, so the CPU Affinity will only be set
     * to the first non-zero entry. That is to say, if both \c mask->get_affinity_mask(0) and
     * \c mask->get_affinity_mask(1) both have bits sets, only the CPUs in \c mask->get_affinity_mask(0) will be set for
     * the affinity.
     *
     * @param worker_id The worker to set the affinity of. If this id is larger than the current number of workers,
     * false will be returned.
     * @param mask The affinity values to set.
     *
     * @return true if the affinity was successfully set, false otherwise.
     */
    bool set_affinity(size_t worker_id, omni::core::ObjectParam<omni::experimental::job::IAffinityMask> mask) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline forwarders: each public method simply delegates to the matching
// protected *_abi virtual on the interface.

inline omni::experimental::job::MaskType omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::get_affinity_mask(
    size_t index) noexcept
{
    return get_affinity_mask_abi(index);
}

inline void omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::set_affinity_mask(
    size_t index, omni::experimental::job::MaskType mask) noexcept
{
    set_affinity_mask_abi(index, mask);
}

inline size_t omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::get_mask_count() noexcept
{
    return get_mask_count_abi();
}

inline size_t omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::get_default_mask_count() noexcept
{
    return get_default_mask_count_abi();
}

inline void omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::set_mask_count(size_t count) noexcept
{
    set_mask_count_abi(count);
}

inline void omni::core::Generated<omni::experimental::job::IJob_abi>::enqueue_job(
    omni::experimental::job::JobFunction job_fn, void* job_data) noexcept
{
    enqueue_job_abi(job_fn, job_data);
}

inline size_t omni::core::Generated<omni::experimental::job::IJobWorker_abi>::get_default_worker_count() noexcept
{
    return get_default_worker_count_abi();
}

inline size_t omni::core::Generated<omni::experimental::job::IJobWorker_abi>::get_worker_count() noexcept
{
    return get_worker_count_abi();
}

inline void omni::core::Generated<omni::experimental::job::IJobWorker_abi>::set_worker_count(size_t count) noexcept
{
    set_worker_count_abi(count);
}

inline omni::core::ObjectPtr<omni::experimental::job::IAffinityMask> omni::core::Generated<
    omni::experimental::job::IJobAffinity_abi>::get_affinity(size_t worker_id) noexcept
{
    // steal() adopts the reference returned by the ABI call without adding one.
    return omni::core::steal(get_affinity_abi(worker_id));
}

inline bool omni::core::Generated<omni::experimental::job::IJobAffinity_abi>::set_affinity(
    size_t worker_id, omni::core::ObjectParam<omni::experimental::job::IAffinityMask> mask) noexcept
{
    return set_affinity_abi(worker_id, mask.get());
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/experimental/job/IJob.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file
//!
//! @brief IJob definition file.
#pragma once

#include "../../../carb/Interface.h"
#include "../../core/IObject.h"
#include "../../../carb/IObject.h"

namespace omni
{
/** Namespace for experimental Interfaces and functionality. */
namespace experimental
{
namespace job
{
/**
 * Defines the function for performing a user-provided job.
 *
 * @param job_data User provided data for the job, the memory must not be released until it is no longer needed by the
 * task.
 */
using JobFunction = void (*)(void* job_data);

/** Forward declaration of the IAffinityMask interface. */
OMNI_DECLARE_INTERFACE(IAffinityMask);

/**
 * Alias for an affinity mask.
 */
using MaskType = uint64_t;

/**
 * Interface for providing a CPU affinity mask to the plugin. Instances of this interface can be thought of as an array
 * of \c MaskType values, which allows for setting affinities on machines with more than 64 processors. Each affinity
 * mask this object contains is a bitmask that represents the associated CPUs.
 *
 * On Linux, this object is treated as one large bitset analogous to cpu_set_t. So \c get_affinity_mask(0) represents
 * CPUs 0-63, \c get_affinity_mask(1) represents CPUs 64-127, etc.
 *
 * On Windows, each affinity mask in this object applies to its own Processor Group, so \c get_affinity_mask(0) is for
 * Processor Group 0, \c get_affinity_mask(1) for Processor Group 1, etc.
 */
class IAffinityMask_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IAffinityMask")>
{
protected:
    /**
     * Gets the affinity mask at \c index.
     *
     * @note \c index must be less than \ref get_mask_count_abi()
     *
     * @param index Index to get affinity mask for.
     *
     * @return The affinity mask at the provided index.
     */
    virtual MaskType get_affinity_mask_abi(size_t index) noexcept = 0;

    /**
     * Sets the affinity \c mask at \c index.
     *
     * @note \c index must be less than \ref get_mask_count_abi()
     *
     * @param index Index to set affinity mask for.
     * @param mask Mask to set.
     */
    virtual void set_affinity_mask_abi(size_t index, MaskType mask) noexcept = 0;

    /**
     * Gets the current number of affinity masks stored by this object.
     *
     * @return The current number of affinity masks stored by this object.
     */
    virtual size_t get_mask_count_abi() noexcept = 0;

    /**
     * Gets the default number of affinity masks stored by this object.
     *
     * @return The default number of affinity masks stored by this object.
     */
    virtual size_t get_default_mask_count_abi() noexcept = 0;

    /**
     * Sets the number of affinity masks stored by this object to \c count.
     *
     * If \c count is greater than the current size, the appended affinity masks will be set to \c 0. If \c count
     * is less than the current size, then this object will only contain the first \c count elements after this call.
     *
     * @param count Number of affinity masks to set the size to.
     */
    virtual void set_mask_count_abi(size_t count) noexcept = 0;
};

/** Forward declaration of the IJob interface. */
OMNI_DECLARE_INTERFACE(IJob);
/** Forward declaration of the IJobWorker interface. */
OMNI_DECLARE_INTERFACE(IJobWorker);
/** Forward declaration of the IJobAffinity interface. */
OMNI_DECLARE_INTERFACE(IJobAffinity);

/**
 * Basic interface for launching jobs on a foreign job system.
 */
class IJob_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IJob")>
{
protected:
    /**
     * Adds a new job to be executed.
     *
     * @param job_fn User provided function to be executed by a worker.
     * @param job_data User provided data for the job, the memory must not be released until it is no longer needed by
     * the task.
     */
    virtual void enqueue_job_abi(JobFunction job_fn, OMNI_ATTR("in, out") void* job_data) noexcept = 0;
};

/**
 * Interface for managing the number of workers in the job system.
 */
class IJobWorker_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IJobWorker")>
{
protected:
    /**
     * Returns default number of workers used for creation of a new Job system.
     *
     * @return The default number of workers.
     */
    virtual size_t get_default_worker_count_abi() noexcept = 0;

    /**
     * Returns the number of worker threads in the job system.
     *
     * @returns The number of worker threads.
     */
    virtual size_t get_worker_count_abi() noexcept = 0;

    /**
     * Sets the number of workers in the job system.
     *
     * This function may stop all current threads and reset any previously set thread affinity.
     *
     * @param count The new number of workers to set in the system. A value of 0 means to use the default value returned
     * by getDefaultWorkerCount()
     */
    virtual void set_worker_count_abi(size_t count) noexcept = 0;
};

/**
 * Interface for setting CPU affinity for the job system.
 */
class IJobAffinity_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IJobAffinity")>
{
protected:
    /**
     * Gets the current affinity of a worker.
     *
     * @param worker_id The worker to set the affinity of. If this id is larger than the current number of workers,
     * a \c nullptr will be returned.
     *
     * @return The current affinity being used by the worker. The returned value may be \c nullptr if the worker's
     * affinity could not be determined.
     */
    virtual IAffinityMask* get_affinity_abi(size_t worker_id) noexcept = 0;

    /**
     * Attempts to set the affinity for the specified worker.
     *
     * @note On Windows each thread can only belong to a single Processor Group, so the CPU Affinity will only be set
     * to the first non-zero entry. That is to say, if both \c mask->get_affinity_mask(0) and
     * \c mask->get_affinity_mask(1) both have bits sets, only the CPUs in \c mask->get_affinity_mask(0) will be set for
     * the affinity.
     *
     * @param worker_id The worker to set the affinity of. If this id is larger than the current number of workers,
     * false will be returned.
     * @param mask The affinity values to set.
     *
     * @return true if the affinity was successfully set, false otherwise.
     */
    virtual bool set_affinity_abi(size_t worker_id, OMNI_ATTR("not_null") IAffinityMask* mask) noexcept = 0;
};

} // namespace job
} // namespace experimental
} // namespace omni

#include "IJob.gen.h"
omniverse-code/kit/include/omni/experimental/url/IUrl.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/OmniAttr.h>
#include <omni/core/Interface.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <utility>
#include <type_traits>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Generated wrapper that exposes the raw IUrl_abi as a user-friendly API.
//! Every method below simply forwards to the corresponding *_abi virtual.
template <>
class omni::core::Generated<omni::experimental::IUrl_abi> : public omni::experimental::IUrl_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::experimental::IUrl")

    //! Clears this URL.
    void clear() noexcept;

    //! Returns the string representation of this URL.
    omni::string to_string() noexcept;

    //! Returns the string representation of this URL, but with valid UTF-8 characters decoded.
    //! Invalid UTF-8 byte sequences and certain ASCII characters remain encoded, including
    //! control codes and characters reserved by the URL specification as sub-delimiters.
    omni::string to_string_utf8() noexcept;

    //! Sets this URL from a string.
    void from_string(const omni::string& url_string) noexcept;

    //! Sets this URL from a posix file path.
    //! The scheme will be "file" and the path will be the normalized and encoded file path.
    //! Normalization removes redundant path segments such as "//" and "/./" and collapses ".."
    //! segments if possible; for example "a/b/../" becomes "a".
    void from_filepath_posix(const omni::string& filepath) noexcept;

    //! Sets this URL from a windows file path.
    //! The scheme will be "file" and the path will be the normalized and encoded file path.
    //! Path normalization includes everything from from_filepath_posix() plus:
    //! - the drive letter is made uppercase,
    //! - path separators are converted from "\\" to "/",
    //! - UNC paths such as "\\\\server\\share\\path" or "\\\\?\\C:\\path" are handled correctly.
    void from_filepath_windows(const omni::string& filepath) noexcept;

    //! Sets this URL from a file path based on the native OS;
    //! calls either from_filepath_posix() or from_filepath_windows().
    void from_filepath_native(const omni::string& filepath) noexcept;

    //! Returns true if the URL has a scheme component (the part before the first colon, e.g.
    //! "http" or "omniverse"). A URL without a scheme can only be a relative reference.
    bool has_scheme() noexcept;

    //! Returns true if the URL has an authority component (the part between "//" and "/",
    //! e.g. "user@server:port").
    bool has_authority() noexcept;

    //! Returns true if the URL has a userinfo sub-component (the part of the authority before "@").
    bool has_userinfo() noexcept;

    //! Returns true if the URL has a host sub-component (the part of the authority between "@" and ":").
    bool has_host() noexcept;

    //! Returns true if the URL has a port sub-component (the part of the authority after ":").
    bool has_port() noexcept;

    //! Returns true if the URL has a path component — the part after (and including) the first "/",
    //! e.g. "/path/to/my/file.txt".
    bool has_path() noexcept;

    //! Returns true if the URL has a query component (the part after "?" but before "#").
    bool has_query() noexcept;

    //! Returns true if the URL has a fragment component (the part after "#").
    bool has_fragment() noexcept;

    //! Returns the scheme, always fully decoded and in lower case.
    omni::string get_scheme() noexcept;

    //! Returns the authority, which may contain percent-encoded data; for example if the userinfo
    //! contains ":" or "@" it must be percent-encoded.
    omni::string get_authority_encoded() noexcept;

    //! Returns the userinfo, fully decoded.
    omni::string get_userinfo() noexcept;

    //! Returns the host, fully decoded.
    omni::string get_host() noexcept;

    //! Returns the port number.
    uint16_t get_port() noexcept;

    //! Returns the percent-encoded path component.
    omni::string get_path_encoded() noexcept;

    //! Returns the path component with all printable ascii and valid UTF-8 characters decoded.
    //! Invalid UTF-8 and ASCII control codes remain percent-encoded, so the result is generally
    //! safe to print on screen and in log files.
    omni::string get_path_utf8() noexcept;

    //! Returns the percent-encoded query component.
    omni::string get_query_encoded() noexcept;

    //! Returns the percent-encoded fragment component.
    omni::string get_fragment_encoded() noexcept;

    //! Sets the scheme.
    void set_scheme(const omni::string& scheme) noexcept;

    //! Sets the authority, which is expected to have all the sub-components percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    void set_authority_encoded(const omni::string& authority) noexcept;

    //! Sets the userinfo. Expects the userinfo is not already percent-encoded.
    void set_userinfo(const omni::string& userinfo) noexcept;

    //! Sets the host. Expects the host is not already percent-encoded.
    void set_host(const omni::string& host) noexcept;

    //! Sets the port number.
    void set_port(uint16_t port) noexcept;

    //! Sets the path, which is already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    void set_path_encoded(const omni::string& path_encoded) noexcept;

    //! Sets the path, which is NOT already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded, including the percent sign itself.
    void set_path_decoded(const omni::string& path_decoded) noexcept;

    //! Sets the query, which is already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    void set_query_encoded(const omni::string& query_encoded) noexcept;

    //! Sets the query, which is NOT already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded, including the percent sign itself.
    void set_query_decoded(const omni::string& query_decoded) noexcept;

    //! Sets the fragment, which is already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    void set_fragment_encoded(const omni::string& fragment_encoded) noexcept;

    //! Sets the fragment, which is NOT already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded, including the percent sign itself.
    void set_fragment_decoded(const omni::string& fragment_decoded) noexcept;

    //! Creates a new IUrl object that represents the shortest possible URL that makes
    //! @p other_url relative to this URL.
    //!
    //! Relative URLs are described in section 5.2 "Relative Resolution" of RFC-3986.
    //!
    //! @param other_url URL to make a relative URL to.
    //!
    //! @return A new IUrl object that is the relative URL between this URL and @p other_url.
    omni::core::ObjectPtr<omni::experimental::IUrl> make_relative(
        omni::core::ObjectParam<omni::experimental::IUrl> other_url) noexcept;

    //! Creates a new IUrl object that is the result of resolving the provided @p relative_url
    //! with this URL as the base URL.
    //!
    //! The algorithm for doing the combination is described in section 5.2 "Relative Resolution"
    //! of RFC-3986.
    //!
    //! @param relative_url URL to resolve with this URL as the base URL.
    //!
    //! @return A new IUrl object that is the result of resolving @p relative_url with this URL.
    omni::core::ObjectPtr<omni::experimental::IUrl> resolve_relative(
        omni::core::ObjectParam<omni::experimental::IUrl> relative_url) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Generated inline trampolines: each public method forwards to its *_abi counterpart.

inline void omni::core::Generated<omni::experimental::IUrl_abi>::clear() noexcept
{
    clear_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::to_string() noexcept
{
    return to_string_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::to_string_utf8() noexcept
{
    return to_string_utf8_abi();
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_string(const omni::string& url_string) noexcept
{
    from_string_abi(url_string);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_filepath_posix(const omni::string& filepath) noexcept
{
    from_filepath_posix_abi(filepath);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_filepath_windows(const omni::string& filepath) noexcept
{
    from_filepath_windows_abi(filepath);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_filepath_native(const omni::string& filepath) noexcept
{
    from_filepath_native_abi(filepath);
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_scheme() noexcept
{
    return has_scheme_abi();
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_authority() noexcept
{
    return has_authority_abi();
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_userinfo() noexcept
{
    return has_userinfo_abi();
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_host() noexcept
{
    return has_host_abi();
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_port() noexcept
{
    return has_port_abi();
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_path() noexcept
{
    return has_path_abi();
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_query() noexcept
{
    return has_query_abi();
}

inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_fragment() noexcept
{
    return has_fragment_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_scheme() noexcept
{
    return get_scheme_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_authority_encoded() noexcept
{
    return get_authority_encoded_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_userinfo() noexcept
{
    return get_userinfo_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_host() noexcept
{
    return get_host_abi();
}

inline uint16_t omni::core::Generated<omni::experimental::IUrl_abi>::get_port() noexcept
{
    return get_port_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_path_encoded() noexcept
{
    return get_path_encoded_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_path_utf8() noexcept
{
    return get_path_utf8_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_query_encoded() noexcept
{
    return get_query_encoded_abi();
}

inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_fragment_encoded() noexcept
{
    return get_fragment_encoded_abi();
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_scheme(const omni::string& scheme) noexcept
{
    set_scheme_abi(scheme);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_authority_encoded(const omni::string& authority) noexcept
{
    set_authority_encoded_abi(authority);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_userinfo(const omni::string& userinfo) noexcept
{
    set_userinfo_abi(userinfo);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_host(const omni::string& host) noexcept
{
    set_host_abi(host);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_port(uint16_t port) noexcept
{
    set_port_abi(port);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_path_encoded(const omni::string& path_encoded) noexcept
{
    set_path_encoded_abi(path_encoded);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_path_decoded(const omni::string& path_decoded) noexcept
{
    set_path_decoded_abi(path_decoded);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_query_encoded(const omni::string& query_encoded) noexcept
{
    set_query_encoded_abi(query_encoded);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_query_decoded(const omni::string& query_decoded) noexcept
{
    set_query_decoded_abi(query_decoded);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_fragment_encoded(
    const omni::string& fragment_encoded) noexcept
{
    set_fragment_encoded_abi(fragment_encoded);
}

inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_fragment_decoded(
    const omni::string& fragment_decoded) noexcept
{
    set_fragment_decoded_abi(fragment_decoded);
}

inline omni::core::ObjectPtr<omni::experimental::IUrl> omni::core::Generated<omni::experimental::IUrl_abi>::make_relative(
    omni::core::ObjectParam<omni::experimental::IUrl> other_url) noexcept
{
    return omni::core::steal(make_relative_abi(other_url.get()));
}

inline omni::core::ObjectPtr<omni::experimental::IUrl> omni::core::Generated<omni::experimental::IUrl_abi>::resolve_relative(
    omni::core::ObjectParam<omni::experimental::IUrl> relative_url) noexcept
{
    return omni::core::steal(resolve_relative_abi(relative_url.get()));
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/experimental/url/IUrl.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "../../core/IObject.h"
#include "../../String.h"

namespace omni
{
namespace experimental
{

OMNI_DECLARE_INTERFACE(IUrl);

//! ABI-stable interface for parsing, inspecting and mutating URLs (RFC-3986 components:
//! scheme, authority [userinfo/host/port], path, query, fragment).
class IUrl_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.IUrl")>
{
protected:
    //! Clears this URL.
    virtual void clear_abi() noexcept = 0;

    //! Returns the string representation of this URL.
    virtual omni::string to_string_abi() noexcept = 0;

    //! Returns the string representation of this URL, but with valid UTF-8 characters decoded.
    //! Invalid UTF-8 byte sequences and certain ASCII characters remain encoded, including
    //! control codes and characters reserved by the URL specification as sub-delimiters.
    virtual omni::string to_string_utf8_abi() noexcept = 0;

    //! Sets this URL from a string.
    virtual void from_string_abi(omni::string const& url_string) noexcept = 0;

    //! Sets this URL from a posix file path.
    //! The scheme will be "file" and the path will be the normalized and encoded file path.
    //! Normalization removes redundant path segments such as "//" and "/./" and collapses ".."
    //! segments if possible; for example "a/b/../" becomes "a".
    virtual void from_filepath_posix_abi(omni::string const& filepath) noexcept = 0;

    //! Sets this URL from a windows file path.
    //! The scheme will be "file" and the path will be the normalized and encoded file path.
    //! Path normalization includes everything from from_filepath_posix_abi plus:
    //! - the drive letter is made uppercase,
    //! - path separators are converted from "\\" to "/",
    //! - UNC paths such as "\\\\server\\share\\path" or "\\\\?\\C:\\path" are handled correctly.
    virtual void from_filepath_windows_abi(omni::string const& filepath) noexcept = 0;

    //! Sets this URL from a file path based on the native OS;
    //! calls either from_filepath_posix_abi or from_filepath_windows_abi.
    virtual void from_filepath_native_abi(omni::string const& filepath) noexcept = 0;

    //! Returns true if the URL has a scheme component (the part before the first colon, e.g.
    //! "http" or "omniverse"). A URL without a scheme can only be a relative reference.
    virtual bool has_scheme_abi() noexcept = 0;

    //! Returns true if the URL has an authority component (the part between "//" and "/",
    //! e.g. "user@server:port").
    virtual bool has_authority_abi() noexcept = 0;

    //! Returns true if the URL has a userinfo sub-component (the part of the authority before "@").
    virtual bool has_userinfo_abi() noexcept = 0;

    //! Returns true if the URL has a host sub-component (the part of the authority between "@" and ":").
    virtual bool has_host_abi() noexcept = 0;

    //! Returns true if the URL has a port sub-component (the part of the authority after ":").
    virtual bool has_port_abi() noexcept = 0;

    //! Returns true if the URL has a path component — the part after (and including) the first "/",
    //! e.g. "/path/to/my/file.txt".
    virtual bool has_path_abi() noexcept = 0;

    //! Returns true if the URL has a query component (the part after "?" but before "#").
    virtual bool has_query_abi() noexcept = 0;

    //! Returns true if the URL has a fragment component (the part after "#").
    virtual bool has_fragment_abi() noexcept = 0;

    //! Returns the scheme, always fully decoded and in lower case.
    virtual omni::string get_scheme_abi() noexcept = 0;

    //! Returns the authority, which may contain percent-encoded data; for example if the userinfo
    //! contains ":" or "@" it must be percent-encoded.
    virtual omni::string get_authority_encoded_abi() noexcept = 0;

    //! Returns the userinfo, fully decoded.
    virtual omni::string get_userinfo_abi() noexcept = 0;

    //! Returns the host, fully decoded.
    virtual omni::string get_host_abi() noexcept = 0;

    //! Returns the port number.
    virtual uint16_t get_port_abi() noexcept = 0;

    //! Returns the percent-encoded path component.
    virtual omni::string get_path_encoded_abi() noexcept = 0;

    //! Returns the path component with all printable ascii and valid UTF-8 characters decoded.
    //! Invalid UTF-8 and ASCII control codes remain percent-encoded, so the result is generally
    //! safe to print on screen and in log files.
    virtual omni::string get_path_utf8_abi() noexcept = 0;

    //! Returns the percent-encoded query component.
    virtual omni::string get_query_encoded_abi() noexcept = 0;

    //! Returns the percent-encoded fragment component.
    virtual omni::string get_fragment_encoded_abi() noexcept = 0;

    //! Sets the scheme.
    virtual void set_scheme_abi(omni::string const& scheme) noexcept = 0;

    //! Sets the authority, which is expected to have all the sub-components percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    virtual void set_authority_encoded_abi(omni::string const& authority) noexcept = 0;

    //! Sets the userinfo. Expects the userinfo is not already percent-encoded.
    virtual void set_userinfo_abi(omni::string const& userinfo) noexcept = 0;

    //! Sets the host. Expects the host is not already percent-encoded.
    virtual void set_host_abi(omni::string const& host) noexcept = 0;

    //! Sets the port number.
    virtual void set_port_abi(uint16_t port) noexcept = 0;

    //! Sets the path, which is already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    virtual void set_path_encoded_abi(omni::string const& path_encoded) noexcept = 0;

    //! Sets the path, which is NOT already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded, including the percent sign itself.
    virtual void set_path_decoded_abi(omni::string const& path_decoded) noexcept = 0;

    //! Sets the query, which is already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    virtual void set_query_encoded_abi(omni::string const& query_encoded) noexcept = 0;

    //! Sets the query, which is NOT already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded, including the percent sign itself.
    virtual void set_query_decoded_abi(omni::string const& query_decoded) noexcept = 0;

    //! Sets the fragment, which is already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded if detected; the percent sign
    //! itself is _NOT_ encoded.
    virtual void set_fragment_encoded_abi(omni::string const& fragment_encoded) noexcept = 0;

    //! Sets the fragment, which is NOT already percent-encoded.
    //! Characters that _MUST_ be encoded are percent-encoded, including the percent sign itself.
    virtual void set_fragment_decoded_abi(omni::string const& fragment_decoded) noexcept = 0;

    //! Creates a new IUrl object that represents the shortest possible URL that makes
    //! @p other_url relative to this URL.
    //!
    //! Relative URLs are described in section 5.2 "Relative Resolution" of RFC-3986.
    //!
    //! @param other_url URL to make a relative URL to.
    //!
    //! @return A new IUrl object that is the relative URL between this URL and @p other_url.
    virtual IUrl* make_relative_abi(IUrl* other_url) noexcept = 0;

    //! Creates a new IUrl object that is the result of resolving the provided @p relative_url
    //! with this URL as the base URL.
    //!
    //! The algorithm for doing the combination is described in section 5.2 "Relative Resolution"
    //! of RFC-3986.
    //!
    //! @param relative_url URL to resolve with this URL as the base URL.
    //!
    //! @return A new IUrl object that is the result of resolving @p relative_url with this URL.
    virtual IUrl* resolve_relative_abi(IUrl* relative_url) noexcept = 0;
};

} // namespace experimental
} // namespace omni

#include "IUrl.gen.h"
omniverse-code/kit/include/omni/graph/io/BundleAttrib.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#pragma message("omni/graph/io/BundleAttrib.h is deprecated. Include omni/graph/core/BundleAttrib.h instead.")

// ====================================================================================================
// DO NOT USE.
// This is a temporary interface that can change at any time.
// ====================================================================================================

#include "IDirtyID.h"

#include <omni/graph/core/IBundle.h>

namespace omni
{
namespace graph
{
namespace io
{

class BundlePrim;
class ConstBundlePrim;

// Underlying storage type for BundleAttribSource values.
using BundleAttribSourceType OMNI_GRAPH_IO_DEPRECATED = uint8_t;

/**
 * BundleAttribSource is used to differentiate between UsdAttributes
 * and UsdRelationships.
 *
 * TODO: Investigate why we can't use eRelationship for this purpose.
 */
enum class OMNI_GRAPH_IO_DEPRECATED BundleAttribSource : BundleAttribSourceType
{
    Attribute,
    Relationship,
};

/**
 * Attribute in a bundle primitive.
 *
 * In contrast to (Const)BundlePrim and (Const)BundlePrims, this class uses the
 * const qualifier to express constness of the attribute.
 *
 * TODO: Review if const qualifier is appropriate.
 */
class OMNI_GRAPH_IO_DEPRECATED BundleAttrib
{
public:
    /**
     * Backward compatibility aliases.
     */
    using SourceType = BundleAttribSourceType;
    using Source = BundleAttribSource;

    BundleAttrib() = default;

    /**
     * Read initialization.
     */
    BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept;

    /**
     * Read-Write initialization.
     */
    BundleAttrib(BundlePrim& prim,
                 omni::graph::core::NameToken name,
                 omni::graph::core::Type type,
                 size_t arrayElementCount,
                 BundleAttribSource source) noexcept;

    // Non-copyable and non-movable.
    BundleAttrib(BundleAttrib const&) = delete;
    BundleAttrib(BundleAttrib&&) noexcept = delete;
    BundleAttrib& operator=(BundleAttrib const&) = delete;
    BundleAttrib& operator=(BundleAttrib&&) noexcept = delete;

    /**
     * @return Bundle primitive this attribute belongs to (const access).
     */
    ConstBundlePrim* getBundlePrim() const noexcept;

    /**
     * @return Bundle primitive this attribute belongs to.
     */
    BundlePrim* getBundlePrim() noexcept;

    /**
     * @return Non-const attribute handle of this attribute.
     */
    omni::graph::core::AttributeDataHandle handle() noexcept;

    /**
     * @return Const attribute handle of this attribute.
     */
    omni::graph::core::ConstAttributeDataHandle handle() const noexcept;

    /**
     * @return Name of this attribute.
     */
    omni::graph::core::NameToken name() const noexcept;

    /**
     * @return Type of this attribute.
     */
    omni::graph::core::Type type() const noexcept;

    /**
     * @return Interpolation of this attribute.
     */
    omni::graph::core::NameToken interpolation() const noexcept;

    /**
     * Sets interpolation for this attribute.
     *
     * @return True if the operation was successful, false otherwise.
     */
    bool setInterpolation(omni::graph::core::NameToken interpolation) noexcept;

    /**
     * Clears interpolation information for this attribute.
     */
    void clearInterpolation() noexcept;

    /**
     * @return Dirty id of this attribute.
     */
    DirtyIDType dirtyID() const noexcept;

    /**
     * Sets dirty id to the given value.
     *
     * @return True if successful, false otherwise.
     */
    bool setDirtyID(DirtyIDType dirtyID) noexcept;

    /**
     * Bumps the dirty id for this attribute.
     *
     * @return True if successful, false otherwise.
     */
    bool bumpDirtyID() noexcept;

    /**
     * Sets the source for this attribute.
     *
     * @return True if successful, false otherwise.
     */
    bool setSource(Source source) noexcept;

    /**
     * Resets the source to its default value for this attribute.
     */
    void clearSource() noexcept;

    /**
     * @return True if this attribute is an array attribute.
     */
    bool isArray() const noexcept;

    /**
     * @return Size of this attribute. If the attribute is not an array, the size is 1.
     */
    size_t size() const noexcept;

    /**
     * Changes the size of this attribute.
     */
    void resize(size_t arrayElementCount) noexcept;

    /**
     * Copies attribute contents from another attribute.
     * The destination name is preserved.
     */
    void copyContentsFrom(BundleAttrib const& sourceAttr) noexcept;

    /**
     * @return Internal data as a void pointer.
     */
    void* getDataInternal() noexcept;

    /**
     * @return Internal data as a const void pointer.
     */
    void const* getDataInternal() const noexcept;

    template <typename T>
    T get() const noexcept;

    // NOTE: If this is not an array type attribute, this pointer may not be valid once any prim,
    // even if it's not the prim containing this attribute, has an attribute added or removed,
    // due to how attribute data is stored.
    template <typename T>
    T* getData() noexcept;
    template <typename T>
    T const* getData() const noexcept;
    template <typename T>
    T const* getConstData() const noexcept;

    template <typename T>
    void set(T const& value) noexcept;
    template <typename T>
    void set(T const* values, size_t elementCount) noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     * In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @todo First iteration of MPiB didn't use 'eRelationship' type to describe relationships.
     * Thus, a strange approach was created to treat an attribute that is a relationship as a "source".
     */
    Source source() const noexcept;

    /**
     * @return true if this attribute is data.
     */
    bool isAttributeData() const noexcept;

    /**
     * @return true if this attribute is a relationship.
     */
    bool isRelationshipData() const noexcept;

    /**
     * @deprecated IBundle2 interface does not require prefixing, use getName().
     */
    omni::graph::core::NameToken prefixedName() const noexcept;

private:
    /**
     * Removes the attribute and its internal data.
     */
    void clearContents() noexcept;

    omni::graph::core::IConstBundle2* getConstBundlePtr() const noexcept;
    omni::graph::core::IBundle2* getBundlePtr() noexcept;

    ConstBundlePrim* m_bundlePrim{ nullptr };

    // Attribute Definition:
    omni::graph::core::NameToken m_name = carb::flatcache::kUninitializedToken;
    carb::flatcache::TypeC m_type;

    // Attribute Property Cached Values:
    omni::graph::core::NameToken m_interpolation = carb::flatcache::kUninitializedToken;
    DirtyIDType m_dirtyID{ kInvalidDirtyID };
    Source m_source { BundleAttribSource::Attribute };

    friend class ConstBundlePrims;
    friend class BundlePrim;
};

/**
 * Do not use! Backward compatibility alias.
 */
using BundleAttributeInfo OMNI_GRAPH_IO_DEPRECATED = BundleAttrib;

} // namespace io
} // namespace graph
} // namespace omni

#include "BundleAttribImpl.h"
omniverse-code/kit/include/omni/graph/io/BundlePrims.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#pragma message("omni/graph/io/BundlePrims.h is deprecated. Include omni/graph/core/BundlePrims.h instead.")

// ====================================================================================================
/*
    DO NOT USE!

    This is a temporary interface that can change at any time.
*/
// ====================================================================================================

#include "ConstBundlePrims.h"

namespace omni
{
namespace graph
{
namespace io
{

class BundlePrims;
class BundlePrimIterator;
class BundlePrimAttrIterator;

/**
 * Collection of read-write attributes in a primitive.
 *
 * Extends ConstBundlePrim with mutating operations (add/remove attributes,
 * set type, bump dirty ids). Instances are created and owned by BundlePrims.
 */
class OMNI_GRAPH_IO_DEPRECATED BundlePrim : public ConstBundlePrim
{
public:
    using AttrMapIteratorType = BundleAttributeMap::iterator;

    /**
     * @return Parent of this bundle prim.
     */
    BundlePrims* getBundlePrims() noexcept;

    /**
     * @return Bundle handle of this primitive.
     */
    omni::graph::core::BundleHandle handle() noexcept;

    /**
     * Sets type of the primitive.
     */
    void setType(omni::graph::core::NameToken type) noexcept;

    /**
     * @return Cached instance of BundleAttrib if attribute is found successfully, nullptr otherwise.
     */
    BundleAttrib* getAttr(omni::graph::core::NameToken attrName) noexcept;

    /**
     * @return BundleAttrib if attribute is added successfully, nullptr otherwise.
     */
    BundleAttrib* addAttr(omni::graph::core::NameToken attrName,
                          omni::graph::core::Type type,
                          size_t arrayElementCount = 0,
                          BundleAttrib::Source source = BundleAttrib::Source::Attribute) noexcept;

    /**
     * Convenience structure for adding attributes in bulk (see addAttrs).
     */
    struct AddAttrInfo
    {
        omni::graph::core::NameToken attrName;
        omni::graph::core::Type type;
        size_t arrayElementCount;
        BundleAttrib::Source source;
    };

    /**
     * Adds a list of attributes to this bundle prim.
     *
     * @param[in] attrList Vector of all the new attributes to be added to this prim
     * @returns True if all (new) attributes were added successfully
     *
     * @todo Weakness of this interface is that it forces usage of std::vector.
     */
    bool addAttrs(std::vector<AddAttrInfo> const& attrList) noexcept;

    /**
     * Remove attribute with a given name from this primitive.
     */
    void removeAttr(omni::graph::core::NameToken attrName) noexcept;

    /**
     * Recursively remove all attributes from this primitive.
     */
    void clearContents() noexcept;

    /**
     * Copy contents from another bundle prim.
     */
    void copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource = true) noexcept;

    /**
     * Bump dirty id for this bundle prim.
     */
    void bumpDirtyID() noexcept;

    /**
     * Set dirty id for this bundle prim.
     */
    void setDirtyID(DirtyIDType dirtyID) noexcept;

    /**
     * @return Attribute iterator pointing to the first attribute in this bundle.
     */
    BundlePrimAttrIterator begin() noexcept;

    /**
     * @return Attribute iterator pointing past the last attribute in this bundle.
     */
    BundlePrimAttrIterator end() noexcept;

    /**
     * @return Const attribute iterator pointing to the first attribute in this bundle.
     */
    ConstBundlePrimAttrIterator cbegin() noexcept;

    /**
     * @return Const attribute iterator pointing past the last attribute in this bundle.
     */
    ConstBundlePrimAttrIterator cend() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     * In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * Create an attribute that is a relationship type.
     */
    BundleAttrib* addRelationship(omni::graph::core::NameToken name, size_t targetCount) noexcept;

    /**
     * @deprecated Do not use! It doesn't do anything, it's kept for backward compatibility.
     */
    void setPath(omni::graph::core::NameToken path) noexcept;

    /**
     * @deprecated Use getBundlePrims.
     */
    BundlePrims* bundlePrims() noexcept;

    /**
     * @deprecated Do not use!
     */
    void copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource = true) noexcept;

private:
    /**
     * Direct initialization with IBundle interface.
     *
     * ConstBundlePrim and BundlePrim take advantage of polymorphic relationship
     * between IConstBundle and IBundle interfaces.
     * In order to modify bundles, BundlePrim makes an attempt to cast IConstBundle
     * to IBundle interface. When this process is successful then, bundle can be modified.
     *
     * Only BundlePrims is allowed to create instances of BundlePrim.
     */
    BundlePrim(BundlePrims& bundlePrims, BundlePrimIndex primIndex, omni::core::ObjectPtr<IBundle2> bundle);

    /**
     * Clear contents of IBundle, recursing into child bundles.
     */
    void recursiveClearContents(omni::graph::core::GraphContextObj const& context,
                                omni::graph::core::IBundleFactory* factory,
                                omni::graph::core::IBundle2* bundle) noexcept;

    /**
     * @return Make an attempt to cast IConstBundle interface to IBundle. Returns nullptr if operation failed.
     */
    omni::graph::core::IBundle2* getBundlePtr(omni::graph::core::IConstBundle2* constBundle) noexcept;

    /**
     * @return Make an attempt to cast IConstBundle interface to IBundle. Returns nullptr if operation failed.
     */
    omni::graph::core::IBundle2* getBundlePtr() noexcept;

    /**
     * @return True if this prim is the pseudo-prim that holds the bundle's common attributes
     *         (marked by kInvalidBundlePrimIndex).
     */
    bool isCommonAttrs() const noexcept
    {
        return m_primIndex == kInvalidBundlePrimIndex;
    }

    friend class BundlePrimIterator;
    friend class BundlePrims;
    friend class BundleAttrib;
};

/**
 * Collection of read-write primitives in a bundle.
 *
 * Bundle Primitives is not movable, not copyable. Its lifespan is managed by the user.
 */
class OMNI_GRAPH_IO_DEPRECATED BundlePrims : public ConstBundlePrims
{
public:
    /**
     * Acquire access to a bundle primitives under given handle.
     */
    BundlePrims(omni::graph::core::GraphContextObj const& context, omni::graph::core::BundleHandle const& bundle);

    ~BundlePrims() noexcept;

    /**
     * @return Bundle handle of this primitive.
     */
    omni::graph::core::BundleHandle handle() noexcept;

    /**
     * @return BundlePrim under given index, or nullptr if prim is not found.
     */
    BundlePrim* getPrim(BundlePrimIndex primIndex) noexcept;

    /**
     * @return BundlePrim allowing access to attributes common to this whole bundle.
     */
    BundlePrim& getCommonAttrs() noexcept;

    /**
     * Add new primitives to this bundle.
     *
     * @return Number of successfully added primitives.
     */
    size_t addPrims(size_t primCountToAdd) noexcept;

    /**
     * Remove primitive under given index.
     */
    bool removePrim(BundlePrimIndex primIndex) noexcept;

    /**
     * Cleans up this primitive bundle. Remove all primitives and attributes.
     */
    void clearContents() noexcept;

    /**
     * Bump id of this bundle primitives.
     */
    DirtyIDType bumpBundleDirtyID() noexcept;

    /**
     * @return Primitive iterator pointing to the first primitive in this bundle.
     */
    BundlePrimIterator begin() noexcept;

    /**
     * @return Primitive iterator pointing past the last primitive in this bundle.
     */
    BundlePrimIterator end() noexcept;

    /**
     * @return Const primitive iterator pointing to the first primitive in this bundle.
     */
    ConstBundlePrimIterator cbegin() noexcept;

    /**
     * @return Const primitive iterator pointing past the last primitive in this bundle.
     */
    ConstBundlePrimIterator cend() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     * In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @deprecated Don't use! Read attach() description.
     */
    BundlePrims();

    /**
     * @deprecated Use appropriate constructor and heap allocate BundlePrims.
     *
     * @todo: There is no benefit of using this method. Cache has to be rebuilt from scratch
     *        whenever BundlePrims is attached/detached.
     *        It would be better to remove default constructor and enforce cache construction
     *        through constructor with arguments.
     */
    void attach(omni::graph::core::GraphContextObj const& context, omni::graph::core::BundleHandle const& bundle) noexcept;

    /**
     * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
     */
    void detach() noexcept;

    /**
     * @deprecated Do not use! This function is deprecated. Adding prim types has been moved to attach.
     *             ConstBundlePrims attach function will iterate over prims and collect their
     *             paths and types.
     */
    omni::graph::core::NameToken* addPrimTypesIfMissing() noexcept;

    /**
     * @deprecated Do not use! Use removePrim with index. This override introduces ambiguity where int can
     *             be converted to a pointer.
     *
     * @todo: Weakness of removePrim design is that it introduces two overrides with following arguments:
     *        * pointer
     *        * integer
     *        This leads to ambiguity during override resolution. Override with a pointer should be avoided
     *        and removed in the future.
     */
    bool removePrim(ConstBundlePrim* prim) noexcept;

    /**
     * @deprecated Do not use! There is no need for this function to exist.
     *             Get the primitive and call clearContents().
     */
    BundlePrim* getClearedPrim(BundlePrimIndex primIndex) noexcept;

    /**
     * @deprecated Responsibility to cache primitive's attributes has been moved to BundlePrim.
     */
    void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept;

    /**
     * This method exists for backward compatibility only. With new interface bundle holds the type.
     *
     * @deprecated Do not use! Use getConstPrimTypes().
     */
    omni::graph::core::NameToken* getPrimTypes() noexcept;

    /**
     * This method exists for backward compatibility only. With new interface bundle holds the path.
     *
     * @deprecated Do not use! Use getConstPrimPaths().
     */
    omni::graph::core::NameToken* getPrimPaths() noexcept;

    /**
     * @deprecated Do not use! Path is a part of IBundle interface.
     */
    omni::graph::core::NameToken* addPrimPathsIfMissing() noexcept;

private:
    /**
     * @return Returns nullptr if bundle is read only, or IBundle2 instance otherwise.
     */
    omni::graph::core::IBundle2* getBundlePtr() noexcept;

    /**
     * @return Get prim dirty ids of this bundle.
     */
    DirtyIDType* getPrimDirtyIDs() noexcept;

    // Cached attribute handles for the backward-compatibility metadata attributes.
    using AttributeDataHandle = omni::graph::core::AttributeDataHandle;
    AttributeDataHandle m_bundleDirtyIDAttr{ AttributeDataHandle::invalidValue() };
    AttributeDataHandle m_primDirtyIDsAttr{ AttributeDataHandle::invalidValue() };
    AttributeDataHandle m_primPathsAttr{ AttributeDataHandle::invalidValue() };
    AttributeDataHandle m_primTypesAttr{ AttributeDataHandle::invalidValue() };
    AttributeDataHandle m_primIndexAttr{ AttributeDataHandle::invalidValue() };

    friend class BundlePrim;
    friend class BundleAttrib;
};

/**
 * Primitives in Bundle iterator (forward iteration over BundlePrims by index).
 */
class OMNI_GRAPH_IO_DEPRECATED BundlePrimIterator
{
public:
    BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept;
    BundlePrimIterator(BundlePrimIterator const& that) noexcept = default;
    BundlePrimIterator& operator=(BundlePrimIterator const& that) noexcept = default;

    bool operator==(BundlePrimIterator const& that) const noexcept;
    bool operator!=(BundlePrimIterator const& that) const noexcept;
    BundlePrim& operator*() noexcept;
    BundlePrim* operator->() noexcept;
    BundlePrimIterator& operator++() noexcept;

private:
    BundlePrims* m_bundlePrims; // Non-owning.
    BundlePrimIndex m_primIndex;
};

/**
 * Attributes in Primitive iterator (wraps the prim's attribute-map iterator).
 */
class OMNI_GRAPH_IO_DEPRECATED BundlePrimAttrIterator
{
public:
    BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept;
    BundlePrimAttrIterator(BundlePrimAttrIterator const& that) noexcept = default;
    BundlePrimAttrIterator& operator=(BundlePrimAttrIterator const& that) noexcept = default;

    bool operator==(BundlePrimAttrIterator const& that) const noexcept;
    bool operator!=(BundlePrimAttrIterator const& that) const noexcept;
    BundleAttrib& operator*() noexcept;
    BundleAttrib* operator->() noexcept;
    BundlePrimAttrIterator& operator++() noexcept;

    BundleAttrib const* getConst() noexcept;

private:
    BundlePrim* m_bundlePrim; // Non-owning.
    BundlePrim::AttrMapIteratorType m_attrIter;
};

} // namespace io
} // namespace graph
} // namespace omni

#include "BundlePrimsImpl.h"
omniverse-code/kit/include/omni/graph/io/ConstBundlePrimsImpl.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "ConstBundlePrims.h"

#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/ComputeGraph.h>

#include <algorithm>

namespace omni
{
namespace graph
{
namespace io
{

// ====================================================================================================
//
// Const Bundle Primitive
//
// ====================================================================================================

// Wraps one child bundle (or the bundle itself for "common attributes") and eagerly caches its
// attributes.
inline ConstBundlePrim::ConstBundlePrim(ConstBundlePrims& bundlePrims,
                                        BundlePrimIndex primIndex,
                                        omni::core::ObjectPtr<omni::graph::core::IConstBundle2> bundle)
    : m_bundlePrims{ &bundlePrims }, m_bundle{ std::move(bundle) }, m_primIndex{ primIndex }
{
    // Read and cache all non internal attributes.
    readAndCacheAttributes();
}

// Populates the attribute-name -> BundleAttrib cache from the underlying IConstBundle2.
// Invalid attribute handles are skipped.
inline void ConstBundlePrim::readAndCacheAttributes() noexcept
{
    using namespace omni::graph::core;

    IConstBundle2* bundle = getConstBundlePtr();
    GraphContextObj const& context = getConstBundlePrims()->context();

    std::vector<ConstAttributeDataHandle> attrHandles(bundle->getAttributeCount());
    bundle->getConstAttributes(attrHandles.data(), attrHandles.size());

    auto& attrs = getAttributes();
    for (ConstAttributeDataHandle& attrHandle : attrHandles)
    {
        if (!attrHandle.isValid())
            continue;

        NameToken attrName = context.iAttributeData->getName(context, attrHandle);
        attrs.insert(std::make_pair(attrName, std::make_unique<BundleAttrib>(*this, attrName)));
    }
}

// Looks the attribute up in the cache first, then in the bundle itself. A cache entry for an
// attribute that no longer exists in the bundle is evicted; a bundle attribute missing from the
// cache is wrapped and inserted.
inline BundleAttrib const* ConstBundlePrim::getConstAttr(core::NameToken attrName) noexcept
{
    using namespace omni::graph::core;

    // Try to find cached attributes.
    auto& attrMap = getAttributes();
    auto it = attrMap.find(attrName);
    if (it != attrMap.end())
    {
        return it->second.get();
    }

    // Try to find attribute in this bundle.
    IConstBundle2* bundle = getConstBundlePtr();
    ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(attrName);
    if (!attributeHandle.isValid())
    {
        // Attribute is not found, ensure entry is removed from the cache.
        // NOTE(review): this inner 'it' shadows the outer 'it', which at this point is known to be
        // attrMap.end() — the erase can never fire on this path; presumably dead code kept for safety.
        auto it = attrMap.find(attrName);
        if (it != attrMap.end())
        {
            attrMap.erase(it);
        }
        return nullptr;
    }

    // Attribute exists in the bundle but not in the cache: wrap it and cache it.
    auto newPrimAttribute = new BundleAttrib{ *this, attrName};
    std::unique_ptr<BundleAttrib> primAttributePtr{ newPrimAttribute };
    attrMap.emplace(attrName, std::move(primAttributePtr));
    return newPrimAttribute;
}

// Const overload; delegates to the non-const cache-updating lookup.
inline BundleAttrib const* ConstBundlePrim::getAttr(omni::graph::core::NameToken attrName) const noexcept
{
    return const_cast<ConstBundlePrim*>(this)->getConstAttr(attrName);
}

// Number of cached attributes on this prim.
inline size_t ConstBundlePrim::attrCount() noexcept
{
    return getAttributes().size();
}

// Index of this prim within its parent ConstBundlePrims.
inline BundlePrimIndex ConstBundlePrim::primIndex() noexcept
{
    return m_primIndex;
}

// Path of this prim from the parent's backward-compatibility path array, or
// kUninitializedToken when out of range or when no path data is attached.
inline omni::graph::core::NameToken ConstBundlePrim::path() noexcept
{
    using namespace omni::graph::core;

    ConstBundlePrims* bundlePrims = getConstBundlePrims();
    BundlePrimIndex const primIndex = this->primIndex();
    if (primIndex >= bundlePrims->getPrimCount())
    {
        return carb::flatcache::kUninitializedToken;
    }

    NameToken const* paths = bundlePrims->getConstPrimPaths();
    return (paths != nullptr) ? paths[primIndex] : carb::flatcache::kUninitializedToken;
}

inline omni::graph::core::NameToken ConstBundlePrim::path() const noexcept
{
    return const_cast<ConstBundlePrim*>(this)->path();
}

// Type token of this prim from the parent's backward-compatibility type array, or
// kUninitializedToken when out of range or when no type data is attached.
inline omni::graph::core::NameToken ConstBundlePrim::type() noexcept
{
    using namespace omni::graph::core;

    ConstBundlePrims* bundlePrims = getConstBundlePrims();
    BundlePrimIndex const primIndex = this->primIndex();
    if (primIndex >= bundlePrims->getPrimCount())
    {
        return carb::flatcache::kUninitializedToken;
    }

    NameToken const* types = bundlePrims->getConstPrimTypes();
    return (types != nullptr) ? types[primIndex] : carb::flatcache::kUninitializedToken;
}

inline omni::graph::core::NameToken ConstBundlePrim::type() const noexcept
{
    return const_cast<ConstBundlePrim*>(this)->type();
}

// Dirty id of this prim. The common-attributes pseudo-prim (index out of range) reports the
// whole bundle's dirty id instead of a per-prim id.
inline DirtyIDType ConstBundlePrim::dirtyID() noexcept
{
    ConstBundlePrims* bundlePrims = getConstBundlePrims();
    if (primIndex() >= bundlePrims->getPrimCount())
    {
        return bundlePrims->getBundleDirtyID();
    }

    DirtyIDType const* dirtyIDs = m_bundlePrims->getPrimDirtyIDs();
    return (dirtyIDs != nullptr) ? dirtyIDs[m_primIndex] : kInvalidDirtyID;
}

inline DirtyIDType ConstBundlePrim::dirtyID() const noexcept
{
    return const_cast<ConstBundlePrim*>(this)->dirtyID();
}

// Non-owning pointer to the parent collection.
inline ConstBundlePrims* ConstBundlePrim::getConstBundlePrims() noexcept
{
    return m_bundlePrims;
}

inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() noexcept
{
    return ConstBundlePrimAttrIterator(*this, getAttributes().begin());
}

inline ConstBundlePrimAttrIterator ConstBundlePrim::end() noexcept
{
    return ConstBundlePrimAttrIterator(*this, getAttributes().end());
}

inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() const noexcept
{
    ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
    return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().begin());
}

inline ConstBundlePrimAttrIterator ConstBundlePrim::end() const noexcept
{
    ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
    return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().end());
}

inline omni::graph::core::IConstBundle2* ConstBundlePrim::getConstBundlePtr() noexcept
{
    return m_bundle.get();
}

inline ConstBundlePrim::BundleAttributeMap& ConstBundlePrim::getAttributes() noexcept
{
    return m_attributes;
}

// ====================================================================================================
//
// Const Bundle Primitives
//
// ====================================================================================================

inline ConstBundlePrims::ConstBundlePrims()
{
}

inline ConstBundlePrims::ConstBundlePrims(omni::graph::core::GraphContextObj const& context,
                                          omni::graph::core::ConstBundleHandle const& bundle)
    : ConstBundlePrims()
{
    attach(context, bundle);
}

// Resets all cached state to the detached/default state.
// NOTE(review): m_bundle / m_factory use ObjectPtr::release() — presumably this drops the held
// reference (unlike std::unique_ptr::release, which would leak); confirm against ObjectPtr docs.
inline void ConstBundlePrims::detach() noexcept
{
    m_bundleDirtyID = kInvalidDirtyID;
    m_primTypes = nullptr;
    m_primPaths = nullptr;
    m_primDirtyIDs = nullptr;
    m_iDirtyID = nullptr;

    m_primitives.clear();
    m_commonAttributes.reset();

    m_context = omni::graph::core::GraphContextObj{};
    m_bundle.release();
    m_factory.release();
}

// Raw pointer into the backward-compatibility prim-path metadata (may be nullptr).
inline omni::graph::core::NameToken const* ConstBundlePrims::getConstPrimPaths() noexcept
{
    return m_primPaths;
}

inline ConstBundlePrims::BundlePrimArray& ConstBundlePrims::getPrimitives() noexcept
{
    return m_primitives;
}

// Raw pointer into the backward-compatibility prim-type metadata (may be nullptr).
inline omni::graph::core::NameToken const* ConstBundlePrims::getConstPrimTypes() noexcept
{
    return m_primTypes;
}

inline omni::graph::core::ConstBundleHandle ConstBundlePrims::getConstHandle() noexcept
{
    return m_bundle->getConstHandle();
}

// Shared cache logic: bounds-check, grow the cache array to the current prim count, return the
// cached entry if present, otherwise create one via the supplied factory callable and cache it.
template <typename FUNC>
ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex, FUNC createBundlePrim) noexcept
{
    // Return invalid const bundle prim if out of bounds.
    size_t const bundlePrimCount = getPrimCount();
    if (primIndex >= bundlePrimCount)
    {
        return nullptr;
    }

    // Search and return if in cache.
    auto& prims = getPrimitives();
    if (prims.size() != bundlePrimCount)
    {
        prims.resize(bundlePrimCount);
    }
    if (prims[primIndex] != nullptr)
    {
        return prims[primIndex].get();
    }

    // Update the cache and return the bundle prim.
    std::unique_ptr<ConstBundlePrim> newBundlePrim{ createBundlePrim() };
    if (!newBundlePrim)
    {
        return nullptr;
    }

    ConstBundlePrim* newBundlePrimPtr = newBundlePrim.get();
    prims[primIndex] = std::move(newBundlePrim);
    return newBundlePrimPtr;
}

inline ConstBundlePrim* ConstBundlePrims::getPrim(BundlePrimIndex primIndex) noexcept
{
    return getConstPrim(primIndex);
}

// Creates (on demand) a ConstBundlePrim wrapping the child bundle at primIndex.
inline ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex) noexcept
{
    using namespace omni::graph::core;

    auto createBundlePrim = [this, &bundlePrims = *this, &primIndex]() -> ConstBundlePrim*
    {
        ConstBundleHandle bundleHandle = getConstBundlePtr()->getConstChildBundle(primIndex);
        if (!bundleHandle.isValid())
        {
            return nullptr;
        }
        omni::core::ObjectPtr<IConstBundle2> childBundle = getBundleFactoryPtr()->getConstBundle(context(), bundleHandle);
        if (!childBundle)
        {
            return nullptr;
        }
        return new ConstBundlePrim{ bundlePrims, primIndex, childBundle };
    };
    return getConstPrim(primIndex, createBundlePrim);
}

inline DirtyIDType ConstBundlePrims::getBundleDirtyID() noexcept
{
    return m_bundleDirtyID;
}

// Raw pointer into the backward-compatibility per-prim dirty-id metadata (may be nullptr).
inline DirtyIDType const* ConstBundlePrims::getPrimDirtyIDs() noexcept
{
    return m_primDirtyIDs;
}

inline ConstBundlePrim& ConstBundlePrims::getConstCommonAttrs() noexcept
{
    return *m_commonAttributes;
}

// Refreshes m_context from the bundle (or clears it when detached) and returns it.
inline omni::graph::core::GraphContextObj const& ConstBundlePrims::context() noexcept
{
    using namespace omni::graph::core;

    if (m_bundle)
    {
        m_context = m_bundle->getContext();
    }
    else
    {
        m_context = GraphContextObj{};
    }
    return m_context;
}

inline DirtyIDType ConstBundlePrims::getNextDirtyID() noexcept
{
    return m_iDirtyID->getNextDirtyID();
}

// Resolves the IBundleFactory and IConstBundle2 for the handle, then delegates to the
// interface-pointer overload of attach().
inline void ConstBundlePrims::attach(omni::graph::core::GraphContextObj const& context,
                                     omni::graph::core::ConstBundleHandle const& bundleHandle) noexcept
{
    using namespace omni::graph::core;

    ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>();
    omni::core::ObjectPtr<IBundleFactory> factory = computeGraph->getBundleFactoryInterfacePtr();
    omni::core::ObjectPtr<IConstBundle2> bundle = factory->getConstBundle(context, bundleHandle);
    attach(std::move(factory), std::move(bundle));
}

// Takes ownership of the factory/bundle pointers, builds the common-attributes pseudo-prim, and
// reads the backward-compatibility metadata attributes (bundle dirty id, per-prim dirty ids,
// prim paths, prim types) into raw cached pointers.
inline void ConstBundlePrims::attach(omni::core::ObjectPtr<omni::graph::core::IBundleFactory>&& factoryPtr,
                                     omni::core::ObjectPtr<omni::graph::core::IConstBundle2>&& bundlePtr) noexcept
{
    using namespace omni::graph::core;

    // Initialize members.
    m_factory = std::move(factoryPtr);
    m_bundle = std::move(bundlePtr);

    // Initialize common attributes to provide access to ConstBundlePrims attributes.
    m_commonAttributes.reset(new ConstBundlePrim(*this, kInvalidBundlePrimIndex, m_bundle));

    // Acquire IDirtyID interface.
    m_iDirtyID = carb::getCachedInterface<omni::graph::io::IDirtyID>();

    if (!m_bundle->isValid())
    {
        return;
    }

    // TODO: Following code is necessary for backward compatibility.
    IConstBundle2* bundle = getConstBundlePtr();
    GraphContextObj const& context = this->context();

    // Bundle DirtyID.
    auto& bundleDirtyIDDef = detail::getBundleDirtyIDDefinition();
    ConstAttributeDataHandle bundleDirtyIDHandle = bundle->getConstBundleMetadataByName(bundleDirtyIDDef.token);
    if (bundleDirtyIDHandle.isValid())
    {
        setBundleDirtyID(*getDataR<DirtyIDType>(context, bundleDirtyIDHandle));
    }
    else
    {
        setBundleDirtyID(kInvalidDirtyID);
    }

    // Prim DirtyIDs.
    // NOTE(review): in the three branches below, arrayLength is computed but never used —
    // presumably leftover validation; confirm whether a length check was intended.
    auto& primDirtyIDsDef = detail::getPrimDirtyIDsDefinition();
    ConstAttributeDataHandle primDirtyIDsHandle = bundle->getConstBundleMetadataByName(primDirtyIDsDef.token);
    if (primDirtyIDsHandle.isValid())
    {
        size_t arrayLength = 0;
        context.iAttributeData->getElementCount(&arrayLength, context, &primDirtyIDsHandle, 1);
        setPrimDirtyIDsData(*getDataR<DirtyIDType*>(context, primDirtyIDsHandle));
    }
    else
    {
        setPrimDirtyIDsData(nullptr);
    }

    // Prim Paths.
    auto& primPathsDef = detail::getPrimPathsDefinition();
    ConstAttributeDataHandle primPathsHandle = bundle->getConstBundleMetadataByName(primPathsDef.token);
    if (primPathsHandle.isValid())
    {
        size_t arrayLength = 0;
        context.iAttributeData->getElementCount(&arrayLength, context, &primPathsHandle, 1);
        setPrimPathsData(*getDataR<NameToken*>(context, primPathsHandle));
    }
    else
    {
        setPrimPathsData(nullptr);
    }

    // Prim Types.
    auto& primTypesDef = detail::getPrimTypesDefinition();
    ConstAttributeDataHandle primTypesHandle = bundle->getConstBundleMetadataByName(primTypesDef.token);
    if (primTypesHandle.isValid())
    {
        size_t arrayLength = 0;
        context.iAttributeData->getElementCount(&arrayLength, context, &primTypesHandle, 1);
        setPrimTypesData(*getDataR<NameToken*>(context, primTypesHandle));
    }
    else
    {
        setPrimTypesData(nullptr);
    }
}

inline void ConstBundlePrims::setBundleDirtyID(DirtyIDType bundleDirtyID) noexcept
{
    m_bundleDirtyID = bundleDirtyID;
}

inline void ConstBundlePrims::setPrimDirtyIDsData(DirtyIDType const* primDirtyIDs) noexcept
{
    m_primDirtyIDs = primDirtyIDs;
}

inline void ConstBundlePrims::setPrimPathsData(omni::graph::core::NameToken const* primPaths) noexcept
{
    m_primPaths = primPaths;
}

inline void ConstBundlePrims::setPrimTypesData(omni::graph::core::NameToken const* primTypes) noexcept
{
    m_primTypes = primTypes;
}

inline omni::graph::core::IBundleFactory* ConstBundlePrims::getBundleFactoryPtr() noexcept
{
    return m_factory.get();
}

inline omni::graph::core::IConstBundle2* ConstBundlePrims::getConstBundlePtr() noexcept
{
    return m_bundle.get();
}

// Number of child bundles (prims); 0 when detached.
inline size_t ConstBundlePrims::getPrimCount() noexcept
{
    if (IConstBundle2* bundle = getConstBundlePtr())
    {
        return bundle->getChildBundleCount();
    }
    return 0;
}

inline ConstBundlePrimIterator ConstBundlePrims::begin() noexcept
{
    return ConstBundlePrimIterator(*this);
}

inline ConstBundlePrimIterator ConstBundlePrims::end() noexcept
{
    return ConstBundlePrimIterator(*this, getPrimCount());
}

/***********************************************************************************************
 *
 * TODO: Following methods might be deprecated in the future, but are kept for backward compatibility.
 * In the next iteration when real interface starts to emerge, we can retire those methods.
 *
 ***********************************************************************************************/

inline ConstBundlePrim& ConstBundlePrims::getCommonAttrs() noexcept
{
    return getConstCommonAttrs();
}

inline omni::graph::core::ConstBundleHandle ConstBundlePrims::handle() noexcept
{
    return m_bundle->getConstHandle();
}

inline omni::graph::core::NameToken const* ConstBundlePrims::getPrimPaths() noexcept
{
    return getConstPrimPaths();
}

inline void ConstBundlePrims::separateAttrs() noexcept
{
    // There is nothing to separate. This function is deprecated.
}

inline void ConstBundlePrims::ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept
{
    // Responsibility of caching attributes was moved to Bundle Prim.
}

// ====================================================================================================
//
// Const Bundle Primitive Iterator
//
// ====================================================================================================

inline ConstBundlePrimIterator::ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept
    : m_bundlePrims(&bundlePrims), m_primIndex(primIndex)
{
}

inline bool ConstBundlePrimIterator::operator==(ConstBundlePrimIterator const& that) const noexcept
{
    return m_bundlePrims == that.m_bundlePrims && m_primIndex == that.m_primIndex;
}

inline bool ConstBundlePrimIterator::operator!=(ConstBundlePrimIterator const& that) const noexcept
{
    return !(*this == that);
}

inline ConstBundlePrim& ConstBundlePrimIterator::operator*() noexcept
{
    return *(m_bundlePrims->getConstPrim(m_primIndex));
}

inline ConstBundlePrim* ConstBundlePrimIterator::operator->() noexcept
{
    return m_bundlePrims->getConstPrim(m_primIndex);
}

inline ConstBundlePrimIterator& ConstBundlePrimIterator::operator++() noexcept
{
    ++m_primIndex;
    return *this;
}

// ====================================================================================================
//
// Const Bundle Primitive Attribute Iterator
//
// ====================================================================================================

inline ConstBundlePrimAttrIterator::ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim,
                                                                ConstBundlePrim::AttrMapIteratorType attrIter) noexcept
    : m_bundlePrim(&bundlePrim), m_attrIter(attrIter)
{
}

inline bool ConstBundlePrimAttrIterator::operator==(ConstBundlePrimAttrIterator const& that) const noexcept
{
    return m_bundlePrim == that.m_bundlePrim && m_attrIter == that.m_attrIter;
}

inline bool ConstBundlePrimAttrIterator::operator!=(ConstBundlePrimAttrIterator const& that) const noexcept
{
    return !(*this == that);
}

inline BundleAttrib const& ConstBundlePrimAttrIterator::operator*() const noexcept
{
    CARB_ASSERT(m_attrIter->second);
    return *(m_attrIter->second);
}

inline BundleAttrib const* ConstBundlePrimAttrIterator::operator->() const noexcept
{
    CARB_ASSERT(m_attrIter->second);
    return m_attrIter->second.get();
}

inline ConstBundlePrimAttrIterator& ConstBundlePrimAttrIterator::operator++() noexcept
{
    ++m_attrIter;
    return *this;
}

} // namespace io
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/io/BundleAttribImpl.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "BundleAttrib.h"

#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/CppWrappers.h>
#include <omni/math/linalg/vec.h>
#include <omni/math/linalg/matrix.h>
#include <omni/math/linalg/quat.h>
#include <omni/math/linalg/half.h>

namespace omni
{
namespace math
{
namespace linalg
{

// Compile-time mapping from a C++ element type to the omni::graph::core::BaseDataType
// enumerant describing it.  The unspecialized template reports eUnknown, so an
// unsupported type is detectable (e.g. by BundleAttrib::getData) instead of being
// silently mis-typed.
template <typename T>
struct TypeToBaseType
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUnknown;
};

// --- scalar specializations ---
template <>
struct TypeToBaseType<half>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eHalf;
};
template <>
struct TypeToBaseType<float>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eFloat;
};
template <>
struct TypeToBaseType<double>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eDouble;
};
template <>
struct TypeToBaseType<bool>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eBool;
};
template <>
struct TypeToBaseType<unsigned char>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUChar;
};
template <>
struct TypeToBaseType<int>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt;
};
template <>
struct TypeToBaseType<int64_t>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt64;
};
template <>
struct TypeToBaseType<unsigned int>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt;
};
template <>
struct TypeToBaseType<uint64_t>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt64;
};
template <>
struct TypeToBaseType<carb::flatcache::Token>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eToken;
};

// --- compound (vector / quaternion / matrix) specializations ---
// These delegate to the element type's mapping: the base type of a vec3<float>
// is eFloat; the component count is tracked separately by TypeToComponentCount.
template <typename T, size_t N>
struct TypeToBaseType<base_vec<T, N>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec2<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec3<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec4<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<quat<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T, size_t N>
struct TypeToBaseType<base_matrix<T, N>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix2<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix3<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix4<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};

// Compile-time count of scalar components in a type: 1 for scalars, N for
// vectors/quaternions, N*N for square matrices.  Used together with
// TypeToBaseType to validate reinterpret-casts in BundleAttrib::getData.
template <typename T>
struct TypeToComponentCount
{
    constexpr static size_t count = 1;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_vec<T, N>>
{
    constexpr static size_t count = N;
};
template <typename T>
struct TypeToComponentCount<vec2<T>>
{
    constexpr static size_t count = 2;
};
template <typename T>
struct TypeToComponentCount<vec3<T>>
{
    constexpr static size_t count = 3;
};
template <typename T>
struct TypeToComponentCount<vec4<T>>
{
    constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<quat<T>>
{
    constexpr static size_t count = 4;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_matrix<T, N>>
{
    constexpr static size_t count = N * N;
};
template <typename T>
struct TypeToComponentCount<matrix2<T>>
{
    constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<matrix3<T>>
{
    constexpr static size_t count = 9;
};
template <typename T>
struct TypeToComponentCount<matrix4<T>>
{
    constexpr static size_t count = 16;
};

} // namespace linalg
} // namespace math
} // namespace omni

namespace omni
{
namespace graph
{
namespace io
{
namespace detail
{

//
// Non-owning string buffer with compile time size evaluation
//
// NOTE(review): uses std::size_t but <cstddef> is not included directly here;
// presumably it arrives transitively through the includes above — confirm.
class StringBuffer
{
public:
    using value_type = char const*;
    using size_type = std::size_t;
    using const_iterator = char const*;

    // Wrap an externally-owned character buffer of known size.
    constexpr StringBuffer(value_type data, size_type size) noexcept : m_data{ data }, m_size{ size }
    {
    }

    // Wrap a NUL-terminated string; the length is computed at compile time.
    constexpr explicit StringBuffer(value_type data) noexcept : StringBuffer{ data, len(data) }
    {
    }

    constexpr StringBuffer(StringBuffer const&) = default;
    constexpr StringBuffer(StringBuffer&&) = default;

    constexpr value_type data() const noexcept
    {
        return m_data;
    }
    constexpr size_type size() const noexcept
    {
        return m_size;
    }
    constexpr const_iterator begin() const noexcept
    {
        return m_data;
    }
    constexpr const_iterator end() const noexcept
    {
        return m_data + m_size;
    }

private:
    // constexpr equivalent of strlen.
    constexpr size_type len(value_type start) const noexcept
    {
        value_type end = start;
        for (; *end != '\0'; ++end)
            ;
        return end - start;
    }

    value_type m_data;
    size_type m_size;
};

// Helper class to keep name and type together.
struct AttrDefinition
{
    // Build from an already-resolved token.
    AttrDefinition(StringBuffer _name, omni::graph::core::Type _type, omni::graph::core::NameToken _token) noexcept
        : name{ _name }, type{ _type }, token{ _token }
    {
    }

    // Build from raw text, resolving the token through the IToken interface.
    AttrDefinition(carb::flatcache::IToken const* iToken, char const* _text, omni::graph::core::Type _type) noexcept
        : AttrDefinition{ StringBuffer{ _text }, _type, iToken->getHandle(_text) }
    {
    }

    AttrDefinition(AttrDefinition const&) = delete;
    AttrDefinition(AttrDefinition&&) = delete;
    AttrDefinition& operator=(AttrDefinition const&) = delete;
    AttrDefinition& operator=(AttrDefinition&&) = delete;

    StringBuffer name; // Name and size of the attribute
    omni::graph::core::Type type; // Type of the attribute
    omni::graph::core::NameToken token; // Token representation of the name
};

// Attribute Level Definitions:
// Each getter lazily constructs (on first call) a function-local singleton
// describing one well-known metadata attribute: its name, token and data type.

// Per-attribute "interpolation" metadata (single token).
inline AttrDefinition const& getAttrInterpolationDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "interpolation", Type{ BaseDataType::eToken, 1, 0 } };
    return d;
}

// Per-attribute "dirtyID" metadata (single uint64 change counter).
inline AttrDefinition const& getAttrDirtyIdDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "dirtyID", Type{ BaseDataType::eUInt64, 1, 0 } };
    return d;
}

// Per-attribute "source" metadata (single uchar: attribute vs relationship).
inline AttrDefinition const& getAttrSourceDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "source", Type{ BaseDataType::eUChar, 1, 0 } };
    return d;
}

// Primitive Level Definitions:

// "primPaths" bundle metadata (token array, one entry per prim).
inline AttrDefinition const& getPrimPathsDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primPaths", Type{ BaseDataType::eToken, 1, 1 } };
    return d;
}

// "primTypes" bundle metadata (token array, one entry per prim).
inline AttrDefinition const& getPrimTypesDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primTypes", Type{ BaseDataType::eToken, 1, 1 } };
    return d;
}

// "primIndex" metadata (single uint64).
inline AttrDefinition const& getPrimIndexDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primIndex", Type{ BaseDataType::eUInt64, 1, 0 } };
    return d;
}

// "primDirtyIDs" bundle metadata (uint64 array, one entry per prim).
inline AttrDefinition const& getPrimDirtyIDsDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primDirtyIDs", Type{ BaseDataType::eUInt64, 1, 1 } };
    return d;
}

// Bundle Level Definitions

// "bundleDirtyID" bundle metadata (single uint64 change counter for the whole bundle).
inline AttrDefinition const& getBundleDirtyIDDefinition() noexcept
{
    using namespace carb::flatcache;
    using namespace omni::graph::core;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "bundleDirtyID", Type{ BaseDataType::eUInt64, 1, 0 } };
    return d;
}

// Constant types.
// Relationships are stored as token arrays.
constexpr omni::graph::core::Type s_relationshipType{ omni::graph::core::BaseDataType::eToken, 1, 1 };

} // namespace detail

// True when this attribute is flagged as a relationship AND has the token-array
// type used for relationship data.
inline bool BundleAttrib::isRelationshipData() const noexcept
{
    return m_source == Source::Relationship && type() == detail::s_relationshipType;
}

// Write the interpolation metadata for this attribute, creating the metadata
// attribute on demand.  Passing kUninitializedToken clears it instead.
// Returns false only when the owning bundle is not writable.
inline bool BundleAttrib::setInterpolation(omni::graph::core::NameToken interpolation) noexcept
{
    using namespace omni::graph::core;
    if (m_interpolation == interpolation)
        return true;
    if (interpolation == carb::flatcache::kUninitializedToken)
    {
        clearInterpolation();
        return true;
    }
    if (IBundle2* bundle = getBundlePtr())
    {
        auto& interpDef = detail::getAttrInterpolationDefinition();
        AttributeDataHandle interpolationAttr = bundle->getAttributeMetadataByName(m_name, interpDef.token);
        if (!interpolationAttr.isValid())
        {
            interpolationAttr = bundle->createAttributeMetadata(m_name, interpDef.token, interpDef.type);
        }
        m_interpolation = interpolation;
        auto context = bundle->getContext();
        *getDataW<NameToken>(context, interpolationAttr) = interpolation;
        return true;
    }
    return false;
}

// Persist a new dirty ID in the attribute's metadata (created on demand) and
// cache it.  Returns false only when the owning bundle is not writable.
inline bool BundleAttrib::setDirtyID(DirtyIDType dirtyID) noexcept
{
    using namespace omni::graph::core;
    if (m_dirtyID == dirtyID)
        return true;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto& dirtyIdDef = detail::getAttrDirtyIdDefinition();
        AttributeDataHandle dirtyIDAttr = bundle->getAttributeMetadataByName(m_name, dirtyIdDef.token);
        if (!dirtyIDAttr.isValid())
        {
            dirtyIDAttr = bundle->createAttributeMetadata(m_name, dirtyIdDef.token, dirtyIdDef.type);
        }
        m_dirtyID = dirtyID;
        auto context = bundle->getContext();
        *omni::graph::core::getDataW<DirtyIDType>(context, dirtyIDAttr) = dirtyID;
        return true;
    }
    return false;
}

// Persist the attribute/relationship source flag in metadata (created on
// demand) and cache it.  Returns false only when the bundle is not writable.
inline bool BundleAttrib::setSource(Source source) noexcept
{
    using namespace omni::graph::core;
    if (m_source == source)
        return true;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto& sourceDef = detail::getAttrSourceDefinition();
        AttributeDataHandle sourceAttr = bundle->getAttributeMetadataByName(m_name, sourceDef.token);
        if (!sourceAttr.isValid())
        {
            sourceAttr = bundle->createAttributeMetadata(m_name, sourceDef.token, sourceDef.type);
        }
        m_source = source;
        auto context = bundle->getContext();
        *omni::graph::core::getDataW<SourceType>(context, sourceAttr) = static_cast<SourceType>(source);
        return true;
    }
    return false;
}

// Copy data, type and the interpolation/dirtyID/source metadata from another
// attribute.  A matching dirty ID is treated as "already identical" and skips
// the copy entirely.
inline void BundleAttrib::copyContentsFrom(BundleAttrib const& sourceAttr) noexcept
{
    using namespace omni::graph::core;
    if (m_dirtyID == sourceAttr.m_dirtyID)
        return;
    IBundle2* dstBundle = getBundlePtr();
    IConstBundle2* srcBundle = sourceAttr.getConstBundlePtr();
    if (!dstBundle)
    {
        return;
    }
    auto context = dstBundle->getContext();
    // Copy Attribute
    AttributeDataHandle dstAttrHandle = dstBundle->getAttributeByName(m_name);
    ConstAttributeDataHandle srcAttrHandle = srcBundle->getConstAttributeByName(sourceAttr.m_name);
    // Ensure that copyData updated the type correctly, if needed.
// (continuation of BundleAttrib::copyContentsFrom)
    // NOTE(review): this assert runs BEFORE copyData and checks the destination
    // against the (old) cached m_type, while the comment above suggests it
    // should validate the post-copy type — confirm intended ordering.
    CARB_ASSERT(context.iAttributeData->getType(context, dstAttrHandle) == Type(m_type));
    context.iAttributeData->copyData(dstAttrHandle, context, srcAttrHandle);
    // Copy the cached type
    m_type = sourceAttr.m_type;
    // Copy the interpolation (does nothing if the same; clears interpolation if none on sourceAttr)
    setInterpolation(sourceAttr.interpolation());
    // Copy the dirty ID
    setDirtyID(sourceAttr.m_dirtyID);
    // Copy source
    setSource(sourceAttr.m_source);
}

// Remove the interpolation metadata attribute and reset the cached value.
inline void BundleAttrib::clearInterpolation() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext(); // NOTE(review): unused local — confirm it can be removed
        auto& interpDef = detail::getAttrInterpolationDefinition();
        bundle->removeAttributeMetadata(m_name, interpDef.token);
        m_interpolation = carb::flatcache::kUninitializedToken;
    }
}

// Owning prim (read-only view); may be nullptr for a default-constructed attribute.
inline ConstBundlePrim* BundleAttrib::getBundlePrim() const noexcept
{
    return m_bundlePrim;
}

// Remove the source metadata attribute and reset the cached value to Attribute.
inline void BundleAttrib::clearSource() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext(); // NOTE(review): unused local — confirm it can be removed
        auto& sourceDef = detail::getAttrSourceDefinition();
        bundle->removeAttributeMetadata(m_name, sourceDef.token);
        m_source = BundleAttribSource::Attribute;
    }
}

// --- simple cached-state accessors ---

inline omni::graph::core::NameToken BundleAttrib::name() const noexcept
{
    return m_name;
}

inline omni::graph::core::NameToken BundleAttrib::interpolation() const noexcept
{
    return m_interpolation;
}

inline DirtyIDType BundleAttrib::dirtyID() const noexcept
{
    return m_dirtyID;
}

inline omni::graph::core::Type BundleAttrib::type() const noexcept
{
    return omni::graph::core::Type(m_type);
}

// True when the attribute holds an array (arrayDepth of exactly 1; deeper
// nesting is not supported).
inline bool BundleAttrib::isArray() const noexcept
{
    omni::graph::core::Type type{ carb::flatcache::TypeC{ m_type } };
    CARB_ASSERT(type.arrayDepth < 2);
    return (type.arrayDepth != 0);
}

inline BundleAttrib::Source BundleAttrib::source() const noexcept
{
    return m_source;
}

inline bool BundleAttrib::isAttributeData() const noexcept
{
    return m_source == Source::Attribute;
}

// NOTE(review): currently identical to name(); presumably kept for interface
// compatibility — confirm.
inline omni::graph::core::NameToken BundleAttrib::prefixedName() const noexcept
{
    return m_name;
}

// Element count: 1 for non-array attributes, otherwise the array length
// queried from the attribute data.
inline size_t BundleAttrib::size() const noexcept
{
    using namespace omni::graph::core;
    if (!isArray())
    {
        return 1;
    }
    IConstBundle2* bundle = getConstBundlePtr();
    auto context = bundle->getContext();
    ConstAttributeDataHandle attr = bundle->getConstAttributeByName(m_name);
    size_t count;
    context.iAttributeData->getElementCount(&count, context, &attr, 1);
    return count;
}

// Resize an array attribute (asserts isArray()); no-op when the bundle is
// not writable.
inline void BundleAttrib::resize(size_t arrayElementCount) noexcept
{
    using namespace omni::graph::core;
    CARB_ASSERT(isArray());
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext();
        AttributeDataHandle attr = bundle->getAttributeByName(m_name);
        context.iAttributeData->setElementCount(context, attr, arrayElementCount);
    }
}

// Raw writable pointer to the attribute payload.  Non-array data is returned
// directly; array data is stored behind one extra indirection, hence the
// dereference.  Returns nullptr when the bundle is not writable.
inline void* BundleAttrib::getDataInternal() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext();
        AttributeDataHandle attr = bundle->getAttributeByName(m_name);
        if (Type(m_type).arrayDepth == 0)
        {
            return getDataW<void>(context, attr);
        }
        return *getDataW<void*>(context, attr);
    }
    return nullptr;
}

// Read-only counterpart of getDataInternal().
inline void const* BundleAttrib::getDataInternal() const noexcept
{
    using namespace omni::graph::core;
    IConstBundle2* constBundle = getConstBundlePtr();
    GraphContextObj context = constBundle->getContext();
    ConstAttributeDataHandle attr = constBundle->getConstAttributeByName(m_name);
    if (Type(m_type).arrayDepth == 0)
    {
        return getDataR<void const>(context, attr);
    }
    return *getDataR<void const*>(context, attr);
}

// Writable handle to this attribute; invalid when the bundle is read-only.
inline omni::graph::core::AttributeDataHandle BundleAttrib::handle() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        return AttributeDataHandle(AttrKey(bundle->getHandle(), m_name.token));
    }
    return AttributeDataHandle{ AttributeDataHandle::invalidValue() };
}

// Read-only handle to this attribute; invalid when there is no bundle.
inline omni::graph::core::ConstAttributeDataHandle BundleAttrib::handle() const noexcept
{
    using namespace omni::graph::core;
    if (IConstBundle2* bundle = getConstBundlePtr())
    {
        return ConstAttributeDataHandle{ AttrKey(bundle->getConstHandle(), m_name.token) };
    }
    return ConstAttributeDataHandle{ ConstAttributeDataHandle::invalidValue() };
}

// Typed writable pointer, or nullptr when T is not layout-compatible with the
// stored type (same base type, and either T is scalar or the component counts
// match).
template <typename T>
T* BundleAttrib::getData() noexcept
{
    // It must be valid to request a pointer to type T.
    // requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    bool const isSameBaseType = TypeToBaseType<T>::baseType == Type(m_type).baseType;
    bool const isSameCount = TypeToComponentCount<T>::count == Type(m_type).componentCount;
    bool const isValidCast = isSameBaseType && (TypeToComponentCount<T>::count == 1 || isSameCount);
    return isValidCast ? reinterpret_cast<T*>(getDataInternal()) : nullptr;
}

// Typed read-only pointer with the same compatibility rule as the non-const
// overload.
template <typename T>
T const* BundleAttrib::getData() const noexcept
{
    // It must be valid to request a pointer to type T.
    // requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    bool const isValidCast = TypeToBaseType<T>::baseType == Type(m_type).baseType &&
        (TypeToComponentCount<T>::count == 1 || TypeToComponentCount<T>::count == Type(m_type).componentCount);
    return isValidCast ? reinterpret_cast<T const*>(getDataInternal()) : nullptr;
}

// Explicitly-named const accessor; identical to the const getData overload.
template <typename T>
T const* BundleAttrib::getConstData() const noexcept
{
    return getData<T>();
}

// Read a non-array value by copy.  Unlike getData, T must match the stored
// component count exactly.
template <typename T>
T BundleAttrib::get() const noexcept
{
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    // TODO: Figure out how to support array attributes here.
    CARB_ASSERT(Type(m_type).arrayDepth == 0);
    // This has stronger requirements than getData, since get<float>() isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == Type(m_type).componentCount);
    return *getConstData<T>();
}

// Write a non-array value.  T must match the stored component count exactly.
template <typename T>
void BundleAttrib::set(T const& value) noexcept
{
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    CARB_ASSERT(Type(m_type).arrayDepth == 0);
    // This has stronger requirements than getData, since set(1.0f) isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == Type(m_type).componentCount);
    *getData<T>() = value;
}

// Resize an array attribute to elementCount and copy values in element-by-element.
template <typename T>
void BundleAttrib::set(T const* values, size_t elementCount) noexcept
{
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    CARB_ASSERT(Type(m_type).arrayDepth == 1);
    // This has stronger requirements than getData, since set(float const*,size_t) isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == Type(m_type).componentCount);
    resize(elementCount);
    if (elementCount > 0)
    {
        T* p = getData<T>();
        for (size_t i = 0; i < elementCount; ++i)
        {
            p[i] = values[i];
        }
    }
}

// Remove the underlying attribute from the bundle and reset all cached state
// to "empty" defaults.
inline void BundleAttrib::clearContents() noexcept
{
    using namespace omni::graph::core;
    /**
     * Remove attribute. Its metadata will be removed automatically together with it.
     */
    IBundle2* bundle = getBundlePtr();
    bundle->removeAttributeByName(m_name);
    /**
     * Invalidate data.
     */
    m_source = BundleAttribSource::Attribute;
    m_dirtyID = kInvalidDirtyID;
    m_interpolation = carb::flatcache::kUninitializedToken;
    m_type = carb::flatcache::kUnknownType;
    m_name = carb::flatcache::kUninitializedToken;
    m_bundlePrim = nullptr;
}

} // namespace io
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/io/IDirtyID.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #pragma message("omni/graph/io/IDirtyID.h is deprecated. Include omni/graph/core/IDirtyID.h instead.") #define OMNI_GRAPH_IO_DEPRECATED [[deprecated("Use the counterpart in the omni::graph::core namespace instead.")]] #include <carb/Interface.h> #include <stddef.h> #include <stdint.h> namespace omni { namespace graph { namespace io { using DirtyIDType OMNI_GRAPH_IO_DEPRECATED = uint64_t; OMNI_GRAPH_IO_DEPRECATED static constexpr DirtyIDType kInvalidDirtyID = ~DirtyIDType(0); OMNI_GRAPH_IO_DEPRECATED static constexpr size_t kFunctionSize = sizeof(void (*)()); struct OMNI_GRAPH_IO_DEPRECATED IDirtyID { CARB_PLUGIN_INTERFACE("omni::graph::io::IDirtyID", 1, 0); /** * @return The next dirty ID, atomically incrementing the counter inside. */ DirtyIDType(CARB_ABI* getNextDirtyID)() = nullptr; }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle static_assert(offsetof(IDirtyID, getNextDirtyID) == 0 * kFunctionSize, "New IDirtyID ABI methods must be added at the end"); // DEPRECATED, BundlePrims class caches iDirtyID interface internally // This must be instantiated in every extension that uses it, similar to Token::iToken. // The exact location doesn't matter too much, though the PluginInterface.cpp is probably the best option. 
OMNI_GRAPH_IO_DEPRECATED extern const IDirtyID* iDirtyID; template <typename PREVIOUS_T> OMNI_GRAPH_IO_DEPRECATED inline bool checkDirtyIDChanged(PREVIOUS_T& previousID, DirtyIDType newID) { if (newID != previousID) { previousID = newID; return true; } // Equal, but if they're invalid, still treat them as changed return (newID == kInvalidDirtyID); } } } }
omniverse-code/kit/include/omni/graph/io/BundlePrimsImpl.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "BundlePrims.h"

namespace omni
{
namespace graph
{
namespace io
{

// ====================================================================================================
//
// Bundle Attribute
//
// Because entire Bundle Prims is inlined, we have to put definition of those functions
// after declaration of ConstBundlePrim and ConstBundlePrims.
// ====================================================================================================

// Wrap an EXISTING attribute of `prim` named `name`.  If no such attribute
// exists, the instance is left default-initialized (m_bundlePrim == nullptr),
// which the creating constructor below uses as its "does not exist" signal.
inline BundleAttrib::BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept
{
    using namespace omni::graph::core;

    // Get attribute handle and attribute properties
    IConstBundle2* bundle = prim.getConstBundlePtr();
    ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(name);
    if (!attributeHandle.isValid())
    {
        return;
    }
    GraphContextObj const& context = prim.getConstBundlePrims()->context();
    m_bundlePrim = &prim;
    m_name = name;
    m_type = carb::flatcache::TypeC(context.iAttributeData->getType(context, attributeHandle));

    // Read attribute properties.
    // Each piece of metadata (interpolation / dirtyID / source) is optional;
    // cached members keep their defaults when the metadata is absent.
    ConstAttributeDataHandle propertyAttributeHandle;
    propertyAttributeHandle = bundle->getConstAttributeMetadataByName(name, detail::getAttrInterpolationDefinition().token);
    if (propertyAttributeHandle.isValid())
    {
        m_interpolation = *getDataR<NameToken>(context, propertyAttributeHandle);
    }
    propertyAttributeHandle = bundle->getConstAttributeMetadataByName(name, detail::getAttrDirtyIdDefinition().token);
    if (propertyAttributeHandle.isValid())
    {
        m_dirtyID = *getDataR<DirtyIDType>(context, propertyAttributeHandle);
    }
    propertyAttributeHandle = bundle->getConstAttributeMetadataByName(name, detail::getAttrSourceDefinition().token);
    if (propertyAttributeHandle.isValid())
    {
        m_source = static_cast<Source>(*getDataR<SourceType>(context, propertyAttributeHandle));
    }
}

// Wrap-or-create: first tries to wrap an existing attribute (delegating
// constructor); if none exists, creates one with the given type, array size
// and source, and stamps a fresh dirty ID.
inline BundleAttrib::BundleAttrib(BundlePrim& prim,
                                  omni::graph::core::NameToken name,
                                  omni::graph::core::Type type,
                                  size_t arrayElementCount,
                                  Source source) noexcept
    : BundleAttrib{ prim, name }
{
    using namespace omni::graph::core;

    // Attribute exists!
    if (m_bundlePrim)
    {
        return;
    }

    // Attribute does not exist.
    IBundle2* bundle = prim.getBundlePtr();
    GraphContextObj const& context = prim.getConstBundlePrims()->context();
    auto handle = bundle->createAttribute(name, type, arrayElementCount);
    omni::graph::core::getDataW<void*>(context, handle); // remove after OM-50059 is merged.
    m_bundlePrim = &prim;
    m_name = name;
    m_type = carb::flatcache::TypeC(type);
    // Interpolation is optional.
    // DirtyId is a unique id that tracks if attribute has changed.
// (continuation of the creating BundleAttrib constructor)
    setDirtyID(prim.getConstBundlePrims()->getNextDirtyID());
    // Source of the attribute identifies "data" or "relationship"
    setSource(source);
}

// Mutable access to the owning prim; nullptr when the underlying bundle
// cannot be cast to a writable IBundle2.
inline BundlePrim* BundleAttrib::getBundlePrim() noexcept
{
    IConstBundle2* constBundle = getConstBundlePtr();
    if (auto bundle = omni::cast<IBundle2>(constBundle))
    {
        return static_cast<BundlePrim*>(m_bundlePrim);
    }
    return nullptr;
}

// Read-only bundle of the owning prim.
// NOTE(review): dereferences getBundlePrim() without a null check — presumably
// only called on attributes that belong to a prim; confirm.
inline omni::graph::core::IConstBundle2* BundleAttrib::getConstBundlePtr() const noexcept
{
    ConstBundlePrim* bundlePrim = getBundlePrim();
    return bundlePrim->getConstBundlePtr();
}

// Writable bundle of the owning prim (same null-check caveat as above).
inline omni::graph::core::IBundle2* BundleAttrib::getBundlePtr() noexcept
{
    BundlePrim* bundlePrim = getBundlePrim();
    return bundlePrim->getBundlePtr();
}

// Bump the owning prim's dirty ID, then stamp this attribute with a fresh one.
inline bool BundleAttrib::bumpDirtyID() noexcept
{
    BundlePrim* bundlePrim = getBundlePrim();
    bundlePrim->bumpDirtyID();
    DirtyIDType nextId = bundlePrim->getConstBundlePrims()->getNextDirtyID();
    return setDirtyID(nextId);
}

// ====================================================================================================
//
// Bundle Primitive
//
// ====================================================================================================

inline BundlePrim::BundlePrim(BundlePrims& bundlePrims,
                              BundlePrimIndex primIndex,
                              omni::core::ObjectPtr<omni::graph::core::IBundle2> bundle)
    : ConstBundlePrim{ bundlePrims, primIndex, std::move(bundle) }
{
}

// Store this prim's path in the parent's primPaths table (created on demand).
inline void BundlePrim::setPath(omni::graph::core::NameToken path) noexcept
{
    using namespace omni::graph::core;
    BundlePrims* bundlePrims = getBundlePrims();
    NameToken* primPaths = bundlePrims->addPrimPathsIfMissing();
    primPaths[primIndex()] = path;
}

// Store this prim's type in the parent's primTypes table (created on demand).
inline void BundlePrim::setType(omni::graph::core::NameToken type) noexcept
{
    using namespace omni::graph::core;
    BundlePrims* bundlePrims = getBundlePrims();
    NameToken* primTypes = bundlePrims->addPrimTypesIfMissing();
    primTypes[primIndex()] = type;
}

// Write this prim's dirty ID into the parent's per-prim dirty-ID table.
inline void BundlePrim::setDirtyID(DirtyIDType dirtyID) noexcept
{
    auto primDirtyIDs = getBundlePrims()->getPrimDirtyIDs();
    primDirtyIDs[m_primIndex] = dirtyID;
}

// Mark this prim changed.  The "common attributes" pseudo-prim maps to the
// bundle-level dirty ID; regular prims get a fresh per-prim ID.
inline void BundlePrim::bumpDirtyID() noexcept
{
    if (isCommonAttrs())
    {
        BundlePrims* bundlePrims = getBundlePrims();
        bundlePrims->bumpBundleDirtyID();
    }
    else
    {
        DirtyIDType nextId = getBundlePrims()->getNextDirtyID();
        setDirtyID(nextId);
    }
}

// Create (or replace) an attribute on this prim.  An existing attribute with
// the same name is destroyed first, so the returned attribute always has the
// requested type/size/source.
inline BundleAttrib* BundlePrim::addAttr(omni::graph::core::NameToken attrName,
                                         omni::graph::core::Type type,
                                         size_t arrayElementCount,
                                         BundleAttribSource source) noexcept
{
    using namespace omni::graph::core;
    auto& attrs = getAttributes();

    // Erase existing attribute.
    auto it = attrs.find(attrName);
    if (it != attrs.end())
    {
        it->second->clearContents();
        attrs.erase(it);
    }
    auto attr = new BundleAttrib{ *this, attrName, type, arrayElementCount, source };
    attrs.emplace(attrName, attr);
    return attr;
}

// Convenience: create a relationship attribute (token array of targetCount).
inline BundleAttrib* BundlePrim::addRelationship(omni::graph::core::NameToken name, size_t targetCount) noexcept
{
    return addAttr(name, detail::s_relationshipType, targetCount, BundleAttribSource::Relationship);
}

// Batch-create attributes.  Attributes that already exist with matching
// type/size/source are left untouched; mismatched ones are recreated.
inline bool BundlePrim::addAttrs(std::vector<BundlePrim::AddAttrInfo> const& attrList) noexcept
{
    using namespace omni::graph::core;
    IBundle2* bundle = getBundlePtr(); // NOTE(review): appears unused in this function — confirm
    auto& attrs = getAttributes();

    // Remove attributes that exists but properties are different.
    std::vector<BundlePrim::AddAttrInfo> attrToCreate;
    attrToCreate.reserve(attrList.size());
    for (auto const& newAttr : attrList)
    {
        auto it = attrs.find(newAttr.attrName);
        if (it == attrs.end())
        {
            attrToCreate.push_back(newAttr);
            continue;
        }
        BundleAttrib const* attr = it->second.get();
        if (attr->type() != newAttr.type || attr->size() != newAttr.arrayElementCount ||
            attr->source() != newAttr.source)
        {
            it->second->clearContents();
            attrs.erase(it);
            attrToCreate.push_back(newAttr);
        }
        // attribute is the same nothing to do.
    }

    // Create attributes that require instantiation.
    for (auto const& tmp : attrToCreate)
    {
        auto attr = new BundleAttrib{ *this, tmp.attrName, tmp.type, tmp.arrayElementCount, tmp.source };
        attrs.emplace(tmp.attrName, attr);
    }
    return true;
}

// Destroy one attribute (underlying data + map entry); no-op if absent.
inline void BundlePrim::removeAttr(omni::graph::core::NameToken attrName) noexcept
{
    using namespace omni::graph::core;
    // Remove attribute from internal member.
    auto& attrs = getAttributes();
    auto it = attrs.find(attrName);
    if (it != attrs.end())
    {
        it->second->clearContents();
        attrs.erase(it);
    }
}

// Destroy all attributes of this prim.
inline void BundlePrim::clearContents() noexcept
{
    using namespace omni::graph::core;
    auto& attrs = getAttributes();
    for (auto& attr : attrs)
    {
        attr.second->clearContents();
    }
    getAttributes().clear();
}

// Const-source overload; forwards to the mutable-reference overload below.
inline void BundlePrim::copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource /* = true*/) noexcept
{
    return copyContentsFrom(const_cast<ConstBundlePrim&>(source), removeAttrsNotInSource);
}

// Make this prim's attribute set mirror `source`, using dirty IDs to skip
// attributes that are already identical.  Optionally removes attributes not
// present on the source.
inline void BundlePrim::copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource /* = true*/) noexcept
{
    using namespace omni::graph::core;

    // Nothing to do if they're already equal.
    if (dirtyID() == source.dirtyID())
        return;

    BundlePrims* bundlePrims = getBundlePrims(); // NOTE(review): appears unused — confirm

    // Add/set any attributes from source, if the dirty IDs are different, being sure to copy the dirty IDs.
    // first we batch add them, then we copy the contents
    std::vector<BundlePrim::AddAttrInfo> attrsToAdd;
    attrsToAdd.reserve(source.attrCount());
    for (auto const& sourceAttr : source)
    {
        NameToken name = sourceAttr.name();
        // NOTE: Request a const attribute, to avoid bumping its dirty ID.
        BundleAttrib const* constDestAttr = getConstAttr(name);
        if (constDestAttr != nullptr && constDestAttr->dirtyID() == sourceAttr.dirtyID())
        {
            continue;
        }
        if (constDestAttr == nullptr)
        {
            // Created with element count 0; copyContentsFrom below resizes as needed.
            attrsToAdd.push_back(
                { sourceAttr.m_name, Type(carb::flatcache::TypeC{ sourceAttr.m_type }), 0, sourceAttr.m_source });
        }
    }

    // add the attributes
    addAttrs(attrsToAdd);

    // copy the data
    for (auto const& sourceAttr : source)
    {
        NameToken name = sourceAttr.name();
        // NOTE: Request a const attribute, to avoid bumping its dirty ID.
        BundleAttrib const* constDestAttr = getConstAttr(name);
        CARB_ASSERT(constDestAttr != nullptr);
        if (constDestAttr == nullptr || constDestAttr->dirtyID() == sourceAttr.dirtyID())
        {
            continue;
        }
        const_cast<BundleAttrib*>(constDestAttr)->copyContentsFrom(sourceAttr);
    }
    CARB_ASSERT(attrCount() >= source.attrCount());

    // If there are more attributes in this than in source, remove any that aren't in source.
    auto& attrMap = getAttributes();
    if (attrCount() > source.attrCount() && removeAttrsNotInSource)
    {
        std::vector<NameToken> attrsToRemove; // NOTE(review): unused — confirm it can be removed
        for (auto it = attrMap.begin(); it != attrMap.end();)
        {
            if (source.getConstAttr(it->second->name()) == nullptr)
            {
                it->second->clearContents();
                it = attrMap.erase(it);
            }
            else
            {
                ++it;
            }
        }
    }
}

// Mutable attribute lookup; bumps the attribute's dirty ID because callers
// are assumed to want to modify it.
inline BundleAttrib* BundlePrim::getAttr(omni::graph::core::NameToken attrName) noexcept
{
    auto& attrs = getAttributes();
    auto it = attrs.find(attrName);
    if (it == attrs.end())
    {
        return nullptr;
    }
    BundleAttrib* attr = it->second.get();
    // TODO: Consider whether it's worth bumping the dirty ID later, when modification occurs.
// (continuation of BundlePrim::getAttr)
    attr->bumpDirtyID();
    return attr;
}

// Handle of this prim's underlying bundle.
inline omni::graph::core::BundleHandle BundlePrim::handle() noexcept
{
    return getBundlePtr()->getHandle();
}

// Owning BundlePrims container; nullptr when the bundle is not writable.
inline BundlePrims* BundlePrim::getBundlePrims() noexcept
{
    omni::graph::core::IBundle2* bundle = getBundlePtr();
    if (bundle)
    {
        ConstBundlePrims* bundlePrims = ConstBundlePrim::getConstBundlePrims();
        return static_cast<BundlePrims*>(bundlePrims);
    }
    return nullptr;
}

// Alias for getBundlePrims(), kept for interface compatibility.
inline BundlePrims* BundlePrim::bundlePrims() noexcept
{
    return getBundlePrims();
}

// --- attribute iteration ---

inline BundlePrimAttrIterator BundlePrim::begin() noexcept
{
    return BundlePrimAttrIterator(*this, getAttributes().begin());
}

inline BundlePrimAttrIterator BundlePrim::end() noexcept
{
    return BundlePrimAttrIterator(*this, getAttributes().end());
}

inline ConstBundlePrimAttrIterator BundlePrim::cbegin() noexcept
{
    return ConstBundlePrim::begin();
}

inline ConstBundlePrimAttrIterator BundlePrim::cend() noexcept
{
    return ConstBundlePrim::end();
}

// Static helper: cast a read-only bundle interface to a writable one
// (nullptr when the bundle is actually read-only).
inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr(omni::graph::core::IConstBundle2* constBundle) noexcept
{
    auto bundle = omni::cast<omni::graph::core::IBundle2>(constBundle);
    return bundle.get();
}

// Writable bundle interface of this prim (nullptr if read-only).
inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr() noexcept
{
    using namespace omni::graph::core;
    IConstBundle2* constBundle = getConstBundlePtr();
    IBundle2* bundle = getBundlePtr(constBundle);
    return bundle;
}

// ====================================================================================================
//
// Bundle Primitives
//
// ====================================================================================================

inline BundlePrims::~BundlePrims() noexcept
{
    detach();
}

// Handle of the top-level bundle; invalid when detached.
inline omni::graph::core::BundleHandle BundlePrims::handle() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        return bundle->getHandle();
    }
    return BundleHandle{ BundleHandle::invalidValue() };
}

// Intentionally empty override: writable prims cache nothing up front.
inline void BundlePrims::ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept
{
}

inline BundlePrims::BundlePrims() : ConstBundlePrims()
{
}

// Construct and immediately attach to a bundle.
inline BundlePrims::BundlePrims(omni::graph::core::GraphContextObj const& context,
                                omni::graph::core::BundleHandle const& bundle)
    : BundlePrims()
{
    attach(context, bundle);
}

// Attach to a bundle, creating or loading the bundle-level bookkeeping
// metadata (bundleDirtyID, primDirtyIDs) and caching handles for the
// optional primPaths/primTypes/primIndex tables.
inline void BundlePrims::attach(omni::graph::core::GraphContextObj const& context,
                                omni::graph::core::BundleHandle const& bundleHandle) noexcept
{
    using namespace omni::graph::core;
    ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>();
    omni::core::ObjectPtr<IBundleFactory> factoryPtr = computeGraph->getBundleFactoryInterfacePtr();
    omni::core::ObjectPtr<IBundle2> bundlePtr = factoryPtr->getBundle(context, bundleHandle);
    ConstBundlePrims::attach(std::move(factoryPtr), std::move(bundlePtr));
    IBundle2* bundle = getBundlePtr();

    //
    // Bundle Level Attributes
    //
    auto& bundleDirtyIDDef = detail::getBundleDirtyIDDefinition();
    if (!m_bundleDirtyIDAttr.isValid())
    {
        // First attach: create the metadata and stamp a fresh bundle dirty ID.
        m_bundleDirtyIDAttr = bundle->createBundleMetadata(bundleDirtyIDDef.token, bundleDirtyIDDef.type);
        DirtyIDType newBundleDirtyID = getNextDirtyID();
        setBundleDirtyID(newBundleDirtyID);
        *getDataW<DirtyIDType>(context, m_bundleDirtyIDAttr) = newBundleDirtyID;
    }
    else
    {
        // Re-attach: adopt the ID already stored in the bundle.
        setBundleDirtyID(*getDataR<DirtyIDType>(context, m_bundleDirtyIDAttr));
    }

    auto& primDirtyIDsDef = detail::getPrimDirtyIDsDefinition();
    m_primDirtyIDsAttr = bundle->getBundleMetadataByName(primDirtyIDsDef.token);
    if (!m_primDirtyIDsAttr.isValid())
    {
        // NOTE(review): setPrimDirtyIDsData is only called on the creation
        // path, not when the metadata already exists — confirm intended.
        m_primDirtyIDsAttr = bundle->createBundleMetadata(primDirtyIDsDef.token, primDirtyIDsDef.type, 0);
        setPrimDirtyIDsData(*getDataW<DirtyIDType*>(context, m_primDirtyIDsAttr));
    }

    // Optional tables: handles stay invalid until add*IfMissing creates them.
    auto& primPathsDef = detail::getPrimPathsDefinition();
    m_primPathsAttr = bundle->getBundleMetadataByName(primPathsDef.token);
    auto& primTypesDef = detail::getPrimTypesDefinition();
    m_primTypesAttr = bundle->getBundleMetadataByName(primTypesDef.token);
    auto& primIndexDef = detail::getPrimIndexDefinition();
    m_primIndexAttr = bundle->getBundleMetadataByName(primIndexDef.token);
}

// Detach from the bundle: stamp a final bundle dirty ID (the bundle is
// assumed modified, since this is the writable wrapper) and invalidate all
// cached metadata handles.
inline void BundlePrims::detach() noexcept
{
    using omni::graph::core::AttributeDataHandle;
    if (m_bundleDirtyIDAttr.isValid())
    {
        auto& context = this->context();
        // Assume that the bundle has changed, given that this is a non-const bundle wrapper.
        *getDataW<DirtyIDType>(context, m_bundleDirtyIDAttr) = getNextDirtyID();
        m_bundleDirtyIDAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    }
    m_primDirtyIDsAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    m_primIndexAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    m_primTypesAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    m_primPathsAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
    ConstBundlePrims::detach();
}

// Mutable access to a child prim, lazily wrapping the child bundle.  Bumps
// the prim's dirty ID since callers are assumed to want to modify it.
inline BundlePrim* BundlePrims::getPrim(BundlePrimIndex primIndex) noexcept
{
    using namespace omni::graph::core;
    auto createBundlePrim = [this, &bundlePrims = *this, &primIndex]() -> BundlePrim*
    {
        BundleHandle bundleHandle = getBundlePtr()->getChildBundle(primIndex);
        if (!bundleHandle.isValid())
        {
            return nullptr;
        }
        omni::core::ObjectPtr<IBundle2> childBundle = getBundleFactoryPtr()->getBundle(context(), bundleHandle);
        if (!childBundle)
        {
            return nullptr;
        }
        return new BundlePrim{ bundlePrims, primIndex, childBundle };
    };

    // Since we acquire BundlePrim instance through BundlePrims interface,
    // we are required to bump dirty id of this prim because intention is to modify it.
    // NOTE(review): bundlePrim is dereferenced without a null check even though
    // the factory lambda can return nullptr — confirm.
    auto bundlePrim = static_cast<BundlePrim*>(ConstBundlePrims::getConstPrim(primIndex, createBundlePrim));
    bundlePrim->bumpDirtyID();
    return bundlePrim;
}

// Like getPrim, but the returned prim has all of its attributes removed.
inline BundlePrim* BundlePrims::getClearedPrim(BundlePrimIndex primIndex) noexcept
{
    BundlePrim* bundlePrim = getPrim(primIndex);
    if (!bundlePrim)
    {
        return nullptr;
    }
    bundlePrim->clearContents();
    return bundlePrim;
}

// Return the primPaths table, creating it (one uninitialized token per prim)
// if it does not exist yet.
inline omni::graph::core::NameToken* BundlePrims::addPrimPathsIfMissing() noexcept
{
    using namespace omni::graph::core;

    // check if prims are valid size
    if (m_primPathsAttr.isValid())
    {
        return getPrimPaths();
    }

    // Create a new primPaths attribute.
    IBundle2* bundle = getBundlePtr();
    auto& primPathsDef = detail::getPrimPathsDefinition();
    size_t const primCount = getPrimCount();
    m_primPathsAttr = bundle->createBundleMetadata(primPathsDef.token, primPathsDef.type, primCount);
    NameToken* primPaths = *getDataW<NameToken*>(context(), m_primPathsAttr);
    for (size_t i = 0; i < primCount; ++i)
    {
        primPaths[i] = carb::flatcache::kUninitializedToken;
    }
    setPrimPathsData(primPaths);
    return primPaths;
}

// Return the primTypes table, creating it if it does not exist yet
// (mirrors addPrimPathsIfMissing).
inline omni::graph::core::NameToken* BundlePrims::addPrimTypesIfMissing() noexcept
{
    using namespace omni::graph::core;
    if (m_primTypesAttr.isValid())
    {
        return getPrimTypes();
    }

    // Create a new primTypes attribute.
IBundle2* bundle = getBundlePtr(); auto& primTypesDef = detail::getPrimTypesDefinition(); size_t const primCount = getPrimCount(); m_primTypesAttr = bundle->createBundleMetadata(primTypesDef.token, primTypesDef.type, primCount); NameToken* primTypes = *getDataW<NameToken*>(context(), m_primTypesAttr); for(size_t i = 0; i < primCount; ++i) { primTypes[i] = carb::flatcache::kUninitializedToken; } setPrimTypesData(primTypes); return primTypes; } inline BundlePrim& BundlePrims::getCommonAttrs() noexcept { ConstBundlePrim& commonAttributes = ConstBundlePrims::getConstCommonAttrs(); return static_cast<BundlePrim&>(commonAttributes); } inline omni::graph::core::IBundle2* BundlePrims::getBundlePtr() noexcept { using namespace omni::graph::core; auto constBundle = getConstBundlePtr(); auto bundle = omni::cast<IBundle2>(constBundle); return bundle.get(); } inline uint64_t BundlePrims::bumpBundleDirtyID() noexcept { if (m_bundleDirtyIDAttr.isValid()) { auto& context = this->context(); DirtyIDType dirtyID = getNextDirtyID(); *getDataW<DirtyIDType>(context, m_bundleDirtyIDAttr) = dirtyID; return dirtyID; } return kInvalidDirtyID; } inline void BundlePrims::clearContents() noexcept { for (BundlePrimIndex primIndex = getPrimCount(); primIndex != 0;) { --primIndex; removePrim(primIndex); } // Delete all attributes from this bundle. BundlePrim& thisBundle = getCommonAttrs(); thisBundle.clearContents(); // remove internal data IBundle2* bundle = getBundlePtr(); using omni::graph::core::AttributeDataHandle; // Clearing bundle prims internal attributes such as bundleDirtyID and others causes downstream problems. // Initial implementation never cleared those attributes. 
#if 0 auto bundlePrimsInternalAttributes = { std::ref(m_bundleDirtyIDAttr), // std::ref(m_primDirtyIDsAttr), // std::ref(m_primPathsAttr), // std::ref(m_primTypesAttr), // std::ref(m_primIndexAttr), // }; for (auto& internalAttribute : bundlePrimsInternalAttributes) { if (internalAttribute.get().isValid()) { bundle->removeAttribute(internalAttribute.get()); } internalAttribute.get() = AttributeDataHandle{ AttributeDataHandle::invalidValue() }; } #endif } inline bool BundlePrims::removePrim(ConstBundlePrim* prim) noexcept { return removePrim(prim->primIndex()); } inline bool BundlePrims::removePrim(BundlePrimIndex primIndex) noexcept { using namespace omni::graph::core; IBundle2* bundle = getBundlePtr(); auto& context = this->context(); auto& prims = getPrimitives(); // remove children and attributes BundlePrim* childBundlePrim = getPrim(primIndex); if (!childBundlePrim) { return false; } // clear contents and remove bundle from a map childBundlePrim->clearContents(); bundle->removeChildBundle(childBundlePrim->handle()); // If removed primitive is not the last one, // swap last one with removed one and update index. size_t const newPrimCount = prims.size() - 1; if (primIndex != newPrimCount) { prims[primIndex] = std::move(prims[newPrimCount]); prims[primIndex]->m_primIndex = primIndex; } prims.resize(newPrimCount); // Update contents of array attributes if (primIndex != newPrimCount) { auto primDirtyIDs = getPrimDirtyIDs(); primDirtyIDs[primIndex] = primDirtyIDs[newPrimCount]; auto primPaths = getPrimPaths(); if (primPaths != nullptr) { primPaths[primIndex] = primPaths[newPrimCount]; } auto primTypes = getPrimTypes(); if (primTypes != nullptr) { primTypes[primIndex] = primTypes[newPrimCount]; } } // Reduce element counts of underlying attributes and update the pointers in case they've changed. 
context.iAttributeData->setElementCount(context, m_primDirtyIDsAttr, newPrimCount); setPrimDirtyIDsData(*getDataW<DirtyIDType*>(context, m_primDirtyIDsAttr)); if (m_primPathsAttr.isValid()) { context.iAttributeData->setElementCount(context, m_primPathsAttr, newPrimCount); setPrimPathsData(*getDataW<NameToken*>(context, m_primPathsAttr)); } if (m_primTypesAttr.isValid()) { context.iAttributeData->setElementCount(context, m_primTypesAttr, newPrimCount); setPrimTypesData(*getDataW<NameToken*>(context, m_primTypesAttr)); } return true; } inline size_t BundlePrims::addPrims(size_t primCountToAdd) noexcept { using namespace omni::graph::core; size_t oldPrimCount = getConstBundlePtr()->getChildBundleCount(); if (primCountToAdd == 0) { return oldPrimCount; } size_t const newPrimCount = oldPrimCount + primCountToAdd; CARB_ASSERT(newPrimCount > oldPrimCount); IBundle2* bundle = getBundlePtr(); IBundleFactory* factory = getBundleFactoryPtr(); auto& context = this->context(); // Create primIndex that stores last index of the primitive. if(!m_primIndexAttr.isValid()) { auto& primIndexDef = detail::getPrimIndexDefinition(); m_primIndexAttr = bundle->getBundleMetadataByName(primIndexDef.token); if(!m_primIndexAttr.isValid()) { m_primIndexAttr = bundle->createBundleMetadata(primIndexDef.token, primIndexDef.type); *getDataW<uint64_t>(context, m_primIndexAttr) = 0; } } uint64_t* primIndexData = getDataW<uint64_t>(context, m_primIndexAttr); // Create new child bundles. // All children are called 'prim' + primIndex, because IBundle2 interface does not allow sparse hierarchy. // Then child paths are stored as an attribute. 
BundlePrimArray& prims = getPrimitives(); prims.resize(newPrimCount); std::string primPathStr; for (BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex) { primPathStr = "prim" + std::to_string(*primIndexData + primIndex); NameToken primName = context.iToken->getHandle(primPathStr.data()); BundleHandle childHandle = bundle->createChildBundle(primName); auto childBundle = factory->getBundle(context, childHandle); prims[primIndex].reset(new BundlePrim(*this, primIndex, std::move(childBundle))); } // Update primDirtyIDs. if(m_primDirtyIDsAttr.isValid()) { context.iAttributeData->setElementCount(context, m_primDirtyIDsAttr, newPrimCount); } else { auto& primDirtyIDsDef = detail::getPrimDirtyIDsDefinition(); m_primDirtyIDsAttr = bundle->createBundleMetadata(primDirtyIDsDef.token, primDirtyIDsDef.type, newPrimCount); } setPrimDirtyIDsData(*getDataW<DirtyIDType*>(context, m_primDirtyIDsAttr)); auto primDirtyIDs = getPrimDirtyIDs(); for (BundlePrimIndex i = oldPrimCount; i < newPrimCount; ++i) { primDirtyIDs[i] = getNextDirtyID(); } // Update primPaths. if(m_primPathsAttr.isValid()) { context.iAttributeData->setElementCount(context, m_primPathsAttr, newPrimCount); NameToken* primPathsData = *getDataW<NameToken*>(context, m_primPathsAttr); for (BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex) { primPathsData[primIndex] = carb::flatcache::kUninitializedToken; } setPrimPathsData(primPathsData); } // Update primTypes. if(m_primTypesAttr.isValid()) { context.iAttributeData->setElementCount(context, m_primTypesAttr, newPrimCount); NameToken* primTypesData = *getDataW<NameToken*>(context, m_primTypesAttr); for(BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex) { primTypesData[primIndex] = carb::flatcache::kUninitializedToken; } setPrimTypesData(primTypesData); } *primIndexData += primCountToAdd; // Update prim index offset. 
return oldPrimCount; } inline DirtyIDType* BundlePrims::getPrimDirtyIDs() noexcept { return const_cast<DirtyIDType*>(ConstBundlePrims::getPrimDirtyIDs()); } inline omni::graph::core::NameToken* BundlePrims::getPrimTypes() noexcept { return const_cast<omni::graph::core::NameToken*>(ConstBundlePrims::getConstPrimTypes()); } inline omni::graph::core::NameToken* BundlePrims::getPrimPaths() noexcept { return const_cast<omni::graph::core::NameToken*>(ConstBundlePrims::getConstPrimPaths()); } inline BundlePrimIterator BundlePrims::begin() noexcept { return BundlePrimIterator(*this); } inline BundlePrimIterator BundlePrims::end() noexcept { return BundlePrimIterator(*this, getPrimCount()); } inline ConstBundlePrimIterator BundlePrims::cbegin() noexcept { return ConstBundlePrims::begin(); } inline ConstBundlePrimIterator BundlePrims::cend() noexcept { return ConstBundlePrims::end(); } // ==================================================================================================== // // Bundle Primitive Iterator // // ==================================================================================================== inline BundlePrimIterator::BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept : m_bundlePrims(&bundlePrims), m_primIndex(primIndex) { } inline bool BundlePrimIterator::operator==(BundlePrimIterator const& that) const noexcept { return m_bundlePrims == that.m_bundlePrims && m_primIndex == that.m_primIndex; } inline bool BundlePrimIterator::operator!=(BundlePrimIterator const& that) const noexcept { return !(*this == that); } inline BundlePrim& BundlePrimIterator::operator*() noexcept { return *(m_bundlePrims->getPrim(m_primIndex)); } inline BundlePrim* BundlePrimIterator::operator->() noexcept { return m_bundlePrims->getPrim(m_primIndex); } inline BundlePrimIterator& BundlePrimIterator::operator++() noexcept { ++m_primIndex; return *this; } // 
==================================================================================================== // // Bundle Primitive Attribute Iterator // // ==================================================================================================== inline BundlePrimAttrIterator::BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept : m_bundlePrim(&bundlePrim), m_attrIter(attrIter) { } inline bool BundlePrimAttrIterator::operator==(BundlePrimAttrIterator const& that) const noexcept { return m_bundlePrim == that.m_bundlePrim && m_attrIter == that.m_attrIter; } inline bool BundlePrimAttrIterator::operator!=(BundlePrimAttrIterator const& that) const noexcept { return !(*this == that); } inline BundleAttrib const* BundlePrimAttrIterator::getConst() noexcept { CARB_ASSERT(m_bundlePrim != nullptr); CARB_ASSERT(m_attrIter->second); BundleAttrib* attr = m_attrIter->second.get(); // NOTE: Does not bump the dirty ID, since this is const. return attr; } inline BundleAttrib& BundlePrimAttrIterator::operator*() noexcept { CARB_ASSERT(m_bundlePrim != nullptr); CARB_ASSERT(m_attrIter->second); BundleAttrib* attr = m_attrIter->second.get(); // TODO: Consider bumping the dirty ID later, when modification occurs. attr->bumpDirtyID(); return *attr; } inline BundleAttrib* BundlePrimAttrIterator::operator->() noexcept { CARB_ASSERT(m_bundlePrim != nullptr); CARB_ASSERT(m_attrIter->second); BundleAttrib* attr = m_attrIter->second.get(); // TODO: Consider bumping the dirty ID later, when modification occurs. attr->bumpDirtyID(); return attr; } inline BundlePrimAttrIterator& BundlePrimAttrIterator::operator++() noexcept { ++m_attrIter; return *this; } } // namespace io } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/io/ConstBundlePrims.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#pragma message("omni/graph/io/ConstBundlePrims.h is deprecated. Include omni/graph/core/ConstBundlePrims.h instead.")

// ====================================================================================================
//
// DO NOT USE
//
// This is a temporary interface that can change at any time.
//
// ====================================================================================================

#include "BundleAttrib.h"

#include <omni/graph/core/IBundleFactory.h>

#include <unordered_map>
#include <memory>
#include <vector>

namespace omni
{
namespace graph
{
namespace io
{

class ConstBundlePrims;
class ConstBundlePrimIterator;
class ConstBundlePrimAttrIterator;

/**
 * Index used to identify primitives in a bundle.
 */
using BundlePrimIndex OMNI_GRAPH_IO_DEPRECATED = size_t;

//! Sentinel "no primitive" index (all bits set).
OMNI_GRAPH_IO_DEPRECATED constexpr BundlePrimIndex kInvalidBundlePrimIndex = ~BundlePrimIndex(0);

/**
 * Collection of read-only attributes in a primitive.
 *
 * Const Bundle Primitive is not movable, not copyable. Its lifespan is managed by Const Bundle Primitives.
 */
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrim
{
public:
    //! Maps an attribute's name to its cached BundleAttrib wrapper.
    using BundleAttributeMap = std::unordered_map<omni::graph::core::NameToken, std::unique_ptr<BundleAttrib>>;
    using AttrMapIteratorType = BundleAttributeMap::const_iterator;

    // Not copyable, not movable: instances are owned and cached by ConstBundlePrims.
    ConstBundlePrim(ConstBundlePrim const&) = delete;
    ConstBundlePrim(ConstBundlePrim&&) = delete;
    ConstBundlePrim& operator=(ConstBundlePrim const& that) = delete;
    ConstBundlePrim& operator=(ConstBundlePrim&&) = delete;

    /**
     * @return Parent bundle prims of this primitive.
     */
    ConstBundlePrims* getConstBundlePrims() noexcept;

    /**
     * @return Number of attributes in this primitive. Does not include internal attributes.
     */
    size_t attrCount() noexcept;

    /**
     * @return PrimAttribute if attribute with given name is found, nullptr otherwise.
     */
    BundleAttrib const* getConstAttr(omni::graph::core::NameToken attrName) noexcept;

    /**
     * @return Index of this primitive in parent bundle.
     */
    BundlePrimIndex primIndex() noexcept;

    /**
     * @return Path of this primitive.
     */
    omni::graph::core::NameToken path() noexcept;

    /**
     * @return Type of this primitive.
     */
    omni::graph::core::NameToken type() noexcept;

    /**
     * @return Dirty id value of this primitive.
     */
    DirtyIDType dirtyID() noexcept;

    /**
     * @return Attribute iterator pointing to the first attribute in this bundle.
     */
    ConstBundlePrimAttrIterator begin() noexcept;

    /**
     * @return Attribute iterator pointing to the last attribute in this bundle.
     */
    ConstBundlePrimAttrIterator end() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     *       In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @deprecated Do not use!. Use getConstAttr().
     */
    [[deprecated("Use non const instead.")]] BundleAttrib const* getAttr(omni::graph::core::NameToken attrName) const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of path().
     */
    [[deprecated("Use non const instead.")]] omni::graph::core::NameToken path() const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of type().
     */
    [[deprecated("Use non const instead.")]] omni::graph::core::NameToken type() const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of dirtyID().
     */
    [[deprecated("Use non const instead.")]] DirtyIDType dirtyID() const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of begin().
     */
    [[deprecated("Use non const instead.")]] ConstBundlePrimAttrIterator begin() const noexcept;

    /**
     * @deprecated Do not use!. Use non-const variant of end().
     */
    [[deprecated("Use non const instead.")]] ConstBundlePrimAttrIterator end() const noexcept;

protected:
    /**
     * Direct initialization with IConstBundle interface.
     *
     * ConstBundlePrim and BundlePrim take advantage of polymorphic relationship
     * between IConstBundle and IBundle interfaces.
     * In order to modify bundles, BundlePrim makes attempt to down cast IConstBundle
     * to IBundle interface. When this process is successful then, bundle can be modified.
     *
     * Only ConstBundlePrims is allowed to create instances of ConstBundlePrim.
     */
    ConstBundlePrim(ConstBundlePrims& bundlePrims,
                    BundlePrimIndex primIndex,
                    omni::core::ObjectPtr<omni::graph::core::IConstBundle2> bundle);

    /**
     * @return IConstBundle interface for this bundle primitive.
     */
    omni::graph::core::IConstBundle2* getConstBundlePtr() noexcept;

    /**
     * @return Get attribute used by ConstBundlePrims and BundlePrims.
     */
    BundleAttributeMap& getAttributes() noexcept;

    /**
     * Reads public attributes from the bundle and caches them as BundleAttribs.
     */
    void readAndCacheAttributes() noexcept;

private:
    ConstBundlePrims* m_bundlePrims{ nullptr }; // Parent of this bundle prim.
    omni::core::ObjectPtr<omni::graph::core::IConstBundle2> m_bundle;
    BundlePrimIndex m_primIndex{ kInvalidBundlePrimIndex }; // Index of a child bundle of this primitive.
    DirtyIDType m_dirtyId{ kInvalidDirtyID };
    BundleAttributeMap m_attributes; // Cached public attributes that belong to this primitive.

    friend class BundleAttrib;     // Required to access IConstBundle interface.
    friend class BundlePrim;       // Required to access primitive type.
    friend class BundlePrims;      // Required to update internal indices.
    friend class ConstBundlePrims; // Required to call constructor.
};

/**
 * Collection of read-only primitives in a bundle.
 *
 * Const Bundle Primitives is not movable, not copyable. Its lifespan is managed by the user.
 */
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrims
{
public:
    ConstBundlePrims();
    ConstBundlePrims(omni::graph::core::GraphContextObj const& context,
                     omni::graph::core::ConstBundleHandle const& bundle);

    // Not copyable, not movable.
    ConstBundlePrims(ConstBundlePrims const&) = delete;
    ConstBundlePrims(ConstBundlePrims&&) = delete;
    ConstBundlePrims& operator=(ConstBundlePrims const&) = delete;
    ConstBundlePrims& operator=(ConstBundlePrims&&) = delete;

    /**
     * @return Bundle handle of this primitive.
     */
    omni::graph::core::ConstBundleHandle getConstHandle() noexcept;

    /**
     * @return Number of primitives in this bundle of primitives.
     */
    size_t getPrimCount() noexcept;

    /**
     * @return Get read only primitive under specified index.
     */
    ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex) noexcept;

    /**
     * @return Dirty Id of all primitives.
     */
    DirtyIDType getBundleDirtyID() noexcept;

    /**
     * @return Bundle primitives dirty ids.
     */
    DirtyIDType const* getPrimDirtyIDs() noexcept;

    /**
     * Paths of all primitives in this bundle.
     *
     * Primitive paths are lazily computed. This operation can be slow because iteration over all
     * bundles is required. ConstBundlePrim access paths directly from primitives.
     * Use only when necessary.
     *
     * @todo Paths should be represented by PathC type.
     *
     * @return Pointer to primitive paths array, or nullptr if there are no primitive paths.
     */
    omni::graph::core::NameToken const* getConstPrimPaths() noexcept;

    /**
     * Get primitive types in this bundle of primitives. Once primitive is created path can not be changed.
     * Primitive can be copied but not moved.
     */
    omni::graph::core::NameToken const* getConstPrimTypes() noexcept;

    /**
     * Common Attributes are attributes that are shared for entire bundle.
     * An example of a common attribute is "transform" attribute.
     *
     * @return ConstBundlePrims as ConstBundlePrim to access attributes.
     */
    ConstBundlePrim& getConstCommonAttrs() noexcept;

    /**
     * @return Context where bundle primitives belongs to.
     */
    omni::graph::core::GraphContextObj const& context() noexcept;

    /**
     * @return Primitive iterator pointing to the first primitive in this bundle.
     */
    ConstBundlePrimIterator begin() noexcept;

    /**
     * @return Primitive iterator pointing to the last primitive in this bundle.
     */
    ConstBundlePrimIterator end() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     *       In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @deprecated Do not use! Use getConstPrim().
     */
    ConstBundlePrim* getPrim(BundlePrimIndex primIndex) noexcept;

    /**
     * @deprecated Dirty id management is an internal state of this class and should be private.
     *
     * @return Next available id.
     */
    DirtyIDType getNextDirtyID() noexcept;

    /**
     * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
     *
     * @todo: There is no benefit of using this method. Cache has to be rebuild from scratch
     *        whenever ConstBundlePrims is attached/detached.
     *        It would be better to remove default constructor and enforce cache construction
     *        through constructor with arguments.
     */
    void attach(omni::graph::core::GraphContextObj const& context,
                omni::graph::core::ConstBundleHandle const& bundle) noexcept;

    /**
     * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
     */
    void detach() noexcept;

    /**
     * @deprecated Use getConstHandle.
     */
    omni::graph::core::ConstBundleHandle handle() noexcept;

    /**
     * @deprecated Use getConstPrimPaths.
     */
    omni::graph::core::NameToken const* getPrimPaths() noexcept;

    /**
     * @deprecated Use getConstCommonAttrs.
     */
    ConstBundlePrim& getCommonAttrs() noexcept;

    /**
     * @deprecated There is no need to separate attributes. Inherently IBundle2 interface keeps them separated.
     */
    void separateAttrs() noexcept;

    /**
     * @deprecated Caching attributes is not needed. Calling this method doesn't do anything.
     */
    void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept;

protected:
    using ConstBundlePrimPtr = std::unique_ptr<ConstBundlePrim>;
    using BundlePrimArray = std::vector<ConstBundlePrimPtr>;

    /**
     * Get bundle primitives in this bundle.
     */
    BundlePrimArray& getPrimitives() noexcept;

    /**
     * IConstBundle2 is a polymorphic base for IBundle2, thus passing bundle argument allows passing
     * version of the interface that allows mutations.
     */
    void attach(omni::core::ObjectPtr<omni::graph::core::IBundleFactory>&& factory,
                omni::core::ObjectPtr<omni::graph::core::IConstBundle2>&& bundle) noexcept;

    /**
     * @return Factory to spawn instances of IBundle interface.
     */
    omni::graph::core::IBundleFactory* getBundleFactoryPtr() noexcept;

    /**
     * @return IBundle instance of this bundle.
     */
    omni::graph::core::IConstBundle2* getConstBundlePtr() noexcept;

    /**
     * Instances of BundlePrim are instantiated on demand. Argument create allows
     * instantiation mutable or immutable IConstBundle2 interface.
     */
    template <typename FUNC>
    ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex, FUNC create) noexcept;

    void setBundleDirtyID(DirtyIDType bundleDirtyID) noexcept;
    void setPrimDirtyIDsData(DirtyIDType const* primDirtyIDs) noexcept;
    void setPrimPathsData(omni::graph::core::NameToken const* primPaths) noexcept;
    void setPrimTypesData(omni::graph::core::NameToken const* primTypes) noexcept;

private:
    omni::core::ObjectPtr<omni::graph::core::IBundleFactory> m_factory;
    omni::core::ObjectPtr<omni::graph::core::IConstBundle2> m_bundle;
    omni::graph::core::GraphContextObj m_context; // Backward compatibility.

    /**
     * ConstBundlePrims is a bundle as well. To access attributes under this bundle we need to acquire
     * an instance of ConstBundlePrim for this bundle. Common attributes, with unfortunate name,
     * gives us ability to access those attributes.
     */
    ConstBundlePrimPtr m_commonAttributes;
    BundlePrimArray m_primitives; // Cached instances of BundlePrim.

    IDirtyID* m_iDirtyID{ nullptr }; // Cached interface to manage generation of unique ids.
    // NOTE(review): m_bundleDirtyID has no in-class initializer — presumably always set in attach(); confirm.
    DirtyIDType m_bundleDirtyID;
    DirtyIDType const* m_primDirtyIDs{ nullptr }; // Backward compatibility - cached prim ids.
    omni::graph::core::NameToken const* m_primPaths{ nullptr }; // Backward compatibility - cached prim paths.
    omni::graph::core::NameToken const* m_primTypes{ nullptr }; // Backward compatibility - cached prim types.

    friend class ConstBundlePrim;
    friend class BundlePrim;
    friend class BundleAttrib;
};

/**
 * Primitives in Bundle iterator.
 */
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrimIterator
{
public:
    ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept;
    ConstBundlePrimIterator(ConstBundlePrimIterator const& that) noexcept = default;
    ConstBundlePrimIterator& operator=(ConstBundlePrimIterator const& that) noexcept = default;

    bool operator==(ConstBundlePrimIterator const& that) const noexcept;
    bool operator!=(ConstBundlePrimIterator const& that) const noexcept;

    ConstBundlePrim& operator*() noexcept;
    ConstBundlePrim* operator->() noexcept;
    ConstBundlePrimIterator& operator++() noexcept;

private:
    ConstBundlePrims* m_bundlePrims;
    BundlePrimIndex m_primIndex;
};

/**
 * Attributes in Primitive iterator.
 */
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrimAttrIterator
{
public:
    ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim, ConstBundlePrim::AttrMapIteratorType attrIter) noexcept;
    ConstBundlePrimAttrIterator(ConstBundlePrimAttrIterator const& that) noexcept = default;
    ConstBundlePrimAttrIterator& operator=(ConstBundlePrimAttrIterator const& that) noexcept = default;

    bool operator==(ConstBundlePrimAttrIterator const& that) const noexcept;
    bool operator!=(ConstBundlePrimAttrIterator const& that) const noexcept;

    BundleAttrib const& operator*() const noexcept;
    BundleAttrib const* operator->() const noexcept;
    ConstBundlePrimAttrIterator& operator++() noexcept;

private:
    ConstBundlePrim* m_bundlePrim;
    ConstBundlePrim::AttrMapIteratorType m_attrIter;
};

} // namespace io
} // namespace graph
} // namespace omni

#include "ConstBundlePrimsImpl.h"
omniverse-code/kit/include/omni/graph/action/IActionGraph.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IActionGraph.h
//!
//! @brief Defines @ref omni::graph::action::IActionGraph_abi

#pragma once

#include <omni/core/IObject.h>
#include <omni/core/Omni.h>
#include <omni/graph/core/iComputeGraph.h>

namespace omni
{
namespace graph
{
namespace action
{

//! Declare the IActionGraph interface definition
OMNI_DECLARE_INTERFACE(IActionGraph);

/**
 * @brief Functions for implementing nodes which are used in `Action Graph`.
 *
 * Nodes in `Action Graph` have special functionality which is not present in other graph types.
 */
class IActionGraph_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.action.IActionGraph")>
{
protected:
    /**
     * @brief Indicate that the given output attribute should be enabled, so that execution flow should continue
     * along downstream networks.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] attributeName attribute on the current node to be set
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    virtual OMNI_ATTR("no_py") omni::core::Result setExecutionEnabled_abi(
        omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;

    /**
     * @brief Indicate that the given output attribute should be enabled, and the current node should be pushed to the
     * execution @c stack. This means that when the downstream execution flow has completed, this node will be
     * @c popped from the execution stack and its @c compute function will be called again.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] attributeName attribute on the current node to be set
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    virtual OMNI_ATTR("no_py") omni::core::Result setExecutionEnabledAndPushed_abi(
        omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;

    /**
     * @brief Indicate that the current execution flow should be blocked at the given node, and the node should be
     * @c ticked every update of the Graph (@c compute function called), until it calls \ref endLatentState_abi.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    virtual OMNI_ATTR("no_py") omni::core::Result startLatentState_abi(
        omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;

    /**
     * @brief Indicate that the current execution flow should be un-blocked at the given node, if it is currently in a
     * latent state. It is an error to call this function before calling \ref startLatentState_abi.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    virtual OMNI_ATTR("no_py") omni::core::Result endLatentState_abi(
        omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;

    /**
     * @brief Read the current latent state of the node. This state is set using \ref startLatentState_abi and
     * \ref endLatentState_abi
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     * @returns true if the node is currently in a latent state
     *
     * @return false if the node is not in a latent state, or the call failed
     */
    virtual OMNI_ATTR("no_py") bool getLatentState_abi(omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;

    /**
     * @brief Read the enabled state of an input execution attribute. An input attribute is considered enabled if it is
     * connected to the upstream node that was computed immediately prior to the currently computing node. Event nodes
     * and nodes in latent state may not have any enabled input.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] attributeName attribute on the current node to be set
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     * @returns true if the given attribute is considered enabled.
     *
     * @return false if the attribute is considered disabled or the call failed
     */
    virtual OMNI_ATTR("no_py") bool getExecutionEnabled_abi(
        omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;
};

//! Access the IActionGraph interface. This is more efficient than creating an instance each time it is needed.
//!
//! The returned pointer is a singleton managed by *omni.graph.action*, and does *not* have @ref
//! omni::core::IObject::acquire() called on it before being returned. The caller should *not* call @ref
//! omni::core::IObject::release() on the returned raw pointer.
//!
//! @thread_safety This method is thread safe.
inline IActionGraph* getInterface() noexcept;

} // namespace action
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include "IActionGraph.gen.h"

// additional headers needed for API implementation
#include <omni/core/ITypeFactory.h>

inline omni::graph::action::IActionGraph* omni::graph::action::getInterface() noexcept
{
    // createType() always calls acquire() and returns an ObjectPtr to make sure release() is called. we don't want to
    // hold a ref here to avoid static destruction issues. here we allow the returned ObjectPtr to destruct (after
    // calling get()) to release our ref. we know the DLL in which the singleton was created is maintaining a ref and
    // will keep the singleton alive for the lifetime of the DLL.
    static auto sSingleton = omni::core::createType<omni::graph::action::IActionGraph>().get();
    return sSingleton;
}

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include "IActionGraph.gen.h"
omniverse-code/kit/include/omni/graph/action/IActionGraph.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

/**
 * @brief Functions for implementing nodes which are used in `Action Graph`.
 *
 * Nodes in `Action Graph` have special functionality which is not present in other graph types.
 */
template <>
class omni::core::Generated<omni::graph::action::IActionGraph_abi> : public omni::graph::action::IActionGraph_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::action::IActionGraph")

    /**
     * @brief Indicate that the given output attribute should be enabled, so that execution flow should continue
     * along downstream networks.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] attributeName attribute on the current node to be set
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    omni::core::Result setExecutionEnabled(omni::graph::core::NameToken attributeName,
                                           omni::graph::core::InstanceIndex instanceIdx) noexcept;

    /**
     * @brief Indicate that the given output attribute should be enabled, and the current node should be pushed to the
     * execution @c stack. This means that when the downstream execution flow has completed, this node will be
     * @c popped from the execution stack and its @c compute function will be called again.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] attributeName attribute on the current node to be set
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    omni::core::Result setExecutionEnabledAndPushed(omni::graph::core::NameToken attributeName,
                                                    omni::graph::core::InstanceIndex instanceIdx) noexcept;

    /**
     * @brief Indicate that the current execution flow should be blocked at the given node, and the node should be
     * @c ticked every update of the Graph (@c compute function called), until it calls \ref endLatentState_abi.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    omni::core::Result startLatentState(omni::graph::core::InstanceIndex instanceIdx) noexcept;

    /**
     * @brief Indicate that the current execution flow should be un-blocked at the given node, if it is currently in a
     * latent state. It is an error to call this function before calling \ref startLatentState_abi.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     *
     * @return Success if executed successfully
     */
    omni::core::Result endLatentState(omni::graph::core::InstanceIndex instanceIdx) noexcept;

    /**
     * @brief Read the current latent state of the node. This state is set using \ref startLatentState_abi and \ref
     * endLatentState_abi
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     * @returns true if the node is currently in a latent state
     *
     * @return false if the node is not in a latent state, or the call failed
     */
    bool getLatentState(omni::graph::core::InstanceIndex instanceIdx) noexcept;

    /**
     * @brief Read the enabled state of an input execution attribute. An input attribute is considered enabled if it is
     * connected to the upstream node that was computed immediately prior to the currently computing node. Event nodes
     * and nodes in latent state may not have any enabled input.
     *
     * @note This should only be called from within a node @c compute function.
     *
     * @param[in] attributeName attribute on the current node to be set
     * @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
     * @returns true if the given attribute is considered enabled.
     *
     * @return false if the attribute is considered disabled or the call failed
     */
    bool getExecutionEnabled(omni::graph::core::NameToken attributeName,
                             omni::graph::core::InstanceIndex instanceIdx) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Each generated method below is a thin inline wrapper that forwards its arguments unchanged to
// the corresponding pure-virtual *_abi method declared on IActionGraph_abi.

inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::setExecutionEnabled(
    omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept
{
    return setExecutionEnabled_abi(attributeName, instanceIdx);
}

inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::setExecutionEnabledAndPushed(
    omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept
{
    return setExecutionEnabledAndPushed_abi(attributeName, instanceIdx);
}

inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::startLatentState(
    omni::graph::core::InstanceIndex instanceIdx) noexcept
{
    return startLatentState_abi(instanceIdx);
}

inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::endLatentState(
    omni::graph::core::InstanceIndex instanceIdx) noexcept
{
    return endLatentState_abi(instanceIdx);
}

inline bool omni::core::Generated<omni::graph::action::IActionGraph_abi>::getLatentState(
    omni::graph::core::InstanceIndex instanceIdx) noexcept
{
    return getLatentState_abi(instanceIdx);
}

inline bool omni::core::Generated<omni::graph::action::IActionGraph_abi>::getExecutionEnabled(
    omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept
{
    return getExecutionEnabled_abi(attributeName, instanceIdx);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/action/PyIActionGraph.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

#pragma once

#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>

#include <sstream>

//! Registers the IActionGraph interface with the Python module @p m and returns the bound class.
auto bindIActionGraph(py::module& m)
{
    // hack around pybind11 issues with C++17
    //   - https://github.com/pybind/pybind11/issues/2234
    //   - https://github.com/pybind/pybind11/issues/2666
    //   - https://github.com/pybind/pybind11/issues/2856
    // A hidden parent binding ("_IActionGraph") is registered for the Generated<> base so the
    // concrete "IActionGraph" class below can inherit from it.
    py::class_<omni::core::Generated<omni::graph::action::IActionGraph_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::action::IActionGraph_abi>>,
               omni::core::IObject>
        clsParent(m, "_IActionGraph");

    py::class_<omni::graph::action::IActionGraph, omni::core::Generated<omni::graph::action::IActionGraph_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::action::IActionGraph>, omni::core::IObject>
        cls(m, "IActionGraph",
            R"OMNI_BIND_RAW_(@brief Functions for implementing nodes which are used in `Action Graph`.

Nodes in `Action Graph` have special functionality which is not present in other graph types.)OMNI_BIND_RAW_");

    // Conversion constructor: cast an existing IObject to IActionGraph; raises on type mismatch.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::action::IActionGraph>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));
    // Default constructor: instantiate the interface via the type factory; raises if no
    // implementation is registered.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::action::IActionGraph>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::action::IActionGraph instantiation");
            }
            return tmp;
        }));
    return omni::python::PyBind<omni::graph::action::IActionGraph>::bind(cls);
}
omniverse-code/kit/include/omni/graph/exec/unstable/IPopulatePass.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IPopulatePass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPopulatePass.
#pragma once

#include <omni/graph/exec/unstable/IPass.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IGraphBuilder;
class INode;
class IPopulatePass;
class IPopulatePass_abi;

//! Base class for populate passes.
//!
//! Register a populate pass with @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(). When registering a pass, a "name to
//! match" is also specified. This name is the name of a node or definition on which the registered pass should
//! populate.
//!
//! Populate passes are typically the first pass type to run in the pass pipeline. When a node is encountered during
//! construction, only a single populate pass will get a chance to populate the newly discovered node. If no pass is
//! registered against the node's name, the node definition's name is used to find a population pass to run.
//!
//! Populate pass is allowed to attach a new definition to a node it runs on.
//!
//! Minimal rebuild of the execution graph topology should be considered by the pass each time it runs. Pass pipeline
//! leaves the responsibility of deciding if pass needs to run to the implementation. At minimum it can rely on
//! verifying that topology of @ref omni::graph::exec::unstable::NodeGraphDef it generated before is still valid or
//! @ref omni::graph::exec::unstable::NodeDef has not changed.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPopulatePass_abi : public omni::core::Inherits<IPass, OMNI_TYPE_ID("omni.graph.exec.unstable.IPopulatePass")>
{
protected:
    //! Call from pass pipeline to apply graph transformations on a given node (definition or topology).
    //!
    //! @param builder Builder through which the pass mutates the graph while it runs.
    //! @param node    The node (matched by its name, or by its definition's name) to populate.
    //!
    //! NOTE(review): declared "throw_result" — the omni.bind-generated C++ wrapper presumably
    //! surfaces a failing Result as an exception; confirm against the omni.bind conventions.
    virtual OMNI_ATTR("throw_result") omni::core::Result run_abi(IGraphBuilder* builder, INode* node) noexcept = 0;
};

//! Smart pointer managing an instance of @ref IPopulatePass.
using PopulatePassPtr = omni::core::ObjectPtr<IPopulatePass>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPopulatePass.gen.h>

//! @copydoc omni::graph::exec::unstable::IPopulatePass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPopulatePass
    : public omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi>
{
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPopulatePass.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IGraph.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Top-level container for storing the Execution Framework's graph of graphs.
//!
//! @ref omni::graph::exec::unstable::IGraph is the top-level container used to store the graph of graphs. This
//! top-level container is referred to as the <i>execution graph</i>.
//!
//! @ref omni::graph::exec::unstable::IGraph's responsibilities include:
//!
//! - Tracking if the graph is currently being constructed. See @ref omni::graph::exec::unstable::IGraph::inBuild().
//!
//! - Tracking gross changes to the topologies of graphs within the execution graph. This is done with the <i>global
//!   topology stamp</i> (see @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp()). Each time a topology
//!   is invalidated, the global topology stamp is incremented. Consumers of the execution graph can use this stamp to
//!   detect changes in the graph. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! - Owning and providing access to the top level graph definition (see @ref
//!   omni::graph::exec::unstable::IGraph::getNodeGraphDef()). The root node of the top-level graph definition is the
//!   root of execution graph. @ref omni::graph::exec::unstable::IGraph is the only container, other than @ref
//!   omni::graph::exec::unstable::INode, that attaches to definitions.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for more information on how @ref omni::graph::exec::unstable::IGraph
//! fits into the Execution Framework.
//!
//! See @ref omni::graph::exec::unstable::Graph for a concrete implementation of this interface.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraph_abi> : public omni::graph::exec::unstable::IGraph_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraph")

    //! Access the top-level node graph definition.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    //!
    //! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
    //! omni::graph::exec::unstable::IGraph.
    omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;

    //! Name set on the graph during construction.
    //!
    //! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
    //! omni::graph::exec::unstable::IGraph.
    const omni::graph::exec::unstable::ConstName& getName() noexcept;

    //! Return global topology of the graph. Useful when detecting that graph transformation pipeline needs to run.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} to understand how this stamp is used to detect changes
    //! in the graph.
    //!
    //! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
    //! omni::graph::exec::unstable::IGraph. It is up to the caller to mutate the stamp in a thread safe manner.
    omni::graph::exec::unstable::Stamp* getGlobalTopologyStamp() noexcept;

    //! Return @c true if a @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
    //!
    //! @thread_safety This method is thread safe.
    bool inBuild() noexcept;

    //! Mark that an @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
    //!
    //! Each builder should call @c _setInBuild(true) followed by @c _setInBuild(false) once building is complete. Since
    //! multiple builders can be active at a time, it is safe for this method to be called multiple times.
    //!
    //! This method should only be called by @ref omni::graph::exec::unstable::IGraphBuilder.
    //!
    //! @thread_safety This method is thread safe.
    void _setInBuild(bool inBuild) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Thin inline wrappers that forward to the corresponding *_abi virtuals. Note that getName()
// dereferences the pointer returned by the ABI call to hand back a reference.

inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
    omni::graph::exec::unstable::IGraph_abi>::getNodeGraphDef() noexcept
{
    return getNodeGraphDef_abi();
}

inline const omni::graph::exec::unstable::ConstName& omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>::getName() noexcept
{
    return *(getName_abi());
}

inline omni::graph::exec::unstable::Stamp* omni::core::Generated<
    omni::graph::exec::unstable::IGraph_abi>::getGlobalTopologyStamp() noexcept
{
    return getGlobalTopologyStamp_abi();
}

inline bool omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>::inBuild() noexcept
{
    return inBuild_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>::_setInBuild(bool inBuild) noexcept
{
    _setInBuild_abi(inBuild);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/SmallVector.h
// Copied from USD repository: https://github.com/PixarAnimationStudios/USD
//
// Copyright 2019 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
//    names, trademarks, service marks, or product names of the Licensor
//    and its affiliates, except as required to comply with Section 4(c) of
//    the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#pragma once

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! Contains parts of the small vector implementation that do not depend on
//! *all* of SmallVector's template parameters.
class SmallVectorBase
{
public:
    //! Size type. 32-bit on purpose: SmallVector limits max_size() to 2^32 to
    //! reduce its memory footprint (see the SmallVector class documentation).
    using size_type = std::uint32_t;

    //! Difference type
    using difference_type = std::uint32_t;

    //! Returns the local capacity that may be used without increasing the size
    //! of the SmallVector. SmallVector<T, N> will never use more local
    //! capacity than is specified by N but clients that wish to maximize local
    //! occupancy in a generic way can compute N using this function.
    template <typename U>
    static constexpr size_type ComputeSerendipitousLocalCapacity()
    {
        return (alignof(U) <= alignof(_Data<U, 0>)) ? sizeof(_Data<U, 0>) / sizeof(U) : 0;
    }

protected:
    //! Invoke std::uninitialized_copy that either moves or copies entries,
    //! depending on whether the type is move constructible or not.
    template <typename Iterator>
    static Iterator _UninitializedMove(Iterator first, Iterator last, Iterator dest)
    {
        return std::uninitialized_copy(std::make_move_iterator(first), std::make_move_iterator(last), dest);
    }

    //! Invokes either the move or copy constructor (via placement new),
    //! depending on whether U is move constructible or not.
    template <typename U>
    static void _MoveConstruct(U* p, U* src)
    {
        new (p) U(std::move(*src));
    }

#ifndef DOXYGEN_BUILD
    //! The data storage, which is a union of both the local storage, as well
    //! as a pointer, holding the address to the remote storage on the heap, if
    //! used. The owning vector decides which union member is active; this type
    //! itself performs no tracking.
    template <typename U, size_type M>
    union _Data
    {
    public:
        //! Returns raw pointer to local storage of type @c U
        U* GetLocalStorage()
        {
            return reinterpret_cast<U*>(_local);
        }

        //! Returns const raw pointer to local storage of type @c U
        const U* GetLocalStorage() const
        {
            return reinterpret_cast<const U*>(_local);
        }

        //! Returns raw pointer to remote storage of type @c U
        U* GetRemoteStorage()
        {
            return _remote;
        }

        //! Returns const raw pointer to remote storage of type @c U
        const U* GetRemoteStorage() const
        {
            return _remote;
        }

        //! Sets remote storage to @p p
        void SetRemoteStorage(U* p)
        {
            _remote = p;
        }

    private:
        alignas(U) char _local[sizeof(U) * M];
        U* _remote;
    };

    //! For N == 0 the _Data class has been specialized to elide the local
    //! storage completely. This way we don't have to rely on compiler-specific
    //! support for 0-sized arrays.
    template <typename U>
    union _Data<U, 0>
    {
    public:
        //! Specialization for 0-sized local storage. Returns nullptr.
        U* GetLocalStorage()
        {
            // XXX: Could assert here. Introduce dependency on tf/diagnostic.h?
            return nullptr;
        }

        //! Specialization for 0-sized local storage. Returns nullptr.
        const U* GetLocalStorage() const
        {
            // XXX: Could assert here. Introduce dependency on tf/diagnostic.h?
            return nullptr;
        }

        //! Returns raw pointer to remote storage of type @c U
        U* GetRemoteStorage()
        {
            return _remote;
        }

        //! Returns const raw pointer to remote storage of type @c U
        const U* GetRemoteStorage() const
        {
            return _remote;
        }

        //! Sets remote storage to @p p
        void SetRemoteStorage(U* p)
        {
            _remote = p;
        }

    private:
        U* _remote;
    };
#endif // DOXYGEN_BUILD
};

//!
//! \class SmallVector
//!
//! This is a small-vector class with local storage optimization, the local
//! storage can be specified via a template parameter, and expresses the
//! number of entries the container can store locally.
//!
//! In addition to the local storage optimization, this vector is also
//! optimized for storing a smaller number of entries on the heap: It features
//! a reduced memory footprint (minimum 16 bytes) by limiting max_size() to
//! 2^32, which should still be more than enough for most use cases where a
//! small-vector is advantageous.
//!
//! SmallVector mimics the std::vector API, and can thus be easily used as a
//! drop-in replacement where appropriate. Note, however, that not all the
//! methods on std::vector are implemented here, and that SmallVector may
//! have methods in addition to those that you would find on std::vector.
//!
//! Note that a SmallVector that has grown beyond its local storage, will
//! NOT move its entries back into the local storage once it shrinks back to N.
//!
template <typename T, std::size_t N>
class SmallVector : public SmallVectorBase
{
public:
    //! @{
    //! Relevant type definitions
    using value_type = T;

    //! Relevant type definitions
    using reference = T&;

    //! Relevant type definitions
    using const_reference = const T&;

    //! }@

    //! @{
    //! Iterator Support
    using iterator = T*;

    //! Iterator Support
    using const_iterator = const T*;

    //!
Iterator Support using reverse_iterator = std::reverse_iterator<iterator>; //! Iterator Support using const_reverse_iterator = std::reverse_iterator<const_iterator>; //! }@ //! Default constructor. //! SmallVector() : _size(0), _capacity(N) { } //! Construct a vector holding \p n value-initialized elements. //! explicit SmallVector(size_type n) : _capacity(N) { _InitStorage(n); value_type* d = data(); for (size_type i = 0; i < n; ++i) { new (d + i) value_type(); } } //! Construct a vector holding \p n copies of \p v. //! SmallVector(size_type n, const value_type& v) : _capacity(N) { _InitStorage(n); std::uninitialized_fill_n(data(), n, v); } //! Enum to disambiguate constructors enum DefaultInitTag { DefaultInit }; //! Construct a vector holding \p n default-initialized elements. //! SmallVector(size_type n, DefaultInitTag) : _capacity(N) { _InitStorage(n); value_type* d = data(); for (size_type i = 0; i < n; ++i) { new (d + i) value_type; } } //! Copy constructor. //! SmallVector(const SmallVector& rhs) : _capacity(N) { _InitStorage(rhs.size()); std::uninitialized_copy(rhs.begin(), rhs.end(), begin()); } //! Move constructor. //! SmallVector(SmallVector&& rhs) : _size(0), _capacity(N) { // If rhs can not be stored locally, take rhs's remote storage and // reset rhs to empty. if (rhs.size() > N) { _data.SetRemoteStorage(rhs._data.GetRemoteStorage()); std::swap(_capacity, rhs._capacity); } // If rhs is stored locally, it's faster to simply move the entries // into this vector's storage, destruct the entries at rhs, and swap // sizes. Note that capacities will be the same in this case, so no // need to swap those. else { _UninitializedMove(rhs.begin(), rhs.end(), begin()); rhs._Destruct(); } std::swap(_size, rhs._size); } //! Construct a new vector from initializer list SmallVector(std::initializer_list<T> values) : SmallVector(values.begin(), values.end()) { } //! 
Compile time check to enabled method when forward iterator is available template <typename _ForwardIterator> using _EnableIfForwardIterator = typename std::enable_if<std::is_convertible<typename std::iterator_traits<_ForwardIterator>::iterator_category, std::forward_iterator_tag>::value>::type; //! Creates a new vector containing copies of the data between //! \p first and \p last. template <typename ForwardIterator, typename = _EnableIfForwardIterator<ForwardIterator>> SmallVector(ForwardIterator first, ForwardIterator last) : _capacity(N) { _InitStorage(static_cast<difference_type>(std::distance(first, last))); std::uninitialized_copy(first, last, begin()); } //! Destructor. //! ~SmallVector() { _Destruct(); _FreeStorage(); } //! Assignment operator. //! SmallVector& operator=(const SmallVector& rhs) { if (this != &rhs) { assign(rhs.begin(), rhs.end()); } return *this; } //! Move assignment operator. //! SmallVector& operator=(SmallVector&& rhs) { if (this != &rhs) { swap(rhs); } return *this; } //! Replace existing contents with the contents of \p ilist. //! SmallVector& operator=(std::initializer_list<T> ilist) { assign(ilist.begin(), ilist.end()); return *this; } //! Swap two vector instances. //! void swap(SmallVector& rhs) { // Both this vector and rhs are stored locally. if (_IsLocal() && rhs._IsLocal()) { SmallVector* smaller = size() < rhs.size() ? this : &rhs; SmallVector* larger = size() < rhs.size() ? &rhs : this; // Swap all the entries up to the size of the smaller vector. std::swap_ranges(smaller->begin(), smaller->end(), larger->begin()); // Move the tail end of the entries, and destruct them at the // source vector. for (size_type i = smaller->size(); i < larger->size(); ++i) { _MoveConstruct(smaller->data() + i, &(*larger)[i]); (*larger)[i].~value_type(); } // Swap sizes. Capacities are already equal in this case. std::swap(smaller->_size, larger->_size); } // Both this vector and rhs are stored remotely. 
Simply swap the // pointers, as well as size and capacity. else if (!_IsLocal() && !rhs._IsLocal()) { value_type* tmp = _data.GetRemoteStorage(); _data.SetRemoteStorage(rhs._data.GetRemoteStorage()); rhs._data.SetRemoteStorage(tmp); std::swap(_size, rhs._size); std::swap(_capacity, rhs._capacity); } // Either this vector or rhs is stored remotely, whereas the other // one is stored locally. else { SmallVector* remote = _IsLocal() ? &rhs : this; SmallVector* local = _IsLocal() ? this : &rhs; // Get a pointer to the remote storage. We'll be overwriting the // pointer value below, so gotta retain it first. value_type* remoteStorage = remote->_GetStorage(); // Move all the entries from the vector with the local storage, to // the other vector's local storage. This will overwrite the pointer // to the other vectors remote storage. Note that we will have to // also destruct the elements at the source's local storage. The // source will become the one with the remote storage, so those // entries will be essentially freed. for (size_type i = 0; i < local->size(); ++i) { _MoveConstruct(remote->_data.GetLocalStorage() + i, &(*local)[i]); (*local)[i].~value_type(); } // Swap the remote storage into the vector which previously had the // local storage. It's been properly cleaned up now. local->_data.SetRemoteStorage(remoteStorage); // Swap sizes and capacities. Easy peasy. std::swap(remote->_size, local->_size); std::swap(remote->_capacity, local->_capacity); } } //! Insert an rvalue-reference entry at the given iterator position. //! iterator insert(const_iterator it, value_type&& v) { return _Insert(it, std::move(v)); } //! Insert an entry at the given iterator. //! iterator insert(const_iterator it, const value_type& v) { return _Insert(it, v); } //! Erase an entry at the given iterator. //! iterator erase(const_iterator it) { return erase(it, it + 1); } //! Erase entries between [ \p first, \p last ) from the vector. //! 
iterator erase(const_iterator it, const_iterator last) { value_type* p = const_cast<value_type*>(&*it); value_type* q = const_cast<value_type*>(&*last); // If we're not removing anything, bail out. if (p == q) { return iterator(p); } const difference_type num = static_cast<difference_type>(std::distance(p, q)); // Move entries starting at last, down a few slots to starting a it. value_type* e = data() + size(); std::move(q, e, p); // Destruct all the freed up slots at the end of the vector. for (value_type* i = (e - num); i != e; ++i) { i->~value_type(); } // Bump down the size. _size -= num; // Return an iterator to the next entry. return iterator(p); } //! Reserve storage for \p newCapacity entries. //! void reserve(size_type newCapacity) { // Only reserve storage if the new capacity would grow past the local // storage, or the currently allocated storage. We'll grow to // accommodate exactly newCapacity entries. if (newCapacity > capacity()) { _GrowStorage(newCapacity); } } //! Resize the vector to \p newSize and insert copies of \p v. //! void resize(size_type newSize, const value_type& v = value_type()) { // If the new size is smaller than the current size, let go of some // entries at the tail. if (newSize < size()) { erase(const_iterator(data() + newSize), const_iterator(data() + size())); } // Otherwise, lets grow and fill: Reserve some storage, fill the tail // end with copies of v, and update the new size. else if (newSize > size()) { reserve(newSize); std::uninitialized_fill(data() + size(), data() + newSize, v); _size = newSize; } } //! Clear the entries in the vector. Does not let go of the underpinning //! storage. //! void clear() { _Destruct(); _size = 0; } //! Clears any previously held entries, and copies entries between //! [ \p first, \p last ) to this vector. //! 
template <typename ForwardIterator, typename = _EnableIfForwardIterator<ForwardIterator>>
void assign(ForwardIterator first, ForwardIterator last)
{
    // Destroy current contents first; the copy below targets raw storage.
    clear();
    const difference_type newSize = static_cast<difference_type>(std::distance(first, last));
    reserve(newSize);
    std::uninitialized_copy(first, last, begin());
    _size = newSize;
}

//! Replace existing contents with the contents of \p ilist.
//!
void assign(std::initializer_list<T> ilist)
{
    assign(ilist.begin(), ilist.end());
}

//! Emplace an entry at the back of the vector.
//!
template <typename... Args>
void emplace_back(Args&&... args)
{
    // Grow by the 1.5x factor from _NextCapacity() when full, then
    // placement-new the entry directly into the tail slot.
    if (size() == capacity())
    {
        _GrowStorage(_NextCapacity());
    }
    new (data() + size()) value_type(std::forward<Args>(args)...);
    _size += 1;
}

//! Copy an entry to the back of the vector.
//!
void push_back(const value_type& v)
{
    emplace_back(v);
}

//! Move an entry to the back of the vector.
//!
void push_back(value_type&& v)
{
    emplace_back(std::move(v));
}

//! Copy the range denoted by [\p first, \p last) into this vector
//! before \p pos.
//!
template <typename ForwardIterator>
void insert(iterator pos, ForwardIterator first, ForwardIterator last)
{
    static_assert(std::is_convertible<typename std::iterator_traits<ForwardIterator>::iterator_category,
                                      std::forward_iterator_tag>::value,
                  "Input Iterators not supported.");

    // Check for the insert-at-end special case as the very first thing so
    // that we give the compiler the best possible opportunity to
    // eliminate the general case code.
    const bool insertAtEnd = pos == end();

    const difference_type numNewElems = (difference_type)std::distance(first, last);
    const size_type neededCapacity = size() + numNewElems;
    const size_type nextCapacity = std::max(_NextCapacity(), neededCapacity);

    // Insertions at the end would be handled correctly by the code below
    // without this special case. However, insert(end(), f, l) is an
    // extremely common operation so we provide this fast path both to
    // avoid unneeded work and to make it easier for the compiler to
    // eliminate dead code when pos == end().
    if (insertAtEnd)
    {
        // The reallocation here is not a simple reserve. We want to grow
        // the storage only when there are too many new elements but the
        // desired size is based on the growth factor.
        if (neededCapacity > capacity())
        {
            _GrowStorage(nextCapacity);
        }
        std::uninitialized_copy(first, last, end());
        _size += numNewElems;
        return;
    }

    if (neededCapacity > capacity())
    {
        // Because we need to realloc, we can do the insertion by copying
        // each range, [begin(), pos), [first, last), [pos, end()), into
        // the new storage.
        const size_type posI = (size_type)std::distance(begin(), pos);
        value_type* newStorage = _Allocate(nextCapacity);

        iterator newPrefixBegin = iterator(newStorage);
        iterator newPos = newPrefixBegin + posI;
        iterator newSuffixBegin = newPos + numNewElems;
        _UninitializedMove(begin(), pos, newPrefixBegin);
        std::uninitialized_copy(first, last, newPos);
        _UninitializedMove(pos, end(), newSuffixBegin);

        // Destroy old data and set up this new buffer.
        _Destruct();
        _FreeStorage();
        _data.SetRemoteStorage(newStorage);
        _capacity = nextCapacity;
    }
    else
    {
        // Insert in-place requires handling four ranges.
        //
        // For both the range-to-move [pos, end()) and the range-to-insert
        // [first, last), there are two subranges: the subrange to copy
        // and the subrange to uninitialized_copy. Note that only three of
        // these ranges may be non-empty: either there is a non-empty
        // prefix of [pos, end()) that needs to be copied over existing
        // elements or there is a non-empty suffix of [first, last) that
        // needs to be placed in uninitialized storage.
        const difference_type numMoveElems = (difference_type)std::distance(pos, end());
        const difference_type numUninitMoves = (difference_type)std::min(numNewElems, numMoveElems);
        const difference_type numInitMoves = numMoveElems - numUninitMoves;
        const difference_type numUninitNews = numNewElems - numUninitMoves;
        const difference_type numInitNews = numNewElems - numUninitNews;

        // Move our existing elements out of the way of new elements.
        iterator umSrc = pos + numInitMoves;
        iterator umDst = end() + numUninitNews;
        _UninitializedMove(umSrc, end(), umDst);
        std::copy_backward(pos, umSrc, umDst);

        // Copy new elements into place.
        for (difference_type i = 0; i < numInitNews; ++i, ++first, ++pos)
        {
            *pos = *first;
        }
        std::uninitialized_copy(first, last, end());
    }

    _size += numNewElems;
}

//! Insert elements from \p ilist starting at position \p pos.
//!
void insert(iterator pos, std::initializer_list<T> ilist)
{
    insert(pos, ilist.begin(), ilist.end());
}

//! Remove the entry at the back of the vector.
//!
void pop_back()
{
    back().~value_type();
    _size -= 1;
}

//! Returns the current size of the vector.
//!
size_type size() const
{
    return _size;
}

//! Returns the maximum size of this vector.
//!
static constexpr size_type max_size()
{
    return std::numeric_limits<size_type>::max();
}

//! Returns \c true if this vector is empty.
//!
bool empty() const
{
    return size() == 0;
}

//! Returns the current capacity of this vector. Note that if the returned
//! value is <= N, it does NOT mean the storage is local. A vector that has
//! previously grown beyond its local storage, will not move entries back to
//! the local storage once it shrinks to N.
//!
size_type capacity() const
{
    return _capacity;
}

//! Returns the local storage capacity. The vector uses its local storage
//! if capacity() <= internal_capacity().
//! This method mimics the boost::container::small_vector interface.
//!
static constexpr size_type internal_capacity()
{
    return N;
}

//!
//! Returns an iterator to the beginning of the vector.
//! @{
iterator begin()
{
    return iterator(_GetStorage());
}

//! Returns an iterator to the beginning of the vector.
const_iterator begin() const
{
    return const_iterator(_GetStorage());
}

//! Returns an iterator to the beginning of the vector.
const_iterator cbegin() const
{
    return begin();
}
//! @}

//! Returns an iterator to the end of the vector.
//! @{
iterator end()
{
    return iterator(_GetStorage() + size());
}

//! Returns an iterator to the end of the vector.
const_iterator end() const
{
    return const_iterator(_GetStorage() + size());
}

//! Returns an iterator to the end of the vector.
const_iterator cend() const
{
    return end();
}
//! @}

//! Returns a reverse iterator to the beginning of the vector.
//! @{
reverse_iterator rbegin()
{
    return reverse_iterator(end());
}

//! Returns a reverse iterator to the beginning of the vector.
const_reverse_iterator rbegin() const
{
    return const_reverse_iterator(end());
}

//! Returns a reverse iterator to the beginning of the vector.
const_reverse_iterator crbegin() const
{
    return rbegin();
}
//! @}

//! @{
//! Returns a reverse iterator to the end of the vector.
reverse_iterator rend()
{
    return reverse_iterator(begin());
}

//! Returns a reverse iterator to the end of the vector.
const_reverse_iterator rend() const
{
    return const_reverse_iterator(begin());
}

//! Returns a reverse iterator to the end of the vector.
const_reverse_iterator crend() const
{
    return rend();
}
//! @}

//! Returns the first element in the vector.
//!
reference front()
{
    return *begin();
}

//! Returns the first element in the vector.
//!
const_reference front() const
{
    return *begin();
}

//! Returns the last element in the vector.
//!
reference back()
{
    return *(data() + size() - 1);
}

//! Returns the last element in the vector.
//!
const_reference back() const
{
    return *(data() + size() - 1);
}

//! Access the specified element.
//!
reference operator[](size_type i)
{
    return *(data() + i);
}

//! Access the specified element.
//!
const_reference operator[](size_type i) const
{
    return *(data() + i);
}

//! Direct access to the underlying array.
//!
value_type* data()
{
    return _GetStorage();
}

//! Direct access to the underlying array.
//!
const value_type* data() const
{
    return _GetStorage();
}

//! Lexicographically compares the elements in the vectors for equality.
//!
bool operator==(const SmallVector& rhs) const
{
    return size() == rhs.size() && std::equal(begin(), end(), rhs.begin());
}

//! Lexicographically compares the elements in the vectors for inequality.
//!
bool operator!=(const SmallVector& rhs) const
{
    return !operator==(rhs);
}

private:
//! Returns true if the local storage is used.
//! The capacity alone encodes the storage mode: local iff _capacity <= N.
bool _IsLocal() const
{
    return _capacity <= N;
}

//! Return a pointer to the storage, which is either local or remote
//! depending on the current capacity.
value_type* _GetStorage()
{
    return _IsLocal() ? _data.GetLocalStorage() : _data.GetRemoteStorage();
}

//! Return a const pointer to the storage, which is either local or remote
//! depending on the current capacity.
const value_type* _GetStorage() const
{
    return _IsLocal() ? _data.GetLocalStorage() : _data.GetRemoteStorage();
}

//! Free the remotely allocated storage.
//! Pairs with _Allocate(), which uses malloc.
void _FreeStorage()
{
    if (!_IsLocal())
    {
        free(_data.GetRemoteStorage());
    }
}

//! Destructs all the elements stored in this vector.
//! Does not change _size; callers are expected to update it.
void _Destruct()
{
    value_type* b = data();
    value_type* e = b + size();
    for (value_type* p = b; p != e; ++p)
    {
        p->~value_type();
    }
}

//! Allocate a buffer on the heap. The returned memory is uninitialized;
//! elements must be placement-new constructed into it.
static value_type* _Allocate(size_type size)
{
    return static_cast<value_type*>(malloc(sizeof(value_type) * size));
}

//! Initialize the vector with new storage, updating the capacity and size.
void _InitStorage(size_type size)
{
    if (size > capacity())
    {
        _data.SetRemoteStorage(_Allocate(size));
        _capacity = size;
    }
    _size = size;
}

//! Grow the storage to be able to accommodate newCapacity entries. This
//! always allocates remote storage.
void _GrowStorage(const size_type newCapacity)
{
    // Allocate first, then move, destruct, and free the old buffer; the
    // order matters so existing elements survive until they are moved out.
    value_type* newStorage = _Allocate(newCapacity);
    _UninitializedMove(begin(), end(), iterator(newStorage));
    _Destruct();
    _FreeStorage();
    _data.SetRemoteStorage(newStorage);
    _capacity = newCapacity;
}

//! Returns the next capacity to use for vector growth. The growth factor
//! here is 1.5. A constant 1 is added so that we do not have to special
//! case initial capacities of 0 and 1.
size_type _NextCapacity() const
{
    const size_type cap = capacity();
    return cap + (cap / 2) + 1;
}

//! Insert the value v at iterator it. We use this method that takes a
//! universal reference to de-duplicate the logic required for the insert
//! overloads, one taking an rvalue reference, and the other one taking a
//! const reference. This way, we can take the most optimal code path (
//! move, or copy without making redundant copies) based on whether v is
//! a rvalue reference or const reference.
template <typename U>
iterator _Insert(const_iterator it, U&& v)
{
    value_type* newEntry;

    // If the iterator points to the end, simply push back.
    if (it == end())
    {
        push_back(std::forward<U>(v));
        return end() - 1;
    }

    // Grow the remote storage, if we need to. This invalidates iterators,
    // so special care must be taken in order to return a new, valid
    // iterator.
    else if (size() == capacity())
    {
        const size_type newCapacity = _NextCapacity();
        value_type* newStorage = _Allocate(newCapacity);

        value_type* i = const_cast<value_type*>(&*it);
        value_type* curData = data();
        // Move the prefix [data, it) and construct v right after it; the
        // suffix [it, end) lands one slot further up in the new buffer.
        newEntry = _UninitializedMove(curData, i, newStorage);

        new (newEntry) value_type(std::forward<U>(v));

        _UninitializedMove(i, curData + size(), newEntry + 1);

        _Destruct();
        _FreeStorage();

        _data.SetRemoteStorage(newStorage);
        _capacity = newCapacity;
    }

    // Our current capacity is big enough to allow us to simply shift
    // elements up one slot and insert v at it.
    else
    {
        // Move all the elements after it up by one slot. The last element
        // goes into raw storage via placement new; the rest shift with
        // move_backward over already-constructed slots.
        newEntry = const_cast<value_type*>(&*it);
        value_type* last = const_cast<value_type*>(&back());
        new (data() + size()) value_type(std::move(*last));
        std::move_backward(newEntry, last, last + 1);

        // Move v into the slot at the supplied iterator position.
        newEntry->~value_type();
        new (newEntry) value_type(std::forward<U>(v));
    }

    // Bump size and return an iterator to the newly inserted entry.
    ++_size;
    return iterator(newEntry);
}

//! The vector storage, which is a union of the local storage and a pointer
//! to the heap memory, if allocated.
_Data<value_type, N> _data;

//! The current size of the vector, i.e. how many entries it contains.
size_type _size;

//! The current capacity of the vector, i.e. how big the currently allocated
//! storage space is.
size_type _capacity;
};

//! Swap function for @ref SmallVector
template <typename T, std::size_t N>
void swap(SmallVector<T, N>& a, SmallVector<T, N>& b)
{
    a.swap(b);
}

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilder.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Graph builder is the only class that has the ability to modify topology of a graph.
//!
//! Topological edits of the graph are only allowed during graph transformation and should never
//! be performed during execution of the graph. Construction of the builder will automatically drop
//! all the connections between nodes.
//!
//! Methods on this class mutating a graph topology are not thread-safe (unless documented otherwise)
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>
    : public omni::graph::exec::unstable::IGraphBuilder_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilder")

    //! Return owner of all graphs this builder touches
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    omni::graph::exec::unstable::IGraph* getGraph() noexcept;

    //! Returns the topology this builder can modify.
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    omni::graph::exec::unstable::ITopology* getTopology() noexcept;

    //! Returns the context in which this builder works.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderContext will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    omni::graph::exec::unstable::IGraphBuilderContext* getContext() noexcept;

    //! Returns @ref omni::graph::exec::unstable::INodeGraphDef this builder can modify.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;

    //! Connect two given nodes.
    //!
    //! It is an error if the two nodes are not in the same topology.
    //!
    //! Neither given node should be @c nullptr.
    //!
    //! Neither @ref omni::graph::exec::unstable::INode have @ref omni::core::IObject::acquire() called
    //! during the connection process.
    //!
    //! May throw.
    void connect(omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
                 omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode);

    //! Disconnect two given nodes.
    //!
    //! It is an error if the two nodes are not in the same topology.
    //!
    //! Neither given node should be @c nullptr.
    //!
    //! Neither @ref omni::graph::exec::unstable::INode have @ref omni::core::IObject::acquire() called
    //! during the disconnection process.
    //!
    //! May throw.
    void disconnect(omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
                    omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode);

    //! Remove a node from topology.
    //!
    //! The given node must not be @c nullptr.
    //!
    //! May throw.
    void remove(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);

    //! Sets the definition for given node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given definition pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef().
    //!
    //! This method is NOT thread safe.
    void setNodeDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
                    omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept;

    //! Sets the definition for given node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given definition pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef().
    //!
    //! This method is NOT thread safe.
    void setNodeGraphDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
                         omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept;

    //! Unsets given node's definition.
    //!
    //! If the definition is already @c nullptr, this method does nothing.
    //!
    //! This method is NOT thread safe.
    void clearDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) noexcept;

    //! Replace well formed cluster of nodes with a single node and the given definition.
    //!
    //! All nodes must exist in the same and current topology, otherwise the entire operation is aborted.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given definition pointer.
    //!
    //! This method is NOT thread safe.
    void replacePartition(const omni::graph::exec::unstable::NodePartition& partition,
                          omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition);

    //! Create a new node in current node graph def.
    //!
    //! The given node name must not be @c nullptr.
    //!
    //! The given node def can be @c nullptr.
    //!
    //! Node creation can return @c nullptr when current node graph def doesn't allow node construction outside
    //! of the pass that created it.
    //!
    //! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
    omni::core::ObjectPtr<omni::graph::exec::unstable::INode> createNode(
        const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def);

    //! Access created nodes by this builder.
    //!
    //! Span is no longer valid when topology of the graph changes. You need to query it again.
    //!
    //! In case a node once created gets removed by another pass, returned list will continue to have it.
    //! It is safe to do, because we do not delete underlying nodes until the next graph population.
    //! Checking if node is valid in current topology allows to filter out these cases.
    //!
    //! The pointers in the span are non owning, i.e. @ref omni::graph::exec::unstable::INode will not have
    //! @ref omni::core::IObject::acquire() called on it.
    omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getCreatedNodes() noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline trampolines: each public wrapper below forwards to the corresponding
// raw *_abi method, validating pointer arguments and translating failure
// Results into exceptions where the ABI returns omni::core::Result.

inline omni::graph::exec::unstable::IGraph* omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::getGraph() noexcept
{
    return getGraph_abi();
}

inline omni::graph::exec::unstable::ITopology* omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilder_abi>::getTopology() noexcept
{
    return getTopology_abi();
}

inline omni::graph::exec::unstable::IGraphBuilderContext* omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilder_abi>::getContext() noexcept
{
    return getContext_abi();
}

inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilder_abi>::getNodeGraphDef() noexcept
{
    return getNodeGraphDef_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::connect(
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode)
{
    OMNI_THROW_IF_ARG_NULL(upstreamNode);
    OMNI_THROW_IF_ARG_NULL(downstreamNode);
    OMNI_THROW_IF_FAILED(connect_abi(upstreamNode.get(), downstreamNode.get()));
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::disconnect(
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode)
{
    OMNI_THROW_IF_ARG_NULL(upstreamNode);
    OMNI_THROW_IF_ARG_NULL(downstreamNode);
    OMNI_THROW_IF_FAILED(disconnect_abi(upstreamNode.get(), downstreamNode.get()));
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::remove(
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
    OMNI_THROW_IF_ARG_NULL(node);
    OMNI_THROW_IF_FAILED(remove_abi(node.get()));
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::setNodeDef(
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
    omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept
{
    setNodeDef_abi(node.get(), nodeDef.get());
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::setNodeGraphDef(
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
    omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept
{
    setNodeGraphDef_abi(node.get(), nodeGraphDef.get());
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::clearDef(
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) noexcept
{
    clearDef_abi(node.get());
}

inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::replacePartition(
    const omni::graph::exec::unstable::NodePartition& partition,
    omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition)
{
    OMNI_THROW_IF_ARG_NULL(definition);
    replacePartition_abi(&partition, definition.get());
}

inline omni::core::ObjectPtr<omni::graph::exec::unstable::INode> omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilder_abi>::createNode(const char* name,
                                                                omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def)
{
    OMNI_THROW_IF_ARG_NULL(name);
    auto return_ = omni::core::steal(createNode_abi(name, def.get()));
    return return_;
}

inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
    omni::graph::exec::unstable::IGraphBuilder_abi>::getCreatedNodes() noexcept
{
    return getCreatedNodes_abi();
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/GraphBuilder.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file GraphBuilder.h //! //! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilder. #pragma once #include <carb/Format.h> #include <omni/graph/exec/unstable/Assert.h> #include <omni/graph/exec/unstable/IGraph.h> #include <omni/graph/exec/unstable/IGraphBuilder.h> #include <omni/graph/exec/unstable/IGraphBuilderContext.h> #include <omni/graph/exec/unstable/INode.h> #include <omni/graph/exec/unstable/INodeDef.h> #include <omni/graph/exec/unstable/INodeFactory.h> #include <omni/graph/exec/unstable/INodeGraphDef.h> #include <omni/graph/exec/unstable/INodeGraphDefDebug.h> namespace omni { namespace graph { namespace exec { namespace unstable { //! @copydoc omni::graph::exec::unstable::IGraphBuilder template <typename... Bases> class GraphBuilderT : public Implements<Bases...> { public: //! Construct graph builder for a root @ref INodeGraphDef. //! //! Construction of a graph builder has a side effect on underlying @c topology causing its invalidation. //! //! May throw. 
static omni::core::ObjectPtr<GraphBuilderT> create(omni::core::ObjectParam<IGraphBuilderContext> context) { OMNI_THROW_IF_ARG_NULL(context); OMNI_GRAPH_EXEC_ASSERT( !omni::graph::exec::unstable::cast<INodeGraphDefDebug>(context->getGraph()->getNodeGraphDef()) || !omni::graph::exec::unstable::cast<INodeGraphDefDebug>(context->getGraph()->getNodeGraphDef())->isExecuting()); auto builder = omni::core::steal(new GraphBuilderT(context.get(), context->getGraph()->getNodeGraphDef())); auto topology = builder->getTopology(); topology->invalidate(); builder->_modifiedTopology(topology); return builder; } //! Construct graph builder for a given @ref INodeGraphDef. //! //! Construction of a graph builder has a side effect on underlying @c topology causing its invalidation. //! //! May throw. static omni::core::ObjectPtr<GraphBuilderT> create(omni::core::ObjectParam<IGraphBuilderContext> context, omni::core::ObjectParam<INodeGraphDef> nodeGraphDef) { OMNI_THROW_IF_ARG_NULL(context); OMNI_THROW_IF_ARG_NULL(nodeGraphDef); OMNI_GRAPH_EXEC_ASSERT(!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get()) || !omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get())->isExecuting()); auto builder = omni::core::steal(new GraphBuilderT(context.get(), nodeGraphDef.get())); auto topology = builder->getTopology(); topology->invalidate(); builder->_modifiedTopology(topology); return builder; } //! Construct graph builder for a given @ref INodeGraphDef without causing topology invalidation. //! //! This builder is used by the pass pipeline when operations to the graph will alter existing topology. //! //! May throw. 
static omni::core::ObjectPtr<GraphBuilderT> createForPass(omni::core::ObjectParam<IGraphBuilderContext> context, omni::core::ObjectParam<INodeGraphDef> nodeGraphDef) { OMNI_THROW_IF_ARG_NULL(context); OMNI_THROW_IF_ARG_NULL(nodeGraphDef); OMNI_GRAPH_EXEC_ASSERT(!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get()) || !omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get())->isExecuting()); auto builder = omni::core::steal(new GraphBuilderT(context.get(), nodeGraphDef.get())); // Detect when node graph was constructed outside of the pass pipeline. Tag these defs are created // during current construction stamp. // // This usage pattern we only have in tests currently. auto topology = nodeGraphDef->getTopology(); if (!topology->getConstructionStamp().isValid()) { builder->_modifiedTopology(topology); } return builder; } protected: //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getGraph_abi IGraph* getGraph_abi() noexcept override { return m_context->getGraph(); } //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getTopology_abi ITopology* getTopology_abi() noexcept override { return m_nodeGraphDef->getTopology(); } //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getContext_abi IGraphBuilderContext* getContext_abi() noexcept override { return m_context; } //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getNodeGraphDef_abi INodeGraphDef* getNodeGraphDef_abi() noexcept override { return m_nodeGraphDef; } //! 
Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::connect_abi omni::core::Result connect_abi(INode* upstreamNode, INode* downstreamNode) noexcept override { try { _modifiedTopology(upstreamNode->getTopology()); IGraphBuilderNode* upstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(upstreamNode); IGraphBuilderNode* downstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(downstreamNode); if (_connect(upstream, downstream)) { return omni::core::kResultSuccess; } OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail); } OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() } //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::disconnect_abi omni::core::Result disconnect_abi(INode* upstreamNode, INode* downstreamNode) noexcept override { try { _modifiedTopology(upstreamNode->getTopology()); IGraphBuilderNode* upstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(upstreamNode); IGraphBuilderNode* downstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(downstreamNode); if (_disconnect(upstream, downstream)) { return omni::core::kResultSuccess; } OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail); } OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() } //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::remove_abi omni::core::Result remove_abi(INode* node) noexcept override { _modifiedTopology(node->getTopology()); try { IGraphBuilderNode* nodeToRemove = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node); nodeToRemove->validateOrResetTopology(); // Nodes don't have an edge back to the root. Check if the removed node is a child of the root. auto graphRoot = nodeToRemove->getRoot(); graphRoot->_removeChild(nodeToRemove); // Silently fails if node is not a root child. // Cache these pointers to avoid virtual method overhead. auto children = nodeToRemove->getChildren(); auto parents = nodeToRemove->getParents(); // Disconnect all parents from the node to be removed. 
// NOTE(review): this chunk begins inside a member function whose opening (and the
// enclosing `try`) appears above this view; the statements below disconnect a node
// from its neighbors before removing it.
            for (auto parent : parents)
            {
                parent->_removeChild(nodeToRemove);
            }
            // Disconnect all children from the node to be removed.
            for (auto child : children)
            {
                child->_removeParent(nodeToRemove);
            }
            // Invalidate all remaining connections of the node to be removed.
            nodeToRemove->_invalidateConnections();
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef_abi
    //!
    //! Marks the topology as modified before swapping in the new opaque definition.
    void setNodeDef_abi(INode* node, INodeDef* nodeDef) noexcept override
    {
        _modifiedTopology(node->getTopology());
        exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_setNodeDef(nodeDef);
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef_abi
    //!
    //! Marks the topology as modified before swapping in the new graph definition.
    void setNodeGraphDef_abi(INode* node, INodeGraphDef* nodeGraphDef) noexcept override
    {
        _modifiedTopology(node->getTopology());
        exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_setNodeGraphDef(nodeGraphDef);
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::clearDef_abi
    //!
    //! Marks the topology as modified before clearing the node's definition.
    void clearDef_abi(INode* node) noexcept override
    {
        _modifiedTopology(node->getTopology());
        exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_clearDef();
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::replacePartition_abi
    //!
    //! Validates that every node in the partition shares the same root and has a valid
    //! topology, then delegates the actual mutation to _commitPartition(). Silently
    //! returns (no-op) when validation fails or the partition is empty.
    void replacePartition_abi(const NodePartition* partition, IDef* definition) noexcept override
    {
        if (partition->size() == 0)
            return;

        // validate the partition
        INode* rootNode = partition->front()->getRoot();
        for (auto nodeInPartition : *partition)
        {
            if (!nodeInPartition->isValidTopology() || nodeInPartition->getRoot() != rootNode)
            {
                return;
            }
        }

        // mutate the graph
        _commitPartition(m_nodeGraphDef, partition, definition);
        _modifiedTopology(m_nodeGraphDef->getTopology());
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::createNode_abi
    //!
    //! Returns nullptr when the node graph definition has no node factory. The created
    //! node is remembered in m_createdNodes so the pass pipeline can discover it; the
    //! acquired reference is handed to the caller via detach().
    INode* createNode_abi(const char* name, IDef* def) noexcept override
    {
        if (auto factory = m_nodeGraphDef->getNodeFactory())
        {
            auto newNode = factory->createNode(name, def);
            m_createdNodes.push_back(newNode.get());
            return newNode.detach();
        }
        else
        {
            return nullptr;
        }
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getCreatedNodes_abi
    //!
    //! Returns a non-owning view over the nodes created via createNode_abi(); empty span
    //! when nothing was created.
    Span<INode* const> getCreatedNodes_abi() noexcept override
    {
        return m_createdNodes.size() ? Span<INode* const>{ m_createdNodes.begin(), m_createdNodes.size() } :
                                       Span<INode* const>{ nullptr, 0 };
    }

    //! Constructor
    //!
    //! Flags the owning graph as "in build" for the builder's lifetime (cleared in the
    //! destructor), so concurrent code can detect an ongoing construction.
    GraphBuilderT(IGraphBuilderContext* context, INodeGraphDef* nodeGraphDef)
        : m_context{ context }, m_nodeGraphDef{ nodeGraphDef }
    {
        m_context->getGraph()->_setInBuild(true);
    }

    ~GraphBuilderT()
    {
        m_context->getGraph()->_setInBuild(false);
    }

private:
    //! This builder modified topology of a graph. Currently it is possible it is not modifying topology belonging to
    //! NodeGraphDef it refers to.
    //!
    void _modifiedTopology(ITopology* modifiedTopology)
    {
        modifiedTopology->_setConstructionInSync(m_context->getConstructionStamp());
    }

    //! Connect upstream -> downstream. Returns false when the two nodes belong to
    //! different topologies (cross-topology edges are not allowed). The parent link is
    //! skipped for root nodes; root connectivity is tracked via children only.
    bool _connect(IGraphBuilderNode* upstream, IGraphBuilderNode* downstream)
    {
        if (upstream->getTopology() == downstream->getTopology())
        {
            upstream->validateOrResetTopology();
            downstream->validateOrResetTopology();
            if (!upstream->hasChild(downstream))
            {
                upstream->_addChild(downstream);
                if (!upstream->isRoot())
                {
                    downstream->_addParent(upstream);
                }
            }
            return true;
        }
        return false;
    }

    //! Disconnect upstream -> downstream. Mirror image of _connect(); returns false for
    //! cross-topology pairs.
    bool _disconnect(IGraphBuilderNode* upstream, IGraphBuilderNode* downstream)
    {
        if (upstream->getTopology() == downstream->getTopology())
        {
            upstream->validateOrResetTopology();
            downstream->validateOrResetTopology();
            if (upstream->hasChild(downstream))
            {
                upstream->_removeChild(downstream);
                if (!upstream->isRoot())
                {
                    downstream->_removeParent(upstream);
                }
            }
            return true;
        }
        return false;
    }

    //! Make changes to the topology with already validated partition and definition.
    //!
    //! Replaces every node of @p partition with a single new node running @p definition.
    //! The algorithm is deliberately linear in the partition size: neighbors are
    //! collected first (duplicates and in-partition nodes included), the partition is
    //! invalidated, and then the still-valid neighbors are cleaned up and rewired to the
    //! replacement node in one pass.
    void _commitPartition(INodeGraphDef* nodeGraphDef, const NodePartition* partition, IDef* definition) noexcept
    {
        OMNI_GRAPH_EXEC_ASSERT(nodeGraphDef && definition && (partition->size() > 0));
        OMNI_GRAPH_EXEC_ASSERT(nodeGraphDef->getNodeFactory().get());

        // we affect the topology, but shouldn't require any memory operation
        std::vector<INode*> parents, children;
        // optimization, let's assume each node has one parent and one child from outside of the partition
        parents.reserve(partition->size() * 2);
        children.reserve(partition->size() * 2);

        // we want cost to be linear and for that we are going to avoid searches in the partition
        // we achieve this by collecting all parents/children (some will be in the partition),
        // then invalidating the partition and cleaning up the immediate upstream and downstream
        for (auto nodeInPartition : *partition)
        {
            for (auto parent : nodeInPartition->getParents())
            {
                parents.push_back(parent);
            }
            for (auto child : nodeInPartition->getChildren())
            {
                children.push_back(child);
            }
            // make the node invalid without invalidating the entire topology
            exec::unstable::cast<IGraphBuilderNode>(nodeInPartition)->_invalidateConnections();
        }

        // generate replacement node; its name is derived from the partition's first node
        std::string nodeName = carb::fmt::format("Partition_{}", partition->front()->getName().getString().c_str());
        auto newNode = createNode_abi(nodeName.c_str(), definition);
        auto newBuilderNode = exec::unstable::cast<IGraphBuilderNode>(newNode);

        // in one pass: cleanup the topology and reconnect to the new node
        auto rootBuilderNode = exec::unstable::cast<IGraphBuilderNode>(partition->front()->getRoot());
        rootBuilderNode->_removeInvalidChildren();
        for (auto parent : parents)
        {
            // neighbors that were inside the partition were invalidated above and are skipped here
            if (parent->isValidTopology())
            {
                auto parentBuilderNode = exec::unstable::cast<IGraphBuilderNode>(parent);
                parentBuilderNode->_removeInvalidChildren();
                this->_connect(parentBuilderNode, newBuilderNode);
            }
        }
        for (auto child : children)
        {
            if (child->isValidTopology())
            {
                auto childBuilderNode = exec::unstable::cast<IGraphBuilderNode>(child);
                childBuilderNode->_removeInvalidParents();
                this->_connect(newBuilderNode, childBuilderNode);
            }
        }
        // Need to make sure we are connected to the root (indirectly, or directly if this is an entry node)
        if (newNode->getParents().size() == 0)
        {
            this->_connect(rootBuilderNode, newBuilderNode);
        }
    }

    IGraphBuilderContext* m_context{ nullptr }; //!< All graph builders are operating within a context. We store pointer
                                                //!< to it.
    INodeGraphDef* m_nodeGraphDef{ nullptr }; //!< Graph topology this builder can modify. This is not yet enforced in
                                              //!< code.

    //! Most of the time we won't be needing any space. The size of 2 was chosen arbitrary.
    using NodeArray = SmallVector<INode*, 2>;
    NodeArray m_createdNodes; //!< Collect nodes created dynamically to allow pass pipeline discover them.
};

//! Core GraphBuilder implementation for @ref omni::graph::exec::unstable::IGraphBuilder
using GraphBuilder = GraphBuilderT<IGraphBuilder>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IPassPipeline.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Runs registered passes.
//!
//! The role of pass pipeline is to populate and prepare the execution graph. The base implementation runs passes based
//! on the type and registration order. Most applications will define their own pass pipeline to control how the
//! execution graph is generated.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>
    : public omni::graph::exec::unstable::IPassPipeline_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassPipeline")

    //! Test if pipeline needs to rebuild (mostly for its acceleration structures).
    bool needsConstruction() noexcept;

    //! Build the pipeline (mostly for its acceleration structures).
    void construct();

    //! Test if pipeline needs to run (after topology changes in the graph).
    bool needsExecute(const omni::graph::exec::unstable::Stamp& globalTopology) noexcept;

    //! Execute the graph transformations pipeline
    void execute(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderContext> builderContext,
                 omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef);
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// The inline wrappers below forward to the raw *_abi methods, translating failed
// omni::core::Result values into C++ exceptions and null arguments into throws.

inline bool omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::needsConstruction() noexcept
{
    return needsConstruction_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::construct()
{
    OMNI_THROW_IF_FAILED(construct_abi());
}

inline bool omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::needsExecute(
    const omni::graph::exec::unstable::Stamp& globalTopology) noexcept
{
    return needsExecute_abi(globalTopology);
}

inline void omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::execute(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderContext> builderContext,
    omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef)
{
    OMNI_THROW_IF_ARG_NULL(builderContext);
    OMNI_THROW_IF_ARG_NULL(nodeGraphDef);
    OMNI_THROW_IF_FAILED(execute_abi(builderContext.get(), nodeGraphDef.get()));
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/SchedulingInfo.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file SchedulingInfo.h //! //! @brief Defines omni::graph::exec::unstable::SchedulingInfo. #pragma once namespace omni { namespace graph { namespace exec { namespace unstable { //! Constraints to be fulfilled by the scheduler when dispatching a task. //! //! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during //! execution. enum class SchedulingInfo { eSerial, //!< Execution of a task should be serialized globally. No other serial task should be running. eParallel, //!< Execution of a task can be done safely in parallel. Parallel tasks can run together with serial. eIsolate, //!< Execution of a task has to be done in isolation. No other tasks can run concurrently. eSchedulerBypass //!< Execution of a task should bypass the scheduler. Either to avoid overhead for lightweight //!< tasks or to serialize within a thread generating the work. }; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundTask.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Class representing a background task. template <> class omni::core::Generated<omni::graph::exec::unstable::IBackgroundTask_abi> : public omni::graph::exec::unstable::IBackgroundTask_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundTask") //! Returns a @c std::future like object used to check if the background task has completed. //! //! A error is returned if this method is called more than once. //! //! This method is not thread safe. omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> getBackgroundResult(); }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> omni::core::Generated< omni::graph::exec::unstable::IBackgroundTask_abi>::getBackgroundResult() { omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> out; OMNI_THROW_IF_FAILED(getBackgroundResult_abi(out.put())); return out; } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionCurrentThread.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IExecutionCurrentThread.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IExecutionCurrentThread.
#pragma once

#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Status.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class ExecutionTask;
class IExecutionContext;
class IExecutionCurrentThread_abi;
class IExecutionCurrentThread;
class IExecutionStateInfo;
class IExecutor;
class IGraph;

//! Encapsulates the execution state for the current thread allowing callers to determine quantities like the @ref
//! omni::graph::exec::unstable::ExecutionTask currently executing on the thread.
//!
//! Because methods in this interface return thread local data, all methods in this interface are thread safe.
//!
//! This interface is usually accessed as a singleton via one of the following helper methods:
//!
//! - @ref omni::graph::exec::unstable::getCurrentTask()
//!
//! - @ref omni::graph::exec::unstable::getCurrentExecutor()
//!
//! This interface contains methods for graph and task execution.  Users should not call these methods directly.  See
//! the methods' docs below for the correct way to perform execution.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
class IExecutionCurrentThread_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
                                  OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutionCurrentThread")>
{
protected:
    //! Executes the given @ref omni::graph::exec::unstable::Graph.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::IExecutionContext::execute().
    //!
    //! From an ABI point-of-view, the purpose of this method is to handle the special case of the top-level @ref
    //! omni::graph::exec::unstable::INodeGraphDef being contained by @ref omni::graph::exec::unstable::IGraph rather
    //! than pointed to by a node in another @ref omni::graph::exec::unstable::INodeGraphDef.  Meaningful values are set
    //! for the threads current task and executor (see @ref omni::graph::exec::unstable::getCurrentTask() and @ref
    //! omni::graph::exec::unstable::getCurrentExecutor()).
    //!
    //! @thread_safety This method is thread safe.
    virtual Status executeGraph_abi(OMNI_ATTR("not_null, throw_if_null") IGraph* graph,
                                    OMNI_ATTR("not_null, throw_if_null") IExecutionContext* context) noexcept = 0;

    //! Executes and sets the thread's "current" task to the given task.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::ExecutionTask::execute().
    //!
    //! This method executes the definition of the node pointed to by the given task.  Importantly, this method sets
    //! thread local data to track the currently running task and executor (see @ref
    //! omni::graph::exec::unstable::getCurrentTask() and @ref omni::graph::exec::unstable::getCurrentExecutor()).
    //!
    //! @thread_safety This method is thread safe.
    virtual Status execute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* task,
                               IExecutor* executor,
                               OMNI_ATTR("in, out, not_null, throw_if_null") Status* taskStatus) noexcept = 0;

    //! Access the task currently executing on the current thread.
    //!
    //! Useful when needing to access execution context state without having to pass it to every function.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentTask().
    //!
    //! May return @c nullptr.
    //!
    //! @thread_safety This method is thread safe.
    virtual ExecutionTask* getCurrentTask_abi() noexcept = 0;

    //! Access the executor currently executing on the current thread.
    //!
    //! Useful when needing to spawn extra work within the scope of the graph.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentExecutor().
    //!
    //! May return @c nullptr.
    //!
    //! @thread_safety This method is thread safe.
    virtual OMNI_ATTR("no_acquire") IExecutor* getCurrentExecutor_abi() noexcept = 0;
};

//! Smart pointer managing an instance of @ref IExecutionCurrentThread.
using ExecutionCurrentThreadPtr = omni::core::ObjectPtr<IExecutionCurrentThread>;

//! Access current thread's execution state.
//!
//! The returned pointer is a singleton managed by *omni.graph.exec*, and does *not* have @ref
//! omni::core::IObject::acquire() called on it before being returned.  The caller should *not* call @ref
//! omni::core::IObject::release() on the returned raw pointer.
//!
//! @thread_safety This method is thread safe.
inline IExecutionCurrentThread* getCurrentThread() noexcept;

//! Access task currently executed on a calling thread.
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
inline ExecutionTask* getCurrentTask() noexcept;

//! Access executor currently used on a calling thread.
//!
//! Useful when needing to spawn extra work within the scope of the graph.
//!
//! The returned @ref IExecutor does *not* have @ref omni::core::IObject::acquire() called before being returned.
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
inline IExecutor* getCurrentExecutor() noexcept;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutionCurrentThread.gen.h>

//! @copydoc omni::graph::exec::unstable::IExecutionCurrentThread_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutionCurrentThread
    : public omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>
{
};

// additional headers needed for API implementation
#include <omni/core/ITypeFactory.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/IExecutor.h>
#include <omni/graph/exec/unstable/IGraph.h>

inline omni::graph::exec::unstable::IExecutionCurrentThread* omni::graph::exec::unstable::getCurrentThread() noexcept
{
    // createType() always calls acquire() and returns an ObjectPtr to make sure release() is called.  we don't want to
    // hold a ref here to avoid static destruction issues.  here we allow the returned ObjectPtr to destruct (after
    // calling get()) to release our ref.  we know the DLL in which the singleton was created is maintaining a ref and
    // will keep the singleton alive for the lifetime of the DLL.
    //
    // NOTE(review): the deliberately-unowned raw pointer is the point of this pattern;
    // do not "fix" it into an owning ObjectPtr.
    static auto sSingleton = omni::core::createType<IExecutionCurrentThread>().get();
    return sSingleton;
}

inline omni::graph::exec::unstable::ExecutionTask* omni::graph::exec::unstable::getCurrentTask() noexcept
{
    return getCurrentThread()->getCurrentTask();
}

inline omni::graph::exec::unstable::IExecutor* omni::graph::exec::unstable::getCurrentExecutor() noexcept
{
    return getCurrentThread()->getCurrentExecutor();
}

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutionCurrentThread.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/INodeFactory.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Factory interface for creating @ref omni::graph::exec::unstable::INode objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! See @ref omni::graph::exec::unstable::createNodeFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeFactory_abi>
    : public omni::graph::exec::unstable::INodeFactory_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeFactory")

    //! Creates and returns a new node within a topology this factory came from.
    //!
    //! It is legal to pass nullptr as a definition, or either @ref omni::graph::exec::unstable::INodeDef
    //! or @ref omni::graph::exec::unstable::INodeGraphDef
    //!
    //! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
    omni::core::ObjectPtr<omni::graph::exec::unstable::INode> createNode(
        const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def);
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline wrapper: validates the name argument, forwards to the raw ABI call, and
// returns the acquired node via a smart pointer (out.put()).  Note that `def` is
// intentionally NOT null-checked -- a null definition is legal per the docs above.
inline omni::core::ObjectPtr<omni::graph::exec::unstable::INode> omni::core::Generated<
    omni::graph::exec::unstable::INodeFactory_abi>::createNode(const char* name,
                                                               omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def)
{
    OMNI_THROW_IF_ARG_NULL(name);
    omni::core::ObjectPtr<omni::graph::exec::unstable::INode> out;
    OMNI_THROW_IF_FAILED(createNode_abi(name, def.get(), out.put()));
    return out;
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IInvalidationForwarder.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Interface wrapping a function (possibly with storage) to forward topology invalidation notices. template <> class omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi> : public omni::graph::exec::unstable::IInvalidationForwarder_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IInvalidationForwarder") //! Invokes the wrapped function. //! //! The given topology must not be @c nullptr. void invoke(omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology); }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline void omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi>::invoke( omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology) { OMNI_THROW_IF_ARG_NULL(topology); invoke_abi(topology.get()); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/Graph.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file Graph.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraph.
#pragma once

#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/ExecutorFactory.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/NodeGraphDef.h>

#include <memory>
#include <string>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! @copydoc omni::graph::exec::unstable::IGraph
template <typename... Bases>
class GraphT : public Implements<Bases...>
{
public:
    //! Construct a graph with default executor attached to an empty node graph.
    //!
    //! May throw.
    static omni::core::ObjectPtr<GraphT> create(const char* name)
    {
        OMNI_THROW_IF_ARG_NULL(name);
        return omni::core::steal(new GraphT(name));
    }

    //! Construct a graph with a given executor and an empty node graph.
    //!
    //! May throw.
    static omni::core::ObjectPtr<GraphT> create(const ExecutorFactory& executorFactory, const char* name)
    {
        OMNI_THROW_IF_ARG_NULL(name);
        return omni::core::steal(new GraphT(executorFactory, name));
    }

    //! Construct a graph with the given node graph.
    //!
    //! The signature of @p nodeGraphDefFactory must be equivalent to `NodeGraphDefPtr(IGraph*)`.
    //!
    //! May throw.
    template <typename Fn>
    static omni::core::ObjectPtr<GraphT> create(const char* name, Fn&& nodeGraphDefFactory)
    {
        OMNI_THROW_IF_ARG_NULL(name);
        return omni::core::steal(new GraphT(name, std::forward<Fn>(nodeGraphDefFactory)));
    }

protected:
    //! Core implementation of @ref omni::graph::exec::unstable::IGraph::getNodeGraphDef_abi
    //!
    //! Returns the top-level definition without acquiring a reference.
    INodeGraphDef* getNodeGraphDef_abi() noexcept override
    {
        return m_nodeGraphDef.get();
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraph::getName_abi
    const ConstName* getName_abi() noexcept override
    {
        return &m_name;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp_abi
    Stamp* getGlobalTopologyStamp_abi() noexcept override
    {
        return &m_globalTopologyStamp;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraph::inBuild_abi
    //!
    //! True while at least one builder holds the in-build flag (see _setInBuild_abi).
    virtual bool inBuild_abi() noexcept override
    {
        return (m_inBuild > 0);
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IGraph::_setInBuild_abi
    //!
    //! Reference-counted flag: each builder increments on construction and decrements
    //! on destruction, so nested/parallel builders keep inBuild() true until the last
    //! one finishes.
    virtual void _setInBuild_abi(bool inBuild) noexcept override
    {
        if (inBuild)
        {
            ++m_inBuild;
        }
        else
        {
            --m_inBuild;
            // a negative count means unmatched _setInBuild(false) calls
            OMNI_GRAPH_EXEC_ASSERT(m_inBuild > -1);
        }
    }

    //! Constructor
    //!
    //! Construct with a default top level graph definition
    GraphT(const char* name) : m_name(name)
    {
        m_globalTopologyStamp.next();
        m_nodeGraphDef = NodeGraphDef::create(this, "NODE-ROOT"); // may throw
    }

    //! Constructor
    //!
    //! Construct with a custom executor for a top level graph definition
    GraphT(const ExecutorFactory& executorFactory, const char* name) : m_name(name)
    {
        m_globalTopologyStamp.next();
        m_nodeGraphDef = NodeGraphDef::create(this, executorFactory, "NODE-ROOT"); // may throw
    }

    //! Constructor
    //!
    //! Construct with a custom top level graph factory
    template <typename Fn>
    GraphT(const char* name, Fn&& nodeGraphDefFactory) : m_name(name)
    {
        m_globalTopologyStamp.next();
        m_nodeGraphDef = nodeGraphDefFactory(this); // may throw
    }

private:
    Stamp m_globalTopologyStamp; //!< Global graph topology. Incremented every time any nested topologies changes

    omni::core::ObjectPtr<INodeGraphDef> m_nodeGraphDef; //!< Top level node graph definition

    ConstName m_name; //!< Name of the execution graph

    //! How many builders are active. Atomic since multiple builders may be running in parallel.
    std::atomic<int> m_inBuild{ 0 };
};

//! Core Graph implementation for @ref omni::graph::exec::unstable::IGraph
using Graph = GraphT<IGraph>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/NodeGraphDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file NodeGraphDef.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::NodeGraphDef
#pragma once

#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/Executor.h>
#include <omni/graph/exec/unstable/ExecutorFactory.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/INodeGraphDefDebug.h>
#include <omni/graph/exec/unstable/Topology.h>

#include <atomic>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! @copydoc omni::graph::exec::unstable::INodeGraphDef
template <typename... Bases>
class NodeGraphDefT : public Implements<Bases...>
{
public:
    //! Construct graph node definition with default executor
    //!
    //! @param owner Execution graph having this graph as part of the global topology
    //! @param definitionName Definition name is considered as a token that transformation passes can register against
    //!
    //! May throw.
    static omni::core::ObjectPtr<NodeGraphDefT> create(omni::core::ObjectParam<IGraph> owner, const char* definitionName)
    {
        OMNI_THROW_IF_ARG_NULL(owner);
        OMNI_THROW_IF_ARG_NULL(definitionName);
        return omni::core::steal(new NodeGraphDefT(owner, definitionName));
    }

    //! Construct graph node definition with a given executor factory
    //!
    //! @param owner Execution graph having this graph as part of the global topology
    //! @param executorFactory Factory returning executor for this graph
    //! @param definitionName Definition name is considered as a token that transformation passes can register against
    //!
    //! May throw.
    static omni::core::ObjectPtr<NodeGraphDefT> create(omni::core::ObjectParam<IGraph> owner,
                                                       const ExecutorFactory& executorFactory,
                                                       const char* definitionName)
    {
        OMNI_THROW_IF_ARG_NULL(owner);
        OMNI_THROW_IF_ARG_NULL(definitionName);
        return omni::core::steal(new NodeGraphDefT(owner, executorFactory, definitionName));
    }

protected:
    //! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeGraphDef
    //!
    //! Execution is delegated to @ref omni::graph::exec::unstable::IExecutor. The lifetime of an executor is only for a
    //! single execution and any state that needs to persist longer than a single execution must be written with @ref
    //! omni::graph::exec::unstable::IExecutionContext::setNodeData_abi()
    Status execute_abi(ExecutionTask* info) noexcept override
    {
        // ef-docs nodegraphdef-execute-begin
        omni::core::ObjectPtr<IExecutor> executor;
        if (m_executorFactory)
        {
            executor = m_executorFactory(m_topology, *info);
        }
        else
        {
            // no custom factory supplied -- fall back to the default executor
            executor = ExecutorFallback::create(m_topology, *info);
        }

        return executor->execute(); // execute the node specified by info->getNode()
        // ef-docs nodegraphdef-execute-end
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeGraphDef
    //!
    //! Always reports serial scheduling for the graph definition itself.
    SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override
    {
        return SchedulingInfo::eSerial;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IDef::getName_abi for @ref NodeGraphDef
    const ConstName* getName_abi() noexcept override
    {
        return &m_name;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::getTopology_abi
    ITopology* getTopology_abi() noexcept override
    {
        return m_topology.get();
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::initializeState_abi
    //!
    //! Base implementation keeps no per-execution state; always succeeds.
    omni::core::Result initializeState_abi(ExecutionTask* rootTask) noexcept override
    {
        return omni::core::kResultSuccess;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::preExecute_abi
    Status preExecute_abi(ExecutionTask* info) noexcept override
    {
        return Status::eSuccess;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::postExecute_abi
    Status postExecute_abi(ExecutionTask* info) noexcept override
    {
        return Status::eSuccess;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::getNodeFactory_abi
    //!
    //! Base implementation exposes no factory; builders cannot create nodes in this
    //! definition unless a subclass overrides this.
    INodeFactory* getNodeFactory_abi() noexcept override
    {
        return nullptr;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::getExecutionCount_abi
    uint64_t getExecutionCount_abi() noexcept override
    {
        return m_executionCount;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::incrementExecutionCount_abi
    void incrementExecutionCount_abi() noexcept override
    {
        ++m_executionCount;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::decrementExecutionCount_abi
    void decrementExecutionCount_abi() noexcept override
    {
        --m_executionCount;
    }

    //! Constructor with a default executor
    NodeGraphDefT(omni::core::ObjectParam<IGraph> owner, const char* definitionName) // may throw
        : m_topology{ Topology::create(definitionName) }, m_name{ definitionName }
    {
        _addInvalidationForwarder(owner);
    }

    //! Constructor with a custom executor
    NodeGraphDefT(omni::core::ObjectParam<IGraph> owner,
                  ExecutorFactory executorFactory,
                  const char* definitionName) // may throw
        : m_topology{ Topology::create(definitionName) },
          m_executorFactory(std::move(executorFactory)),
          m_name{ definitionName }
    {
        _addInvalidationForwarder(owner);
    }

private:
    //! Private method that will allow forwarding of topology invalidation to the execution graph.
    //! Invalid global topology will allow pass pipeline to execute and discover invalidated definitions.
    //!
    //! NOTE(review): the forwarder is keyed by the owner's pointer value; presumably a
    //! matching removal uses the same key elsewhere -- confirm when modifying.
    void _addInvalidationForwarder(omni::core::ObjectParam<IGraph> owner) // may throw
    {
        m_topology->addInvalidationForwarder(reinterpret_cast<InvalidationForwarderId>(owner.get()),
                                             [global = owner->getGlobalTopologyStamp()](ITopology*) -> void
                                             { global->next(); });
    }

    omni::core::ObjectPtr<ITopology> m_topology; //!< Graphs topology
    ExecutorFactory m_executorFactory; //!< Executor factory (if empty, default executor will be used)
    std::atomic<std::size_t> m_executionCount{ 0 }; //!< Debugging counter to detect illegal executions.
    ConstName m_name; //!< Definition name
};

//! Core NodeGraphDef implementation for @ref omni::graph::exec::unstable::INodeGraphDef
using NodeGraphDef = NodeGraphDefT<INodeGraphDef, INodeGraphDefDebug>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/INodeFactory.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file INodeFactory.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeFactory.

#pragma once

#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBase.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IGraphBuilder;
class IDef;
class INode;
class INodeFactory;
class INodeFactory_abi;

//! Factory interface for creating @ref omni::graph::exec::unstable::INode objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! See @ref omni::graph::exec::unstable::createNodeFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
class INodeFactory_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeFactory")>
{
protected:
    //! Creates and returns a new node within a topology this factory came from.
    //!
    //! It is legal to pass nullptr as a definition, or either @ref omni::graph::exec::unstable::INodeDef
    //! or @ref omni::graph::exec::unstable::INodeGraphDef
    //!
    //! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
    virtual OMNI_ATTR("throw_result") omni::core::Result createNode_abi(
        OMNI_ATTR("in, not_null, throw_if_null, c_str") const char* name,
        IDef* def,
        OMNI_ATTR("not_null, throw_if_null, out, *return") INode** out) noexcept = 0;
};

//! Smart pointer managing an instance of @ref INodeFactory.
using NodeFactoryPtr = omni::core::ObjectPtr<INodeFactory>;

//! Generates an @ref INodeFactory from an invocable object such as a function pointer, functor, etc.
//!
//! The given function should have the signature `omni::core::ObjectPtr<INode>(const char*, IDef*)`.
template <typename Fn>
NodeFactoryPtr createNodeFactory(Fn&& fn);

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeFactory.gen.h>

//! @copydoc omni::graph::exec::unstable::INodeFactory_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeFactory
    : public omni::core::Generated<omni::graph::exec::unstable::INodeFactory_abi>
{
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>

#ifndef DOXYGEN_BUILD
template <typename Fn>
omni::graph::exec::unstable::NodeFactoryPtr omni::graph::exec::unstable::createNodeFactory(Fn&& fn)
{
    // Local adapter that wraps the invocable in the ABI-safe INodeFactory interface.
    class FactoryImpl : public Implements<INodeFactory>
    {
    public:
        // NOTE(review): when createNodeFactory() is called with an lvalue, Fn deduces to an
        // lvalue-reference type, so m_fn stores a reference to the caller's functor rather than a
        // copy — confirm callers pass rvalues or keep the functor alive for the factory's lifetime.
        FactoryImpl(Fn&& fn) : m_fn(std::move(fn))
        {
        }

    protected:
        //! ABI entry point: invokes the wrapped functor and transfers ownership of the
        //! resulting node to the caller via @p out.
        omni::core::Result createNode_abi(const char* name, IDef* def, INode** out) noexcept override
        {
            try
            {
                NodePtr newNode = m_fn(name, def); // may throw
                *out = newNode.detach(); // caller receives the reference acquired by the functor
                return omni::core::kResultSuccess;
            }
            OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
        }

        Fn m_fn; // wrapped invocable (see lifetime note on the constructor)
    };

    return omni::core::steal(new FactoryImpl(std::forward<Fn>(fn)));
}
#endif // DOXYGEN_BUILD

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeFactory.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResultWriter.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Functor interface used to write the result of a background task. template <> class omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi> : public omni::graph::exec::unstable::IBackgroundResultWriter_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundResultWriter") //! Write the result. omni::graph::exec::unstable::Status write(omni::graph::exec::unstable::ExecutionTask& info) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi>::write( omni::graph::exec::unstable::ExecutionTask& info) noexcept { return write_abi(&info); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/BackgroundTask.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file BackgroundTask.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::BackgroundTask.

#pragma once

#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBackgroundTask.h>

#include <future>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! @copydoc omni::graph::exec::unstable::IBackgroundTask
//!
//! Implemented on top of a std::promise / std::future pair: the producer publishes an
//! @ref IBackgroundResultWriter via setResultWriter_abi() and the consumer observes it
//! through the @ref IBackgroundResult returned by getBackgroundResult_abi().
class BackgroundTask : public Implements<IBackgroundTask>
{
public:
    //! Creates a new @ref BackgroundTask.
    //!
    //! May throw
    static omni::core::ObjectPtr<BackgroundTask> create()
    {
        return omni::core::steal(new BackgroundTask);
    }

protected:
    //! Allows access to result of an async operation.
    using Future = std::future<omni::core::ObjectPtr<IBackgroundResultWriter>>;

    //! Allows setting the result of an async operation.
    using Promise = std::promise<omni::core::ObjectPtr<IBackgroundResultWriter>>;

    //! @copydoc IBackgroundTask_abi::getBackgroundResult_abi
    //!
    //! Wraps this task's future in a heap-allocated @ref IBackgroundResult handed to the
    //! caller via @p out. std::promise::get_future() may only be called once, so this
    //! method is expected to be called at most once per task.
    omni::core::Result getBackgroundResult_abi(IBackgroundResult** out) noexcept override
    {
        // Local adapter exposing the std::future through the ABI-safe IBackgroundResult interface.
        class Result : public Implements<IBackgroundResult>
        {
        public:
            Result(Future&& future) : m_future(std::move(future))
            {
            }

        protected:
            //! Non-blocking readiness poll (zero-timeout wait on the future).
            omni::core::Result isReady_abi(bool* out) noexcept override
            {
                try
                {
                    *out = (m_future.wait_for(std::chrono::milliseconds(0)) == std::future_status::ready); // may throw
                    return omni::core::kResultSuccess;
                }
                OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
            }

            //! "Cancel" by (optionally) waiting for the producer to finish; the underlying
            //! std::future offers no true cancellation, so the pending work is not interrupted.
            omni::core::Result cancel_abi(bool blocking) noexcept override
            {
                try
                {
                    if (blocking)
                    {
                        m_future.wait(); // may throw
                    }
                    return omni::core::kResultSuccess;
                }
                OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
            }

            //! Blocks for the writer (future.get()) and delegates writing the result to it.
            omni::core::Result write_abi(ExecutionTask* info, Status* out) noexcept override
            {
                try
                {
                    // Pre-set eUnknown so *out is defined even if get()/write() throws below.
                    *out = Status::eUnknown;
                    *out = m_future.get()->write(*info); // may throw
                    return omni::core::kResultSuccess;
                }
                OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
            }

            //! Waits up to @p nanoseconds for the result and maps std::future_status onto
            //! @ref BackgroundResultStatus.
            omni::core::Result waitFor_abi(uint64_t nanoseconds, BackgroundResultStatus* out) noexcept override
            {
                try
                {
                    auto result = m_future.wait_for(std::chrono::nanoseconds(nanoseconds)); // may throw
                    switch (result)
                    {
                        // NOTE(review): a deferred future is reported as eReady even though its
                        // writer has not run yet — confirm deferred futures cannot occur here.
                        case std::future_status::deferred: // ?
                        case std::future_status::ready:
                            *out = BackgroundResultStatus::eReady;
                            break;
                        case std::future_status::timeout:
                            *out = BackgroundResultStatus::eTimeout;
                            break;
                        default:
                            throw std::logic_error("unknown future state");
                    }
                    return omni::core::kResultSuccess;
                }
                OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
            }

        private:
            Future m_future;
        };

        try
        {
            // Ownership of the new Result transfers to the caller (out-param returns acquired).
            *out = new Result(m_promise.get_future());
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

    //! @copydoc IBackgroundTask_abi::setResultWriter_abi
    //!
    //! Publishes the writer to the consumer side. std::promise::set_value() throws if a
    //! value was already set; the exception is converted to an ABI error code.
    omni::core::Result setResultWriter_abi(IBackgroundResultWriter* writer) noexcept override
    {
        try
        {
            m_promise.set_value(omni::core::borrow(writer));
            return omni::core::kResultSuccess;
        }
        OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
    }

private:
    Promise m_promise; // producer side of the future returned by getBackgroundResult_abi()
};

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/Types.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file Types.h //! //! @brief Defines typedefs used by interfaces. //! //! Because interface bindings are inlined and sometimes coupled, you sometimes need to break out typedefs into their //! own file so that you can get the include order correct in interface .h files. #pragma once #include <omni/graph/exec/unstable/EnumBitops.h> #include <cstdint> #include <limits> namespace omni { namespace graph { //! Omniverse Execution Framework (EF) //! //! The Execution Framework has no dependencies on OmniGraph and designed to be front-end agnostic. It could very //! much live in its own namespace, but we decided to make it part of @ref omni::graph namespace. There is no runtime //! without authoring front-end and we consider OmniGraph everyone knows as the front-end to runtime execution. //! EF then sits at the backend, orchestrating execution of computation defined by one or many front-ends. //! //! OmniGraph is becoming an umbrella for authoring front-end and execution backend. namespace exec { //! Unstable features currently in development. Do not depend on any API or ABI in this namespace, as it will change //! without notice. namespace unstable { //! Each node in an @ref ITopology is given a unique index (via @ref ITopology::acquireNodeIndex()). using NodeIndexInTopology = uint64_t; //! Type which store a unique identifier for a node or definition. using NameHash = uint64_t; //! Hash of each node's topology index in a path. using ExecutionPathHash = uint64_t; //! Key for a piece of data attached to a node. 
using NodeDataKey = uint64_t; //! Pass priority used by @ref IPassPipeline to resolve conflicts between passes. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. using PassPriority = uint32_t; //! Constant to denote an @ref INode has not been assigned an index in an @ref ITopology. constexpr const uint64_t kInvalidNodeIndexInTopology = std::numeric_limits<uint64_t>::max(); static_assert(std::numeric_limits<uint64_t>::max() == 0xFFFFFFFFFFFFFFFF, "unexpected uin64_t max value"); //! Grouping type for different passes. //! //! Graph transformation pass is registered with a given type and type can't be changed after. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. //! //! @note We are not yet using all these states...expect changes. //! //! @ingroup groupOmniGraphExecPassRegistration enum class PassType { ePopulate, //!< open-up graph types ePartitioning, //!< change granularity of executions (including executors) eGlobal, //!< pass is running over entire graph. no other passes can run now eTypeInference, //!< resolve types eOverrideExecution, //!< override compute methods, executors, etc eScheduling, //!< memory access, pipeline stages, etc eCount //!< total number of known pass types }; //! Current execution status of pass pipeline a @ref omni::graph::exec::unstable::IPassPipeline. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. enum class PassPipelineStatus : uint32_t { eNone = 0, //!< Pipeline is not executing. eExecuting = 1 << 0, //!< Pipeline is running eTopologyChangesAllowed = 1 << 1, //!< Pipeline is allowing mutating changes to topology }; //! Enable bitwise operations on PassPipelineStatus state. template <> struct EnumBitops<PassPipelineStatus> : EnumBitops<>::allow_bitops { }; //! Result of waiting for the result of a @ref omni::graph::exec::unstable::IBackgroundResult. enum class BackgroundResultStatus { eReady, //!< The result is ready. 
eTimeout, //!< The result did not become ready int he specified wait time. }; //! Type specific function for deleting context specific execution data associated with a node. //! //! The function is expected to know the type given as the first arg and handle the deletion of the type in an //! appropriate manner. Usually, this means casting the `void*` pointer to the proper type and calling `delete`. using NodeDataDeleterFn = void(void*); } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IBase.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Base class for all @ref omni::graph::exec objects.
//!
//! Defines an interface for casting between objects without calling @ref omni::core::IObject::acquire().
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBase_abi> : public omni::graph::exec::unstable::IBase_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBase")

    //! Casts this object to the type described by the given id.
    //!
    //! Returns @c nullptr if the cast was not successful.
    //!
    //! Unlike @ref omni::core::IObject::cast(), this casting method does not call @ref omni::core::IObject::acquire().
    //!
    //! @thread_safety This method is thread safe.
    void* castWithoutAcquire(omni::core::TypeId id) noexcept;

    //! Returns the number of different instances (this included) referencing the current object.
    //!
    //! @thread_safety This method is thread safe.
    uint32_t getUseCount() noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline API wrappers: forward directly to the corresponding ABI methods.
inline void* omni::core::Generated<omni::graph::exec::unstable::IBase_abi>::castWithoutAcquire(omni::core::TypeId id) noexcept
{
    return castWithoutAcquire_abi(id);
}

inline uint32_t omni::core::Generated<omni::graph::exec::unstable::IBase_abi>::getUseCount() noexcept
{
    return getUseCount_abi();
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IPassFactory.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Factory interface for creating @ref omni::graph::exec::unstable::IPass objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::IPassRegistry.
//!
//! See @ref omni::graph::exec::unstable::createPassFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassFactory_abi>
    : public omni::graph::exec::unstable::IPassFactory_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassFactory")

    //! Creates and returns a pass.
    //!
    //! The returned @ref omni::graph::exec::unstable::IPass will have @ref omni::core::IObject::acquire() called on it.
    omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> createPass(
        omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder);
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline API wrapper: validates the argument, forwards to the ABI method, converts failure
// codes to exceptions, and wraps the acquired raw pointer in a smart pointer.
inline omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> omni::core::
    Generated<omni::graph::exec::unstable::IPassFactory_abi>::createPass(
        omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder)
{
    OMNI_THROW_IF_ARG_NULL(builder);
    omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> out;
    OMNI_THROW_IF_FAILED(createPass_abi(builder.get(), out.put()));
    return out;
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/ITopology.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! The Topology of a graph is stored in this class.
//!
//! @ref omni::graph::exec::unstable::ITopology is a helper interface used to quickly invalidate the topology, quickly
//! determine if the topology has been invalidated, assign each node in the topology a unique index (suitable for access
//! in contiguous memory), and provide access to the root node.
//!
//! Topologies play a large role in graph invalidation. See @rstref{Graph Invalidation <ef_graph_invalidation>} for
//! details.
//!
//! To better understand how this object relates to other objects in the Execution Framework, see
//! @rstref{Graph Concepts <ef_graph_concepts>}.
//!
//! See @ref omni::graph::exec::unstable::Topology for a concrete implementation of this interface.
template <>
class omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>
    : public omni::graph::exec::unstable::ITopology_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::ITopology")

    //! Returns how many nodes are alive in this topology. Some of the counted nodes may not be connected and
    //! discoverable from the root node.
    //!
    //! @thread_safety This method is thread safe.
    uint64_t getNodeCount() noexcept;

    //! Returns the topology's root node that allows reaching all of the valid nodes in the topology.
    //!
    //! The returned @ref omni::graph::exec::unstable::INode will *not* have @ref omni::core::IObject::acquire() called
    //! before being returned.
    //!
    //! The returned pointer will remain valid for the lifetime of this object.
    //!
    //! @thread_safety This method is thread safe.
    omni::graph::exec::unstable::INode* getRoot() noexcept;

    //! Returns the topology stamp. This stamp is updated each time the topology is invalidated.
    //!
    //! See omni::graph::exec::unstable::ITopology::invalidate() to invalidate the topology (and thereby update this
    //! Stamp).
    //!
    //! @thread_safety This method is thread safe.
    omni::graph::exec::unstable::Stamp getStamp() noexcept;

    //! Invalidate topology. All edges of the graph will be dropped (lazily), nodes remain valid and can be used to
    //! build new topology.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during
    //! invalidation.
    //!
    //! It is not recommended to call this method during graph execution. Rather, defer invalidation until after
    //! execution.
    //!
    //! @thread_safety This method is thread safe.
    void invalidate() noexcept;

    //! Returns a unique index for a node in this topology.
    //!
    //! Users should not call this method. Only the constructors of implementations of @ref
    //! omni::graph::exec::unstable::INode should call this method.
    //!
    //! Returns an error if an index could not be acquired.
    //!
    //! See @ref omni::graph::exec::unstable::ITopology::releaseNodeIndex().
    //!
    //! @thread_safety This method is not thread safe.
    omni::graph::exec::unstable::NodeIndexInTopology acquireNodeIndex();

    //! Release unique index of a node in this topology. Shouldn't be used by anything else than a node's destructor.
    //!
    //! See @ref omni::graph::exec::unstable::ITopology::acquireNodeIndex().
    //!
    //! @thread_safety This method is not thread safe.
    void releaseNodeIndex(omni::graph::exec::unstable::NodeIndexInTopology index) noexcept;

    //! Add a callback to forward invalidation to other entities.
    //!
    //! At a minimum, the top-level @ref omni::graph::exec::unstable::IGraph will register a invalidation callback with
    //! all topologies created within a pass pipeline. This allows tracking invalidation and triggering minimal graph
    //! rebuild.
    //!
    //! In the future, override passes can generate new graphs and still track authoring invalidation by registering to
    //! the original graph topologies invalidation.
    //!
    //! The given @ref omni::graph::exec::unstable::IInvalidationForwarder will be stored and have @ref
    //! omni::core::IObject::acquire() called.
    //!
    //! If @p owner has a current forwarder, it will be replaced with the given forwarder.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during
    //! invalidation.
    //!
    //! See @ref omni::graph::exec::unstable::ITopology::removeInvalidationForwarder().
    //!
    //! @thread_safety This method is not thread safe.
    omni::core::Result addInvalidationForwarder(
        omni::graph::exec::unstable::InvalidationForwarderId owner,
        omni::core::ObjectParam<omni::graph::exec::unstable::IInvalidationForwarder> callback);

    //! Remove invalidation forwarding for a given owner.
    //!
    //! If the given owner is not known, this method does nothing.
    //!
    //! See @ref omni::graph::exec::unstable::ITopology::addInvalidationForwarder().
    //!
    //! @thread_safety This method is not thread safe.
    void removeInvalidationForwarder(omni::graph::exec::unstable::InvalidationForwarderId owner) noexcept;

    //! Get construction version this topology is synchronized with.
    //!
    //! @thread_safety This method is thread safe.
    omni::graph::exec::unstable::SyncStamp getConstructionStamp() noexcept;

    //! Private method only for IGraphBuilder, used to tag construction version.
    //!
    //! @thread_safety Calling this method concurrently is not recommended.
    void _setConstructionInSync(const omni::graph::exec::unstable::Stamp& toSync) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline API wrappers: forward to the corresponding ABI methods, converting out-params and
// failure codes to return values and exceptions where applicable.

inline uint64_t omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getNodeCount() noexcept
{
    return getNodeCount_abi();
}

inline omni::graph::exec::unstable::INode* omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getRoot() noexcept
{
    return getRoot_abi();
}

inline omni::graph::exec::unstable::Stamp omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getStamp() noexcept
{
    return getStamp_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::invalidate() noexcept
{
    invalidate_abi();
}

inline omni::graph::exec::unstable::NodeIndexInTopology omni::core::Generated<
    omni::graph::exec::unstable::ITopology_abi>::acquireNodeIndex()
{
    omni::graph::exec::unstable::NodeIndexInTopology out;
    OMNI_THROW_IF_FAILED(acquireNodeIndex_abi(&out));
    return out;
}

inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::releaseNodeIndex(
    omni::graph::exec::unstable::NodeIndexInTopology index) noexcept
{
    releaseNodeIndex_abi(index);
}

inline omni::core::Result omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::addInvalidationForwarder(
    omni::graph::exec::unstable::InvalidationForwarderId owner,
    omni::core::ObjectParam<omni::graph::exec::unstable::IInvalidationForwarder> callback)
{
    OMNI_THROW_IF_ARG_NULL(callback);
    auto return_ = addInvalidationForwarder_abi(owner, callback.get());
    return return_;
}

inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::removeInvalidationForwarder(
    omni::graph::exec::unstable::InvalidationForwarderId owner) noexcept
{
    removeInvalidationForwarder_abi(owner);
}

inline omni::graph::exec::unstable::SyncStamp omni::core::Generated<
    omni::graph::exec::unstable::ITopology_abi>::getConstructionStamp() noexcept
{
    return getConstructionStamp_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::_setConstructionInSync(
    const omni::graph::exec::unstable::Stamp& toSync) noexcept
{
    _setConstructionInSync_abi(toSync);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/EnumBitops.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file EnumBitops.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::EnumBitops.

#pragma once

#include <type_traits>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! Opt-in trait that enables bitwise operators for enum classes.
//!
//! Specialize this template for your enum and inherit from @c EnumBitops<>::allow_bitops;
//! the operators defined below then become available for that enum via SFINAE.
template <class T = void>
struct EnumBitops
{
};

#ifndef DOXYGEN_BUILD
template <>
struct EnumBitops<void>
{
    struct _allow_bitops
    {
        static constexpr bool allow_bitops = true;
    };

    //! Convenience base for opting an enum into the bitwise operators.
    using allow_bitops = _allow_bitops;

    //! SFINAE gate: yields @c R only for enum types that opted in via @c allow_bitops.
    template <class T, class R = T>
    using t = typename std::enable_if<std::is_enum<T>::value && EnumBitops<T>::allow_bitops, R>::type;

    //! The enum's underlying integral type.
    template <class T>
    using u = typename std::underlying_type<T>::type;
};

//! Bitwise NOT of an opted-in enum value.
template <class T>
constexpr EnumBitops<>::t<T> operator~(T value)
{
    return static_cast<T>(~static_cast<EnumBitops<>::u<T>>(value));
}

//! Bitwise OR of two opted-in enum values.
template <class T>
constexpr EnumBitops<>::t<T> operator|(T lhs, T rhs)
{
    return static_cast<T>(static_cast<EnumBitops<>::u<T>>(lhs) | static_cast<EnumBitops<>::u<T>>(rhs));
}

//! Bitwise AND of two opted-in enum values.
template <class T>
constexpr EnumBitops<>::t<T> operator&(T lhs, T rhs)
{
    return static_cast<T>(static_cast<EnumBitops<>::u<T>>(lhs) & static_cast<EnumBitops<>::u<T>>(rhs));
}

//! Bitwise XOR of two opted-in enum values.
template <class T>
constexpr EnumBitops<>::t<T> operator^(T lhs, T rhs)
{
    return static_cast<T>(static_cast<EnumBitops<>::u<T>>(lhs) ^ static_cast<EnumBitops<>::u<T>>(rhs));
}

//! In-place bitwise OR; returns the updated left-hand operand.
template <class T>
constexpr EnumBitops<>::t<T, T&> operator|=(T& lhs, T rhs)
{
    lhs = lhs | rhs;
    return lhs;
}

//! In-place bitwise AND; returns the updated left-hand operand.
template <class T>
constexpr EnumBitops<>::t<T, T&> operator&=(T& lhs, T rhs)
{
    lhs = lhs & rhs;
    return lhs;
}

//! In-place bitwise XOR; returns the updated left-hand operand.
template <class T>
constexpr EnumBitops<>::t<T, T&> operator^=(T& lhs, T rhs)
{
    lhs = lhs ^ rhs;
    return lhs;
}

//! True when any bit of an opted-in enum value is set.
template <class T, typename = EnumBitops<>::t<T>>
constexpr bool to_bool(T value)
{
    return static_cast<EnumBitops<>::u<T>>(value) != 0;
}
#endif // DOXYGEN_BUILD

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IApplyOnEachFunction.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IApplyOnEachFunction.h //! //! @brief Defines @ref omni::graph::exec::unstable::IApplyOnEachFunction. #pragma once #include <omni/graph/exec/unstable/IBase.h> namespace omni { namespace graph { namespace exec { namespace unstable { class IApplyOnEachFunction_abi; class IApplyOnEachFunction; class ExecutionPath; //! Interface wrapping a function (possibly with storage) to apply on all instantiations of a given definition. class IApplyOnEachFunction_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IApplyOnEachFunction")> { protected: //! Invokes the wrapped function. virtual void invoke_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path) noexcept = 0; }; //! Smart pointer managing an instance of @ref IApplyOnEachFunction. using ApplyOnEachFunctionPtr = omni::core::ObjectPtr<IApplyOnEachFunction>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IApplyOnEachFunction.gen.h> //! @copydoc omni::graph::exec::unstable::IApplyOnEachFunction_abi //! //! @ingroup groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IApplyOnEachFunction : public omni::core::Generated<omni::graph::exec::unstable::IApplyOnEachFunction_abi> { }; #include <omni/graph/exec/unstable/ExecutionPath.h> #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IApplyOnEachFunction.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionTask.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file ExecutionTask.h //! //! @brief Defines @ref omni::graph::exec::unstable::ExecutionTask. #pragma once #include <omni/core/ITypeFactory.h> #include <omni/graph/exec/unstable/Assert.h> #include <omni/graph/exec/unstable/Status.h> #include <limits> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class ExecutionPath; class IExecutionContext; class IExecutionCurrentThread; class IExecutor; class INode; //! A task tag can be used by other entities (such as @ref Executor) to group tasks together. using ExecutionTaskTag = uint64_t; //! Represents work item generated by an @ref IExecutor and passed to a scheduler for dispatch. //! //! ExecutionTask is a utility class that describes a task to be potentially executed on behalf of a @ref INode in a //! given @ref IExecutionContext. //! //! @rst //! //! .. image:: /../docs/ef-execution-path-point-k.svg //! :align: center //! //! @endrst //! //! @ref ExecutionTask stores four key pieces of information: //! //! - *A pointer to the Node to be executed*. The pointed to @ref INode contains a pointer to either an @ref INodeDef or //! @ref NodeGraphDef which contains the computation definition. See @ref ExecutionTask::getNode(). //! //! - *The unique path to the node*. In addition to the @ref INode to be executed, an @ref ExecutionPath to the node's //! upstream (i.e. containing) node is stored. Combined, these two pieces of information form a unique id for the //! node. //! //! 
Above, if an @ref ExecutionTask is describing the *k* node pointed to by the yellow arrow, @ref //! ExecutionTask::getNode() would point to *k* and @ref ExecutionTask::getUpstreamPath() would return */f/p*. Note, //! the @ref ExecutionTask::getUpstreamPath() *does not* store */f/p/k*, just */f/p*. This is a micro-optimization //! that allows the same |ExecutionPath| to be reused while visiting nodes within the same //! @ref INodeGraphDef. //! //! - *A pointer to the current execution's* @ref IExecutionContext. Execution always happen in a given context. It's //! this context, @ref IExecutionContext, that stores the state of the execution. Multiple entities can be executing a //! given @rstref{execution graph <ef_execution_graph>}, each execution using its own @ref IExecutionContext. In order //! to understand which of these potentially many executions a task is a part, @ref ExecutionTask stores a reference //! to the execution's @ref IExecutionContext. This @ref ExecutionTask::getContext() combined with @ref //! ExecutionTask::getUpstreamPath() and @ref ExecutionTask::getNode() can be used to access the per-execution state //! for the node (see @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() and @ref OMNI_GRAPH_EXEC_SET_NODE_DATA()). //! //! - *A "tag" to identify a task when multiple tasks are associated with a node.* If an @ref INode generates many //! tasks during execution, @ref ExecutionTask::getTag() can be used to uniquely identify each of the node's tasks. //! The semantic meaning of @ref ExecutionTask::getTag() is @ref IExecutor dependent and can be used for purposes //! other than unique identification. //! //! This struct is ABI-safe. //! //! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during //! execution. //! // @note Node definition needs to stay unchanged while there are executions to complete. In future we should fully // decouple description of compute from compute symbol library. 
This would allow mutating the authoring side // without the need to synchronize with execution. class ExecutionTask { public: enum : ExecutionTaskTag { kEmptyTag = std::numeric_limits<ExecutionTaskTag>::max() //!< Special value to represent an empty tag. }; static_assert(std::numeric_limits<ExecutionTaskTag>::max() == 0xFFFFFFFFFFFFFFFF, "unexpected ExecutionTaskTag max value"); //! Constructor for execution task //! //! @param context Context in which execution task is created. Task can only access state from this context. //! @ref omni::core::IObject::acquire() is not called on this context. It is up to the calling //! code to ensure the context remains valid for the lifetime of the ExecutionTask. //! //! @param node Node holding the execution definition omni::core::IObject::acquire() is not called on this //! context. It is up to the calling code to ensure the context remains valid for the lifetime of //! the ExecutionTask. //! //! @param upPath Execution path to the graph owning the node. Node can be executed multiple times with different //! paths when graph definition is shared. //! //! @param tag Used to identify dynamically generated work items that node can compute. 
ExecutionTask(IExecutionContext* context, INode* node, const ExecutionPath& upPath, ExecutionTaskTag tag = kEmptyTag) noexcept : m_context(context), m_node(node), m_upstreamPath(&upPath), m_tag(tag) { OMNI_GRAPH_EXEC_ASSERT(context); OMNI_GRAPH_EXEC_ASSERT(node); static_assert(std::is_standard_layout<ExecutionTask>::value, "ExecutionTask is expected to be abi safe"); static_assert(offsetof(ExecutionTask, m_context) == 0, "unexpected context offset"); static_assert(offsetof(ExecutionTask, m_node) == 8, "unexpected node offset"); static_assert(offsetof(ExecutionTask, m_upstreamPath) == 16, "unexpected upstream path offset"); static_assert(offsetof(ExecutionTask, m_tag) == 24, "unexpected tag offset"); static_assert(offsetof(ExecutionTask, m_userIndex) == 32, "unexpected status offset"); static_assert(offsetof(ExecutionTask, m_status) == 40, "unexpected status offset"); static_assert(48 == sizeof(ExecutionTask), "ExecutionTask is an unexpected size"); } //! Return context for this task. //! //! The returned @ref IExecutionContext will *not* have @ref omni::core::IObject::acquire() called before being //! returned. IExecutionContext* getContext() const noexcept { return m_context; } //! Return node for this task. //! //! The returned @ref INode will *not* have @ref omni::core::IObject::acquire() called before being returned. INode* getNode() const noexcept { return m_node; } //! Return execution path to graph owning the node. const ExecutionPath& getUpstreamPath() const noexcept { return *m_upstreamPath; } //! Return tag. ExecutionTaskTag getTag() const noexcept { return m_tag; } //! Check if this task has a valid tag set. This will mean that a node generates more than one task. bool hasValidTag() const noexcept { return (m_tag != kEmptyTag); } //! Return execution status for this task Status getExecutionStatus() const noexcept { return m_status; } //! Execute the task. Will be called by the scheduler when task is dispatched for execution. 
inline Status execute(omni::core::ObjectParam<IExecutor> executor) noexcept; //! This index will never be used by the framework, but is a way to pass something into //! user code via generated task. Mutating this value is allowed as long as it is done //! via only legal way to access task, i.e. getCurrentTask //! //! Setter for user index void setUserIndex(uint64_t userIndex) noexcept { m_userIndex = userIndex; } //! This index will never be used by the framework, but is a way to pass something into //! user code via generated task. Mutating this value is allowed as long as it is done //! via only legal way to access task, i.e. getCurrentTask //! //! Getter for user index uint64_t getUserIndex() const noexcept { return m_userIndex; } //! Sets the status of the task. //! //! This is an internal method and should not be called by users. void setExecutionStatus(Status status) noexcept { m_status = status; } private: //! Context in which this task was created. This context needs to live as long as there are still executions to //! complete. IExecutionContext* m_context; //! Node holding the execution definition. INode* m_node; //! Execution path to the graph owning the node. const ExecutionPath* m_upstreamPath; //! Used to identify dynamically generated work items that node can compute. ExecutionTaskTag m_tag; //! User index help with passing data into user code. uint64_t m_userIndex{ 0 }; //! Execution status. Status m_status{ Status::eUnknown }; //! Reserved padding space. uint32_t m_reserved; }; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni #include <omni/graph/exec/unstable/IExecutionCurrentThread.h> //! Execute the task. Will be called by the scheduler when task is dispatched for execution. 
inline omni::graph::exec::unstable::Status omni::graph::exec::unstable::ExecutionTask::execute( omni::core::ObjectParam<IExecutor> executor) noexcept { if (Status::eUnknown != m_status) { return m_status; } return getCurrentThread()->execute(*this, executor, &m_status); }
omniverse-code/kit/include/omni/graph/exec/unstable/IGraph.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IGraph.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraph.
#pragma once

#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Status.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IExecutionContext;
class IGraph_abi;
class IGraph;
class INode;
class INodeGraphDef;
class ITopology;

//! Top-level container for storing the Execution Framework's graph of graphs.
//!
//! @ref omni::graph::exec::unstable::IGraph is the top-level container used to store the graph of graphs.  This
//! top-level container is referred to as the <i>execution graph</i>.
//!
//! @ref omni::graph::exec::unstable::IGraph's responsibilities include:
//!
//! - Tracking if the graph is currently being constructed.  See @ref omni::graph::exec::unstable::IGraph::inBuild().
//!
//! - Tracking gross changes to the topologies of graphs within the execution graph.  This is done with the <i>global
//!   topology stamp</i> (see @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp()).  Each time a
//!   topology is invalidated, the global topology stamp is incremented.  Consumers of the execution graph can use this
//!   stamp to detect changes in the graph.  See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! - Owning and providing access to the top level graph definition (see @ref
//!   omni::graph::exec::unstable::IGraph::getNodeGraphDef()).  The root node of the top-level graph definition is the
//!   root of execution graph.  @ref omni::graph::exec::unstable::IGraph is the only container, other than @ref
//!   omni::graph::exec::unstable::INode, that attaches to definitions.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for more information on how @ref
//! omni::graph::exec::unstable::IGraph fits into the Execution Framework.
//!
//! See @ref omni::graph::exec::unstable::Graph for a concrete implementation of this interface.
class IGraph_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
                                               OMNI_TYPE_ID("omni.graph.exec.unstable.IGraph")>
{
protected:
    //! Access the top-level node graph definition.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    //!
    //! @thread_safety This method is thread safe.  The returned pointer will be valid for the lifetime of this @ref
    //!                omni::graph::exec::unstable::IGraph.
    virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0;

    //! Name set on the graph during construction.
    //!
    //! @thread_safety This method is thread safe.  The returned pointer will be valid for the lifetime of this @ref
    //!                omni::graph::exec::unstable::IGraph.
    virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0;

    //! Return global topology of the graph.  Useful when detecting that graph transformation pipeline needs to run.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} to understand how this stamp is used to detect changes
    //! in the graph.
    //!
    //! @thread_safety This method is thread safe.  The returned pointer will be valid for the lifetime of this @ref
    //!                omni::graph::exec::unstable::IGraph.  It is up to the caller to mutate the stamp in a thread
    //!                safe manner.
    virtual Stamp* getGlobalTopologyStamp_abi() noexcept = 0;

    //! Return @c true if a @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this
    //! graph.
    //!
    //! @thread_safety This method is thread safe.
    virtual bool inBuild_abi() noexcept = 0;

    //! Mark that an @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
    //!
    //! Each builder should call @c _setInBuild(true) followed by @c _setInBuild(false) once building is complete.
    //! Since multiple builders can be active at a time, it is safe for this method to be called multiple times.
    //!
    //! This method should only be called by @ref omni::graph::exec::unstable::IGraphBuilder.
    //!
    //! @thread_safety This method is thread safe.
    virtual void _setInBuild_abi(bool inBuild) noexcept = 0;
};

//! Smart pointer managing an instance of @ref IGraph.
using GraphPtr = omni::core::ObjectPtr<IGraph>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraph.gen.h>

//! @copydoc omni::graph::exec::unstable::IGraph_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraph : public omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>
{
public:
    //! Access topology of the graph.
    //!
    //! The returned @ref ITopology does *not* have @ref omni::core::IObject::acquire() called before being returned.
    //!
    //! @thread_safety This method is thread safe.  The returned pointer will be valid for the lifetime of this @ref
    //!                omni::graph::exec::unstable::IGraph.
    inline ITopology* getTopology() noexcept;

    //! Access root of the graph.
    //!
    //! The returned @ref INode does *not* have @ref omni::core::IObject::acquire() called before being returned.
    //!
    //! @thread_safety This method is thread safe.  The returned pointer will be valid for the lifetime of this @ref
    //!                omni::graph::exec::unstable::IGraph.
    inline INode* getRoot() noexcept;
};

// INodeGraphDef must be complete for the inline helpers below, which forward to it.
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>

// convenience helper: the graph's topology is the topology of its top-level definition
inline omni::graph::exec::unstable::ITopology* omni::graph::exec::unstable::IGraph::getTopology() noexcept
{
    return getNodeGraphDef()->getTopology();
}

// convenience helper: the graph's root node is the root of its top-level definition
inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::IGraph::getRoot() noexcept
{
    return getNodeGraphDef()->getRoot();
}

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraph.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/SmallStack.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/Memory.h> #include <omni/core/Assert.h> #include <cstdint> #include <cstring> #include <stdexcept> #include <type_traits> namespace omni { namespace graph { namespace exec { namespace unstable { namespace detail { //! ABI-safe stack with inline memory to avoid heap allocation. //! //! Reserved memory within the stack will be used until it is exceeded, at which heap memory will be used. //! //! It is assumed the items stored are `sizeof(uint64_t)`. template <typename T = uint64_t> class SmallStack { public: //! Type of the item in the stack. using ItemType = T; //! Constructor. SmallStack() noexcept { static_assert(8 == sizeof(ItemType), "unexpected item size"); static_assert(std::is_trivially_destructible<ItemType>::value, "items stored must be trivially destructible"); static_assert(offsetof(SmallStack, m_external.data) == 0, "unexpected external data offset"); static_assert(offsetof(SmallStack, m_external.count) == 8, "unexpected external count offset"); static_assert(offsetof(SmallStack, m_external.maxCount) == 12, "unexpected external maxCount offset"); static_assert(offsetof(SmallStack, m_internal.data) == 0, "unexpected data offset"); static_assert(offsetof(SmallStack, m_internal.count) == 56, "unexpected count offset"); static_assert(offsetof(SmallStack, m_internal.isInternal) == 60, "unexpected internal flag offset "); m_internal.count = 0; m_internal.isInternal = 1; } //! Constructor with a single item. 
SmallStack(ItemType item) noexcept { m_internal.count = 0; m_internal.isInternal = 1; push(item); // may throw, but wont in this case. } //! Copy constructor. //! //! May throw. SmallStack(const SmallStack& other) // may throw { m_internal.isInternal = 1; _copy(other); } //! Construct from a range. //! //! @p end must be equal or greater than @p begin. //! //! May throw. SmallStack(ItemType* begin, ItemType* end) // may throw { OMNI_ASSERT(end >= begin); m_internal.isInternal = 1; uint32_t count = static_cast<uint32_t>(end - begin); _copy(begin, count, count); } //! Copies the contents of the given stack and pushes the given item. //! //! May throw. SmallStack(const SmallStack& other, ItemType item) // may throw { uint32_t otherCount = other.count(); uint32_t count = otherCount + 1; ItemType* p; if (count > kMaxInternalDataItemCount) { p = _allocate(count); m_internal.isInternal = 0; m_external.data = p; m_external.count = count; m_external.maxCount = count; } else { m_internal.isInternal = 1; p = m_internal.data; m_internal.count = count; } std::memcpy(p, other.begin(), sizeof(ItemType) * otherCount); p[otherCount] = item; } //! Move constructor. SmallStack(SmallStack&& other) noexcept { m_internal.isInternal = 1; _move(std::move(other)); } //! Destructor ~SmallStack() noexcept { _free(); } //! Assignment operator. //! //! May throw. SmallStack& operator=(const SmallStack& other) // may throw { if (this != &other) { _copy(other); } return *this; } //! Assignment operator. SmallStack& operator=(SmallStack&& other) noexcept { if (this != &other) { _move(std::move(other)); } return *this; } //! Compares two stacks, returning either a negative number, positive number, or zero. //! //! Works similar to @c std::memcmp. //! //! Returns a negative value if this stack less than @p other. //! //! Returns a positive value if this stack greater than @p other. //! //! Returns zero if the stacks are equal. //! //! 
The returned negative or positive values are not guaranteed to be exactly -1 or 1. int compare(const SmallStack& other) const noexcept { int thisCount = count(); int otherCount = other.count(); if (thisCount == otherCount) { return std::memcmp(begin(), other.begin(), sizeof(ItemType) * otherCount); } else { return (thisCount - otherCount); } } //! Return @c true if the stack is empty. inline bool empty() const noexcept { return (0 == count()); } //! Returns the top of the stack. //! //! Reading the top of an empty stack is undefined behavior. inline ItemType top() const noexcept { if (_isInternal()) { OMNI_ASSERT(0 != m_internal.count); return m_internal.data[m_internal.count - 1]; } else { OMNI_ASSERT(0 != m_internal.count); return m_external.data[m_external.count - 1]; } } //! Push the given item to the top of the stack. //! //! May throw. inline void push(ItemType elem) // may throw { if (_isInternal()) { if (m_internal.count == kMaxInternalDataItemCount) { // we've ran out of internal space _allocExternalAndCopyInternal(); m_external.data[m_external.count++] = elem; } else { m_internal.data[m_internal.count++] = elem; } } else { if (m_external.count == m_external.maxCount) { _grow(); } m_external.data[m_external.count++] = elem; } } //! Removes the top of the stack. //! //! Popping an empty stack is undefined behavior. inline void pop() noexcept { if (_isInternal()) { OMNI_ASSERT(m_internal.count > 0); m_internal.count--; } else { OMNI_ASSERT(m_external.count > 0); m_external.count--; } } //! Returns the number of items in the stack. inline uint32_t count() const noexcept { if (_isInternal()) { return m_internal.count; } else { return m_external.count; } } //! Returns the number of items in the stack. inline uint32_t size() const noexcept { return count(); } //! Returns a pointer to the oldest item in the stack. //! //! If the stack is empty, the returned pointer should not be read or written though can be compared to @ref end(). 
inline const ItemType* begin() const noexcept { if (_isInternal()) { return m_internal.data; } else { return m_external.data; } } //! Returns a pointer to one past the top of the stack. //! //! If the stack is empty, the returned pointer should not be read or written though can be compared to @ref //! begin(). inline const ItemType* end() const noexcept { if (_isInternal()) { return m_internal.data + m_internal.count; } else { return m_external.data + m_external.count; } } //! Returns a pointer to the oldest item in the stack. //! //! Result are undefined if the stack is empty. inline const ItemType* data() const noexcept { return begin(); } private: inline bool _isInternal() const noexcept { return m_internal.isInternal; } inline uint32_t _maxCount() const noexcept { if (_isInternal()) { return kMaxInternalDataItemCount; } else { return m_external.maxCount; } } inline void _free() noexcept { if (!_isInternal()) { carb::deallocate(m_external.data); m_internal.count = 0; m_internal.isInternal = 0; } } // assumes _free() has already been called (when needed) inline void _copy(const SmallStack& other) { _copy(const_cast<ItemType*>(other.begin()), other.count(), other._maxCount()); } // assumes _free() has already been called (when needed) inline void _copy(ItemType* data, uint32_t count, uint32_t maxCount) { if (_maxCount() < count) { // not enough storage for the copy. we'll have to allocate more. OMNI_ASSERT(maxCount >= count); _free(); m_external.data = reinterpret_cast<ItemType*>(carb::allocate(sizeof(ItemType) * maxCount)); if (!m_external.data) { throw std::bad_alloc(); } std::memcpy(m_external.data, data, sizeof(ItemType) * count); m_external.count = count; m_external.maxCount = maxCount; m_internal.isInternal = 0; } else { // data fits in our storage. simply copy it. 
if (_isInternal()) { std::memcpy(m_internal.data, data, sizeof(ItemType) * count); m_internal.count = count; } else { std::memcpy(m_external.data, data, sizeof(ItemType) * count); m_external.count = count; } } } // assumes _free() has already been called (when needed) inline void _move(SmallStack&& other) noexcept { if (other._isInternal()) { // since other is using its internal storage, we have to copy the data _copy(other); other.m_internal.count = 0; } else { // other is using external storage _free(); m_internal.isInternal = 0; m_external.data = other.m_external.data; m_external.count = other.m_external.count; m_external.maxCount = other.m_external.maxCount; other.m_internal.count = 0; other.m_internal.isInternal = 1; } } inline ItemType* _allocate(uint32_t maxCount) { auto data = reinterpret_cast<ItemType*>(carb::allocate(sizeof(ItemType) * maxCount)); if (!data) { throw std::bad_alloc(); } return data; } inline void _allocExternalAndCopyInternal() { OMNI_ASSERT(_isInternal()); constexpr uint32_t newMaxCount = kMaxInternalDataItemCount * 2; ItemType* data = _allocate(newMaxCount); std::memcpy(data, m_internal.data, sizeof(ItemType) * newMaxCount); m_external.data = data; m_external.count = kMaxInternalDataItemCount; m_external.maxCount = newMaxCount; m_internal.isInternal = 0; } inline void _grow() { OMNI_ASSERT(!_isInternal()); OMNI_ASSERT(m_external.maxCount > 0); m_external.maxCount *= 2; ItemType* data = _allocate(m_external.maxCount); std::memcpy(data, m_external.data, sizeof(ItemType) * m_external.count); carb::deallocate(m_external.data); m_external.data = data; } constexpr static uint32_t kMaxInternalDataItemCount = 7; struct ExternalData { ItemType* data; uint32_t count; uint32_t maxCount; }; static_assert(sizeof(ExternalData) == 16, "ExternalData is unexpected size"); struct InternalData { ItemType data[kMaxInternalDataItemCount]; uint32_t count; uint32_t isInternal; }; static_assert(sizeof(InternalData) == 64, "InternalData is unexpected size"); 
private: union { ExternalData m_external; InternalData m_internal; }; }; static_assert(sizeof(SmallStack<uint64_t>) == 64, "SmallStack has unexpected size"); static_assert(std::is_standard_layout<SmallStack<uint64_t>>::value, "SmallStack is not ABI-safe"); } // namespace detail } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutor.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Executes the node in a graph definition.
//!
//! The purpose of an executor is to generate work for the nodes in an graph definition.  @ref
//! omni::graph::exec::unstable::IExecutor is a minimal interface that defines enough methods to accomplish just that.
//!
//! However, @ref omni::graph::exec::unstable::IExecutor's minimal nature is not what most users want when customizing
//! execution for their graph definitions.  Rather, they want something useful.  @ref
//! omni::graph::exec::unstable::Executor is an useful implementation of @ref omni::graph::exec::unstable::IExecutor
//! designed for graph definition authors to extend.  See
//! @ref omni::graph::exec::unstable::Executor's documentation to better understand the purpose, duties, and
//! capabilities of an executor.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See @rstref{Creating an Executor <ef_executor_creation>} for a guide on creating a customize executor for your
//! graph defintion.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>
    : public omni::graph::exec::unstable::IExecutor_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutor")

    //! Main execute method. Returning status of the execution.
    omni::graph::exec::unstable::Status execute() noexcept;

    //! Request for scheduling of additional work after the given task has executed but before it has completed.
    //!
    //! @param task The current task
    omni::graph::exec::unstable::Status continueExecute(omni::graph::exec::unstable::ExecutionTask& task) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// thin inline forwarders from the public API to the ABI methods
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>::execute() noexcept
{
    return execute_abi();
}

inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>::continueExecute(
    omni::graph::exec::unstable::ExecutionTask& task) noexcept
{
    return continueExecute_abi(&task);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/Module.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file Module.h //! //! @brief Helpers for writing modules/plugins based on @ref omni::graph::exec. #pragma once #include <omni/graph/exec/unstable/PassRegistry.h> //! Helper macro to ensure EF features are enabled in the current module/plugin. //! //! This macro should be called from either @c carbOnPluginStartup or @c onStarted. //! //! If your module/plugin registers EF nodes or passes, you must call this macro. //! //! For Kit-based extensions, rather than calling this macro, call OMNI_KIT_EXEC_CORE_ON_MODULE_STARTED(), which will //! call this macro on your behalf. #define OMNI_GRAPH_EXEC_ON_MODULE_STARTED(moduleName_) \ try \ { \ omni::graph::exec::unstable::registerModulePasses(); \ } \ catch (std::exception & e) \ { \ CARB_LOG_ERROR("failed to register %s's passes: %s", moduleName_, e.what()); \ } //! Helper macro to ensure EF features are safely disabled when the current module/plugin unloads. //! //! This macro should be called from either @c carbOnPluginShutdown or @c onUnload. //! //! If your module/plugin registers EF nodes or passes, you must call this macro. //! //! For Kit-based extensions, rather than calling this macro, call OMNI_KIT_EXEC_CORE_ON_MODULE_UNLOAD(), which will //! call this macro on your behalf. #define OMNI_GRAPH_EXEC_ON_MODULE_UNLOAD() \ do \ { \ omni::graph::exec::unstable::deregisterModulePasses(); \ } while (0)
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionCurrentThread.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Encapsulates the execution state for the current thread allowing callers to determine quantities like the @ref
//! omni::graph::exec::unstable::ExecutionTask currently executing on the thread.
//!
//! Because methods in this interface return thread local data, all methods in this interface are thread safe.
//!
//! This interface is usually accessed as a singleton via one of the following helper methods:
//!
//! - @ref omni::graph::exec::unstable::getCurrentTask()
//!
//! - @ref omni::graph::exec::unstable::getCurrentExecutor()
//!
//! This interface contains methods for graph and task execution. Users should not call these methods directly. See
//! the methods' docs below for the correct way to perform execution.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>
    : public omni::graph::exec::unstable::IExecutionCurrentThread_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionCurrentThread")

    //! Executes the given @ref omni::graph::exec::unstable::Graph.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::IExecutionContext::execute().
    //!
    //! From an ABI point-of-view, the purpose of this method is to handle the special case of the top-level @ref
    //! omni::graph::exec::unstable::INodeGraphDef being contained by @ref omni::graph::exec::unstable::IGraph rather
    //! than pointed to by a node in another @ref omni::graph::exec::unstable::INodeGraphDef. Meaningful values are set
    //! for the threads current task and executor (see @ref omni::graph::exec::unstable::getCurrentTask() and @ref
    //! omni::graph::exec::unstable::getCurrentExecutor()).
    //!
    //! @thread_safety This method is thread safe.
    omni::graph::exec::unstable::Status executeGraph(
        omni::core::ObjectParam<omni::graph::exec::unstable::IGraph> graph,
        omni::core::ObjectParam<omni::graph::exec::unstable::IExecutionContext> context);

    //! Executes and sets the thread's "current" task to the given task.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::ExecutionTask::execute().
    //!
    //! This method executes the definition of the node pointed to by the given task. Importantly, this method sets
    //! thread local data to track the currently running task and executor (see @ref
    //! omni::graph::exec::unstable::getCurrentTask() and @ref omni::graph::exec::unstable::getCurrentExecutor()).
    //!
    //! @thread_safety This method is thread safe.
    omni::graph::exec::unstable::Status execute(omni::graph::exec::unstable::ExecutionTask& task,
                                                omni::core::ObjectParam<omni::graph::exec::unstable::IExecutor> executor,
                                                omni::graph::exec::unstable::Status* taskStatus);

    //! Access the task currently executing on the current thread.
    //!
    //! Useful when needing to access execution context state without having to pass it to every function.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentTask().
    //!
    //! May return @c nullptr.
    //!
    //! @thread_safety This method is thread safe.
    omni::graph::exec::unstable::ExecutionTask* getCurrentTask() noexcept;

    //! Access the executor currently executing on the current thread.
    //!
    //! Useful when needing to spawn extra work within the scope of the graph.
    //!
    //! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentExecutor().
    //!
    //! May return @c nullptr.
    //!
    //! @thread_safety This method is thread safe.
    omni::graph::exec::unstable::IExecutor* getCurrentExecutor() noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Validates pointer arguments (throws omni::core::ResultError via OMNI_THROW_IF_ARG_NULL) then forwards to the ABI.
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>::executeGraph(
    omni::core::ObjectParam<omni::graph::exec::unstable::IGraph> graph,
    omni::core::ObjectParam<omni::graph::exec::unstable::IExecutionContext> context)
{
    OMNI_THROW_IF_ARG_NULL(graph);
    OMNI_THROW_IF_ARG_NULL(context);
    auto return_ = executeGraph_abi(graph.get(), context.get());
    return return_;
}

// Note: only taskStatus is null-checked here; executor is passed through as-is (a null executor is not rejected
// by this wrapper).
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>::execute(
    omni::graph::exec::unstable::ExecutionTask& task,
    omni::core::ObjectParam<omni::graph::exec::unstable::IExecutor> executor,
    omni::graph::exec::unstable::Status* taskStatus)
{
    OMNI_THROW_IF_ARG_NULL(taskStatus);
    auto return_ = execute_abi(&task, executor.get(), taskStatus);
    return return_;
}

// Pure trampoline to the ABI; may return nullptr.
inline omni::graph::exec::unstable::ExecutionTask* omni::core::Generated<
    omni::graph::exec::unstable::IExecutionCurrentThread_abi>::getCurrentTask() noexcept
{
    return getCurrentTask_abi();
}

// Pure trampoline to the ABI; may return nullptr.
inline omni::graph::exec::unstable::IExecutor* omni::core::Generated<
    omni::graph::exec::unstable::IExecutionCurrentThread_abi>::getCurrentExecutor() noexcept
{
    return getCurrentExecutor_abi();
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IPassTypeRegistry.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IPassTypeRegistry.h //! //! @brief Defines @ref omni::graph::exec::unstable::IPassTypeRegistry. #pragma once #include <omni/graph/exec/unstable/ConstName.h> #include <omni/graph/exec/unstable/ElementAt.h> #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/Types.h> #include <cstring> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class IPassFactory; class IPassTypeRegistry; class IPassTypeRegistry_abi; //! ABI-safe struct to hold registered @ref omni::graph::exec::unstable::IPassFactory objects. struct PassTypeRegistryEntry { //! The name of the pass type. const char* name; //! Factory interface for creating an instance of the pass. //! //! This struct does not acquire this pointer. //! //! This pointer is never @c nullptr. IPassFactory* factory; //! Some passes (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a graph. //! This field is used to specify the name of the node/definitions the pass wishes to affect. //! //! The meaning of this field is pass type dependent. Many passes ignore this field. //! //! This pointer is never @c nullptr. const ConstName* nameToMatch; //! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When //! multiple passes wish to affect an entity, this priority value can be used to resolve the conflict. The meaning //! of the priority value is pass type specific. Many passes ignore this value. 
PassPriority priority; //! Reserved padding space. uint32_t reserved; }; static_assert(std::is_standard_layout<PassTypeRegistryEntry>::value, "PassTypeRegistryEntry is expected to be abi safe"); static_assert(offsetof(PassTypeRegistryEntry, name) == 0, "unexpected name offset"); static_assert(offsetof(PassTypeRegistryEntry, factory) == 8, "unexpected factory offset"); static_assert(offsetof(PassTypeRegistryEntry, nameToMatch) == 16, "unexpected hash offset"); static_assert(offsetof(PassTypeRegistryEntry, priority) == 24, "unexpected hash offset"); static_assert(32 == sizeof(PassTypeRegistryEntry), "PassTypeRegistryEntry is an unexpected size"); //! @ref omni::graph::exec::unstable::IPassFactory registry for a particular @ref omni::graph::exec::unstable::PassType. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. class IPassTypeRegistry_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassTypeRegistry")> { protected: //! Returns the number of registered passes. virtual uint64_t getPassCount_abi() noexcept = 0; //! Returns the pass at the given index. //! //! If the index is greater than the count, an error is returned. //! //! The returned @ref omni::graph::exec::unstable::PassTypeRegistryEntry is valid as long as this pass type registry //! is not mutated (e.g. a pass is added or removed from the registry). virtual OMNI_ATTR("throw_result") omni::core::Result getPassAt_abi(uint64_t index, OMNI_ATTR("out, not_null, throw_if_null") PassTypeRegistryEntry* outEntry) noexcept = 0; }; //! Smart pointer managing an instance of @ref IPassTypeRegistry. 
using PassTypeRegistryPtr = omni::core::ObjectPtr<IPassTypeRegistry>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni // generated API declaration #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IPassTypeRegistry.gen.h> //! @copydoc omni::graph::exec::unstable::IPassTypeRegistry_abi //! //! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IPassTypeRegistry : public omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi> { public: //! Implementation detail to access registry ABI struct GetPass { //! Access element at a given index static void getAt(IPassTypeRegistry* owner, uint64_t index, PassTypeRegistryEntry* out) { owner->getPassAt(index, out); } //! Returns element count static uint64_t getCount(IPassTypeRegistry* owner) { return owner->getPassCount(); } }; //! Implementation detail that wraps index-based node access with iterators. using Passes = detail::ElementAt<IPassTypeRegistry, PassTypeRegistryEntry, GetPass>; //! Returns an object that allows the list of passes to be iterated over (i.e. using range-based for loops). //! //! The returned iterator is valid as long as this pass type registry is not mutated (e.g. a pass is added or //! removed from the registry). Passes getPasses() noexcept { return Passes(this); } }; // additional headers needed for API implementation #include <omni/graph/exec/unstable/IPassFactory.h> // generated API implementation #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IPassTypeRegistry.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IScheduleFunction.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Interface wrapping a function (possibly with storage). Used to wrap a task when passing generated work to the //! scheduler. template <> class omni::core::Generated<omni::graph::exec::unstable::IScheduleFunction_abi> : public omni::graph::exec::unstable::IScheduleFunction_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IScheduleFunction") //! Main execute method. Returning status of the execution. omni::graph::exec::unstable::Status invoke() noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IScheduleFunction_abi>::invoke() noexcept { return invoke_abi(); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/INode.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Represents work in a graph. Nodes point to a shared execution definition to state the actual work.
//!
//! @ref omni::graph::exec::unstable::INode is the main structural component used to build a graph's topology. @ref
//! omni::graph::exec::unstable::INode stores edges to *parents* (i.e. predecessors) and *children* (i.e. successors).
//! These edges set an ordering between nodes. See @ref omni::graph::exec::unstable::INode::getParents() and @ref
//! omni::graph::exec::unstable::INode::getChildren() respectively.
//!
//! A node represents work to be performed. The description of the work to be performed is stored in a *definition*
//! (i.e. @ref omni::graph::exec::unstable::IDef). Each node wishing to perform work points to a definition (see @ref
//! omni::graph::exec::unstable::INode::getDef()).
//!
//! The definition to which a node points can be one of two types. The first type, @ref
//! omni::graph::exec::unstable::INodeDef, defines work opaquely (i.e. EF is unable to view the work definition and
//! potentially optimize it). The second type, @ref omni::graph::exec::unstable::INodeGraphDef, defines work with a
//! graph. This last representation is the most powerful as it allows for both *extensibility* and *composability* in
//! EF.
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//!    :align: center
//!
//! @endrst
//!
//! Above, we see that nodes point to graph definitions, which contain other nodes that point to other graph
//! definitions. This structure of graphs pointing to other graphs is where EF gets its *graph of graphs* name.
//!
//! Not all nodes will point to a definition. For example, the @rstref{root node <ef_root_node>} in each graph
//! definition will not point to a definition.
//!
//! A node is always part of a graph definition and the graph definition's executor is responsible for orchestrating and
//! generating work to the scheduler.
//!
//! Nodes within a graph definition are assigned a unique index, between zero and the number of nodes in the
//! definition. This index is often used as a lookup into transient arrays used to store state during graph traversals.
//! See @ref omni::graph::exec::unstable::INode::getIndexInTopology().
//!
//! Nodes have a notion of validity. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! @ref omni::graph::exec::unstable::INode does not contain methods for either setting the node's definition or
//! connecting nodes to each other. This functionality is reserved for @ref omni::graph::exec::unstable::IGraphBuilder.
//! See @rstref{Graph Construction <ef_pass_concepts>} for details.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for a guide on how this object relates to other objects in the
//! Execution Framework.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Users may wish to implement this interface to store meaningful authoring level data in EF. For example, OmniGraph
//! uses an implementation of this node to store graph instancing information. See @ref
//! omni::graph::exec::unstable::Node for a concrete implementation of this interface suitable for sub-classing.
template <>
class omni::core::Generated<omni::graph::exec::unstable::INode_abi> : public omni::graph::exec::unstable::INode_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INode")

    //! Access topology owning this node
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    omni::graph::exec::unstable::ITopology* getTopology() noexcept;

    //! Access node's unique identifier name.
    const omni::graph::exec::unstable::ConstName& getName() noexcept;

    //! Access node's unique index within the owning topology. Index will be always smaller than topology size.
    omni::graph::exec::unstable::NodeIndexInTopology getIndexInTopology() noexcept;

    //! Access parents.
    omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getParents() noexcept;

    //! Access children.
    omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getChildren() noexcept;

    //! Return number of parents that cause cycles within the graph during traversal over this node.
    uint32_t getCycleParentCount() noexcept;

    //! Check if topology/connectivity of nodes is valid within current topology version.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
    bool isValidTopology() noexcept;

    //! Make topology valid for current topology version. Drop all the connections if topology changed.
    //!
    //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
    void validateOrResetTopology() noexcept;

    //! Access base node definition (can be empty).
    //!
    //! When you wish to determine if the attached definition is either opaque or a graph, consider calling @ref
    //! omni::graph::exec::unstable::INode::getNodeDef() or @ref omni::graph::exec::unstable::INode::getNodeGraphDef()
    //! rather than this method.
    //!
    //! The returned @ref omni::graph::exec::unstable::IDef will *not* have @ref omni::core::IObject::acquire() called
    //! before being returned.
    omni::graph::exec::unstable::IDef* getDef() noexcept;

    //! Access node definition (can be empty).
    //!
    //! If the returned pointer is @c nullptr, either the definition does not implement @ref
    //! omni::graph::exec::unstable::INodeDef or there is no definition attached to the node.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeDef will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    //!
    //! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
    //! omni::graph::exec::unstable::INode::getNodeGraphDef().
    omni::graph::exec::unstable::INodeDef* getNodeDef() noexcept;

    //! Access node's graph definition (can be empty)
    //!
    //! The returned graph definition pointer is the graph definition which defines the work this node represents. The
    //! returned pointer **is not** the graph definition that contains this node.
    //!
    //! If the returned pointer is @c nullptr, either the definition does not implement @ref
    //! omni::graph::exec::unstable::INodeGraphDef or there is no definition attached to the node.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    //!
    //! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
    //! omni::graph::exec::unstable::INode::getNodeDef().
    omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// The definitions below are thin trampolines that forward each call to the corresponding *_abi entry point.

inline omni::graph::exec::unstable::ITopology* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getTopology() noexcept
{
    return getTopology_abi();
}

inline const omni::graph::exec::unstable::ConstName& omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getName() noexcept
{
    // The ABI returns a pointer; the wrapper exposes it as a reference.
    return *(getName_abi());
}

inline omni::graph::exec::unstable::NodeIndexInTopology omni::core::Generated<
    omni::graph::exec::unstable::INode_abi>::getIndexInTopology() noexcept
{
    return getIndexInTopology_abi();
}

inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
    omni::graph::exec::unstable::INode_abi>::getParents() noexcept
{
    return getParents_abi();
}

inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
    omni::graph::exec::unstable::INode_abi>::getChildren() noexcept
{
    return getChildren_abi();
}

inline uint32_t omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getCycleParentCount() noexcept
{
    return getCycleParentCount_abi();
}

inline bool omni::core::Generated<omni::graph::exec::unstable::INode_abi>::isValidTopology() noexcept
{
    return isValidTopology_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::INode_abi>::validateOrResetTopology() noexcept
{
    validateOrResetTopology_abi();
}

inline omni::graph::exec::unstable::IDef* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getDef() noexcept
{
    return getDef_abi();
}

inline omni::graph::exec::unstable::INodeDef* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getNodeDef() noexcept
{
    return getNodeDef_abi();
}

inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
    omni::graph::exec::unstable::INode_abi>::getNodeGraphDef() noexcept
{
    return getNodeGraphDef_abi();
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IPass.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IPass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPass.
#pragma once

#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Types.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IPass;
class IPass_abi;

//! @defgroup groupOmniGraphExecPasses Passes
//!
//! @brief Interfaces, classes, and helpers related to graph transformation passes.
//!
//! Passes are user definable objects that populate, transform, and optimize the execution graph.
//!
//! Passes are registered using one of the @ref groupOmniGraphExecPassRegistration helpers.
//!
//! Passes are executed during graph construction via a @ref omni::graph::exec::unstable::PassPipeline.

//! Base class for graph transformation passes.
//!
//! This interface intentionally declares no methods of its own; it only serves as the common base (with its own
//! type id) from which concrete pass interfaces derive.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPass_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPass")>
{
};

//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IPass.
using PassPtr = omni::core::ObjectPtr<IPass>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPass.gen.h>

//! @copydoc omni::graph::exec::unstable::IPass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPass : public omni::core::Generated<omni::graph::exec::unstable::IPass_abi>
{
};

// additional headers needed for API implementation

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPass.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/Node.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file Node.h //! //! @brief Defines @ref omni::graph::exec::unstable::Node. #pragma once #include <omni/core/ResultError.h> #include <omni/graph/exec/unstable/Assert.h> #include <omni/graph/exec/unstable/IGraph.h> #include <omni/graph/exec/unstable/IGraphBuilderNode.h> #include <omni/graph/exec/unstable/INode.h> #include <omni/graph/exec/unstable/INodeDef.h> #include <omni/graph/exec/unstable/INodeGraphDef.h> #include <omni/graph/exec/unstable/ITopology.h> #include <omni/graph/exec/unstable/SmallVector.h> #include <omni/graph/exec/unstable/Types.h> namespace omni { namespace graph { namespace exec { namespace unstable { //! @copydoc omni::graph::exec::unstable::INode template <typename... Bases> class NodeT : public Implements<Bases...> { public: //! Constructor of a node with an empty definition. //! //! May throw. static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> topology, const char* idName) { OMNI_THROW_IF_ARG_NULL(topology); OMNI_THROW_IF_ARG_NULL(idName); return omni::core::steal(new NodeT(topology.get(), idName)); } //! Constructor of a node with an empty definition. //! //! May throw. static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner, const char* idName) { OMNI_THROW_IF_ARG_NULL(owner); OMNI_THROW_IF_ARG_NULL(idName); return omni::core::steal(new NodeT(owner->getTopology(), idName)); } //! Constructor of a node with a node graph definition. //! //! May throw. 
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner, omni::core::ObjectParam<INodeGraphDef> nodeGraphDef, const char* idName) { OMNI_THROW_IF_ARG_NULL(owner); OMNI_THROW_IF_ARG_NULL(idName); return omni::core::steal(new NodeT(owner->getTopology(), nodeGraphDef.get(), idName)); } //! Constructor of a node with a opaque node definition. //! //! May throw. static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> owner, omni::core::ObjectParam<INodeGraphDef> nodeGraphDef, const char* idName) { OMNI_THROW_IF_ARG_NULL(owner); OMNI_THROW_IF_ARG_NULL(idName); return omni::core::steal(new NodeT(owner.get(), nodeGraphDef.get(), idName)); } //! Constructor of a node with a opaque node definition. //! //! May throw. static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner, omni::core::ObjectParam<INodeDef> nodeDef, const char* idName) { OMNI_THROW_IF_ARG_NULL(owner); OMNI_THROW_IF_ARG_NULL(idName); return omni::core::steal(new NodeT(owner->getTopology(), nodeDef.get(), idName)); } //! Constructor of a node with a opaque node definition. //! //! May throw. static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> topology, omni::core::ObjectParam<INodeDef> nodeDef, const char* idName) { OMNI_THROW_IF_ARG_NULL(topology); OMNI_THROW_IF_ARG_NULL(idName); return omni::core::steal(new NodeT(topology.get(), nodeDef.get(), idName)); } //! Constructor of a node with a base definition (can be null_ptr, NodeDef or NodeGraphDef). //! //! May throw. 
static omni::core::ObjectPtr<NodeT> createForDef(omni::core::ObjectParam<ITopology> topology, omni::core::ObjectParam<IDef> def, const char* idName) { OMNI_THROW_IF_ARG_NULL(topology); OMNI_THROW_IF_ARG_NULL(idName); if (!def) return omni::core::steal(new NodeT(topology.get(), idName)); else if (auto* nodeDef = omni::graph::exec::unstable::cast<INodeDef>(def)) return omni::core::steal(new NodeT(topology.get(), nodeDef, idName)); else if (auto* nodeGraphDef = omni::graph::exec::unstable::cast<INodeGraphDef>(def)) return omni::core::steal(new NodeT(topology.get(), nodeGraphDef, idName)); else return nullptr; } //! Destructor virtual ~NodeT() { // in case we decide to implement move constructor if (m_indexInTopology != kInvalidNodeIndexInTopology) { m_topology->releaseNodeIndex(m_indexInTopology); if (isValidTopology_abi()) { m_topology->invalidate(); } } } // disambiguate between INode and IGraphBuilderNode using INode::getChildren; using INode::getParents; using INode::getTopology; protected: //! Core implementation of @ref omni::graph::exec::unstable::INode::getTopology_abi ITopology* getTopology_abi() noexcept override { return m_topology; } //! Core implementation of @ref omni::graph::exec::unstable::INode::getName_abi const ConstName* getName_abi() noexcept override { return &m_name; } //! Core implementation of @ref omni::graph::exec::unstable::INode::getIndexInTopology_abi NodeIndexInTopology getIndexInTopology_abi() noexcept override { return m_indexInTopology; } //! Core implementation of @ref omni::graph::exec::unstable::INode::getParents_abi Span<INode* const> getParents_abi() noexcept override { return isValidTopology_abi() ? Span<INode* const>{ m_parents.begin(), m_parents.size() } : Span<INode* const>{ nullptr, 0 }; } //! Core implementation of @ref omni::graph::exec::unstable::INode::getChildren_abi Span<INode* const> getChildren_abi() noexcept override { return isValidTopology_abi() ? 
Span<INode* const>{ m_children.begin(), m_children.size() } : Span<INode* const>{ nullptr, 0 };
}

//! Core implementation of @ref omni::graph::exec::unstable::INode::getCycleParentCount_abi
//!
//! Reports 0 whenever the topology is out of sync, mirroring the empty spans
//! returned by the parent/child accessors in that state.
uint32_t getCycleParentCount_abi() noexcept override
{
    return isValidTopology_abi() ? m_cycleParentCount : 0;
}

//! Core implementation of @ref omni::graph::exec::unstable::INode::isValidTopology_abi
//!
//! @note This method is called in the destructor and therefore must be marked as final
bool isValidTopology_abi() noexcept final override
{
    return m_topologyStamp.inSync(m_topology->getStamp());
}

//! Core implementation of @ref omni::graph::exec::unstable::INode::validateOrResetTopology_abi
virtual void validateOrResetTopology_abi() noexcept
{
    // makeSync returns true when the local stamp had to be updated, i.e. the
    // topology version moved since we last synced.
    if (m_topologyStamp.makeSync(m_topology->getStamp()))
    {
        // topology changed, let's clear the old one
        m_parents.clear();
        m_children.clear();
        m_cycleParentCount = 0;
    }
}

//! Core implementation of @ref omni::graph::exec::unstable::INode::getDef_abi
//!
//! A node holds at most one of the two definition kinds (see _setNodeDef_abi /
//! _setNodeGraphDef_abi, which each clear the other); the opaque-node
//! definition takes precedence when queried through the generic interface.
IDef* getDef_abi() noexcept override
{
    if (m_nodeDef.get())
    {
        return m_nodeDef.get();
    }
    else
    {
        return m_nodeGraphDef.get();
    }
}

//! Core implementation of @ref omni::graph::exec::unstable::INode::getNodeDef_abi
INodeDef* getNodeDef_abi() noexcept override
{
    return m_nodeDef.get();
}

//! Core implementation of @ref omni::graph::exec::unstable::INode::getNodeGraphDef_abi
INodeGraphDef* getNodeGraphDef_abi() noexcept override
{
    return m_nodeGraphDef.get();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_addParent_abi
omni::core::Result _addParent_abi(IGraphBuilderNode* parent) noexcept override
{
    try
    {
        // stored as a raw INode* — ownership stays with the caller per the interface contract
        OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, parent);
        m_parents.push_back(asNode);
        return omni::core::kResultSuccess;
    }
    OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeParent_abi
omni::core::Result _removeParent_abi(IGraphBuilderNode* parent) noexcept override
{
    try
    {
        OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, parent);
        _eraseRemove(m_parents, asNode);
        return omni::core::kResultSuccess;
    }
    OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_addChild_abi
omni::core::Result _addChild_abi(IGraphBuilderNode* child) noexcept override
{
    try
    {
        OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, child);
        m_children.push_back(asNode);
        return omni::core::kResultSuccess;
    }
    OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeChild_abi
omni::core::Result _removeChild_abi(IGraphBuilderNode* child) noexcept override
{
    try
    {
        OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, child);
        _eraseRemove(m_children, asNode);
        return omni::core::kResultSuccess;
    }
    OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeInvalidParents_abi
void _removeInvalidParents_abi() noexcept override
{
    // Only scan when this node's own connections are in sync; otherwise the
    // containers are stale as a whole and will be reset by validateOrResetTopology_abi.
    if (isValidTopology_abi())
    {
        m_parents.erase(
            std::remove_if(m_parents.begin(), m_parents.end(), [](INode* n) { return !n->isValidTopology(); }),
            m_parents.end());
    }
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeInvalidChildren_abi
void _removeInvalidChildren_abi() noexcept override
{
    if (isValidTopology_abi())
    {
        m_children.erase(
            std::remove_if(m_children.begin(), m_children.end(), [](INode* n) { return !n->isValidTopology(); }),
            m_children.end());
    }
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_invalidateConnections_abi
//!
//! @warning This only removes connections on a single node. The topology has bi-directional connections
//!          for every node with the exception of the connection with the root node.
void _invalidateConnections_abi() noexcept override
{
    m_topologyStamp.invalidate();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::setCycleParentCount_abi
void setCycleParentCount_abi(uint32_t count) noexcept override
{
    m_cycleParentCount = count;
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef_abi
//!
//! borrow() acquires a reference on the incoming definition; the alternate
//! definition kind is released so the two remain mutually exclusive.
void _setNodeDef_abi(INodeDef* nodeDef) noexcept override
{
    m_nodeDef.borrow(nodeDef);
    m_nodeGraphDef.release();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef_abi
void _setNodeGraphDef_abi(INodeGraphDef* nodeGraphDef) noexcept override
{
    m_nodeGraphDef.borrow(nodeGraphDef);
    m_nodeDef.release();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_clearDef_abi
void _clearDef_abi() noexcept override
{
    m_nodeDef.release();
    m_nodeGraphDef.release();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentAt_abi
omni::core::Result getParentAt_abi(uint64_t index, IGraphBuilderNode** out) noexcept override
{
    *out = nullptr;
    if (!isValidTopology_abi() || index >= m_parents.size())
    {
        OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultInvalidIndex);
    }
    else
    {
        OMNI_GRAPH_EXEC_CAST_OR_RETURN(
            asGraphBuilderNode, IGraphBuilderNode, m_parents[static_cast<uint32_t>(index)]);
        *out = asGraphBuilderNode; // explicitly does not acquire
        return omni::core::kResultSuccess;
    }
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount_abi
uint64_t getParentCount_abi() noexcept override
{
    return isValidTopology_abi() ? m_parents.size() : 0;
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildAt_abi
omni::core::Result getChildAt_abi(uint64_t index, IGraphBuilderNode** out) noexcept override
{
    *out = nullptr;
    if (!isValidTopology_abi() || index >= m_children.size())
    {
        OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultInvalidIndex);
    }
    else
    {
        OMNI_GRAPH_EXEC_CAST_OR_RETURN(
            asGraphBuilderNode, IGraphBuilderNode, m_children[static_cast<uint32_t>(index)]);
        *out = asGraphBuilderNode; // explicitly does not acquire
        return omni::core::kResultSuccess;
    }
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount_abi
uint64_t getChildCount_abi() noexcept override
{
    return isValidTopology_abi() ? m_children.size() : 0;
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::hasChild_abi
bool hasChild_abi(IGraphBuilderNode* node) noexcept override
{
    if (!isValidTopology_abi())
        return false;

    // cast<> (rather than the throwing macro) keeps this query non-throwing;
    // an un-castable node is simply reported as "not a child"
    auto asNode = omni::graph::exec::unstable::cast<INode>(node);
    if (!asNode)
    {
        return false;
    }
    return std::find(m_children.begin(), m_children.end(), asNode) != m_children.end();
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::isRoot_abi
bool isRoot_abi() noexcept override
{
    return (m_topology->getRoot() == static_cast<INode*>(this));
}

//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getRoot_abi
omni::core::Result getRoot_abi(IGraphBuilderNode** out) noexcept override
{
    *out = nullptr;
    OMNI_GRAPH_EXEC_CAST_OR_RETURN(asGraphBuilderNode, IGraphBuilderNode, m_topology->getRoot());
    *out = asGraphBuilderNode; // explicitly does not acquire
    return omni::core::kResultSuccess;
}

//! Constructor for a node with no definition attached.
NodeT(ITopology* topology, const char* idName) // may throw
    : m_topology{ topology }, m_indexInTopology{ m_topology->acquireNodeIndex() }, m_name{ idName }
{
}

//! Constructor for a node whose definition is a node graph.
//!
//! kBorrow: the ObjectPtr acquires its own reference on the given definition.
NodeT(ITopology* topology, INodeGraphDef* nodeGraphDef, const char* idName) // may throw
    : m_topology{ topology },
      m_indexInTopology{ m_topology->acquireNodeIndex() },
      m_nodeGraphDef{ nodeGraphDef, omni::core::kBorrow },
      m_name{ idName }
{
}

//! Constructor for a node with an opaque node definition.
NodeT(ITopology* topology, INodeDef* nodeDef, const char* idName) // may throw
    : m_topology{ topology },
      m_indexInTopology{ m_topology->acquireNodeIndex() },
      m_nodeDef{ nodeDef, omni::core::kBorrow },
      m_name{ idName }
{
}

private:
//! Container for connections.
//!
//! Using @ref omni::graph::exec::unstable::SmallVector with local storage space for two nodes.
//! The local space storage was hand picked, following the experience that most of graph nodes
//! have very few downstream nodes.
using NodeArray = SmallVector<INode*, 2>;

//! Helper erase-remove idiom to remove and eliminate a node from the container
template <typename T>
void _eraseRemove(T& v, INode* n) // may throw
{
    v.erase(std::remove(v.begin(), v.end(), n), v.end());
};

ITopology* m_topology; //!< Topology owning this node (non-owning back-pointer)

//! Acquired local index
NodeIndexInTopology m_indexInTopology{ kInvalidNodeIndexInTopology };

NodeArray m_parents; //!< Edges to parents
NodeArray m_children; //!< Edges to children

uint32_t m_cycleParentCount{ 0 }; //!< Cycling parents (used by the graph traversal)

SyncStamp m_topologyStamp; //!< Validity check for edges

omni::core::ObjectPtr<INodeDef> m_nodeDef; //!< Node definition (mutually exclusive with m_nodeGraphDef)
omni::core::ObjectPtr<INodeGraphDef> m_nodeGraphDef; //!< Node graph definition

ConstName m_name; //!< Identifier name
};

//! Core Node implementation for @ref omni::graph::exec::unstable::INode
using Node = NodeT<INode, IGraphBuilderNode>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/Status.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file Status.h //! //! @brief Defines omni::graph::exec::unstable::Status. #pragma once #include <omni/graph/exec/unstable/EnumBitops.h> #include <cstdint> namespace omni { namespace graph { namespace exec { namespace unstable { //! Return status of all executions. enum class Status : uint32_t { eUnknown = 0, //!< Status is undetermined yet eSuccess = 1 << 0, //!< Execution was successful eSkip = 1 << 1, //!< Execution was skipped eDeferred = 1 << 2, //!< Execution was deferred to start and/or complete outside of current execution frame eFailure = 1 << 3, //!< Execution failed eInProgress = 1 << 4 //!< Execution is in progress }; //! Enable bitwise operations on return state. template <> struct EnumBitops<Status> : EnumBitops<>::allow_bitops { }; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderNode.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IGraphBuilderNode.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderNode.

#pragma once

#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/ElementAt.h>
#include <omni/graph/exec/unstable/IBase.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IGraphBuilderNode;
class IGraphBuilderNode_abi;
class INode;
class INodeDef;
class INodeGraphDef;
class ITopology;

//! Describes a node @ref omni::graph::exec::unstable::IGraphBuilder can manipulate.
//!
//! Only @ref omni::graph::exec::unstable::IGraphBuilder should use @ref omni::graph::exec::unstable::IGraphBuilderNode.
//! One way to think about this interface is that it is a private interface used by
//! @ref omni::graph::exec::unstable::IGraphBuilder to connect instances of @ref omni::graph::exec::unstable::INode.
class IGraphBuilderNode_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
                                  OMNI_TYPE_ID("omni.graph.exec.unstable.IGraphBuilderNode")>
{
protected:
    //! Adds the given node as a parent (i.e. upstream) of this node.
    //!
    //! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
    //! node persists while in use by this interface.
    //!
    //! @p parent must not be @c nullptr.
    //!
    //! It is undefined behavior to add a parent multiple times to a node.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        _addParent_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* parent) noexcept = 0;

    //! Removes the given node as a parent.
    //!
    //! If given node is not a parent, this method returns success.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        _removeParent_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* parent) noexcept = 0;

    //! Adds the given node as a child (i.e. downstream) of this node.
    //!
    //! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
    //! node persists while in use by this interface.
    //!
    //! @p child must not be @c nullptr.
    //!
    //! It is undefined behavior to add a child multiple times to a node.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        _addChild_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* child) noexcept = 0;

    //! Removes the given node as a child.
    //!
    //! If given node is not a child, this method returns success.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        _removeChild_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* child) noexcept = 0;

    //! Remove from the container parent nodes that no longer exist in current topology, i.e. are invalid.
    //!
    //! @ref omni::core::IObject::release() is not called on the invalid nodes.
    //!
    //! This method is not thread safe.
    virtual void _removeInvalidParents_abi() noexcept = 0;

    //! Remove from the container child nodes that no longer exist in current topology, i.e. are invalid.
    //!
    //! @ref omni::core::IObject::release() is not called on the invalid nodes.
    //!
    //! This method is not thread safe.
    virtual void _removeInvalidChildren_abi() noexcept = 0;

    //! Invalidate all children and parents connections by invalidating the topology this node is sync with.
    //!
    //! This method is thread safe.
    virtual void _invalidateConnections_abi() noexcept = 0;

    //! Sets the number of parents who are a part of cycle.
    //!
    //! This method is not thread safe.
    virtual void setCycleParentCount_abi(uint32_t count) noexcept = 0;

    //! Sets the definition for this node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef().
    //!
    //! This method is not thread safe.
    virtual void _setNodeDef_abi(INodeDef* nodeDef) noexcept = 0;

    //! Sets the definition for this node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef().
    //!
    //! This method is not thread safe.
    virtual void _setNodeGraphDef_abi(INodeGraphDef* nodeGraphDef) noexcept = 0;

    //! Unsets this node's definition.
    //!
    //! If the definition is already @c nullptr, this method does nothing.
    //!
    //! This method is not thread safe.
    virtual void _clearDef_abi() noexcept = 0;

    //! Access the topology owning this node.
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    //!
    //! This method is not thread safe.
    virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;

    //! Make topology valid for current topology version. Drop all the connections if topology changed.
    //!
    //! This method is not thread safe.
    virtual void validateOrResetTopology_abi() noexcept = 0;

    //! Access parent at the given index.
    //!
    //! If the given index is greater than or equal to the parent count, an error is returned.
    //!
    //! This method is not thread safe.
    //!
    //! May throw due to internal casting.
    //!
    //! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount().
    //!
    //! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getParents()
    //! for a modern C++ wrapper to this method.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        getParentAt_abi(uint64_t index,
                        OMNI_ATTR("not_null, throw_if_null, out, *no_acquire, *return") IGraphBuilderNode** out) noexcept = 0;

    //! Returns the number of parents.
    //!
    //! This method is not thread safe.
    virtual uint64_t getParentCount_abi() noexcept = 0;

    //! Access child at the given index.
    //!
    //! If the given index is greater than or equal to the child count, an error is returned.
    //!
    //! This method is not thread safe.
    //!
    //! May throw due to internal casting.
    //!
    //! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount().
    //!
    //! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildren()
    //! for a modern C++ wrapper to this method.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        getChildAt_abi(uint64_t index,
                       OMNI_ATTR("not_null, throw_if_null, out, *no_acquire, *return") IGraphBuilderNode** out) noexcept = 0;

    //! Returns the number of children.
    //!
    //! This method is not thread safe.
    virtual uint64_t getChildCount_abi() noexcept = 0;

    //! Returns @c true if the given node is an immediate child of this node.
    //!
    //! @p node may be @c nullptr.
    //!
    //! This method is not thread safe.
    virtual bool hasChild_abi(IGraphBuilderNode* node) noexcept = 0;

    //! Returns @c true if this node is the root of the topology.
    //!
    //! This method is not thread safe.
    virtual bool isRoot_abi() noexcept = 0;

    //! Returns the root node of the topology of which this node is a part.
    //!
    //! This method is not thread safe.
    //!
    //! May throw due to internal casting.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual omni::core::Result getRoot_abi(
        OMNI_ATTR("not_null, throw_if_null, out, *no_acquire") IGraphBuilderNode** out) noexcept = 0;
};

//! Smart pointer managing an instance of @ref IGraphBuilderNode.
using GraphBuilderNodePtr = omni::core::ObjectPtr<IGraphBuilderNode>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraphBuilderNode.gen.h>

//! @copydoc omni::graph::exec::unstable::IGraphBuilderNode_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraphBuilderNode
    : public omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>
{
public:
    //! Implementation detail to access parent ABI.
    struct GetParent
    {
        //! Access element at a given index
        static void getAt(IGraphBuilderNode* owner, uint64_t index, IGraphBuilderNode** out)
        {
            *out = owner->getParentAt(index);
        }

        //! Returns element count
        static uint64_t getCount(IGraphBuilderNode* owner)
        {
            return owner->getParentCount();
        }
    };

    //! Implementation detail to access children ABI.
    struct GetChild
    {
        //! Access element at a given index
        static void getAt(IGraphBuilderNode* owner, uint64_t index, IGraphBuilderNode** out)
        {
            *out = owner->getChildAt(index);
        }

        //! Returns element count
        static uint64_t getCount(IGraphBuilderNode* owner)
        {
            return owner->getChildCount();
        }
    };

    //! Implementation details that wraps index-based node access with iterators.
    using Parents = detail::ElementAt<IGraphBuilderNode, IGraphBuilderNode*, GetParent>;

    //! Implementation details that wraps index-based node access with iterators.
    using Children = detail::ElementAt<IGraphBuilderNode, IGraphBuilderNode*, GetChild>;

    //! Returns an object that allows the list of parents to be iterated over (i.e. using range-based for loops).
    Parents getParents() noexcept
    {
        return Parents(this);
    }

    //! Returns an object that allows the list of children to be iterated over (i.e. using range-based for loops).
    Children getChildren() noexcept
    {
        return Children(this);
    }

    //! Returns the root node of the topology of which this node is a part.
    //!
    //! May throw.
    inline IGraphBuilderNode* getRoot();
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/ITopology.h>

inline omni::graph::exec::unstable::IGraphBuilderNode* omni::graph::exec::unstable::IGraphBuilderNode::getRoot()
{
    IGraphBuilderNode* out;
    OMNI_THROW_IF_FAILED(getRoot_abi(&out));
    return out;
}

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraphBuilderNode.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionStateInfo.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IExecutionStateInfo.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IExecutionStateInfo.

#pragma once

#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Types.h>

#include <memory>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IBackgroundResult;
class IExecutionStateInfo;
class IExecutionStateInfo_abi;

//! State associated with a given execution task
//!
//! @note We separated execution state from the execution graph to allow concurrent and/or nested execution
class IExecutionStateInfo_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
                                  OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutionStateInfo")>
{
protected:
    //! Store a "future" result for this state. The actual computation is running asynchronously outside of execution
    //! frame
    //!
    //! @return \c true if execution state accepts "future" results.
    virtual bool storeBackgroundResult_abi(OMNI_ATTR("not_null, throw_if_null") IBackgroundResult* result) noexcept = 0;

    //! Query used by some executors to determine if computation of a node is necessary
    virtual bool needsCompute_abi(Stamp execVersion) noexcept = 0;

    //! Set to request computation
    virtual void requestCompute_abi() noexcept = 0;

    //! Reset request to compute after computation was performed
    virtual void setComputed_abi() noexcept = 0;

    //! Get current/last exec version set for this node during execution
    virtual SyncStamp getExecutionStamp_abi() noexcept = 0;

    //! Set current exec version for this node. Returns true if version wasn't in sync.
    virtual bool setExecutionStamp_abi(Stamp execVersion) noexcept = 0;

    //! Returns a value from a node's key/value datastore.
    //!
    //! The key is used as a look-up in the node's key/value datastore.
    //!
    //! The type of each data item is returned in @p outTypeId.
    //!
    //! @p outPtr will be updated with a pointer to the actual data.
    //!
    //! @p outItemSize store the size of each item in the returned array.
    //!
    //! @p outItemCount contains the number of items returned (i.e. the number
    //! of items @p outPtr points to).  For an array, this will be greater than
    //! 1.
    //!
    //! If the key is not found, @p outPtr is set to @c nullptr and @p
    //! outItemCount is set to 0.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! An exception is thrown on all other errors.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        getNodeData_abi(NodeDataKey key,
                        OMNI_ATTR("out, not_null, throw_if_null") omni::core::TypeId* outTypeId,
                        OMNI_ATTR("out, not_null, throw_if_null, *out, *in") void** outPtr,
                        OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemSize,
                        OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemCount) noexcept = 0;

    //! Sets a value in a node's key/value datastore.
    //!
    //! The key is used as a look-up in the node's key/value datastore.
    //!
    //! The type of each data item is set with @p typeId.
    //!
    //! @p data points to an array of data items.
    //!
    //! @p itemSize is the size of each item in the given array.
    //!
    //! @p itemCount contains the number of items pointed to by @p data.  For an
    //! array, this will be greater than 1.
    //!
    //! @p deleter is a function used to delete @p data when either a new value
    //! is set at the key or the context is invalidated.  If @p deleter is @c
    //! nullptr, it is up to the calling code to manage the lifetime of the @p
    //! data.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! An exception is thrown on all other errors.
    virtual OMNI_ATTR("throw_result") omni::core::Result
        setNodeData_abi(NodeDataKey key,
                        omni::core::TypeId typeId,
                        OMNI_ATTR("in, out, not_null, throw_if_null") void* data,
                        uint64_t itemSize,
                        uint64_t itemCount,
                        OMNI_ATTR("in, out") NodeDataDeleterFn* deleter) noexcept = 0;
};

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutionStateInfo.gen.h>

//! @copydoc omni::graph::exec::unstable::IExecutionStateInfo_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutionStateInfo
    : public omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>
{
public:
    //! Returns a pointer to a value stored in the node's key/value datastore.
    //!
    //! If there is no value stored at the given @p key an empty span is
    //! returned.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! If the type @c T does not match the type of the store data, an exception
    //! is thrown.
    //!
    //! An exception is thrown on all other errors.
    //!
    //! Prefer using @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() instead of this method, which will populate the type id for
    //! you.
    template <typename T>
    inline Span<T> getNodeDataAs(omni::core::TypeId desiredType, NodeDataKey key);

    //! Stores a value in the node's key/value datastore.
    //!
    //! If a value is already stored at the given @p key it will be replaced.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! An exception is thrown on all errors.
    //!
    //! Prefer using @ref OMNI_GRAPH_EXEC_SET_NODE_DATA() instead of this method, which will populate the type id for
    //! you.
    template <typename SpecifiedT, typename DataT>
    inline void setNodeData(omni::core::TypeId itemType, NodeDataKey key, std::unique_ptr<DataT> data);
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IBackgroundResult.h>

#ifndef DOXYGEN_BUILD // templates and doxygen are not friends (remove this line to see why)
template <typename T>
inline omni::graph::exec::unstable::Span<T> omni::graph::exec::unstable::IExecutionStateInfo::getNodeDataAs(
    omni::core::TypeId desiredType, NodeDataKey key)
{
    omni::core::TypeId outType;
    void* outPtr;
    uint64_t outItemSize, outItemCount;
    OMNI_THROW_IF_FAILED(getNodeData_abi(key, &outType, &outPtr, &outItemSize, &outItemCount));

    // type/size are only validated when data was actually found; a missing key
    // yields outPtr == nullptr and falls through to return an empty span
    if (outPtr)
    {
        if (outType != desiredType)
        {
            throw omni::core::ResultError(omni::core::kResultInvalidDataType);
        }

        if (outItemSize != sizeof(T))
        {
            throw omni::core::ResultError(omni::core::kResultInvalidDataSize);
        }
    }

    return Span<T>{ reinterpret_cast<T*>(outPtr), outItemCount };
}

template <typename SpecifiedT, typename DataT>
inline void omni::graph::exec::unstable::IExecutionStateInfo::setNodeData(omni::core::TypeId desiredType,
                                                                          NodeDataKey key,
                                                                          std::unique_ptr<DataT> data)
{
    static_assert(std::is_same<SpecifiedT, DataT>::value, "given TypeId does not match the data type");
    static_assert(!std::is_array<DataT>::value, "setting arrays as node data via unique_ptr not yet implemented");

    // the deleter re-creates the unique_ptr's deleter type so the datastore can
    // destroy the object with the same semantics the caller's unique_ptr would have
    OMNI_THROW_IF_FAILED(setNodeData_abi(key, desiredType, data.get(), sizeof(DataT), 1,
                                         [](void* p)
                                         {
                                             typename std::unique_ptr<DataT>::deleter_type deleter;
                                             deleter(reinterpret_cast<DataT*>(p));
                                         }));
    data.release(); // now safe to release ownership (the datastore owns the object from here on)
}
#endif // DOXYGEN_BUILD

//! Calls either @ref omni::graph::exec::unstable::IExecutionContext::getNodeDataAs() or @ref
//! omni::graph::exec::unstable::IExecutionStateInfo::getNodeDataAs() (dependent on the type of the first argument).
//!
//! The purpose of this macro is to generate an appropriate @ref omni::core::TypeId at compile time from the data item's
//! type.  The user can manually do this, but this macro is much less error prone.
//!
//! @code
//!   auto data = OMNI_GRAPH_EXEC_GET_NODE_DATA_AS( task->getContext(), GraphContextCacheOverride,
//!                                                 task->getUpstreamPath(), nullptr, tokens::kInstanceContext).data();
//! @endcode
//!
//! The macro itself is a variadic macro and can map to multiple overloads of @c getNodeDataAs() methods in the
//! interface given as the first argument
//!
//! With newer compilers (GCC >= 8), this macro can be replaced with templated methods (without breaking the ABI).
#define OMNI_GRAPH_EXEC_GET_NODE_DATA_AS(context_, type_, ...)                                                         \
    context_->getNodeDataAs<type_>(CARB_HASH_STRING(CARB_STRINGIFY(type_)), __VA_ARGS__)

// the ugly macro above is used to hash the type of the data at compile time.
//
// it's possible to get the type of data at compile time by inspecting the function name (e.g. __FUNCSIG__ and
// __PRETTY_FUNCTION__).  however __PRETTY_FUNCTION__ was not a constexpr until GCC 8.  omniverse currently uses GCC 7
// so were left with this hack.
//
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66639

//! Calls either @ref omni::graph::exec::unstable::IExecutionContext::setNodeData() or @ref
//! omni::graph::exec::unstable::IExecutionStateInfo::setNodeData() (dependent on the type of the first argument).
//!
//! The purpose of this macro is to generate an appropriate @ref omni::core::TypeId at compile time from the data item's
//! type.  The user can manually do this, but this macro is much less error prone.
//!
//! @code
//!   OMNI_GRAPH_EXEC_SET_NODE_DATA(stateInfo, GraphContextCacheOverride, tokens::kInstanceContext,
//!                                 std::move(contextOverridePtr));
//! @endcode
//!
//! The macro itself is a variadic macro and can map to multiple overloads of @c setNodeData() methods in the interface
//! given as the first argument
//!
//! With newer compilers (GCC >= 8), this macro can be replaced with templated methods (without breaking the ABI).
#define OMNI_GRAPH_EXEC_SET_NODE_DATA(context_, type_, ...)                                                            \
    context_->setNodeData<type_>(CARB_HASH_STRING(CARB_STRINGIFY(type_)), __VA_ARGS__)

// the ugly macro above is used to hash the type of the data at compile time.
//
// it's possible to get the type of data at compile time by inspecting the function name (e.g. __FUNCSIG__ and
// __PRETTY_FUNCTION__).  however __PRETTY_FUNCTION__ was not a constexpr until GCC 8.  omniverse currently uses GCC 7
// so were left with this hack.
//
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66639

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutionStateInfo.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/Traversal.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file Traversal.h
//!
//! @brief Defines graph traversal algorithms.
//! See @rstref{Traversing a Graph <ef_graph_traversal_guide>} and
//! @rstref{Graph Traversal In-Depth <ef_graph_traversal_advanced>}
//! for more information how to utilize graph traversals.
#pragma once

#include <concurrentqueue/include_concurrentqueue.h>
#include <omni/graph/exec/unstable/AtomicBackoff.h>
#include <omni/graph/exec/unstable/EnumBitops.h>

#include <atomic>
#include <functional>
#include <memory>
#include <queue>
#include <type_traits>
#include <vector>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! Implementation details.
namespace detail
{

//! Information returned to traversal callback indicating visit order
enum class VisitOrder
{
    eUnknown = 0,    //!< Visit order is not specified
    eFirst = 1 << 0, //!< This is the first visit to a node during the traversal
    eNext = 1 << 1,  //!< This is a next visit to a node, i.e. not a first one and not the last one
    eLast = 1 << 2,  //!< This is the last visit to a node during the traversal
    eCycle = 1 << 3  //!< In case of cycles, when traversal enters node at the last visit, more visit can happen with
                     //!< cycle order
};

} // namespace detail

//! Enable bitwise operation on VisitOrder
template <>
struct EnumBitops<detail::VisitOrder> : EnumBitops<>::allow_bitops
{
};

namespace detail
{

//! Traversal information stored per node
struct NodeData
{
    std::atomic<std::size_t> visitCount{ 0 }; //!< How many times this node was visited

    //! Copy constructor
    //!
    //! Compiler will not generate by default copy constructor (nor assignment operator) due to atomic member.
    //! We are adding it explicitly here because the usage pattern is not allowing copy construction
    //! in concurrent execution.
    NodeData(const NodeData& src) : visitCount(src.visitCount.load())
    {
    }

    //! Assignment operator
    //!
    //! Compiler will not generate by default copy constructor (nor assignment operator) due to atomic member.
    //! We are adding it explicitly here because the usage pattern is not allowing assignment
    //! in concurrent execution.
    NodeData& operator=(const NodeData& rhs)
    {
        visitCount.store(rhs.visitCount.load());
        return *this;
    }

    // and because we added the above, we still have to add a default constructor which can't be added implicitly

    //! Default constructor
    NodeData()
    {
    }
};

// ef-docs visit-first-begin
//! Traversal strategy that enters the node when it was first time discovered
struct VisitFirst
{
    //! Call to traverse the graph with a strategy to visit only when first time discovered
    template <typename Node, typename NodeData>
    static VisitOrder tryVisit(Node* node, NodeData& nodeData)
    {
        auto lastVisit = nodeData.visitCount++; // read+increment only once. other threads can be doing the same.
        return (lastVisit == 0) ? VisitOrder::eFirst : VisitOrder::eUnknown;
    }
};
// ef-docs visit-first-end

// ef-docs visit-last-begin
//! Traversal strategy that enters the node when entire upstream was already visited and this is the last
//! opportunity to enter the node.
//!
//! In case of cycles, this algorithm is relying on knowledge of number of parents that are causing cycles.
struct VisitLast
{
    //! Call to traverse the graph with a strategy to visit only when no more visits are possible
    template <typename Node, typename NodeData>
    static VisitOrder tryVisit(Node& node, NodeData& nodeData)
    {
        // parents that close cycles are excluded so the count can actually be reached
        auto requiredCount = node->getParents().size() - node->getCycleParentCount();
        auto currentVisit = ++nodeData.visitCount; // increment+read only once. other threads can be doing the same.
        if (requiredCount == 0 && currentVisit == 1)
        {
            return VisitOrder::eLast;
        }
        else if (currentVisit == requiredCount)
        {
            return VisitOrder::eLast;
        }
        return VisitOrder::eUnknown;
    }
};
// ef-docs visit-last-end

// ef-docs visit-all-begin
//! Traversal strategy that allows discovering all the edges in the graph. Traversal continuation is controlled by user
//! code.
struct VisitAll
{
    //! Call to traverse the graph with a strategy to visit all edges of the graph
    template <typename Node, typename NodeData>
    static VisitOrder tryVisit(Node& node, NodeData& nodeData)
    {
        auto parentCount = node->getParents().size();
        auto requiredCount = parentCount - node->getCycleParentCount();
        auto currentVisit = ++nodeData.visitCount; // increment+read only once. other threads can be doing the same.
        if (requiredCount == 0 && currentVisit == 1)
        {
            return (VisitOrder::eFirst | VisitOrder::eLast);
        }

        VisitOrder ret = VisitOrder::eUnknown;
        if (currentVisit > requiredCount)
        {
            // more visits than non-cycle parents means a cycle edge brought us here
            ret = VisitOrder::eCycle;
        }
        else if (currentVisit == requiredCount)
        {
            ret = (currentVisit == 1) ? (VisitOrder::eFirst | VisitOrder::eLast) : VisitOrder::eLast;
        }
        else if (currentVisit == 1)
        {
            ret = VisitOrder::eFirst;
        }
        else
        {
            ret = VisitOrder::eNext;
        }
        return ret;
    }
};
// ef-docs visit-all-end

#ifndef DOXYGEN_BUILD

// Tag types selecting the traversal flow (depth-first vs breadth-first) and the queue implementation.
struct FlowDFS
{
};
struct FlowBFS
{
};
struct SerialQueue
{
};
struct ConcurrentQueue
{
};

// Primary template; specialized per flow/queue combination below.
template <typename GraphNode, typename Flow, typename Queue, bool ConstNode, typename Enable = void>
struct TraversalBase
{
};

// Base traversal class when using DFS traversal order; no queue or pending-work tracking is needed.
template <typename GraphNode, typename Flow, typename Queue, bool ConstNode>
struct TraversalBase<GraphNode, Flow, Queue, ConstNode, std::enable_if_t<std::is_same<Flow, FlowDFS>::value>>
{
    void incrementInfo()
    {
    }
    void decrementInfo()
    {
    }
};
Base traversal class when using BFS traversal order and doesn't require thread safe queue template <typename GraphNode, typename Flow, typename Queue, bool ConstNode> struct TraversalBase<GraphNode, Flow, Queue, ConstNode, std::enable_if_t<std::is_same<Flow, FlowBFS>::value && std::is_same<Queue, SerialQueue>::value>> { using Node = typename std::conditional_t<ConstNode, const GraphNode, GraphNode>; using NodeQueue = std::queue<Node*>; NodeQueue m_queue; void push(Node* node) { m_queue.push(&node); } bool tryPop(Node*& node) { if (m_queue.empty()) { return false; } node = m_queue.front(); m_queue.pop(); return true; } void incrementInfo() { } void decrementInfo() { } bool hasInfo() const { return false; } }; //! Base traversal class when using BFS traversal order and DOES require thread safe queue template <typename GraphNode, typename Flow, typename Queue, bool ConstNode> struct TraversalBase<GraphNode, Flow, Queue, ConstNode, std::enable_if_t<std::is_same<Flow, FlowBFS>::value && std::is_same<Queue, ConcurrentQueue>::value>> { using Node = typename std::conditional_t<ConstNode, const GraphNode, GraphNode>; using NodeQueue = moodycamel::ConcurrentQueue<Node*>; NodeQueue m_queue; std::atomic<std::size_t> m_infoCount{ 0 }; TraversalBase() noexcept { } TraversalBase(TraversalBase&& src) noexcept : m_queue(std::move(src.m_queue)), m_infoCount(src.m_infoCount.load()) { } TraversalBase(TraversalBase& src) = delete; void push(Node* node) { m_queue.enqueue(node); } bool tryPop(Node*& node) { return m_queue.try_dequeue(node); } void incrementInfo() { m_infoCount++; } void decrementInfo() { m_infoCount--; } bool hasInfo() const { return m_infoCount.load() > 0; } }; struct NoUserData { }; template <typename GraphNode, typename NodeUserData, typename Enable = void> struct UserDataBase { explicit UserDataBase(std::size_t size) noexcept { } UserDataBase(UserDataBase&& src) noexcept { } }; //! 
//! User defined data class to be available for each traversed node
template <typename GraphNode, typename NodeUserData>
struct UserDataBase<GraphNode, NodeUserData, std::enable_if_t<!std::is_same<NodeUserData, NoUserData>::value>>
{
    static_assert(std::is_trivially_copyable<NodeUserData>::value, "User data needs to be trivially copyable");

    //! Constructor. @p size is the number of nodes in the topology (one user-data slot per node).
    explicit UserDataBase(std::size_t size) noexcept : m_userData(size)
    {
    }

    //! Move constructor
    UserDataBase(UserDataBase&& src) noexcept : m_userData(std::move(src.m_userData))
    {
    }

    //! Access the user-data slot for @p node (indexed by the node's index in the topology).
    NodeUserData& userData(GraphNode* node)
    {
        return m_userData[node->getIndexInTopology()];
    }

    using NodeUserDataArray = std::vector<NodeUserData>;
    NodeUserDataArray m_userData;
};

using QueueType = ConcurrentQueue; // or SerialQueue (but serial queue will fail with some multithreaded unit tests)

//! Traversal class
//!
//! @tparam GraphNode    Node typename
//! @tparam Graph        Graph typename
//! @tparam Strategy     Traversal visit strategy (first, last, all) typename
//! @tparam Flow         Visit flow (DFS or BFS) typename
//! @tparam NodeUserData Custom user data typename allocated for each node
//! @tparam ConstNode    Is this a const traversal
template <typename GraphNode, typename Graph, typename Strategy, typename Flow, typename NodeUserData, bool ConstNode>
class Traversal : private TraversalBase<GraphNode, Flow, QueueType, ConstNode>,
                  private UserDataBase<GraphNode, NodeUserData>
{
public:
    using Node = typename std::conditional_t<ConstNode, const GraphNode, GraphNode>;
    using Base = TraversalBase<GraphNode, Flow, QueueType, ConstNode>;
    using BaseUserData = UserDataBase<GraphNode, NodeUserData>;

    //! Per-visit payload handed to the traversal callback.
    //!
    //! Each live Info increments the traversal's info counter so the BFS pump loop does not
    //! exit while callbacks (possibly on other threads) may still enqueue more work.
    struct Info
    {
        Traversal& traversal; //!< owning traversal
        VisitOrder order;     //!< visit order reported by the strategy for this visit

        Info(Traversal& t, VisitOrder o) noexcept : traversal(t), order(o)
        {
            traversal.Base::incrementInfo();
        }
        Info(const Info& src) noexcept : traversal(src.traversal), order(src.order)
        {
            traversal.Base::incrementInfo();
        }
        Info(Info&& src) noexcept : traversal(src.traversal), order(src.order)
        {
            traversal.Base::incrementInfo();
        }
        ~Info()
        {
            traversal.Base::decrementInfo();
        }
        Info& operator=(const Info& rhs) = delete;
        Info& operator=(Info&& rhs) = delete;

        //! Returns true when this visit carries the eFirst order bit.
        bool isFirstVisit() const
        {
            return (order & VisitOrder::eFirst) == VisitOrder::eFirst;
        }
        //! Returns true when this visit carries the eLast order bit.
        bool isLastVisit() const
        {
            return (order & VisitOrder::eLast) == VisitOrder::eLast;
        }
        //! Continue the traversal downstream of @p node.
        void continueVisit(GraphNode* node)
        {
            return traversal.continueVisit(node);
        }
        //! Access the user data allocated for @p node.
        NodeUserData& userData(GraphNode* node)
        {
            return traversal.BaseUserData::userData(node);
        }
    };

    using CallbackType = void(Info, GraphNode*, GraphNode*);
    using CallbackFn = std::function<CallbackType>;
    using NodeDataArray = std::vector<NodeData>;

    //! Constructor. Allocates per-node traversal data (and user data) for every node in @p g.
    explicit Traversal(Graph* g, CallbackFn call) noexcept
        : BaseUserData(g->getNodeCount()), m_callback(call), m_data(g->getNodeCount())
    {
    }

    Traversal() = delete;
    Traversal(const Traversal& src) = delete;
    Traversal(Traversal&& src) = delete;
    Traversal& operator=(const Traversal& rhs) = delete;
    Traversal& operator=(Traversal&& rhs) = delete;

    //! Continue the traversal downstream of @p prev (DFS recurses immediately, BFS enqueues).
    void continueVisit(GraphNode* prev)
    {
        continueImpl(Flow(), prev);
    }

    //! Start the traversal at @p node.
    void startVisit(GraphNode* node)
    {
        startImpl(Flow(), node);
    }

    //! Mark @p node as fully visited so the strategy treats its downstream as reachable.
    void markVisited(GraphNode* node)
    {
        nodeData(node).visitCount = node->getParents().size();
    }

private:
    // see if our traversal policy allows us to visit this node
    void tryVisit(GraphNode* prev, GraphNode* current)
    {
        VisitOrder visitOrder = Strategy::tryVisit(current, nodeData(current));
        if (visitOrder > VisitOrder::eUnknown)
        {
            m_callback({ *this, visitOrder }, prev, current);
        }
    }

    // attempt to visit every child of the current node
    void tryContinue(GraphNode* current)
    {
        for (auto child : current->getChildren())
        {
            tryVisit(current, child);
        }
    }

    void continueImpl(FlowDFS, Node* prev)
    {
        tryContinue(prev);
    }

    void continueImpl(FlowBFS, Node* prev)
    {
        Base::push(prev);
    }

    void startImpl(FlowDFS, Node* node)
    {
        markVisited(node);
        tryContinue(node);
    }

    void startImpl(FlowBFS, Node* node)
    {
        markVisited(node);
        Base::push(node);

        AtomicBackoff backoff;
        Node* stackNode = nullptr;
        while (true)
        {
            if (Base::tryPop(stackNode))
            {
                tryContinue(stackNode);
                backoff.reset();
                continue;
            }

            // queue is empty; spin while outstanding Info objects may still enqueue more work
            if (Base::hasInfo())
            {
                backoff.pause();
            }
            else
            {
                break;
            }
        }
    }

    NodeData& nodeData(GraphNode* node)
    {
        return m_data[node->getIndexInTopology()];
    }

    CallbackFn m_callback;
    NodeDataArray m_data;
};

//! Main traversal template for DFS algorithms
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
void run_traversal_dfs(typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
                       typename Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode>::CallbackFn call)
{
    Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode> traversal(node->getTopology(), call);
    traversal.startVisit(node);
}
//! Main traversal template for BFS algorithms
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
void run_traversal_bfs(typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
                       typename Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode>::CallbackFn call)
{
    Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode> traversal(node->getTopology(), call);
    traversal.startVisit(node);
}

//! Main traversal template for DFS algorithms. Traversal is allocated on the heap and returned.
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
auto alloc_and_run_traversal_dfs(
    typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
    typename Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode>::CallbackFn call)
{
    using TraversalType = Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode>;
    std::unique_ptr<TraversalType> traversal = std::make_unique<TraversalType>(node->getTopology(), call);
    traversal->startVisit(node);
    return traversal;
}

//! Main traversal template for BFS algorithms. Traversal is allocated on the heap and returned.
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
auto alloc_and_run_traversal_bfs(
    typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
    typename Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode>::CallbackFn call)
{
    using TraversalType = Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode>;
    std::unique_ptr<TraversalType> traversal = std::make_unique<TraversalType>(node->getTopology(), call);
    traversal->startVisit(node);
    return traversal;
}

#endif // DOXYGEN_BUILD

} // namespace detail
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/ITopology.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! @copydoc omni::graph::exec::unstable::detail::VisitFirst
using VisitFirst = detail::VisitFirst;

//! @copydoc omni::graph::exec::unstable::detail::VisitLast
using VisitLast = detail::VisitLast;

//! @copydoc omni::graph::exec::unstable::detail::VisitAll
using VisitAll = detail::VisitAll;

using detail::FlowBFS;
using detail::FlowDFS;
using detail::NoUserData;
using detail::Traversal;
using detail::VisitOrder;

// ef-docs traversal-methods-begin
//! Deep-first-search traversal
template <typename Strategy, typename NodeUserData = NoUserData>
void traversal_dfs(INode* node,
                   typename Traversal<INode, ITopology, Strategy, FlowDFS, NodeUserData, false>::CallbackFn call)
{
    detail::run_traversal_dfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}

//! Breadth-first-search traversal
template <typename Strategy, typename NodeUserData = NoUserData>
void traversal_bfs(INode* node,
                   typename Traversal<INode, ITopology, Strategy, FlowBFS, NodeUserData, false>::CallbackFn call)
{
    detail::run_traversal_bfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}

//! Deep-first-search traversal. Traversal allocated on heap and returned to extend lifetime
//! to the end of all concurrent tasks.
template <typename Strategy, typename NodeUserData = NoUserData>
auto concurrent_traversal_dfs(
    INode* node, typename Traversal<INode, ITopology, Strategy, FlowDFS, NodeUserData, false>::CallbackFn call)
{
    return detail::alloc_and_run_traversal_dfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}

//! Breadth-first-search traversal. Traversal allocated on heap and returned to extend lifetime
//! to the end of all concurrent tasks.
template <typename Strategy, typename NodeUserData = NoUserData>
auto concurrent_traversal_bfs(
    INode* node, typename Traversal<INode, ITopology, Strategy, FlowBFS, NodeUserData, false>::CallbackFn call)
{
    return detail::alloc_and_run_traversal_bfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}
// ef-docs traversal-methods-end

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDefDebug.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file INodeGraphDefDebug.h //! //! @brief Defines @ref omni::graph::exec::unstable::INodeGraphDefDebug. #pragma once #include <omni/graph/exec/unstable/IBase.h> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class INodeGraphDefDebug; class INodeGraphDefDebug_abi; //! Interface containing debugging methods for @ref omni::graph::exec::unstable::INodeGraphDef. //! //! Implementation of this interface is optional. class INodeGraphDefDebug_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeGraphDefDebug")> { protected: //! Returns the current execution count. A value of 0 means the graph is not executing. virtual uint64_t getExecutionCount_abi() noexcept = 0; //! Increments the execution count. virtual void incrementExecutionCount_abi() noexcept = 0; //! Decrements the execution count. It is undefined behavior for call decrement more than increment. virtual void decrementExecutionCount_abi() noexcept = 0; }; //! Smart pointer managing an instance of @ref INodeGraphDefDebug. using NodeGraphDefDebugPtr = omni::core::ObjectPtr<INodeGraphDefDebug>; class ScopedExecutionDebug; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni // generated API declaration #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/INodeGraphDefDebug.gen.h> //! @copydoc omni::graph::exec::unstable::INodeGraphDefDebug_abi //! //! 
//! @copydoc omni::graph::exec::unstable::INodeGraphDefDebug_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeGraphDefDebug
    : public omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>
{
public:
    //! Returns @c true if the graph's execution count is greater than 0.
    inline bool isExecuting() noexcept
    {
        return (getExecutionCount() > 0);
    }
};

//! Scoped object used mark that a given @ref INodeGraphDef is currently executing.
//!
//! Since @ref INodeGraphDef objects can be shared across nodes, it is safe to create multiple instances of this object
//! with the same @ref INodeGraphDef.
class omni::graph::exec::unstable::ScopedExecutionDebug
{
public:
    //! Marks the given @ref INodeGraphDef as executing.
    //!
    //! If @p nodeGraphDef does not implement @ref INodeGraphDefDebug, this object is a no-op.
    ScopedExecutionDebug(omni::core::ObjectParam<IBase> nodeGraphDef)
        : m_nodeGraphDef(omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef))
    {
        if (m_nodeGraphDef)
        {
            m_nodeGraphDef->incrementExecutionCount();
        }
    }

    //! Decrements the given @ref INodeGraphDef's execution tracker.
    ~ScopedExecutionDebug()
    {
        if (m_nodeGraphDef)
        {
            m_nodeGraphDef->decrementExecutionCount();
        }
    }

private:
    // Non-owning pointer; assumes the graph definition outlives this scope object.
    // NOTE(review): cast<> here does not appear to acquire — confirm the borrow is intended.
    INodeGraphDefDebug* m_nodeGraphDef;
};

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeGraphDefDebug.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/GraphUtils.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file GraphUtils.h //! //! @brief Defines utilities for graph visualization. #pragma once #include <carb/Format.h> #include <omni/graph/exec/unstable/IGraph.h> #include <omni/graph/exec/unstable/Traversal.h> #include <ostream> #include <string> #include <unordered_map> #include <vector> namespace omni { namespace graph { namespace exec { namespace unstable { //! Debugging utility to write out the graph topology in a graphviz format //! //! @param inGraph Graph to dump //! @param out Output stream to receive the produced graphviz text output inline void writeFlattenedAsGraphviz(omni::core::ObjectParam<IGraph> inGraph, std::ostream& out); namespace detail { #ifndef DOXYGEN_BUILD constexpr const char* colorScheme() { return "paired10"; } constexpr unsigned colorSchemeSize() { return 10; } struct GraphState { std::unordered_map<size_t, unsigned> colorMapping; unsigned nextColorIndex{ 0 }; unsigned getColor(size_t hash) { /*static const std::vector<const char*> gColors = { "black","aqua","aquamarine","bisque", "blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue", "crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkgrey","darkkhaki", "darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue", "darkslategray","darkslategrey","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dimgrey","dodgerblue", 
"firebrick","floralwhite","forestgreen","fuchsia","gainsboro","gold","goldenrod","gray","grey","green","greenyellow", "hotpink","indianred","indigo","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral", "lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightgrey","lightpink","lightsalmon","lightseagreen", "lightskyblue","lightslategray","lightslategrey","lightsteelblue","lightyellow","limegreen","linen","magenta", "maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue", "mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mistyrose","moccasin","navy","oldlace","olive", "olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip", "peachpuff","peru","pink","plum","powderblue","purple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown", "seagreen","sienna","silver","skyblue","slateblue","slategray","slategrey","springgreen","steelblue","tan","teal", "thistle","tomato","turquoise","violet","wheat","whitesmoke","yellow","yellowgreen" };*/ auto foundIt = colorMapping.find(hash); if (foundIt == colorMapping.end()) { auto& color = colorMapping[hash]; color = nextColorIndex; nextColorIndex = (nextColorIndex + 1) % colorSchemeSize(); return color; } else { return foundIt->second; } } }; constexpr const char* singleIndent() { return " "; } inline std::string makeQuoted(const std::string& s) { return "\"" + s + "\""; } inline std::string makeId(const std::string& path, INode* node) { return path + "/" + node->getName().toString(); } inline std::string makeClusterId(const std::string& id) { return "Cluster//" + id; } inline void writeNodeProperties(INode* node, std::ostream& out) { if (node->isRoot()) out << "[" << "label=\"\" " << "shape=point" << "]"; else out << "[" << "label=" << makeQuoted(node->getName().toString()) << "]"; } inline void writeNode(const std::string& indent, const 
std::string& path, INode* node, std::ostream& out) { out << indent << makeQuoted(makeId(path, node)) << " "; writeNodeProperties(node, out); out << ";" << std::endl; } inline void writeConnection(const std::string& indent, const std::string& path, INode* nodeA, INode* nodeB, std::ostream& out) { auto graphA = nodeA->getNodeGraphDef(); auto graphB = nodeB->getNodeGraphDef(); std::string pathA = makeId(path, nodeA); std::string pathB = makeId(path, nodeB); std::string nodeAId = graphA ? makeId(pathA, graphA->getRoot()) : pathA; std::string nodeBId = graphB ? makeId(pathB, graphB->getRoot()) : pathB; out << indent; out << makeQuoted(nodeAId); out << " -> "; out << makeQuoted(nodeBId); if (graphA || graphB) { out << "["; if (graphA) out << "ltail=" << makeQuoted(makeClusterId(pathA)); if (graphB) out << "lhead=" << makeQuoted(makeClusterId(pathB)); out << "]"; } out << ";" << std::endl; } inline void writeSubgraphProperties( const std::string& indent, INode* node, INodeGraphDef* graph, GraphState& state, std::ostream& out) { const auto& nodeGraphDefName = graph->getName(); std::string nodeGraphDefNameLabel = carb::fmt::format("{}({})", nodeGraphDefName.getString().c_str(), nodeGraphDefName.getHash()); // this is nested NodeGraphDef if (node) { const auto& nodeName = node->getName(); std::string nodeNameLabel = carb::fmt::format("{}({})", nodeName.getString().c_str(), node->getIndexInTopology()); out << indent << "label = " << makeQuoted(nodeNameLabel + " | " + nodeGraphDefNameLabel) << std::endl; } // this is top level NodeGraphDef else { out << indent << "label = " << makeQuoted("EXECUTION GRAPH | " + nodeGraphDefNameLabel) << std::endl; } auto color = state.getColor(graph->getName().getHash()); out << indent << "color = " << color << std::endl; out << indent << "node [color = " << color << "]" << std::endl; out << indent << "edge [color = " << color << "]" << std::endl; } inline void writeGraph(const std::string& indent, const std::string& path, INode* node, 
INodeGraphDef* graph, GraphState& state, std::ostream& out) { out << indent << "subgraph " << makeQuoted(makeClusterId(path)) << " {" << std::endl; std::string thisIndent = indent + singleIndent(); writeSubgraphProperties(thisIndent, node, graph, state, out); // for readability, we first write nodes... writeNode(thisIndent, path, graph->getRoot(), out); traversal_dfs<VisitFirst>(graph->getRoot(), [&out, &thisIndent, &path, &state](auto info, INode* prev, INode* curr) { auto nodeGraph = curr->getNodeGraphDef(); if (nodeGraph) writeGraph(thisIndent, makeId(path, curr), curr, nodeGraph, state, out); else writeNode(thisIndent, path, curr, out); info.continueVisit(curr); }); // ... and then we write connections traversal_dfs<VisitAll>(graph->getRoot(), [&out, &thisIndent, &path](auto info, INode* prev, INode* curr) { writeConnection(thisIndent, path, prev, curr, out); if (info.isFirstVisit()) // visit all edges, continue traversal on the first one info.continueVisit(curr); }); out << indent << "}" << std::endl; } inline void writeGraphProperties(const std::string& indent, std::ostream& out) { out << indent << "compound=true" << std::endl; out << indent << "colorscheme=" << colorScheme() << std::endl; out << indent << "node [shape=circle style=filled fontcolor=white color=black colorscheme=" << colorScheme() << "]" << std::endl; out << indent << "edge [colorscheme=" << colorScheme() << "]" << std::endl; out << indent << "rankdir=LR" << std::endl; out << indent << "style=rounded" << std::endl; } #endif // DOXYGEN_BUILD } // namespace detail inline void writeFlattenedAsGraphviz(omni::core::ObjectParam<IGraph> inGraph, std::ostream& out) { using namespace detail; out << "digraph ExecutionGraph {" << std::endl; { writeGraphProperties(singleIndent(), out); GraphState state; writeGraph(singleIndent(), "", nullptr, inGraph->getNodeGraphDef(), state, out); } out << "}" << std::endl; } } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IPassTypeRegistry.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
// Do not hand-edit; changes will be overwritten by the next omni.bind run.
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! @ref omni::graph::exec::unstable::IPassFactory registry for a particular @ref omni::graph::exec::unstable::PassType.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>
    : public omni::graph::exec::unstable::IPassTypeRegistry_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassTypeRegistry")

    //! Returns the number of registered passes.
    uint64_t getPassCount() noexcept;

    //! Returns the pass at the given index.
    //!
    //! If the index is greater than the count, an error is returned.
    //!
    //! The returned @ref omni::graph::exec::unstable::PassTypeRegistryEntry is valid as long as this pass type registry
    //! is not mutated (e.g. a pass is added or removed from the registry).
    void getPassAt(uint64_t index, omni::graph::exec::unstable::PassTypeRegistryEntry* outEntry);
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>::getPassCount() noexcept
{
    return getPassCount_abi();
}

inline void omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>::getPassAt(
    uint64_t index, omni::graph::exec::unstable::PassTypeRegistryEntry* outEntry)
{
    OMNI_THROW_IF_ARG_NULL(outEntry);
    OMNI_THROW_IF_FAILED(getPassAt_abi(index, outEntry));
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL

static_assert(std::is_standard_layout<omni::graph::exec::unstable::PassTypeRegistryEntry>::value,
              "omni::graph::exec::unstable::PassTypeRegistryEntry must be standard layout to be used in ONI ABI");
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilder.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IGraphBuilder.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilder.
#pragma once

#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/NodePartition.h>
#include <omni/graph/exec/unstable/Span.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IGraph;
class IGraphBuilder_abi;
class IGraphBuilder;
class IGraphBuilderContext;
class IGraphBuilderNode;
class INode;
class IDef;
class INodeDef;
class INodeGraphDef;
class ITopology;

//! Graph builder is the only class that has the ability to modify topology of a graph.
//!
//! Topological edits of the graph are only allowed during graph transformation and should never
//! be performed during execution of the graph. Construction of the builder will automatically drop
//! all the connections between nodes.
//!
//! Methods on this class mutating a graph topology are not thread-safe (unless documented otherwise)
class IGraphBuilder_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IGraphBuilder")>
{
protected:
    //! Return owner of all graphs this builder touches
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual OMNI_ATTR("no_acquire") IGraph* getGraph_abi() noexcept = 0;

    //! Returns the topology this builder can modify.
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;

    //! Returns the context in which this builder works.
    //!
    //! The returned @ref omni::graph::exec::unstable::IGraphBuilderContext will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual OMNI_ATTR("no_acquire") IGraphBuilderContext* getContext_abi() noexcept = 0;

    //! Returns @ref omni::graph::exec::unstable::INodeGraphDef this builder can modify.
    //!
    //! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
    //! @ref omni::core::IObject::acquire() called before being returned.
    virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0;

    //! Connect two given nodes.
    //!
    //! It is an error if the two nodes are not in the same topology.
    //!
    //! Neither given node should be @c nullptr.
    //!
    //! Neither @ref omni::graph::exec::unstable::INode has @ref omni::core::IObject::acquire() called
    //! during the connection process.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result connect_abi(
        OMNI_ATTR("not_null, throw_if_null") INode* upstreamNode,
        OMNI_ATTR("not_null, throw_if_null") INode* downstreamNode) noexcept = 0;

    //! Disconnect two given nodes.
    //!
    //! It is an error if the two nodes are not in the same topology.
    //!
    //! Neither given node should be @c nullptr.
    //!
    //! Neither @ref omni::graph::exec::unstable::INode has @ref omni::core::IObject::acquire() called
    //! during the disconnection process.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result disconnect_abi(
        OMNI_ATTR("not_null, throw_if_null") INode* upstreamNode,
        OMNI_ATTR("not_null, throw_if_null") INode* downstreamNode) noexcept = 0;

    //! Remove a node from topology.
    //!
    //! The given node must not be @c nullptr.
    //!
    //! May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result remove_abi(
        OMNI_ATTR("not_null, throw_if_null") INode* node) noexcept = 0;

    //! Sets the definition for given node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given definition pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef().
    //!
    //! This method is NOT thread safe.
    virtual void setNodeDef_abi(OMNI_ATTR("not_null") INode* node, INodeDef* nodeDef) noexcept = 0;

    //! Sets the definition for given node.
    //!
    //! If a definition is already set, it will be replaced by the given definition.
    //!
    //! The given definition may be @c nullptr.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given definition pointer.
    //!
    //! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef().
    //!
    //! This method is NOT thread safe.
    virtual void setNodeGraphDef_abi(OMNI_ATTR("not_null") INode* node, INodeGraphDef* nodeGraphDef) noexcept = 0;

    //! Unsets given node's definition.
    //!
    //! If the definition is already @c nullptr, this method does nothing.
    //!
    //! This method is NOT thread safe.
    virtual void clearDef_abi(OMNI_ATTR("not_null") INode* node) noexcept = 0;

    //! Replace well formed cluster of nodes with a single node and the given definition.
    //!
    //! All nodes must exist in the same and current topology, otherwise the entire operation is aborted.
    //!
    //! @ref omni::core::IObject::acquire() is called on the given definition pointer.
    //!
    //! This method is NOT thread safe.
    virtual void replacePartition_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const NodePartition* partition,
                                      OMNI_ATTR("not_null, throw_if_null") IDef* definition) noexcept = 0;

    //! Create a new node in current node graph def.
    //!
    //! The given node name must not be @c nullptr.
    //!
    //! The given node def can be @c nullptr.
    //!
    //! Node creation can return @c nullptr when current node graph def doesn't allow node construction outside
    //! of the pass that created it.
    //!
    //! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
    virtual INode* createNode_abi(OMNI_ATTR("in, not_null, throw_if_null, c_str") const char* name,
                                  IDef* def) noexcept = 0;

    //! Access created nodes by this builder.
    //!
    //! Span is no longer valid when topology of the graph changes. You need to query it again.
    //!
    //! In case a node once created gets removed by another pass, returned list will continue to have it.
    //! It is safe to do, because we do not delete underlying nodes until the next graph population.
    //! Checking if node is valid in current topology allows to filter out these cases.
    //!
    //! The pointers in the span are non owning, i.e. @ref omni::graph::exec::unstable::INode will not have
    //! @ref omni::core::IObject::acquire() called on it.
    virtual Span<INode* const> getCreatedNodes_abi() noexcept = 0;
};

//! Smart pointer managing an instance of @ref IGraphBuilder.
using GraphBuilderPtr = omni::core::ObjectPtr<IGraphBuilder>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraphBuilder.gen.h>

//! @copydoc omni::graph::exec::unstable::IGraphBuilder_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraphBuilder
    : public omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>
{
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilderNode.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraphBuilder.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IPassPipeline.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file IPassPipeline.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassPipeline.
#pragma once

#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// forward declarations needed by interface declaration
class IGraphBuilderContext;
class IPassPipeline;
class IPassPipeline_abi;
class INodeGraphDef;

//! Runs registered passes.
//!
//! The role of pass pipeline is to populate and prepare the execution graph. The base implementation runs passes based
//! on the type and registration order. Most applications will define their own pass pipeline to control how the
//! execution graph is generated.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPassPipeline_abi
    : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassPipeline")>
{
protected:
    //! Test if pipeline needs to rebuild (mostly for its acceleration structures).
    virtual bool needsConstruction_abi() noexcept = 0;

    //! Build the pipeline (mostly for its acceleration structures).
    //!
    //! May throw (the returned @ref omni::core::Result is converted to an exception by the generated wrapper).
    virtual OMNI_ATTR("throw_result") omni::core::Result construct_abi() noexcept = 0;

    //! Test if pipeline needs to run (after topology changes in the graph).
    virtual bool needsExecute_abi(Stamp globalTopology) noexcept = 0;

    //! Execute the graph transformations pipeline.
    //!
    //! Neither given pointer may be @c nullptr. May throw.
    virtual OMNI_ATTR("throw_result") omni::core::Result execute_abi(
        OMNI_ATTR("not_null, throw_if_null") IGraphBuilderContext* builderContext,
        OMNI_ATTR("not_null, throw_if_null") INodeGraphDef* nodeGraphDef) noexcept = 0;
};

//! Smart pointer managing an instance of @ref IPassPipeline.
using PassPipelinePtr = omni::core::ObjectPtr<IPassPipeline>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPassPipeline.gen.h>

//! @copydoc omni::graph::exec::unstable::IPassPipeline_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPassPipeline
    : public omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>
{
};

// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/IGraphBuilderContext.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>

// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPassPipeline.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/Executor.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file Executor.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Executor.
#pragma once

#include <carb/cpp/TypeTraits.h>

#include <omni/graph/exec/unstable/ExecutionPath.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/IExecutor.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/ITopology.h>
#include <omni/graph/exec/unstable/ScheduleFunction.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>

#include <atomic>
#include <memory>
#include <queue>
#include <type_traits>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! Basic scheduler which executes the task on calling thread and accumulates the result
struct SerialScheduler
{
    //! Constructor
    //!
    //! @note The execution context is accepted for interface compatibility with other schedulers but is unused here.
    SerialScheduler(IExecutionContext* context)
    {
    }

    //! Serial scheduler will emplace the task on serial queue and dispatch one by one after entering process loop
    //!
    //! Always returns @ref Status::eSuccess; the real task status is accumulated when the queue is drained in
    //! @ref getStatus().
    template <typename Fn>
    Status schedule(Fn&& task, SchedulingInfo)
    {
        m_tasks.emplace(
            [task = captureScheduleFunction(task), this]() mutable
            {
                Status stat = invokeScheduleFunction(task);
                this->m_tasksStatus |= stat;
            });
        return Status::eSuccess;
    }

    //! Enter processing of tasks and return accumulated status
    //!
    //! Tasks may enqueue further tasks while running; the loop re-checks the queue each iteration, so such
    //! follow-up work is also processed before the accumulated status is returned.
    Status getStatus()
    {
        while (!m_tasks.empty())
        {
            auto& task = m_tasks.front();
            task();
            m_tasks.pop();
        }
        return m_tasksStatus;
    }

private:
    //! Collecting status from all tasks executed by this instance
    Status m_tasksStatus{ Status::eUnknown };

    //! Collect serial tasks in queue to avoid hitting potential stack size limit with the recursive
    //! pattern we had before.
    std::queue<std::function<void()>> m_tasks;
};

//! Data available for executor on every node when traversing the graph.
//!
//! This data does NOT persist from execution to execution. It is written and read by executor during task generation.
//!
//! @note Can be customized via one of executor template parameters.
struct ExecutionNodeData
{
    std::atomic<std::uint32_t> visitCount{ 0 }; //!< Number of traversal visit to the node.
    std::atomic<bool> hasComputedUpstream{ false }; //!< Propagated value during traversal - has upstream computed.
    std::atomic<bool> hasDeferredUpstream{ false }; //!< Propagated value during traversal - has deferred upstream
                                                    //!< computation.

    //! Copy constructor
    //! @note We need to explicitly add copy constructor since they are not available for atomic operations.
    //!       We don't use them in concurrent execution so we will be fine.
    ExecutionNodeData(const ExecutionNodeData& src)
        : visitCount(src.visitCount.load()),
          hasComputedUpstream(src.hasComputedUpstream.load()),
          hasDeferredUpstream(src.hasDeferredUpstream.load())
    {
    }

    //! Assignment operator
    //! @note We need to explicitly add assignment operator since they are not available for atomic operations.
    //!       We don't use them in concurrent execution so we will be fine.
    ExecutionNodeData& operator=(const ExecutionNodeData& rhs)
    {
        visitCount.store(rhs.visitCount.load());
        hasComputedUpstream.store(rhs.hasComputedUpstream.load());
        hasDeferredUpstream.store(rhs.hasDeferredUpstream.load());
        return *this;
    }

    //! Default constructor
    //! @note Compiler will not implicitly generate default constructor because we defined copy constructor.
    ExecutionNodeData()
    {
    }
};

// ef-docs execution-visit-begin
//! Graph traversal visit strategy.
//!
//! Will generate a new task when all upstream nodes have been executed.
struct ExecutionVisit
{
    //! Called when the traversal wants to visit a node. This method determines what to do with the node (e.g. schedule
    //! it, defer it, etc).
    template <typename ExecutorInfo>
    static Status tryVisit(ExecutorInfo info) noexcept
    {
        auto& nodeData = info.getNodeData();
        if (info.currentTask.getExecutionStatus() == Status::eDeferred)
            nodeData.hasDeferredUpstream = true; // we only set to true...doesn't matter which thread does it first

        // A node is ready once every non-cycle parent has visited it; cycle parents are excluded from the count.
        std::size_t requiredCount = info.nextNode->getParents().size() - info.nextNode->getCycleParentCount();
        if ((requiredCount == 0) || (++nodeData.visitCount == requiredCount))
        {
            if (!nodeData.hasDeferredUpstream)
            {
                // spawning a task within executor doesn't change the upstream path. just reference the same one.
                ExecutionTask newTask(info.getContext(), info.nextNode, info.getUpstreamPath());
                return info.schedule(std::move(newTask));
            }
            else
                return Status::eDeferred;
        }
        return Status::eUnknown;
    }
};
// ef-docs execution-visit-end

//! Graph traversal visit strategy with dirty cache check.
//!
//! This strategy will generate a new task when all upstream nodes have been visited and:
//!
//! - The node's state requests compute (i.e. the node has been marked as dirty)
//!
//! or:
//!
//! - An upstream node computed
//!
//! If neither of the conditions are true, but all parent nodes have been visited, execution continues (via @ref
//! omni::graph::exec::unstable::IExecutor::continueExecute()).
// ef-docs execution-visit-cache-begin
struct ExecutionVisitWithCacheCheck
{
    //! Called when the traversal wants to visit a node. This method determines what to do with the node (e.g. schedule
    //! it, defer it, etc).
    template <typename ExecutorInfo>
    static Status tryVisit(ExecutorInfo info) noexcept
    {
        auto& nodeData = info.getNodeData();
        auto triggeringTaskStatus = info.currentTask.getExecutionStatus();
        if (triggeringTaskStatus == Status::eSuccess)
            nodeData.hasComputedUpstream = true; // we only set to true...doesn't matter which thread does it first
        else if (triggeringTaskStatus == Status::eDeferred)
            nodeData.hasDeferredUpstream = true; // we only set to true...doesn't matter which thread does it first

        // A node is ready once every non-cycle parent has visited it; cycle parents are excluded from the count.
        std::size_t requiredCount = info.nextNode->getParents().size() - info.nextNode->getCycleParentCount();
        if ((requiredCount == 0) || (++nodeData.visitCount == requiredCount))
        {
            if (nodeData.hasDeferredUpstream)
                return Status::eDeferred;
            else
            {
                // spawning a task within executor doesn't change the upstream path. just reference the same one.
                ExecutionTask newTask(info.getContext(), info.nextNode, info.getUpstreamPath());

                if (nodeData.hasComputedUpstream ||
                    info.getContext()->getStateInfo(newTask)->needsCompute(info.getExecutionStamp()))
                    return info.schedule(std::move(newTask));
                else // continue downstream...there may be something dirty. Bypass scheduler to avoid unnecessary
                     // overhead
                    return info.continueExecute(newTask);
            }
        }
        return Status::eUnknown;
    }
};
// ef-docs execution-visit-cache-end

//! Algorithm to determine how task should be scheduled
struct DefaultSchedulingStrategy
{
    //! Returns the SchedulingInfo (e.g. serial, main thread, etc.) for the given task.
    //!
    //! The node's own definition wins over its graph definition; a node with no definition at all bypasses the
    //! scheduler entirely.
    static SchedulingInfo getSchedulingInfo(const ExecutionTask& task)
    {
        INode* node = task.getNode();
        if (node->getNodeDef())
            return node->getNodeDef()->getSchedulingInfo(task);
        else if (node->getNodeGraphDef())
            return node->getNodeGraphDef()->getSchedulingInfo(task);
        else
            return SchedulingInfo::eSchedulerBypass; // bypass the scheduler since there is nothing to compute
    }
};

//! Easily configurable @ref omni::graph::exec::unstable::IExecutor implementation providing necessary tools for most
//! common executor types.
//!
//! The @ref omni::graph::exec::unstable::Executor class traverses parts of the
//! @rstref{execution graph <ef_execution_graph>}, generating tasks for each node *visited*. One of the core concepts of
//! EF is that *each* @ref omni::graph::exec::unstable::INodeGraphDef *specifies the* @ref
//! omni::graph::exec::unstable::IExecutor *that should be used to execute the subgraph it defines*. This allows each
//! @ref omni::graph::exec::unstable::INodeGraphDef to control a host of strategies for how its subgraph is executed.
//! Some of the strategies are as follows:
//!
//! - *If a node should be scheduled*. For example, the executor may decide to prune parts of the graph based on the
//!   result of a previous execution (i.e. conditional execution). An executor may detect part of the graph does not
//!   need to be computed because a previous execution's results are still valid (i.e. caching). An executor may also
//!   employ strategies such as executing a node once all of its parent have completed or executing the node as soon as
//!   any of the parents have executed.
//!
//! - *How nodes are scheduled*. When an executor visits a node, the executor may choose to execute the computational
//!   logic in the node's definition immediately. Alternatively, it can delegate the execution to a *scheduler*. Working
//!   with the scheduler, an executor is able to provide execution strategies such as:
//!
//!   - Defer execution of the node to a later time.
//!
//!   - Execute the node in parallel with other nodes in the graph.
//!
//!   - Ensure the node is the only node executing at the moment (e.g. "isolated" tasks).
//!
//!   - Execute the node on a specified thread (e.g. the thread that started executing the graph).
//!
//! - *Where nodes are scheduled*. An executor can work with a resource scheduler to determine *where* to execute a
//!   node. This includes deciding the best GPU on a multi-GPU system to execute a GPU node. Likewise, executors can
//!   consult data center aware schedulers to schedule nodes on remote machines.
//!
//! - *The amount of work to be scheduled*. When visiting a node, an executor can create any number of tasks to
//!   accomplish the node's computation. These tasks are able to dynamically create additional work/tasks that the
//!   executor is able to track.
//!
//! Executors and schedulers work together to produce, schedule, and execute tasks on behalf of the node. Executors
//! determine which nodes should be visited and generate appropriate work (i.e. tasks). Said differently, executor
//! objects "interpret" the graph based on the behavioral logic encapsulated in the executor. Schedulers collect tasks,
//! possibly concurrently from many executor objects, and map the tasks to hardware resources for execution.
//!
//! This @ref omni::graph::exec::unstable::Executor template contains several parameters that allow the user to control
//! the strategies above.
//!
//! <h1><b>Node Type to be Traversed</b></h1>
//!
//! The @p ExecNode parameter defines the interface used to communicate with nodes. This will usually be @ref
//! omni::graph::exec::unstable::INode or a subclass thereof.
//!
//! <h1><b>Work Generation Strategy</b></h1>
//!
//! @p ExecStrategy defines when/if/what work should be generated when visiting a node. EF provides several
//! implementations of this strategy:
//!
//! - @ref omni::graph::exec::unstable::ExecutionVisit - Generates work after all parent nodes have been executed.
//!
//! - @ref omni::graph::exec::unstable::ExecutionVisitWithCacheCheck - Generates work after all parents have been
//!   visited *and* either a parent has successfully executed or the node has been explicitly marked for execution (i.e.
//!   dirty).
//!
//! Users are able to define their own execution strategies. For example OmniGraph defines custom work generation
//! strategies for its various graph types (e.g. pull, push, etc).
//!
//! <h1><b>Transient Execution Data</b></h1>
//!
//! Executing a graph definition may require transient data to implement the executor's work generation strategy. For
//! example, when executing parents in parallel, transient data is needed to atomically count the number of parents that
//! have executed to avoid a node incorrectly executing multiple times. @p ExecNodeData is a `struct` the user can
//! define to store this transient data.
//!
//! Each node in the graph definition is assigned an @p ExecNodeData. This transient data type is usually tied to @p
//! ExecStrategy but can also be utilized by the other parameters in this template.
//!
//! EF provides the @ref omni::graph::exec::unstable::ExecutionNodeData struct to work with EF's built-in execution
//! strategies.
//!
//! <h1><b>Scheduler</b></h1>
//!
//! The executor's job is to traverse a graph definition, generating appropriate work as nodes are visited. That work is
//! given to a scheduler, whose job it is to dispatch the work. The benefit of a scheduler is that it can have a
//! holistic view of the system, across multiple running executors, and efficiently dispatch the work to proper hardware
//! resources.
//!
//! The @p Scheduler parameter defines the scheduler to be used.
//!
//! EF defines the @ref omni::graph::exec::unstable::SerialScheduler which executes task serially. In practice, more
//! advanced schedulers are available. For example, *omni.kit.exec.core* defines the @c ParallelSpawner scheduler
//! (based on [Intel's Thread Building Blocks](https://github.com/oneapi-src/oneTBB)) which is able to run tasks in
//! parallel.
//!
//! <h1><b>Scheduling Strategy</b></h1>
//!
//! The @p SchedulingStrategy provides a @ref omni::graph::exec::unstable::SchedulingInfo for each generated task. @ref
//! omni::graph::exec::unstable::SchedulingInfo is an enum that outlines scheduling constraints for a task (e.g. must be
//! run serially, must run on the thread that started graph execution, etc).
//!
//! EF's @ref omni::graph::exec::unstable::DefaultSchedulingStrategy calls the definition's @ref
//! omni::graph::exec::unstable::IDef::getSchedulingInfo() to get the definition's preferred strategy. However, users
//! may choose to override the definition's preferred strategy with a custom @c SchedulingStrategy. For example,
//! forcing all definitions to run serially to ease in debugging the execution graph.
//!
//! <h1><b>Virtual Methods</b></h1>
//!
//! In addition to the template parameters, users may choose to override one of @ref
//! omni::graph::exec::unstable::Executor's virtual methods. These methods are:
//!
//! - @ref omni::graph::exec::unstable::IExecutor::execute_abi(): This method begins execution of the node provided in
//!   the constructor. Since this node is usually a root node, this method simply calls @ref
//!   omni::graph::exec::unstable::IExecutor::continueExecute_abi() to execute nodes beyond the root node and calls the
//!   scheduler's @c getStatus() method which is a blocking call that waits for outstanding work to finish.
//!
//! - @ref omni::graph::exec::unstable::IExecutor::continueExecute_abi(): This method is called after each node
//!   executes. Its job is to continue executing the nodes downstream of the executed node. By default, this method
//!   uses the work generation strategy (i.e. @c ExecStrategy) on each of the node's children.
//!
//! <h1><b>Miscellaneous</b></h1>
//!
//! The lifetime of an executor is short. They exist only when executing their owning graph definition. All transient
//! data stored in @p ExecNodeData is valid only during this lifetime.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See @rstref{Creating an Executor <ef_executor_creation>} for a guide on creating a customized executor for your graph
//! definition.
template <typename ExecNode,
          typename ExecStrategy,
          typename ExecNodeData,
          typename Scheduler,
          typename SchedulingStrategy,
          typename ExecutorInterface = IExecutor>
class Executor : public Implements<ExecutorInterface>
{
    using Node = const ExecNode; //!< Node type this executor traverses.
    using NodeData = ExecNodeData; //!< Per-node transient data; valid only for the executor's lifetime.
    using NodeDataArray = std::vector<NodeData>; //!< Flat per-node storage, indexed by node index in topology.
    using ThisExecutor = Executor<ExecNode, ExecStrategy, ExecNodeData, Scheduler, SchedulingStrategy, ExecutorInterface>;
    using ThisExecutorPtr = omni::core::ObjectPtr<ThisExecutor>;

    //! Helper utility to check scheduler dispatch strategy. By default schedulers are not deferred.
    template <typename S, typename Enabled = void>
    struct is_deferred
    {
        static constexpr bool value = false;
    };

    //! Template utility to check scheduler dispatch strategy. Schedulers with scheduleDeferred method are deferred.
    //! Deferred tasks will need to hold a shared pointer to the executor to extend its lifetime past current execution.
    //! This is handled automatically thanks to this helper.
    //!
    //! The partial specialization is selected (via SFINAE) only when the scheduler exposes a
    //! `Status scheduleDeferred(IScheduleFunction*, SchedulingInfo)` overload.
    template <typename S>
    struct is_deferred<
        S,
        std::enable_if_t<std::is_same<Status,
                                      decltype(std::declval<S>().scheduleDeferred(
                                          std::declval<IScheduleFunction*>(), std::declval<SchedulingInfo>()))>::value>>
    {
        static constexpr bool value = true;
    };

public:
    //! Structure passed to the traversal algorithm collecting all necessary data for easy access.
    //!
    //! Info is a lightweight view: it holds raw pointers/references into the owning executor and the
    //! current task, so it must not outlive the traversal step it was created for.
    struct Info
    {
    private:
        Executor* m_executor;

    public:
        const ExecutionTask& currentTask; //!< The node currently being processed.

        INode* nextNode; //!< The node to be visited next.

        //! Constructor.
        Info(Executor* executor, const ExecutionTask& task, INode* next) noexcept
            : m_executor(executor), currentTask(task), nextNode(next)
        {
        }

        //! Returns the @ref Executor defined data for the node.
        NodeData& getNodeData()
        {
            return m_executor->getNodeData(nextNode);
        }

        //! Returns a reference to the owning @ref Executor.
        ThisExecutor* getExecutor()
        {
            return m_executor;
        }

        //! Returns the current context (i.e. @ref IExecutionContext) in which the @ref Executor is executing.
        IExecutionContext* getContext()
        {
            return currentTask.getContext();
        }

        //! Returns the current context's execution stamp/version (i.e. @ref IExecutionContext::getExecutionStamp()).
        Stamp getExecutionStamp()
        {
            return getContext()->getExecutionStamp();
        }

        //! Returns the upstream path of the node that is currently being processed.
        const ExecutionPath& getUpstreamPath() const
        {
            return currentTask.getUpstreamPath();
        }

        //! Schedules the given task.
        Status schedule(ExecutionTask&& newTask)
        {
            return m_executor->scheduleInternal(std::move(newTask));
        }

        //! Returns the given task's SchedulingInfo.
        SchedulingInfo getSchedulingInfo(const ExecutionTask& task) const
        {
            return m_executor->getSchedulingInfo(task);
        }

        //! Returns the @ref Executor's scheduler.
        Scheduler& getScheduler()
        {
            return m_executor->m_scheduler;
        }

        //! Tells the @ref Executor to process the given task/node's children. This allows it to
        //! generate additional work after the given task has executed.
        Status continueExecute(ExecutionTask& currentTask)
        {
            return m_executor->continueExecute_abi(&currentTask);
        }
    };

    //! Scheduling constraint to use when dispatching given task.
    SchedulingInfo getSchedulingInfo(const ExecutionTask& task) const
    {
        return SchedulingStrategy::getSchedulingInfo(task);
    }

    //! Access custom data associated with each node.
    //!
    //! Indexed by @ref INode::getIndexInTopology(); the array was sized from the topology's node
    //! count at construction time.
    NodeData& getNodeData(INode* node)
    {
        return m_nodeData[node->getIndexInTopology()];
    }

    //! Execution path to node instantiating graph def associated with this executor.
    const ExecutionPath& getPath() const
    {
        return m_path;
    }

    //! Execution context.
    IExecutionContext* getContext() const
    {
        return m_task.getContext();
    }

    //! Factory method for this executor
    static ThisExecutorPtr create(omni::core::ObjectParam<ITopology> toExecute, const ExecutionTask& thisTask)
    {
        return omni::core::steal(new ThisExecutor(toExecute.get(), thisTask));
    }

protected:
    //! Default constructor is removed
    Executor() = delete;

    //! Constructor used by factory method.
    //!
    //! @param toExecute   Graph topology used to generate the work.
    //! @param currentTask Task causing this execution. Used to generate execution path.
    //!
    //! Note: when @p currentTask's node is not the topology's root, the node is appended to the
    //! upstream path so that nested graph instances receive a unique execution path.
    explicit Executor(ITopology* toExecute, const ExecutionTask& currentTask) noexcept
        : m_path((currentTask.getNode() != toExecute->getRoot()) ?
                     ExecutionPath(currentTask.getUpstreamPath(), currentTask.getNode()) :
                     currentTask.getUpstreamPath()),
          m_task(currentTask.getContext(), toExecute->getRoot(), m_path),
          m_nodeData(toExecute->getNodeCount()),
          m_scheduler(currentTask.getContext())
    {
    }

    // ef-docs executor-execute-begin
    //! Main execution method. Called once by each node instantiating same graph definition.
    Status execute_abi() noexcept override
    {
        (void)continueExecute_abi(&m_task);

        // give a chance for scheduler to complete the execution of potentially parallel work which should complete
        // within current execution. all background tasks will continue past this point.
        // scheduler is responsible for collecting the execution status for everything that this executor generated
        return m_scheduler.getStatus() | m_schedulerBypass;
    }
    // ef-docs executor-execute-end

    // ef-docs executor-continue-execute-begin
    //! Implementation of the base class method to generate additional work after the given task has executed but
    //! before it has completed.
    Status continueExecute_abi(ExecutionTask* currentTask) noexcept override
    {
        // leaf node: nothing downstream to generate; just report this task's own status
        if (currentTask->getNode()->getChildren().empty())
        {
            return Status::eSuccess | currentTask->getExecutionStatus();
        }

        // let the work-generation strategy decide, per child, whether/how to visit it
        Status ret = Status::eUnknown;
        for (auto child : currentTask->getNode()->getChildren())
        {
            ret |= ExecStrategy::tryVisit(Info(this, *currentTask, child));
        }
        return ret | currentTask->getExecutionStatus();
    }
    // ef-docs executor-continue-execute-end

    //! Implementation of base class schedule method available for work generation outside of traversal loop.
    Status schedule_abi(IScheduleFunction* fn, SchedulingInfo schedInfo) noexcept override
    {
        return scheduleExternal(fn, schedInfo);
    }

    //! Scheduling spawner of a task generated by traversal implementation
    //!
    //! Non-deferred overload (selected when the scheduler has no scheduleDeferred method).
    template <typename S = Scheduler>
    Status scheduleInternal(ExecutionTask&& newTask, typename std::enable_if_t<!is_deferred<S>::value>* = nullptr)
    {
        // ef-docs executor-schedule-internal-begin
        Status ret = Status::eUnknown;
        SchedulingInfo schedInfo = getSchedulingInfo(newTask);
        if (schedInfo != SchedulingInfo::eSchedulerBypass)
        {
            // this task will finish before we exit executor...just capture as reference to avoid unnecessary cost
            ret = m_scheduler.schedule(
                [executor = this, task = std::move(newTask)]() mutable -> Status { return task.execute(executor); },
                schedInfo);
        }
        else // bypass the scheduler...no need for extra scheduling overhead
        {
            // bypass statuses are accumulated separately and folded into the result in execute_abi()
            m_schedulerBypass |= newTask.execute(this);
        }
        return ret;
        // ef-docs executor-schedule-internal-end
    }

    //! Deferred scheduling spawner of a task generated by traversal implementation
    template <typename S = Scheduler>
    Status scheduleInternal(ExecutionTask&& newTask, typename std::enable_if_t<is_deferred<S>::value>* = nullptr)
    {
        // ef-docs executor-schedule-deferred-begin
        SchedulingInfo schedInfo = getSchedulingInfo(newTask);

        // for deferred tasks, we capture executor as a shared_ptr (extra cost, but keeps object alive)
        Status ret = m_scheduler.scheduleDeferred(
            [executor = omni::core::borrow(this), task = std::move(newTask)]() mutable -> Status
            { return task.execute(executor); },
            schedInfo);
        return ret;
        // ef-docs executor-schedule-deferred-end
    }

    //! Scheduling spawner of a task generated by currently running task
    //!
    //! Non-deferred overload: honors the eSchedulerBypass constraint by invoking the function inline.
    template <typename S = Scheduler>
    Status scheduleExternal(IScheduleFunction* fn,
                            SchedulingInfo schedInfo,
                            typename std::enable_if_t<!is_deferred<S>::value>* = nullptr)
    {
        if (schedInfo != SchedulingInfo::eSchedulerBypass)
        {
            return m_scheduler.schedule(fn, schedInfo);
        }
        else // bypass the scheduler...no need for extra scheduling overhead
        {
            return fn->invoke();
        }
    }

    //! Deferred scheduling spawner of a task generated by currently running task
    template <typename S = Scheduler>
    Status scheduleExternal(IScheduleFunction* fn,
                            SchedulingInfo schedInfo,
                            typename std::enable_if_t<is_deferred<S>::value>* = nullptr)
    {
        Status ret = m_scheduler.scheduleDeferred(fn, schedInfo);
        return ret;
    }

    ExecutionPath m_path; //!< Execution path helping discover state associated with current instance of the graph.
    ExecutionTask m_task; //!< Task starting the execution
    NodeDataArray m_nodeData; //!< Storage for per node custom data
    Scheduler m_scheduler; //!< An interface for spawning tasks for dispatch by scheduler and waiting for completion.
    Status m_schedulerBypass{ Status::eUnknown }; //!< Execution status for tasks bypassing scheduler.
};

//! Default executor used by all node graph definitions that don't explicitly pass the executor factory method.
using ExecutorFallback = Executor<INode, ExecutionVisit, ExecutionNodeData, SerialScheduler, DefaultSchedulingStrategy>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/ITopology.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file ITopology.h //! //! @brief Defines @ref omni::graph::exec::unstable::ITopology. #pragma once #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/Stamp.h> #include <omni/graph/exec/unstable/Types.h> #include <type_traits> namespace omni { namespace graph { namespace exec { namespace unstable { class IInvalidationForwarder; class INode; class ITopology_abi; class ITopology; //! Unique owner of the callback. Meaning is up to the caller. Essentially a void* pointer. using InvalidationForwarderId = uint64_t; static_assert(sizeof(uint64_t) >= sizeof(void*), "Target platform's pointer size is larger than expected."); //! The Topology of a graph is stored in this class. //! //! @ref omni::graph::exec::unstable::ITopology is a helper interface used to quickly invalidate the topology, quickly //! determine if the topology has been invalidated, assign each node in the topology a unique index (suitable for access //! in contiguous memory), and provide access to the root node. //! //! Topologies play a large role in graph invalidation. See @rstref{Graph Invalidation <ef_graph_invalidation>} for //! details. //! //! To better understand how this object relates to other objects in the Execution Framework, see //! @rstref{Graph Concepts <ef_graph_concepts>}. //! //! See @ref omni::graph::exec::unstable::Topology for a concrete implementation of this interface. class ITopology_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.ITopology")> { protected: //! 
Returns how many nodes are alive in this topology. Some of the counted nodes may not be connected and //! discoverable from the root node. //! //! @thread_safety This method is thread safe. virtual uint64_t getNodeCount_abi() noexcept = 0; //! Returns the topology's root node that allows reaching all of the valid nodes in the topology. //! //! The returned @ref omni::graph::exec::unstable::INode will *not* have @ref omni::core::IObject::acquire() called //! before being returned. //! //! The returned pointer will remain valid for the lifetime of this object. //! //! @thread_safety This method is thread safe. // ef-docs i_topology_abi_get_root_abi_begin virtual OMNI_ATTR("no_acquire") INode* getRoot_abi() noexcept = 0; // ef-docs i_topology_abi_get_root_abi_end //! Returns the topology stamp. This stamp is updated each time the topology is invalidated. //! //! See omni::graph::exec::unstable::ITopology::invalidate() to invalidate the topology (and thereby update this //! Stamp). //! //! @thread_safety This method is thread safe. virtual Stamp getStamp_abi() noexcept = 0; //! Invalidate topology. All edges of the graph will be dropped (lazily), nodes remain valid and can be used to //! build new topology. //! //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during //! invalidation. //! //! It is not recommended to call this method during graph execution. Rather, defer invalidation until after //! execution. //! //! @thread_safety This method is thread safe. virtual void invalidate_abi() noexcept = 0; //! Returns a unique index for a node in this topology. //! //! Users should not call this method. Only the constructors of implementations of @ref //! omni::graph::exec::unstable::INode should call this method. //! //! Returns an error if an index could not be acquired. //! //! See @ref omni::graph::exec::unstable::ITopology::releaseNodeIndex(). //! //! @thread_safety This method is not thread safe. 
virtual OMNI_ATTR("throw_result") omni::core::Result acquireNodeIndex_abi(OMNI_ATTR("not_null, throw_if_null, out, *return") NodeIndexInTopology* out) noexcept = 0; //! Release unique index of a node in this topology. Shouldn't be used by anything else than a node's destructor. //! //! See @ref omni::graph::exec::unstable::ITopology::acquireNodeIndex(). //! //! @thread_safety This method is not thread safe. virtual void releaseNodeIndex_abi(NodeIndexInTopology index) noexcept = 0; //! Add a callback to forward invalidation to other entities. //! //! At a minimum, the top-level @ref omni::graph::exec::unstable::IGraph will register a invalidation callback with //! all topologies created within a pass pipeline. This allows tracking invalidation and triggering minimal graph //! rebuild. //! //! In the future, override passes can generate new graphs and still track authoring invalidation by registering to //! the original graph topologies invalidation. //! //! The given @ref omni::graph::exec::unstable::IInvalidationForwarder will be stored and have @ref //! omni::core::IObject::acquire() called. //! //! If @p owner has a current forwarder, it will be replaced with the given forwarder. //! //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during //! invalidation. //! //! See @ref omni::graph::exec::unstable::ITopology::removeInvalidationForwarder(). //! //! @thread_safety This method is not thread safe. virtual omni::core::Result addInvalidationForwarder_abi(InvalidationForwarderId owner, OMNI_ATTR("not_null, throw_if_null") IInvalidationForwarder* callback) noexcept = 0; //! Remove invalidation forwarding for a given owner. //! //! If the given owner is not known, this method does nothing. //! //! See @ref omni::graph::exec::unstable::ITopology::addInvalidationForwarder(). //! //! @thread_safety This method is not thread safe. 
virtual void removeInvalidationForwarder_abi(InvalidationForwarderId owner) noexcept = 0; //! Get construction version this topology is synchronized with. //! //! @thread_safety This method is thread safe. virtual SyncStamp getConstructionStamp_abi() noexcept = 0; //! Private method only for IGraphBuilder, used to tag construction version. //! //! @thread_safety Calling this method concurrently is not recommended. virtual void _setConstructionInSync_abi(Stamp toSync) noexcept = 0; }; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/ITopology.gen.h> //! @copydoc omni::graph::exec::unstable::ITopology_abi //! //! @ingroup groupOmniGraphExecInterfaces class omni::graph::exec::unstable::ITopology : public omni::core::Generated<omni::graph::exec::unstable::ITopology_abi> { public: //! Check if the topology is valid. //! //! A topology is considered valid if the topology's root node stamp is in-sync with the topology (this usually //! happens during graph construction). //! //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during //! invalidation. //! //! @thread_safety This method is thread safe. inline bool isValid() noexcept; //! Add a callback allowing to forward invalidation to other graphs. //! //! At a minimum, the top-level @ref omni::graph::exec::unstable::IGraph will register a invalidation callback with //! all topologies created within a pass pipeline. This allows tracking invalidation and triggering minimal graph //! rebuild. //! //! In the future, override passes can generate new graphs and still track authoring invalidation by registering to //! the original graph topologies invalidation. //! //! If @p owner has a current forwarder, it will be replaced with the given forwarder. //! //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during //! 
invalidation. //! //! See @ref omni::graph::exec::unstable::ITopology::removeInvalidationForwarder(). //! //! The supplied function should have the signature of `void(ITopology*)`. //! //! @thread_safety This method is not thread safe. template <typename Fn> inline void addInvalidationForwarder(InvalidationForwarderId owner, Fn&& fn); }; #include <omni/graph/exec/unstable/IInvalidationForwarder.h> #include <omni/graph/exec/unstable/INode.h> inline bool omni::graph::exec::unstable::ITopology::isValid() noexcept { return getRoot()->isValidTopology(); } template <typename Fn> inline void omni::graph::exec::unstable::ITopology::addInvalidationForwarder(InvalidationForwarderId owner, Fn&& fn) { class Forwarder : public Implements<IInvalidationForwarder> { public: Forwarder(Fn&& fn) : m_fn(std::move(fn)) { } protected: void invoke_abi(ITopology* topology) noexcept override { m_fn(topology); } Fn m_fn; }; addInvalidationForwarder_abi(owner, omni::core::steal(new Forwarder(std::forward<Fn>(fn))).get()); } #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/ITopology.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResult.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

// NOTE: Do not hand-edit -- this wrapper is regenerated by omni.bind and any manual changes will be lost.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Class representing a result of asynchronous computation.
//!
//! Create via @ref omni::graph::exec::unstable::IBackgroundTask::getBackgroundResult().
//!
//! Call @ref omni::graph::exec::unstable::IBackgroundResult::isReady() or @ref
//! omni::graph::exec::unstable::IBackgroundResult::waitFor() to make sure the result is ready. Once the result is
//! ready, call @ref omni::graph::exec::unstable::IBackgroundResult::write() to make the result visible.
//!
//! Operates much like `std::future`.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>
    : public omni::graph::exec::unstable::IBackgroundResult_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundResult")

    //! Check if background computation has a result available for consumption.
    //!
    //! @return @c true when it is safe to call omni::graph::exec::unstable::IBackgroundResult::write(), @c false
    //! otherwise.
    //!
    //! Once @ref omni::graph::exec::unstable::IBackgroundResult::write() has been called, this method will return an
    //! error.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    bool isReady();

    //! Request background processing cancellation
    //!
    //! @param blocking If @c true, this call won't exit until background processing is completed.
    //!
    //! This method is not thread safe.
    //!
    //! May throw.
    void cancel(bool blocking);

    //! Write the result.
    //!
    //! This method is not thread safe.
    //!
    //! An error is returned if this method is called more than once.
    //!
    //! May throw.
    omni::graph::exec::unstable::Status write(omni::graph::exec::unstable::ExecutionTask& info);

    //! Waits for the specified time for the result to become ready.
    //!
    //! If the result becomes ready in the specified time (or is already ready) @ref
    //! omni::graph::exec::unstable::BackgroundResultStatus::eReady is returned. Otherwise, @ref
    //! omni::graph::exec::unstable::BackgroundResultStatus::eTimeout is returned.
    //!
    //! This method is not thread safe.
    //!
    //! Returns an error if the result has already been consumed.
    //!
    //! May throw.
    omni::graph::exec::unstable::BackgroundResultStatus waitFor(uint64_t nanoseconds);
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// The inline wrappers below call the raw *_abi methods and translate failure Results into C++
// exceptions via OMNI_THROW_IF_FAILED (see omni/core/ResultError.h).

inline bool omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>::isReady()
{
    bool ready;
    OMNI_THROW_IF_FAILED(isReady_abi(&ready));
    return ready;
}

inline void omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>::cancel(bool blocking)
{
    OMNI_THROW_IF_FAILED(cancel_abi(blocking));
}

inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>::write(
    omni::graph::exec::unstable::ExecutionTask& info)
{
    omni::graph::exec::unstable::Status out;
    OMNI_THROW_IF_FAILED(write_abi(&info, &out));
    return out;
}

inline omni::graph::exec::unstable::BackgroundResultStatus omni::core::Generated<
    omni::graph::exec::unstable::IBackgroundResult_abi>::waitFor(uint64_t nanoseconds)
{
    omni::graph::exec::unstable::BackgroundResultStatus out;
    OMNI_THROW_IF_FAILED(waitFor_abi(nanoseconds, &out));
    return out;
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IApplyOnEachFunction.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Interface wrapping a function (possibly with storage) to apply on all instantiations of a given definition. template <> class omni::core::Generated<omni::graph::exec::unstable::IApplyOnEachFunction_abi> : public omni::graph::exec::unstable::IApplyOnEachFunction_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IApplyOnEachFunction") //! Invokes the wrapped function. void invoke(const omni::graph::exec::unstable::ExecutionPath& path) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline void omni::core::Generated<omni::graph::exec::unstable::IApplyOnEachFunction_abi>::invoke( const omni::graph::exec::unstable::ExecutionPath& path) noexcept { invoke_abi(&path); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutor.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IExecutor.h //! //! @brief Defines @ref omni::graph::exec::unstable::IExecutor. #pragma once #include <omni/graph/exec/unstable/ExecutionTask.h> #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/SchedulingInfo.h> #include <omni/graph/exec/unstable/Status.h> namespace omni { namespace graph { namespace exec { namespace unstable { class IExecutor_abi; class IExecutor; class IScheduleFunction; //! Executes the node in a graph definition. //! //! The purpose of an executor is to generate work for the nodes in an graph definition. @ref //! omni::graph::exec::unstable::IExecutor is a minimal interface that defines enough methods to accomplish just that. //! //! However, @ref omni::graph::exec::unstable::IExecutor's minimal nature is not what most users want when customizing //! execution for their graph definitions. Rather, they want something useful. @ref //! omni::graph::exec::unstable::Executor is an useful implementation of @ref omni::graph::exec::unstable::IExecutor //! designed for graph definition authors to extend. See //! @ref omni::graph::exec::unstable::Executor's documentation to better understand the purpose, duties, and //! capabilities of an executor. //! //! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during //! execution. //! //! See @rstref{Creating an Executor <ef_executor_creation>} for a guide on creating a customize executor for your graph //! defintion. 
class IExecutor_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutor")> { protected: //! Main execute method. Returning status of the execution. virtual Status execute_abi() noexcept = 0; //! Request for scheduling of additional work generated during execution of a task. //! //! @param fn Function to call once the work is dispatched. //! @param schedInfo Scheduling constraints to use when dispatching this work. virtual OMNI_ATTR("no_api") Status schedule_abi(IScheduleFunction* fn, SchedulingInfo schedInfo) noexcept = 0; // TODO does a memory allocation, return result? //! Request for scheduling of additional work after the given task has executed but before it has completed. //! //! @param task The current task virtual Status continueExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* task) noexcept = 0; }; //! Smart pointer managing an instance of @ref IExecutor. using ExectorPtr = omni::core::ObjectPtr<IExecutor>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IExecutor.gen.h> //! @copydoc omni::graph::exec::unstable::IExecutor_abi //! //! @ingroup groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IExecutor : public omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi> { public: //! Schedules the supplied function. //! //! The given function must have the signature of `Status(void)`. 
template <typename Fn> inline Status schedule(Fn&& fn, SchedulingInfo schedInfo); // may throw }; #include <omni/graph/exec/unstable/IScheduleFunction.h> #ifndef DOXYGEN_BUILD // templates confuse doxygen :( template <typename Fn> inline omni::graph::exec::unstable::Status omni::graph::exec::unstable::IExecutor::schedule(Fn&& fn, SchedulingInfo schedInfo) { class ScheduleFunction : public Implements<IScheduleFunction> { public: static omni::core::ObjectPtr<ScheduleFunction> create(Fn&& fn) { return omni::core::steal(new ScheduleFunction(std::forward<Fn>(fn))); } protected: Status invoke_abi() noexcept override { return m_fn(); } ScheduleFunction(Fn&& fn) : m_fn(std::move(fn)) { } private: Fn m_fn; }; return schedule_abi(ScheduleFunction::create(std::forward<Fn>(fn)).get(), schedInfo); } #endif // DOXYGEN_BUILD #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IExecutor.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/PartitioningUtils.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file PartitioningUtils.h //! //! @brief Defines classes and algorithms to use with @ref omni::graph::exec::unstable::IPartitionPass. #pragma once #include <omni/graph/exec/unstable/INode.h> #include <omni/graph/exec/unstable/ITopology.h> #include <omni/graph/exec/unstable/Span.h> #include <omni/graph/exec/unstable/Traversal.h> #include <cstdint> #include <type_traits> #include <vector> namespace omni { namespace graph { namespace exec { namespace unstable { //! Specialized container to enable partitioning algorithm via disjoint set like data structure //! //! Modifications to typically known algorithm (https://en.wikipedia.org/wiki/Disjoint-set_data_structure) //! - index set "0" is a special one and matching the root of the graph, i.e. root node will always have index 0 //! - only "selected" nodes are inserted into sets and only these nodes are merged to form groups of nodes based //! on partitioning algorithm //! //! This object is NOT ABI-safe. class PartitionSet { //! Deleted default constructor PartitionSet() = delete; //! Reserved index for root nodes. We leverage that to distinguish between selected and not selected nodes. enum : uint64_t { kReservedRootIndex = 0 }; public: //! Construct a set for a static topology of a given @p topologyNodeCount nodes. PartitionSet(std::size_t topologyNodeCount) noexcept : m_parent(topologyNodeCount), m_rank(topologyNodeCount) { } //! 
Initialize selected nodes template <typename V> void makeSelectedSets(V& selected) { for (INode* node : selected) { auto index = node->getIndexInTopology(); m_parent[index] = index; } } //! Return true is give node is selected for partitioning and has a set allocated. bool isMarked(INode* node) const { auto index = node->getIndexInTopology(); return m_parent[index] != kReservedRootIndex; } //! Find the set this node belongs to. Forwards the call to underlying implementation. uint64_t find(INode* node) { return find(node->getIndexInTopology()); } //! Merge two sets. Forwards the call to underlying implementation. void merge(INode* nodeA, INode* nodeB) { merge(nodeA->getIndexInTopology(), nodeB->getIndexInTopology()); } //! Find set that this index belongs to. //! //! Search has a side effect, i.e. it flattens the set links directly to the last link in the chain. //! This allows for faster search next time same find is performed. uint64_t find(uint64_t index) { if (m_parent[index] != index) { m_parent[index] = find(m_parent[index]); } return m_parent[index]; } //! Merge two sets. //! //! Implementation uses rank to prioritize merging into sets that received more merges. //! This improves the search time. void merge(uint64_t a, uint64_t b) { auto aSet = find(a); auto bSet = find(b); if (aSet == bSet) return; // Shouldn't happen. Make sure we only merge marked nodes OMNI_GRAPH_EXEC_ASSERT(a != kReservedRootIndex && b != kReservedRootIndex); if (m_rank[aSet] < m_rank[bSet]) { m_parent[aSet] = bSet; } else if (m_rank[aSet] > m_rank[bSet]) { m_parent[bSet] = aSet; } else { m_parent[bSet] = aSet; m_rank[aSet] += 1; } } std::vector<uint64_t> m_parent; //!< We have as many elements as nodes in the topology. Each element represents //!< a unique set (if it points to its own index), or a link to another set (if //!< merged) std::vector<uint64_t> m_rank; //!< Rank per element used to prevent growing the tree to high and optimizes searches }; //! 
Algorithm to group selected nodes into valid partitions based on node ordering. //! //! Partition is only valid when there is no path that leaves and comes back to the same group. //! Such partition would introduce cycles in the graph. //! //! Quick algorithm uses a single traversal over the entire graph to determine unique partition index for group of //! nodes. During traversal, the partition index is based on the node the traversal comes from and: //! - the partition index is incremented by 1 if edge crosses selected and unselected nodes //! - the partition index is assigned to the currently visited node only if it is higher than currently set //! //! The traversal algorithm visits all the edges in the graph and does continuation on the last visit to the node. template <typename VerifyAndCreateFn> void quickPartitioning(ITopology* topology, Span<INode*> selectedNodes, VerifyAndCreateFn&& verifyAndCommitPartitionFn) { if (selectedNodes.size() == 0) return; std::vector<INode*> nodes(selectedNodes.begin(), selectedNodes.end()); PartitionSet partitions(topology->getNodeCount()); partitions.makeSelectedSets(nodes); struct QuickPartitioningNodeData { uint64_t partition{ 0 }; bool marked{ false }; }; traversal_dfs<VisitAll, QuickPartitioningNodeData>( topology->getRoot(), [&partitions](auto info, INode* prev, INode* curr) { auto& currUserData = info.userData(curr); if (info.isFirstVisit()) { currUserData.marked = partitions.isMarked(curr); } auto& prevUserData = info.userData(prev); uint64_t edgePartition = prevUserData.partition; if (currUserData.marked != prevUserData.marked) { edgePartition += 1; } if (currUserData.partition < edgePartition) { currUserData.partition = edgePartition; } if (info.isLastVisit()) { if (currUserData.marked) { for (INode* parent : curr->getParents()) { auto& parentUserData = info.userData(parent); if (parentUserData.partition == currUserData.partition) { partitions.merge(parent, curr); } } } info.continueVisit(curr); } }); // sort vector 
by partitions std::sort(nodes.begin(), nodes.end(), [&partitions](INode* a, INode* b) { return partitions.find(a) < partitions.find(b); }); // verify and create partitions in the graph auto partitionSetBegin = nodes.begin(); auto partitionSet = partitions.find(*partitionSetBegin); auto partitionSetNext = partitionSetBegin; do { partitionSetNext++; if (partitionSetNext == nodes.end()) { verifyAndCommitPartitionFn(&(*partitionSetBegin), partitionSetNext - partitionSetBegin); } else if (partitions.find(*partitionSetNext) != partitionSet) { verifyAndCommitPartitionFn(&(*partitionSetBegin), partitionSetNext - partitionSetBegin); // switch to next partition partitionSetBegin = partitionSetNext; partitionSet = partitions.find(*partitionSetNext); } } while (partitionSetNext != nodes.end()); } } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/NodeDefLambda.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file NodeDefLambda.h //! //! @brief Defines @ref omni::graph::exec::unstable::NodeDefLambda. #pragma once #include <omni/graph/exec/unstable/NodeDef.h> #include <functional> namespace omni { namespace graph { namespace exec { namespace unstable { //! Wrapper of a lambda function into opaque node definition //! //! This class is great for quick prototyping and nodes that won't be statically analyzed. It is recommended to convert //! such nodes into real classes to avoid paying the price of std::function call from already virtual method and to //! provide type information. // ef-docs node-def-lambda-begin class NodeDefLambda : public NodeDef { public: //! Templated constructor for wrapper class //! //! @tparam Fn Function type taking execution task and returning status. Compiler should detect it for you. //! @param definitionName Definition name is considered as a token that transformation passes can register against //! @param fn Execute function body. Signature should be `Status(ExecutionTask&)`. //! @param schedInfo Fixed at runtime scheduling constraint //! //! May throw. template <typename Fn> static omni::core::ObjectPtr<NodeDefLambda> create(const char* definitionName, Fn&& fn, SchedulingInfo schedInfo) { OMNI_THROW_IF_ARG_NULL(definitionName); return omni::core::steal(new NodeDefLambda(definitionName, std::forward<Fn>(fn), schedInfo)); } protected: //! Templated and protected constructor for wrapper class. //! //! Use factory method to construct objects of this class. 
template <typename Fn> NodeDefLambda(const char* definitionName, Fn&& fn, SchedulingInfo schedInfo) : NodeDef(definitionName), m_fn(std::move(fn)), m_schedulingInfo(schedInfo) { } //! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeDefLambda //! //! Calling captured lambda Status execute_abi(ExecutionTask* info) noexcept override { return m_fn(*info); } //! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeDefLambda //! //! Returns scheduling information provided to the constructor SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override { return m_schedulingInfo; } private: std::function<Status(ExecutionTask&)> m_fn; //!< Execute function body SchedulingInfo m_schedulingInfo; //!< Scheduling constraint }; // ef-docs node-def-lambda-end } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionContext.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
//!
//! NOTE(review): do not hand-edit this file beyond comments — it is regenerated by the build system.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Stores and provides access to the execution state of the graph.
//!
//! The @rstref{execution graph <ef_execution_graph>} is only a description of what needs to be executed. The actual
//! graph state is stored separately in an instance of this object.
//!
//! The execution context allows computing the same graph description within multiple contexts. It also enables the
//! ability to perform this computation concurrently. Some example use cases of this class:
//!
//! - Computing the state of a graph at a time different than the current time (e.g. asynchronous caching, fake
//!   dynamics)
//!
//! - Computing the state of a graph with a inputs different than the current input state (e.g. double solve)
//!
//! All execution begins with a call to one of the execute methods on this interface. @ref
//! omni::graph::exec::unstable::IExecutionContext::execute() is used to execute the entire execution graph while @ref
//! omni::graph::exec::unstable::IExecutionContext::executeNode() can be used to execute only a part of the graph.
//!
//! Part of this interface defines a key/value store. The *key* in this store is an @ref
//! omni::graph::exec::unstable::ExecutionPath. The *value* is an implementation of @ref
//! omni::graph::exec::unstable::IExecutionStateInfo, which in addition to storing computation state, can also store
//! user defined data. The computation state and user data can be access with one of the `getStateInfo` /
//! `setStateInfo` methods though if you wish to access the user data using one of the `getNodeData` / `setNodeData` is
//! slightly faster.
//!
//! Another feature of @ref omni::graph::exec::unstable::IExecutionContext is the ability to quickly search for nodes
//! using a particular definition and apply a function on them. Definitions can be searched by name or by pointer (see
//! @ref omni::graph::exec::unstable::IExecutionContext::applyOnEach()). These methods are used extensively during
//! @rstref{Graph Construction <ef_pass_concepts>}.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Since multiple threads can concurrently traverse a graph, implementors of methods within this class should expect
//! that multiple threads will be accessing this object in parallel.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>
    : public omni::graph::exec::unstable::IExecutionContext_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionContext")

    //! Current execution version. Incremented with each execution of the context.
    //!
    //! @thread_safety See thread safety information in interface description.
    omni::graph::exec::unstable::Stamp getExecutionStamp() noexcept;

    //! Returns `true` if context is currently executing
    //!
    //! See thread safety information in interface description.
    bool inExecute() noexcept;

    //! Returns `true` if the current thread is the one who started this context's execution.
    //!
    //! Note, do not assume that the thread that started the context's execution is the "main" thread.
    //!
    //! @thread_safety See thread safety information in interface description.
    bool isExecutingThread() noexcept;

    //! Main execution method. Executes the entire execution graph.
    //!
    //! @thread_safety See thread safety information in interface description.
    omni::graph::exec::unstable::Status execute() noexcept;

    //! On-demand execution method. Executes the given node.
    //!
    //! @thread_safety See thread safety information in interface description.
    omni::graph::exec::unstable::Status executeNode(const omni::graph::exec::unstable::ExecutionPath& upstreamPath,
                                                    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);

    //! Context initialization. Responsible to propagate initialization to graphs.
    //!
    //! @thread_safety See thread safety information in interface description.
    void initialize();

    //! Access state for a given execution path.
    //!
    //! If the given node is not @c nullptr, a copy of the given path with the node appended will be used as the lookup
    //! key.
    //!
    //! @thread_safety See thread safety information in interface description.
    //!
    //! @warning This method should be used for read only access by downstream nodes, example accessing graph state
    //!          when executing downstream nodes. Extra care needs to be taken if this state has to be mutated
    //!          concurrently.
    omni::graph::exec::unstable::IExecutionStateInfo* getStateInfo(
        const omni::graph::exec::unstable::ExecutionPath& path,
        omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);

    //! Returns a value from a node's key/value datastore.
    //!
    //! The node from which to grab data is identified by the given path and
    //! node. The @p node may be @c nullptr.
    //!
    //! The key is used as a look-up in the node's key/value datastore.
    //!
    //! The type of each data item is returned in @p outTypeId.
    //!
    //! @p outPtr will be updated with a pointer to the actual data.
    //!
    //! @p outItemSize store the size of each item in the returned array.
    //!
    //! @p outItemCount contains the number of items returned (i.e. the number
    //! of items @p outPtr points to). For an array, this will be greater than
    //! 1.
    //!
    //! If the key is not found, @p outPtr is set to @c nullptr and @p
    //! outItemCount is set to 0.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! An exception is thrown on all other errors.
    void getNodeData(const omni::graph::exec::unstable::ExecutionPath& path,
                     omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
                     omni::graph::exec::unstable::NodeDataKey key,
                     omni::core::TypeId* outTypeId,
                     void** outPtr,
                     uint64_t* outItemSize,
                     uint64_t* outItemCount);

    //! Sets a value in a node's key/value datastore.
    //!
    //! The node in which to set the data is identified by the given path and
    //! node. The @p node may be @c nullptr.
    //!
    //! The key is used as a look-up in the node's key/value datastore.
    //!
    //! The type of each data item is set with @p typeId.
    //!
    //! @p data points to an array of data items.
    //!
    //! @p itemSize is the size of each item in the given array.
    //!
    //! @p itemCount contains the number of items pointed to by @p data. For an
    //! array, this will be greater than 1.
    //!
    //! @p deleter is a function used to delete @p data when either a new value
    //! is set at the key or the context is invalidated. If @p deleter is @c
    //! nullptr, it is up to the calling code to manage the lifetime of the @p
    //! data.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! An exception is thrown on all other errors.
    void setNodeData(const omni::graph::exec::unstable::ExecutionPath& path,
                     omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
                     omni::graph::exec::unstable::NodeDataKey key,
                     omni::core::TypeId typeId,
                     void* data,
                     uint64_t itemSize,
                     uint64_t itemCount,
                     omni::graph::exec::unstable::NodeDataDeleterFn* deleter);

    //! Discover all execution paths leading to given definition and invoke given function with each of them.
    //!
    //! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in
    //! execution graph topology will cause invalidation of the cache.
    //!
    //! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as
    //!                from within the provided @p callback.
    //!
    //! This method must not be called during graph construction.
    //!
    //! @p def definition to look for
    //! @p callback wrapped function into a callback to execute with each path to given definition
    void applyOnEachDef(omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def,
                        omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept;

    //! Discover all execution paths leading to definition with the given name and invoke the given function with each
    //! of them
    //!
    //! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in
    //! execution graph topology will cause invalidation of the cache.
    //!
    //! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as
    //!                from within the provided @p callback.
    //!
    //! This method must not be called during graph construction.
    //!
    //! @p name definition to look for
    //! @p callback wrapped function into a callback to execute with each path to given definition
    void applyOnEachDefWithName(const omni::graph::exec::unstable::ConstName* name,
                                omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline trampolines: each public method validates its arguments (where the ABI demands it),
// forwards to the corresponding *_abi virtual, and converts omni::core::Result failures into exceptions.

inline omni::graph::exec::unstable::Stamp omni::core::Generated<
    omni::graph::exec::unstable::IExecutionContext_abi>::getExecutionStamp() noexcept
{
    return getExecutionStamp_abi();
}

inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::inExecute() noexcept
{
    return inExecute_abi();
}

inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::isExecutingThread() noexcept
{
    return isExecutingThread_abi();
}

inline omni::graph::exec::unstable::Status
omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::execute() noexcept
{
    return execute_abi();
}

inline omni::graph::exec::unstable::Status
omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::executeNode(
    const omni::graph::exec::unstable::ExecutionPath& upstreamPath,
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
    OMNI_THROW_IF_ARG_NULL(node);
    auto return_ = executeNode_abi(&upstreamPath, node.get());
    return return_;
}

inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::initialize()
{
    OMNI_THROW_IF_FAILED(initialize_abi());
}

inline omni::graph::exec::unstable::IExecutionStateInfo* omni::core::
    Generated<omni::graph::exec::unstable::IExecutionContext_abi>::getStateInfo(
        const omni::graph::exec::unstable::ExecutionPath& path,
        omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
    omni::graph::exec::unstable::IExecutionStateInfo* out;
    OMNI_THROW_IF_FAILED(getStateInfo_abi(&path, node.get(), &out));
    return out;
}

inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::getNodeData(
    const omni::graph::exec::unstable::ExecutionPath& path,
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
    omni::graph::exec::unstable::NodeDataKey key,
    omni::core::TypeId* outTypeId,
    void** outPtr,
    uint64_t* outItemSize,
    uint64_t* outItemCount)
{
    OMNI_THROW_IF_ARG_NULL(outTypeId);
    OMNI_THROW_IF_ARG_NULL(outPtr);
    OMNI_THROW_IF_ARG_NULL(outItemSize);
    OMNI_THROW_IF_ARG_NULL(outItemCount);
    OMNI_THROW_IF_FAILED(getNodeData_abi(&path, node.get(), key, outTypeId, outPtr, outItemSize, outItemCount));
}

inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::setNodeData(
    const omni::graph::exec::unstable::ExecutionPath& path,
    omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
    omni::graph::exec::unstable::NodeDataKey key,
    omni::core::TypeId typeId,
    void* data,
    uint64_t itemSize,
    uint64_t itemCount,
    omni::graph::exec::unstable::NodeDataDeleterFn* deleter)
{
    OMNI_THROW_IF_ARG_NULL(data);
    OMNI_THROW_IF_FAILED(setNodeData_abi(&path, node.get(), key, typeId, data, itemSize, itemCount, deleter));
}

inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::applyOnEachDef(
    omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def,
    omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept
{
    applyOnEachDef_abi(def.get(), callback.get());
}

inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::applyOnEachDefWithName(
    const omni::graph::exec::unstable::ConstName* name,
    omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept
{
    applyOnEachDefWithName_abi(name, callback.get());
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IPassFactory.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IPassFactory.h //! //! @brief Defines @ref omni::graph::exec::unstable::IPassFactory. #pragma once #include <omni/graph/exec/unstable/IBase.h> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class IGraphBuilder; class IPass; class IPassFactory; class IPassFactory_abi; //! Factory interface for creating @ref omni::graph::exec::unstable::IPass objects. //! //! Usually used in conjunction with @ref omni::graph::exec::unstable::IPassRegistry. //! //! See @ref omni::graph::exec::unstable::createPassFactory() to generate one of these objects from an invocable object //! (e.g. @c std::function). //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. class IPassFactory_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassFactory")> { protected: //! Creates and returns a pass. //! //! The returned @ref omni::graph::exec::unstable::IPass will have @ref omni::core::IObject::acquire() called on it. virtual OMNI_ATTR("throw_result") omni::core::Result createPass_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilder* builder, OMNI_ATTR("not_null, throw_if_null, out, *return") IPass** out) noexcept = 0; }; //! Smart pointer managing an instance of @ref IPassFactory. using PassFactoryPtr = omni::core::ObjectPtr<IPassFactory>; //! 
Generates an @ref IPassFactory from an invocable object such as a function pointer, functor, etc. //! //! The given function should have the signature `IPass*(IGraphBuilder*)`. template <typename Fn> PassFactoryPtr createPassFactory(Fn&& fn); } // namespace unstable } // namespace exec } // namespace graph } // namespace omni // generated API declaration #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IPassFactory.gen.h> //! @copydoc omni::graph::exec::unstable::IPassFactory_abi //! //! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IPassFactory : public omni::core::Generated<omni::graph::exec::unstable::IPassFactory_abi> { }; // additional headers needed for API implementation #include <omni/graph/exec/unstable/IGraphBuilder.h> #include <omni/graph/exec/unstable/IPass.h> #ifndef DOXYGEN_BUILD template <typename Fn> omni::graph::exec::unstable::PassFactoryPtr omni::graph::exec::unstable::createPassFactory(Fn&& fn) { class FactoryImpl : public Implements<IPassFactory> { public: FactoryImpl(Fn&& fn) : m_fn(std::move(fn)) { } protected: omni::core::Result createPass_abi(IGraphBuilder* builder, IPass** out) noexcept override { try { PassPtr pass = m_fn(builder); // may throw *out = pass.detach(); return omni::core::kResultSuccess; } OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() } Fn m_fn; }; return omni::core::steal(new FactoryImpl(std::forward<Fn>(fn))); } #endif // DOXYGEN_BUILD // generated API implementation #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IPassFactory.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/INodeDef.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Opaque node definition. //! //! Nodes are opaque because execution framework has no knowledge or what the execution method will do and does not //! orchestrate generation and dispatch of the tasks. //! //! Node definitions can be shared across multiple nodes and graphs. The implementation should leverage execution task //! to operate within proper task state. //! //! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during //! execution. //! //! See also @ref omni::graph::exec::unstable::ExecutionTask, @ref omni::graph::exec::unstable::ExecutionPath. template <> class omni::core::Generated<omni::graph::exec::unstable::INodeDef_abi> : public omni::graph::exec::unstable::INodeDef_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeDef") }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/INodeDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file INodeDef.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeDef.
#pragma once

#include <omni/graph/exec/unstable/IDef.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

class INodeDef_abi;
class INodeDef;

//! Opaque node definition.
//!
//! Nodes are opaque because execution framework has no knowledge or what the execution method will do and does not
//! orchestrate generation and dispatch of the tasks.
//!
//! Node definitions can be shared across multiple nodes and graphs. The implementation should leverage execution task
//! to operate within proper task state.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See also @ref omni::graph::exec::unstable::ExecutionTask, @ref omni::graph::exec::unstable::ExecutionPath.
//!
//! Marker interface: inherits everything from IDef and adds no new ABI methods.
class INodeDef_abi : public omni::core::Inherits<IDef, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeDef")>
{
};

//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::INodeDef.
using NodeDefPtr = omni::core::ObjectPtr<INodeDef>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeDef.gen.h>

//! @copydoc omni::graph::exec::unstable::INodeDef_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeDef : public omni::core::Generated<omni::graph::exec::unstable::INodeDef_abi>
{
};

#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeDef.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionContext.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IExecutionContext.h //! //! @brief Defines @ref omni::graph::exec::unstable::IExecutionContext. #pragma once #include <omni/graph/exec/unstable/ConstName.h> #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/Span.h> #include <omni/graph/exec/unstable/Stamp.h> #include <omni/graph/exec/unstable/Status.h> #include <omni/graph/exec/unstable/Types.h> #include <memory> namespace omni { namespace graph { namespace exec { namespace unstable { class ExecutionPath; class ExecutionTask; class IApplyOnEachFunction; class IDef; class IExecutionContext_abi; class IExecutionContext; class IExecutionStateInfo; class INode; //! Stores and provides access to the execution state of the graph. //! //! The @rstref{execution graph <ef_execution_graph>} is only a description of what needs to be executed. The actual //! graph state is stored separately in an instance of this object. //! //! The execution context allows computing the same graph description within multiple contexts. It also enables the //! ability to perform this computation concurrently. Some example use cases of this class: //! //! - Computing the state of a graph at a time different than the current time (e.g. asynchronous caching, fake //! dynamics) //! //! - Computing the state of a graph with a inputs different than the current input state (e.g. double solve) //! //! All execution begins with a call to one of the execute methods on this interface. @ref //! 
omni::graph::exec::unstable::IExecutionContext::execute() is used to execute the entire execution graph while @ref //! omni::graph::exec::unstable::IExecutionContext::executeNode() can be used to execute only a part of the graph. //! //! Part of this interface defines a key/value store. The *key* in this store is an @ref //! omni::graph::exec::unstable::ExecutionPath. The *value* is an implementation of @ref //! omni::graph::exec::unstable::IExecutionStateInfo, which in addition to storing computation state, can also store //! user defined data. The computation state and user data can be access with one of the `getStateInfo` / //! `setStateInfo` methods though if you wish to access the user data using one of the `getNodeData` / `setNodeData` is //! slightly faster. //! //! Another feature of @ref omni::graph::exec::unstable::IExecutionContext is the ability to quickly search for nodes //! using a particular definition and apply a function on them. Definitions can be searched by name or by pointer (see //! @ref omni::graph::exec::unstable::IExecutionContext::applyOnEach()). These methods are used extensively during //! @rstref{Graph Construction <ef_pass_concepts>}. //! //! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during //! execution. //! //! Since multiple threads can concurrently traverse a graph, implementors of methods within this class should expect //! that multiple threads will be accessing this object in parallel. class IExecutionContext_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutionContext")> { protected: //! Current execution version. Incremented with each execution of the context. //! //! @thread_safety See thread safety information in interface description. virtual Stamp getExecutionStamp_abi() noexcept = 0; //! Returns `true` if context is currently executing //! //! 
See thread safety information in interface description. virtual bool inExecute_abi() noexcept = 0; //! Returns `true` if the current thread is the one who started this context's execution. //! //! Note, do not assume that the thread that started the context's execution is the "main" thread. //! //! @thread_safety See thread safety information in interface description. virtual bool isExecutingThread_abi() noexcept = 0; //! Main execution method. Executes the entire execution graph. //! //! @thread_safety See thread safety information in interface description. virtual Status execute_abi() noexcept = 0; //! On-demand execution method. Executes the given node. //! //! @thread_safety See thread safety information in interface description. virtual Status executeNode_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* upstreamPath, OMNI_ATTR("not_null, throw_if_null") INode* node) noexcept = 0; //! Context initialization. Responsible to propagate initialization to graphs. //! //! @thread_safety See thread safety information in interface description. virtual OMNI_ATTR("throw_result") omni::core::Result initialize_abi() noexcept = 0; //! Access state for a given execution path. //! //! If the given node is not @c nullptr, a copy of the given path with the node appended will be used as the lookup //! key. //! //! @thread_safety See thread safety information in interface description. //! //! @warning This method should be used for read only access by downstream nodes, example accessing graph state //! when executing downstream nodes. Extra care needs to be taken if this state has to be mutated //! concurrently. virtual OMNI_ATTR("throw_result") omni::core::Result getStateInfo_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path, INode* node, OMNI_ATTR("out, *return, not_null, throw_if_null, *no_acquire") IExecutionStateInfo** out) noexcept = 0; //! Returns a value from a node's key/value datastore. //! //! 
The node from which to grab data is identified by the given path and //! node. The @p node may be @c nullptr. //! //! The key is used as a look-up in the node's key/value datastore. //! //! The type of each data item is returned in @p outTypeId. //! //! @p outPtr will be updated with a pointer to the actual data. //! //! @p outItemSize store the size of each item in the returned array. //! //! @p outItemCount contains the number of items returned (i.e. the number //! of items @p outPtr points to). For an array, this will be greater than //! 1. //! //! If the key is not found, @p outPtr is set to @c nullptr and @p //! outItemCount is set to 0. //! //! Accessing the node's key/value datastore is not thread safe. //! //! An exception is thrown on all other errors. virtual OMNI_ATTR("throw_result") omni::core::Result getNodeData_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path, INode* node, NodeDataKey key, OMNI_ATTR("out, not_null, throw_if_null") omni::core::TypeId* outTypeId, OMNI_ATTR("out, not_null, throw_if_null, *out, *in") void** outPtr, OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemSize, OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemCount) noexcept = 0; //! Sets a value in a node's key/value datastore. //! //! The node in which to set the data is identified by the given path and //! node. The @p node may be @c nullptr. //! //! The key is used as a look-up in the node's key/value datastore. //! //! The type of each data item is set with @p typeId. //! //! @p data points to an array of data items. //! //! @p itemSize is the size of each item in the given array. //! //! @p itemCount contains the number of items pointed to by @p data. For an //! array, this will be greater than 1. //! //! @p deleter is a function used to delete @p data when either a new value //! is set at the key or the context is invalidated. If @p deleter is @c //! nullptr, it is up to the calling code to manage the lifetime of the @p //! 
data. //! //! Accessing the node's key/value datastore is not thread safe. //! //! An exception is thrown on all other errors. virtual OMNI_ATTR("throw_result") omni::core::Result setNodeData_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path, INode* node, NodeDataKey key, omni::core::TypeId typeId, OMNI_ATTR("in, out, not_null, throw_if_null") void* data, uint64_t itemSize, uint64_t itemCount, OMNI_ATTR("in, out") NodeDataDeleterFn* deleter) noexcept = 0; //! Discover all execution paths leading to given definition and invoke given function with each of them. //! //! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in //! execution graph topology will cause invalidation of the cache. //! //! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as //! from within the provided @p callback. //! //! This method must not be called during graph construction. //! //! @p def definition to look for //! @p callback wrapped function into a callback to execute with each path to given definition virtual void applyOnEachDef_abi(OMNI_ATTR("not_null") IDef* def, OMNI_ATTR("not_null") IApplyOnEachFunction* callback) noexcept = 0; //! Discover all execution paths leading to definition with the given name and invoke the given function with each //! of them //! //! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in //! execution graph topology will cause invalidation of the cache. //! //! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as //! from within the provided @p callback. //! //! This method must not be called during graph construction. //! //! @p name definition to look for //! 
    //! @p callback wrapped function into a callback to execute with each path to given definition
    virtual void applyOnEachDefWithName_abi(OMNI_ATTR("in, not_null") const ConstName* name,
                                            OMNI_ATTR("not_null") IApplyOnEachFunction* callback) noexcept = 0;
};

//! Smart pointer managing an instance of @ref IExecutionContext.
using ExecutionContextPtr = omni::core::ObjectPtr<IExecutionContext>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutionContext.gen.h>

//! @copydoc omni::graph::exec::unstable::IExecutionContext_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutionContext
    : public omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>
{
public:
    //! @copydoc omni::graph::exec::unstable::IExecutionContext_abi::executeNode_abi
    Status execute(const ExecutionPath& path, omni::core::ObjectParam<INode> node)
    {
        // Convenience overload: forwards to the generated executeNode() wrapper.
        return executeNode(path, node);
    }

    using Generated<IExecutionContext_abi>::execute;

    //! Retrieves the state info for the given task.
    //!
    //! The task's upstream path and node are hashed for the lookup.
    //!
    //! The returned @ref omni::graph::exec::unstable::IExecutionStateInfo will
    //! not have @ref omni::core::IObject::acquire() called on it.
    inline IExecutionStateInfo* getStateInfo(const ExecutionTask& task);

    using Generated<IExecutionContext_abi>::getStateInfo;

    //! Retrieves the state info at the given execution path.
    //!
    //! The returned @ref omni::graph::exec::unstable::IExecutionStateInfo will
    //! not have @ref omni::core::IObject::acquire() called on it.
    inline IExecutionStateInfo* getStateInfo(const ExecutionPath& path);

    //! Access state for a given execution path.
    //!
    //! See thread safety information in interface description.
    template <typename T>
    inline T* getStateInfoAs(const ExecutionTask& info);

    //! Access state for a given execution path.
    //!
    //! See thread safety information in interface description.
    template <typename T>
    inline T* getStateInfoAs(const ExecutionPath& path);

    //! Access state for a given execution path.
    //!
    //! See thread safety information in interface description.
    template <typename T>
    inline T* getStateInfoAs(const ExecutionPath& path, omni::core::ObjectParam<INode> node);

    //! Returns a pointer to a value stored in a node's key/value datastore.
    //!
    //! The node whose key/value datastore should be used is identified by combining the given path and node. @p node
    //! may be @c nullptr.
    //!
    //! If there is no value stored at the given @p key an empty span is returned.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! If the type @c T does not match the type of the store data, an exception is thrown.
    //!
    //! An exception is thrown on all other errors.
    //!
    //! Prefer using @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() instead of this method, which will populate the type id
    //! for you.
    template <typename T>
    inline Span<T> getNodeDataAs(omni::core::TypeId desiredType,
                                 const ExecutionPath& path,
                                 omni::core::ObjectParam<INode> node,
                                 NodeDataKey key);

    //! Returns a pointer to a value stored in a node's key/value datastore.
    //!
    //! The node whose key/value datastore should be used is identified by combining the given path and node in the
    //! given task.
    //!
    //! If there is no value stored at the given @p key an empty span is returned.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! If the type @c T does not match the type of the store data, an exception is thrown.
    //!
    //! An exception is thrown on all other errors.
    //!
    //! Prefer using @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() instead of this method, which will populate the type id
    //! for you.
    template <typename T>
    inline Span<T> getNodeDataAs(omni::core::TypeId desiredType, const ExecutionTask& path, NodeDataKey key);

    //! Stores a value in a node's key/value datastore.
    //!
    //! The node whose key/value datastore should be used is identified by combining the given path and node. @p node
    //! may be @c nullptr.
    //!
    //! If a value is already stored at the given @p key it will be replaced.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! An exception is thrown on all errors.
    //!
    //! Prefer using @ref OMNI_GRAPH_EXEC_SET_NODE_DATA() instead of this method, which will populate the type id for
    //! you.
    template <typename SpecifiedT, typename DataT>
    inline void setNodeData(omni::core::TypeId itemType,
                            const ExecutionPath& path,
                            omni::core::ObjectParam<INode> node,
                            NodeDataKey key,
                            std::unique_ptr<DataT> data);

    //! Stores a value in a node's key/value datastore.
    //!
    //! The node whose key/value datastore should be used is identified by combining the given path and node in the
    //! given task.
    //!
    //! If a value is already stored at the given @p key it will be replaced.
    //!
    //! Accessing the node's key/value datastore is not thread safe.
    //!
    //! An exception is thrown on all errors.
    //!
    //! Prefer using @ref OMNI_GRAPH_EXEC_SET_NODE_DATA() instead of this method, which will populate the type id for
    //! you.
    template <typename SpecifiedT, typename DataT>
    inline void setNodeData(omni::core::TypeId itemType,
                            const ExecutionTask& path,
                            NodeDataKey key,
                            std::unique_ptr<DataT> data);

    //! Discover all execution paths leading to given definition and invoke given function with each of them.
    //!
    //! This inline implementation wraps lambda into IApplyOnEachFunction.
    //!
    //! The supplied function should have the signature of `void(const ExecutionPath&)`.
    template <typename FN>
    inline void applyOnEach(omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def, FN&& callback);

    //! Discover all execution paths leading to definition with the given name and invoke the given function with each
    //! of them.
    //!
    //! This inline implementation wraps lambda into IApplyOnEachFunction.
    //!
    //! The supplied function should have the signature of `void(const ExecutionPath&)`.
    template <typename FN>
    inline void applyOnEach(const ConstName& name, FN&& callback);
};

#include <omni/graph/exec/unstable/ExecutionPath.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IApplyOnEachFunction.h>
#include <omni/graph/exec/unstable/IDef.h>
#include <omni/graph/exec/unstable/IExecutionStateInfo.h>
#include <omni/graph/exec/unstable/INode.h>

#ifndef DOXYGEN_BUILD // templates are hard to understand, even for doxygen...

// Looks up state info keyed by the task's upstream path and node.
// The returned pointer is NOT acquired (see method docs above).
inline omni::graph::exec::unstable::IExecutionStateInfo* omni::graph::exec::unstable::IExecutionContext::getStateInfo(
    const ExecutionTask& info)
{
    IExecutionStateInfo* out;
    OMNI_THROW_IF_FAILED(getStateInfo_abi(&(info.getUpstreamPath()), info.getNode(), &out));
    return out;
}

// Path-only lookup: a null node is passed to the ABI call.
inline omni::graph::exec::unstable::IExecutionStateInfo* omni::graph::exec::unstable::IExecutionContext::getStateInfo(
    const ExecutionPath& path)
{
    IExecutionStateInfo* out;
    OMNI_THROW_IF_FAILED(getStateInfo_abi(&(info.getUpstreamPath()), info.getNode(), &out));
    return omni::graph::exec::unstable::cast<T>(out);
}

template <typename T>
inline T* omni::graph::exec::unstable::IExecutionContext::getStateInfoAs(const ExecutionTask& info)
{
    IExecutionStateInfo* out;
    OMNI_THROW_IF_FAILED(getStateInfo_abi(&(info.getUpstreamPath()), info.getNode(), &out));
    return omni::graph::exec::unstable::cast<T>(out);
}

template <typename T>
inline T* omni::graph::exec::unstable::IExecutionContext::getStateInfoAs(const ExecutionPath& path)
{
    IExecutionStateInfo* out;
    OMNI_THROW_IF_FAILED(getStateInfo_abi(&path, nullptr, &out));
    return omni::graph::exec::unstable::cast<T>(out);
}

template <typename T>
inline T* omni::graph::exec::unstable::IExecutionContext::getStateInfoAs(const ExecutionPath& path,
                                                                         omni::core::ObjectParam<INode> node)
{
    IExecutionStateInfo* out;
    OMNI_THROW_IF_FAILED(getStateInfo_abi(&path, node.get(), &out));
    return omni::graph::exec::unstable::cast<T>(out);
}

template <typename T>
inline omni::graph::exec::unstable::Span<T> omni::graph::exec::unstable::IExecutionContext::getNodeDataAs(
    omni::core::TypeId desiredType, const ExecutionPath& path, omni::core::ObjectParam<INode> node, NodeDataKey key)
{
    omni::core::TypeId outType;
    void* outPtr;
    uint64_t outItemSize, outItemCount;
    OMNI_THROW_IF_FAILED(getNodeData_abi(&path, node.get(), key, &outType, &outPtr, &outItemSize, &outItemCount));

    // Type/size validation only applies when data was actually found; a null
    // outPtr (key not present) yields an empty span below.
    if (outPtr)
    {
        if (outType != desiredType)
        {
            throw omni::core::ResultError(omni::core::kResultInvalidDataType);
        }

        if (outItemSize != sizeof(T))
        {
            throw omni::core::ResultError(omni::core::kResultInvalidDataSize);
        }
    }

    return Span<T>{ reinterpret_cast<T*>(outPtr), outItemCount };
}

template <typename T>
inline omni::graph::exec::unstable::Span<T> omni::graph::exec::unstable::IExecutionContext::getNodeDataAs(
    omni::core::TypeId desiredType, const ExecutionTask& info, NodeDataKey key)
{
    // Task overload: delegates using the task's upstream path and node.
    return getNodeDataAs<T>(desiredType, info.getUpstreamPath(), info.getNode(), key);
}

template <typename SpecifiedT, typename DataT>
inline void omni::graph::exec::unstable::IExecutionContext::setNodeData(omni::core::TypeId itemType,
                                                                        const ExecutionPath& path,
                                                                        omni::core::ObjectParam<INode> node,
                                                                        NodeDataKey key,
                                                                        std::unique_ptr<DataT> data)
{
    static_assert(std::is_same<SpecifiedT, DataT>::value, "given TypeId does not match the data type");
    static_assert(!std::is_array<DataT>::value, "setting arrays as node data via unique_ptr not yet implemented");
    // The deleter lambda reconstructs the unique_ptr's deleter so the datastore
    // can destroy the object with the same semantics the caller expected.
    OMNI_THROW_IF_FAILED(setNodeData_abi(&path, node.get(), key, itemType, data.get(), sizeof(DataT), 1,
                                         [](void* p)
                                         {
                                             typename std::unique_ptr<DataT>::deleter_type deleter;
                                             deleter(reinterpret_cast<DataT*>(p));
                                         }));
    data.release(); // now safe to release ownership
}

template <typename SpecifiedT, typename DataT>
inline void omni::graph::exec::unstable::IExecutionContext::setNodeData(omni::core::TypeId itemType,
                                                                        const ExecutionTask& task,
                                                                        NodeDataKey key,
                                                                        std::unique_ptr<DataT> data)
{
    // Task overload: delegates using the task's upstream path and node.
    return setNodeData<SpecifiedT>(itemType, task.getUpstreamPath(), task.getNode(), key, std::move(data));
}

template <typename FN>
inline void omni::graph::exec::unstable::IExecutionContext::applyOnEach(
    omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def, FN&& callback)
{
    // Forwarder adapts the caller's lambda to the ABI-safe
    // IApplyOnEachFunction interface expected by applyOnEachDef_abi.
    class Forwarder : public Implements<IApplyOnEachFunction>
    {
    public:
        Forwarder(FN&& fn) : m_fn(std::move(fn))
        {
        }

    protected:
        void invoke_abi(const ExecutionPath* path) noexcept override
        {
            m_fn(*path);
        }

        FN m_fn;
    };

    applyOnEachDef_abi(def.get(), omni::core::steal(new Forwarder(std::forward<FN>(callback))).get());
}

template <typename FN>
inline void omni::graph::exec::unstable::IExecutionContext::applyOnEach(const ConstName& name, FN&& callback)
{
    // Forwarder adapts the caller's lambda to the ABI-safe
    // IApplyOnEachFunction interface expected by applyOnEachDefWithName_abi.
    class Forwarder : public Implements<IApplyOnEachFunction>
    {
    public:
        Forwarder(FN&& fn) : m_fn(std::move(fn))
        {
        }

    protected:
        void invoke_abi(const ExecutionPath* path) noexcept override
        {
            m_fn(*path);
        }

        FN m_fn;
    };

    applyOnEachDefWithName_abi(&name, omni::core::steal(new Forwarder(std::forward<FN>(callback))).get());
}

#endif // DOXYGEN_BUILD

#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutionContext.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResultWriter.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IBackgroundResultWriter.h //! //! @brief Defines @ref omni::graph::exec::unstable::IBackgroundResultWriter. #pragma once #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/Status.h> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class ExecutionTask; class IBackgroundResultWriter; class IBackgroundResultWriter_abi; //! Functor interface used to write the result of a background task. class IBackgroundResultWriter_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IBackgroundResultWriter")> { protected: //! Write the result. virtual Status write_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0; }; //! Smart pointer managing an instance of @ref IBackgroundResultWriter. using BackgroundResultWriterPtr = omni::core::ObjectPtr<IBackgroundResultWriter>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni // generated API declaration #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IBackgroundResultWriter.gen.h> //! @copydoc omni::graph::exec::unstable::IBackgroundResultWriter_abi class omni::graph::exec::unstable::IBackgroundResultWriter : public omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi> { }; // generated API implementation #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IBackgroundResultWriter.gen.h>