file_path (string, 21–202 chars) | content (string, 12–1.02M chars) | size (int64, 12–1.02M) | lang (9 classes) | avg_line_length (float64, 3.33–100) | max_line_length (int64, 10–993) | alphanum_fraction (float64, 0.27–0.93)
---|---|---|---|---|---|---|
omniverse-code/kit/include/omni/ui/scene/bind/BindManipulator.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractContainer.h"
#include "DocManipulator.h"
#include "DocManipulatorModelHelper.h"
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::Manipulator, Manipulator);
// clang-format off
#define OMNIUI_PYBIND_INIT_PyManipulator \
OMNIUI_PYBIND_INIT_AbstractContainer \
OMNIUI_PYBIND_INIT_CAST(model, setModel, std::shared_ptr<AbstractManipulatorModel>) \
OMNIUI_PYBIND_INIT_CAST(gesture, addGesture, std::shared_ptr<ManipulatorGesture>) \
OMNIUI_PYBIND_INIT_CAST(gestures, setGestures, std::vector<std::shared_ptr<ManipulatorGesture>>) \
OMNIUI_PYBIND_INIT_CALLBACK(on_build_fn, setOnBuildFn, void(Manipulator const*))
#define OMNIUI_PYBIND_KWARGS_DOC_Manipulator \
"\n `model : `\n " \
OMNIUI_PYBIND_DOC_ManipulatorModelHelper_getModel \
"\n `gesture : `\n " \
OMNIUI_PYBIND_DOC_Manipulator_getGestures \
"\n `gestures : `\n " \
OMNIUI_PYBIND_DOC_Manipulator_getGestures \
"\n `on_build_fn : `\n " \
OMNIUI_PYBIND_DOC_Manipulator_onBuild \
OMNIUI_PYBIND_KWARGS_DOC_AbstractContainer
// clang-format on
| 2,495 | C | 64.684209 | 120 | 0.457315 |
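The macros above expose `model`, `gesture`/`gestures`, and `on_build_fn` as Python keyword arguments on `Manipulator`. A minimal sketch of how those kwargs might be used from Python, assuming a Kit environment where `omni.ui.scene` is importable as `sc`; the snake_case `invalidate()` spelling and the fact that shapes created inside the build callback become the manipulator's content are assumptions, and the manipulator would normally live under a SceneView's scene (omitted here for brevity):

```python
# Minimal sketch; assumes omni.ui.scene is available in a Kit environment.
from omni.ui import scene as sc

def build_content(sender):
    # Rebuild the manipulator content whenever it is marked dirty;
    # the shape created here is intended to become the manipulator's content.
    sc.Line((0, 0, 0), (1, 0, 0), color=(1, 0, 0, 1))

manipulator = sc.Manipulator(on_build_fn=build_content)
manipulator.invalidate()  # mark the manipulator dirty so on_build_fn runs again
```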
omniverse-code/kit/include/omni/ui/scene/bind/BindManipulatorGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractGesture.h"
#include "DocManipulatorGesture.h"
#include <omni/ui/bind/BindUtils.h>
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::ManipulatorGesture, ManipulatorGesture);
#define OMNIUI_PYBIND_INIT_ManipulatorGesture OMNIUI_PYBIND_INIT_AbstractGesture
#define OMNIUI_PYBIND_INIT_PyManipulatorGesture OMNIUI_PYBIND_INIT_ManipulatorGesture
#define OMNIUI_PYBIND_KWARGS_DOC_ManipulatorGesture OMNIUI_PYBIND_KWARGS_DOC_AbstractGesture
#define OMNIUI_PYBIND_KWARGS_DOC_PyManipulatorGesture OMNIUI_PYBIND_KWARGS_DOC_ManipulatorGesture
OMNIUI_SCENE_NAMESPACE_OPEN_SCOPE
class PyManipulatorGesture : public ManipulatorGesture
{
public:
static std::shared_ptr<PyManipulatorGesture> create(pybind11::handle derivedFrom);
void process() override;
const pybind11::handle& getHandle() const;
private:
pybind11::handle m_derivedFrom;
};
OMNIUI_SCENE_NAMESPACE_CLOSE_SCOPE
| 1,353 | C | 33.717948 | 97 | 0.810791 |
omniverse-code/kit/include/omni/ui/scene/bind/BindDoubleClickGesture.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindClickGesture.h"
#include "DocDoubleClickGesture.h"
#include <omni/ui/scene/DoubleClickGesture.h>
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::DoubleClickGesture, DoubleClickGesture);
// clang-format off
#define OMNIUI_PYBIND_INIT_PyDoubleClickGesture \
OMNIUI_PYBIND_INIT_PyClickGesture
#define OMNIUI_PYBIND_KWARGS_DOC_DoubleClickGesture \
OMNIUI_PYBIND_KWARGS_DOC_ClickGesture
#define OMNIUI_PYBIND_DOC_DoubleClickGesture_OnEnded OMNIUI_PYBIND_DOC_ClickGesture_OnEnded
// clang-format on
| 1,113 | C | 37.413792 | 120 | 0.703504 |
omniverse-code/kit/include/omni/ui/scene/bind/DocTransform.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Transform \
"Transforms children with component affine transformations.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Transform_transform "Single transformation matrix.\n"
#define OMNIUI_PYBIND_DOC_Transform_scaleTo \
"Which space the current transform will be rescaled before applying the matrix. It's useful to make the object the same size regardless the distance to the camera.\n"
#define OMNIUI_PYBIND_DOC_Transform_lookAt "Rotates this transform to align the direction with the camera.\n"
#define OMNIUI_PYBIND_DOC_Transform_basis "A custom basis for representing this transform's coordinate system.\n"
#define OMNIUI_PYBIND_DOC_Transform_Transform "Constructor.\n"
| 1,368 | C | 44.633332 | 170 | 0.665205 |
omniverse-code/kit/include/omni/ui/scene/bind/DocScreen.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Screen \
"The empty shape that triggers all the gestures at any place. Is used to track gestures when the user clicked the empty space. For example for cameras.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Screen_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_Screen_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
#define OMNIUI_PYBIND_DOC_Screen_Screen "Constructor.\n"
| 1,159 | C | 47.333331 | 160 | 0.651424 |
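Since Screen is an invisible shape, it accepts the same `gesture` keyword documented for AbstractShape in the next row. A hedged sketch of using it to catch clicks on empty space; the `on_ended_fn` keyword for ClickGesture is assumed by analogy with the Drag/Scroll gesture bindings elsewhere in this package:

```python
from omni.ui import scene as sc

def clicked(sender):
    # `sender` is the shape that triggered the gesture; for empty space it is the Screen.
    print("clicked empty space")

# An invisible, screen-filling shape that still receives gestures,
# e.g. to drive camera manipulation from clicks on empty space.
screen = sc.Screen(gesture=sc.ClickGesture(on_ended_fn=clicked))
```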
omniverse-code/kit/include/omni/ui/scene/bind/BindAbstractShape.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractItem.h"
#include "DocAbstractShape.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CALL(gesture, setGestures, pythonToGestures) \
OMNIUI_PYBIND_INIT_CALL(gestures, setGestures, pythonToGestures) \
OMNIUI_PYBIND_INIT_AbstractItem
#define OMNIUI_PYBIND_KWARGS_DOC_AbstractShape \
"\n `gesture : `\n " \
OMNIUI_PYBIND_DOC_AbstractShape_getGestures \
"\n `gestures : `\n " \
OMNIUI_PYBIND_DOC_AbstractShape_getGestures \
OMNIUI_PYBIND_KWARGS_DOC_AbstractItem
// clang-format on
OMNIUI_SCENE_NAMESPACE_OPEN_SCOPE
std::vector<std::shared_ptr<ShapeGesture>> pythonToGestures(pybind11::handle obj);
OMNIUI_SCENE_NAMESPACE_CLOSE_SCOPE
| 1,752 | C | 49.085713 | 120 | 0.512557 |
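The `pythonToGestures` helper above lets the `gesture`/`gestures` kwargs accept either a single gesture or a list of gestures. A sketch under that assumption (the shape, callbacks, and the ClickGesture keyword name are illustrative; `on_began_fn`/`on_ended_fn` for DragGesture match its binding further below):

```python
from omni.ui import scene as sc

def began(sender):
    print("drag began on", sender)

def ended(sender):
    print("drag ended")

# A single gesture or a list can be passed; pythonToGestures converts either form.
line = sc.Line(
    (0, 0, 0), (0, 1, 0),
    gestures=[
        sc.DragGesture(on_began_fn=began, on_ended_fn=ended),
        sc.ClickGesture(on_ended_fn=lambda s: print("clicked")),
    ],
)
```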
omniverse-code/kit/include/omni/ui/scene/bind/BindTransform.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractContainer.h"
#include "DocTransform.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Transform \
OMNIUI_PYBIND_INIT_AbstractContainer \
OMNIUI_PYBIND_INIT_CALL(transform, setTransform, pythonToMatrix4) \
OMNIUI_PYBIND_INIT_CAST(scale_to, setScaleTo, Space) \
OMNIUI_PYBIND_INIT_CAST(look_at, setLookAt, Transform::LookAt) \
OMNIUI_PYBIND_INIT_CAST(basis, setBasis, std::shared_ptr<TransformBasis>)
#define OMNIUI_PYBIND_KWARGS_DOC_Transform \
"\n `transform : `\n " \
OMNIUI_PYBIND_DOC_Transform_transform \
"\n `scale_to : `\n " \
OMNIUI_PYBIND_DOC_Transform_scaleTo \
"\n `look_at : `\n " \
OMNIUI_PYBIND_DOC_Transform_lookAt \
"\n `basis : `\n " \
OMNIUI_PYBIND_DOC_Transform_basis \
OMNIUI_PYBIND_KWARGS_DOC_AbstractContainer
// clang-format on
| 2,377 | C | 66.942855 | 120 | 0.395036 |
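A sketch of the `transform` and `look_at` kwargs bound above. Passing a flat 16-float list to `sc.Matrix44` follows the `Matrix44(SceneView.model.get_as_floats("projection"))` shortcut quoted in the SceneView docs; using the Transform as a `with` container and the `sc.Transform.LookAt.CAMERA` member name are assumptions:

```python
from omni.ui import scene as sc

# Row-major matrix that moves children one unit up the Y axis.
move_up = sc.Matrix44([1, 0, 0, 0,
                       0, 1, 0, 0,
                       0, 0, 1, 0,
                       0, 1, 0, 1])

with sc.Transform(transform=move_up, look_at=sc.Transform.LookAt.CAMERA):
    sc.Label("billboard", size=20)
```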
omniverse-code/kit/include/omni/ui/scene/bind/BindLabel.h | // Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractShape.h"
#include "DocLabel.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Label \
OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CALL(color, setColor, pythonToColor4) \
OMNIUI_PYBIND_INIT_CAST(size, setSize, float) \
OMNIUI_PYBIND_INIT_CAST(alignment, setAlignment, Alignment) \
OMNIUI_PYBIND_INIT_CAST(text, setText, std::string)
#define OMNIUI_PYBIND_KWARGS_DOC_Label \
"\n `color : `\n " \
OMNIUI_PYBIND_DOC_Label_color \
"\n `size : `\n " \
OMNIUI_PYBIND_DOC_Label_size \
"\n `alignment : `\n " \
OMNIUI_PYBIND_DOC_Label_alignment \
"\n `text : `\n " \
OMNIUI_PYBIND_DOC_Label_text \
OMNIUI_PYBIND_KWARGS_DOC_AbstractShape
// clang-format on
| 2,341 | C | 67.882351 | 120 | 0.365229 |
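A hedged usage sketch of the Label kwargs bound above. `color` goes through `pythonToColor4`, so an RGBA tuple is assumed to be accepted; the `ui.Alignment.CENTER` spelling is an assumption about the omni.ui enum, and the positional text argument follows the constructor doc:

```python
import omni.ui as ui
from omni.ui import scene as sc

# A 3D text label; the constructor takes the text, the rest are keyword arguments.
sc.Label(
    "Hello scene",
    color=(1.0, 1.0, 0.0, 1.0),   # RGBA, converted by pythonToColor4
    size=24,
    alignment=ui.Alignment.CENTER,
)
```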
omniverse-code/kit/include/omni/ui/scene/bind/DocShapeGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_ShapeGesture \
"The base class for the gestures to provides a way to capture mouse events in 3d scene.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_ShapeGesture_dispatchInput \
"Called by scene to process the mouse inputs and do intersections with shapes. It can be an entry point to simulate the mouse input.\n" \
"Todo\n" \
"We probably don't need projection-view here. We can get it from manager.\n"
#define OMNIUI_PYBIND_DOC_ShapeGesture_getSender "Returns the relevant shape driving the gesture.\n"
| 1,311 | C | 56.043476 | 141 | 0.578947 |
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractContainer.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_AbstractContainer \
"Base class for all the items that have children.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_AbstractContainer_transformSpace \
"Transform the given point from the coordinate system fromspace to the coordinate system tospace.\n"
#define OMNIUI_PYBIND_DOC_AbstractContainer_transformSpace01 \
"Transform the given vector from the coordinate system fromspace to the coordinate system tospace.\n"
#define OMNIUI_PYBIND_DOC_AbstractContainer_addChild \
"Adds item to this container in a manner specific to the container. If it's allowed to have one sub-widget only, it will be overwriten.\n"
#define OMNIUI_PYBIND_DOC_AbstractContainer_clear "Removes the container items from the container.\n"
| 1,526 | C | 51.655171 | 142 | 0.619266 |
omniverse-code/kit/include/omni/ui/scene/bind/DocScrollGesture.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_ScrollGesture \
"The gesture that provides a way to capture mouse scroll event.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_preProcess "Called before processing to determine the state of the gesture.\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_process "Process the gesture and call callbacks if necessary.\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_onEnded "Called if the callback is not set when the user scrolls.\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_mouseButton "The mouse button this gesture is watching.\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_modifiers "The modifier that should be pressed to trigger this gesture.\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_OnEnded "Called when the user scrolls.\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_getScroll "Returns the current scroll state.\n"
#define OMNIUI_PYBIND_DOC_ScrollGesture_ScrollGesture \
"Constructs an gesture to track when the user clicked the mouse.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `onEnded :`\n" \
" Function that is called when the user clicked the mouse button.\n"
| 2,366 | C | 51.599999 | 120 | 0.509298 |
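ScrollGesture's binding further below exposes `mouse_button`, `modifiers`, and `on_ended_fn`. A minimal sketch attaching it to a shape; the Rectangle keywords come from its own binding later in this listing:

```python
from omni.ui import scene as sc

def scrolled(sender):
    # `sender` is the shape under the cursor when the scroll happened.
    print("scrolled over", sender)

rect = sc.Rectangle(width=2, height=2,
                    gesture=sc.ScrollGesture(on_ended_fn=scrolled))
```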
omniverse-code/kit/include/omni/ui/scene/bind/DocClickGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_ClickGesture \
"The gesture that provides a way to capture click mouse event.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_ClickGesture_preProcess "Called before processing to determine the state of the gesture.\n"
#define OMNIUI_PYBIND_DOC_ClickGesture_process "Process the gesture and call callbacks if necessary.\n"
#define OMNIUI_PYBIND_DOC_ClickGesture_onEnded \
"Called if the callback is not set when the user releases the mouse button.\n"
#define OMNIUI_PYBIND_DOC_ClickGesture_mouseButton "The mouse button this gesture is watching.\n"
#define OMNIUI_PYBIND_DOC_ClickGesture_modifiers "The modifier that should be pressed to trigger this gesture.\n"
#define OMNIUI_PYBIND_DOC_ClickGesture_OnEnded "Called when the user releases the button.\n"
#define OMNIUI_PYBIND_DOC_ClickGesture_ClickGesture \
"Constructs an gesture to track when the user clicked the mouse.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `onEnded :`\n" \
" Function that is called when the user clicked the mouse button.\n"
| 2,371 | C | 54.162789 | 120 | 0.482497 |
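The keyword names for ClickGesture are not shown in this excerpt; `on_ended_fn`, `mouse_button`, and `modifiers` are assumed here by analogy with the DragGesture and ScrollGesture bindings below:

```python
from omni.ui import scene as sc

def on_click(sender):
    print("clicked", sender)

# mouse_button=1 meaning the right button is an assumption about the convention used.
shape = sc.Rectangle(width=1, height=1,
                     gesture=sc.ClickGesture(on_ended_fn=on_click, mouse_button=1))
```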
omniverse-code/kit/include/omni/ui/scene/bind/DocLabel.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Label \
"\n" \
"Defines a standard label for user interface items\n"
#define OMNIUI_PYBIND_DOC_Label_text "This property holds the label's text.\n"
#define OMNIUI_PYBIND_DOC_Label_color "The color of the text.\n"
#define OMNIUI_PYBIND_DOC_Label_alignment \
"This property holds the alignment of the label's contents. By default, the contents of the label are left-aligned and vertically-centered.\n"
#define OMNIUI_PYBIND_DOC_Label_size "The font size.\n"
#define OMNIUI_PYBIND_DOC_Label_Label \
"A standard label for user interface items.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `text :`\n" \
" The string with the text to display\n"
| 2,130 | C | 56.594593 | 146 | 0.397183 |
omniverse-code/kit/include/omni/ui/scene/bind/DocImage.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Image \
"\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Image_sourceUrl \
"This property holds the image URL. It can be an \"omni:\" path, a \"file:\" path, a direct path or the path " \
"relative to the application root directory.\n"
#define OMNIUI_PYBIND_DOC_Image_imageProvider \
"This property holds the image provider. It can be an \"omni:\" path, a \"file:\" path, a direct path or the path "\
"relative to the application root directory.\n"
#define OMNIUI_PYBIND_DOC_Image_fillPolicy \
"Define what happens when the source image has a different size than the item.\n"
#define OMNIUI_PYBIND_DOC_Image_Image "Created an image with the given URL.\n"
#define OMNIUI_PYBIND_DOC_Image_Image01 "Created an image with the given provider.\n"
#define OMNIUI_PYBIND_DOC_Image_Image2 "Created an empty image.\n"
| 1,743 | C | 46.135134 | 120 | 0.557659 |
omniverse-code/kit/include/omni/ui/scene/bind/BindPoints.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractShape.h"
#include "DocPoints.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Points \
OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CALL(positions, setPositions, pythonListToVector3) \
OMNIUI_PYBIND_INIT_CALL(colors, setColors, pythonListToVector4) \
OMNIUI_PYBIND_INIT_CAST(sizes, setSizes, std::vector<float>) \
OMNIUI_PYBIND_INIT_CAST(intersection_sizes, setIntersectionSize, float)
#define OMNIUI_PYBIND_KWARGS_DOC_Points \
"\n `positions : `\n " \
OMNIUI_PYBIND_DOC_Points_positions \
"\n `colors : `\n " \
OMNIUI_PYBIND_DOC_Points_colors \
"\n `sizes : `\n " \
OMNIUI_PYBIND_DOC_Points_sizes \
"\n `intersection_sizes : `\n " \
OMNIUI_PYBIND_DOC_Points_intersectionSize \
OMNIUI_PYBIND_KWARGS_DOC_AbstractShape
// clang-format on
| 2,363 | C | 66.542855 | 120 | 0.397799 |
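A sketch of the Points kwargs bound above. `positions` and `colors` go through `pythonListToVector3`/`pythonListToVector4`, so lists of plain tuples are assumed to be accepted; note that despite the plural keyword, `intersection_sizes` is cast to a single float by the binding:

```python
from omni.ui import scene as sc

positions = [(0, 0, 0), (1, 0, 0), (0, 1, 0)]

sc.Points(
    positions=positions,
    colors=[(1, 0, 0, 1)] * len(positions),  # one RGBA per point
    sizes=[5.0] * len(positions),            # per-point size
    intersection_sizes=8.0,                  # single float per the binding above
)
```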
omniverse-code/kit/include/omni/ui/scene/bind/DocSceneView.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_SceneView \
"The widget to render omni.ui.scene.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_SceneView_getProjection \
"The camera projection matrix. It's a shortcut for Matrix44(SceneView.model.get_as_floats(\"projection\"))\n"
#define OMNIUI_PYBIND_DOC_SceneView_getView \
"The camera view matrix. It's a shortcut for Matrix44(SceneView.model.get_as_floats(\"view\"))\n"
#define OMNIUI_PYBIND_DOC_SceneView_onModelUpdated \
"Called by the model when the model value is changed. The class should react to the changes.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `item :`\n" \
" The item in the model that is changed. If it's NULL, the root is changed.\n\n"
#define OMNIUI_PYBIND_DOC_SceneView_scene "The container that holds the shapes, gestures and managers.\n"
#define OMNIUI_PYBIND_DOC_SceneView_screenAspectRatio \
"Aspect ratio of the rendering screen. This screen will be fit to the widget. " \
"SceneView simulates the behavior of the Kit viewport where the rendered image (screen) fits into the viewport " \
"(widget), and the camera has multiple policies that modify the camera projection matrix's aspect ratio to match " \
"it to the screen aspect ratio. " \
"When screen_aspect_ratio is 0, Screen size matches the Widget bounds.\n"
#define OMNIUI_PYBIND_DOC_SceneView_childWindowsInput \
"When it's false, the mouse events from other widgets inside the bounds are ignored. We need it to filter out mouse events from mouse events of widgets in `ui.VStack(content_clipping=1)`.\n"
#define OMNIUI_PYBIND_DOC_SceneView_aspectRatioPolicy \
"Define what happens when the aspect ratio of the camera is different from the aspect ratio of the widget.\n"
#define OMNIUI_PYBIND_DOC_SceneView_SceneView "Constructor.\n"
| 3,513 | C | 64.074073 | 194 | 0.478508 |
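A sketch that builds content under a SceneView and reads the camera matrices through the `model.get_as_floats` shortcut quoted above; using the view's `scene` container as a `with` block follows the usual omni.ui container pattern and is an assumption here:

```python
from omni.ui import scene as sc

scene_view = sc.SceneView()

# `scene` is the container that holds the shapes, gestures and managers.
with scene_view.scene:
    sc.Line((0, 0, 0), (0, 0, 1), color=(0, 0, 1, 1))

# The projection/view matrices live in the view's model as flat float lists.
projection = sc.Matrix44(scene_view.model.get_as_floats("projection"))
view = sc.Matrix44(scene_view.model.get_as_floats("view"))
```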
omniverse-code/kit/include/omni/ui/scene/bind/DocDoubleClickGesture.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_DoubleClickGesture \
"The gesture that provides a way to capture double clicks.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_DoubleClickGesture_preProcess \
"Called before processing to determine the state of the gesture.\n"
#define OMNIUI_PYBIND_DOC_DoubleClickGesture_DoubleClickGesture \
"Construct the gesture to track double clicks.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `onEnded :`\n" \
" Called when the user double clicked\n"
| 1,800 | C | 63.321426 | 120 | 0.388333 |
omniverse-code/kit/include/omni/ui/scene/bind/DocCameraModel.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_CameraModel \
"\n" \
"A model that holds projection and view matrices\n"
#define OMNIUI_PYBIND_DOC_CameraModel_CameraModel "Initialize the camera with the given projection/view matrices.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_getItem "Returns the items that represents the identifier.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_getAsFloats "Returns the float values of the item.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_getAsInts "Returns the int values of the item.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_setFloats "Sets the float values of the item.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_setInts "Sets the int values of the item.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_getProjection "The camera projection matrix.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_getView "The camera view matrix.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_setProjection "Set the camera projection matrix.\n"
#define OMNIUI_PYBIND_DOC_CameraModel_setView "Set the camera view matrix.\n"
| 1,661 | C | 36.772726 | 120 | 0.69416 |
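A hedged sketch constructing the camera model described above from flat 16-float lists; the keyword spelling follows the kwargs doc in BindCameraModel.h further below, and the snake_case `set_view` name is an assumption:

```python
from omni.ui import scene as sc

identity = [1, 0, 0, 0,
            0, 1, 0, 0,
            0, 0, 1, 0,
            0, 0, 0, 1]

# Initialize the camera with the given projection/view matrices.
camera = sc.CameraModel(projection=identity, view=identity)

# Later updates go through the setters documented above.
camera.set_view(identity)
```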
omniverse-code/kit/include/omni/ui/scene/bind/DocManipulator.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Manipulator \
"The base object for the custom manipulators.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Manipulator_onBuild "Called when Manipulator is dirty to build the content.\n"
#define OMNIUI_PYBIND_DOC_Manipulator_invalidate \
"Make Manipulator dirty so onBuild will be executed in _preDrawContent.\n"
#define OMNIUI_PYBIND_DOC_Manipulator_onModelUpdated \
"Called by the model when the model value is changed. The class should react to the changes.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `item :`\n" \
" The item in the model that is changed. If it's NULL, the root is changed.\n"
#define OMNIUI_PYBIND_DOC_Manipulator_getGestures "All the gestures assigned to this manipulator.\n"
#define OMNIUI_PYBIND_DOC_Manipulator_setGestures "Replace the gestures of the manipulator.\n"
#define OMNIUI_PYBIND_DOC_Manipulator_addGesture "Add a single gesture to the manipulator.\n"
#define OMNIUI_PYBIND_DOC_Manipulator_OnBuild \
"Called when Manipulator is dirty to build the content. It's another way to build the manipulator's content on the case the user doesn't want to reimplement the class.\n"
#define OMNIUI_PYBIND_DOC_Manipulator_Manipulator "Constructor.\n"
| 2,610 | C | 54.55319 | 174 | 0.488123 |
omniverse-code/kit/include/omni/ui/scene/bind/BindDragGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindShapeGesture.h"
#include "DocDragGesture.h"
#include <omni/ui/scene/DragGesture.h>
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::DragGesture, DragGesture);
// clang-format off
#define OMNIUI_PYBIND_INIT_PyDragGesture \
OMNIUI_PYBIND_INIT_CAST(mouse_button, setMouseButton, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(modifiers, setModifiers, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(check_mouse_moved, setCheckMouseMoved, bool) \
OMNIUI_PYBIND_INIT_CALLBACK(on_began_fn, setOnBeganFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_CALLBACK(on_changed_fn, setOnChangedFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_CALLBACK(on_ended_fn, setOnEndedFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_ShapeGesture
#define OMNIUI_PYBIND_KWARGS_DOC_DragGesture \
"\n `mouse_button : `\n " \
OMNIUI_PYBIND_DOC_DragGesture_mouseButton \
"\n `modifiers : `\n " \
OMNIUI_PYBIND_DOC_DragGesture_modifiers \
"\n `check_mouse_moved : `\n " \
OMNIUI_PYBIND_DOC_DragGesture_checkMouseMoved \
"\n `on_began_fn : `\n " \
OMNIUI_PYBIND_DOC_DragGesture_onBegan \
"\n `on_changed_fn : `\n " \
OMNIUI_PYBIND_DOC_DragGesture_onChanged \
"\n `on_ended_fn : `\n " \
OMNIUI_PYBIND_DOC_DragGesture_onEnded \
OMNIUI_PYBIND_KWARGS_DOC_ShapeGesture
// clang-format on
| 3,163 | C | 69.31111 | 120 | 0.413215 |
omniverse-code/kit/include/omni/ui/scene/bind/DocDragGesture.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_DragGesture \
"The gesture that provides a way to capture click-and-drag mouse event.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_DragGesture_preProcess "Called before processing to determine the state of the gesture.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_process "Process the gesture and call callbacks if necessary.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_onBegan \
"Called if the callback is not set when the user clicks the mouse button.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_onChanged \
"Called if the callback is not set when the user moves the clicked button.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_onEnded \
"Called if the callback is not set when the user releases the mouse button.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_mouseButton "Mouse button that should be active to start the gesture.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_modifiers "The keyboard modifier that should be active to start the gesture.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_checkMouseMoved \
"The check_mouse_moved property is a boolean flag that determines whether the DragGesture should verify if the 2D screen position of the mouse has changed before invoking the on_changed method. This property is essential in a 3D environment, as changes in the camera position can result in the mouse pointing to different locations in the 3D world even when the 2D screen position remains unchanged.\n\n" \
"Usage\n" \
"When check_mouse_moved is set to True, the DragGesture will only call the on_changed method if the actual 2D screen position of the mouse has changed. This can be useful when you want to ensure that the on_changed method is only triggered when there is a genuine change in the mouse's 2D screen position.\n" \
"If check_mouse_moved is set to False, the DragGesture will not check for changes in the mouse's 2D screen position before calling the on_changed method. This can be useful when you want the on_changed method to be invoked even if the mouse's 2D screen position hasn't changed, such as when the camera position is altered, and the mouse now points to a different location in the 3D world.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_OnBegan "Called when the user starts drag.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_OnChanged "Called when the user is dragging.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_OnEnded "Called when the user releases the mouse and finishes the drag.\n"
#define OMNIUI_PYBIND_DOC_DragGesture_DragGesture "Construct the gesture to track mouse drags.\n"
| 4,201 | C | 72.719297 | 410 | 0.562247 |
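A sketch wiring the three DragGesture callbacks bound above to a shape; the payload access is omitted since its Python spelling isn't shown in this excerpt:

```python
from omni.ui import scene as sc

def began(sender):
    print("drag began")

def changed(sender):
    # With check_mouse_moved=True this only fires when the 2D mouse position
    # actually changed, as described above.
    print("dragging")

def ended(sender):
    print("drag ended")

drag = sc.DragGesture(
    on_began_fn=began,
    on_changed_fn=changed,
    on_ended_fn=ended,
    check_mouse_moved=True,
)
sc.Rectangle(width=1, height=1, gesture=drag)
```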
omniverse-code/kit/include/omni/ui/scene/bind/DocPoints.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Points \
"Represents the point cloud.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Points_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_Points_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
#define OMNIUI_PYBIND_DOC_Points_getIntersectionDistance \
"The distance in pixels from mouse pointer to the shape for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Points_positions "List with positions of the points.\n"
#define OMNIUI_PYBIND_DOC_Points_colors "List of colors of the points.\n"
#define OMNIUI_PYBIND_DOC_Points_sizes "List of point sizes.\n"
#define OMNIUI_PYBIND_DOC_Points_intersectionSize "The size of the points for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Points_Points \
"Constructs the point cloud object.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `positions :`\n" \
" List of positions\n"
| 2,440 | C | 50.936169 | 120 | 0.442213 |
omniverse-code/kit/include/omni/ui/scene/bind/DocMatrix44.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Matrix44 \
"Stores a 4x4 matrix of float elements. A basic type.\n" \
"Matrices are defined to be in row-major order.\n" \
"The matrix mode is required to define the matrix that resets the transformation to fit the geometry into NDC, Screen space, or rotate it to the camera direction.\n"
#define OMNIUI_PYBIND_DOC_Matrix44_setLookAtView \
"Rotates the matrix to be aligned with the camera.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `view :`\n" \
" The view matrix of the camera\n"
#define OMNIUI_PYBIND_DOC_Matrix44_getTranslationMatrix \
"Creates a matrix to specify a translation at the given coordinates.\n"
#define OMNIUI_PYBIND_DOC_Matrix44_getRotationMatrix \
"Creates a matrix to specify a rotation around each axis.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `degrees :`\n" \
" true if the angles are specified in degrees\n"
#define OMNIUI_PYBIND_DOC_Matrix44_getScaleMatrix \
"Creates a matrix to specify a scaling with the given scale factor per axis.\n"
| 3,191 | C | 73.232556 | 169 | 0.340019 |
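A sketch composing the helper matrices documented above into a Transform. The snake_case method names, the `(x, y, z, degrees)` argument order for the rotation helper, the use of `*` for matrix multiplication, and the `with` container usage are all assumptions about the binding, not shown in this header:

```python
from omni.ui import scene as sc

# Assumed snake_case spellings of getTranslationMatrix / getRotationMatrix / getScaleMatrix.
translate = sc.Matrix44.get_translation_matrix(1.0, 0.0, 0.0)
rotate = sc.Matrix44.get_rotation_matrix(0.0, 90.0, 0.0, True)   # True: angles in degrees
scale = sc.Matrix44.get_scale_matrix(2.0, 2.0, 2.0)

# Row-major matrices composed and applied to the children of a Transform.
with sc.Transform(transform=translate * rotate * scale):
    sc.Line((0, 0, 0), (1, 0, 0))
```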
omniverse-code/kit/include/omni/ui/scene/bind/DocImageHelper.h | // Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_ImageHelper \
"The helper class for the widgets that are working with image, e.g. sc.Image and sc.TexturedMesh.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_ImageHelper_imageWidth "The resolution for rasterization of svg and for ImageProvider.\n"
#define OMNIUI_PYBIND_DOC_ImageHelper_imageHeight "The resolution of rasterization of svg and for ImageProvider.\n"
| 940 | C | 46.049998 | 120 | 0.704255 |
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractItem.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_AbstractItem \
"\n" \
"\n"
#define OMNIUI_PYBIND_DOC_AbstractItem_destroy "Removes all the callbacks and circular references.\n"
#define OMNIUI_PYBIND_DOC_AbstractItem_transformSpace \
"Transform the given point from the coordinate system fromspace to the coordinate system tospace.\n"
#define OMNIUI_PYBIND_DOC_AbstractItem_transformSpace01 \
"Transform the given vector from the coordinate system fromspace to the coordinate system tospace.\n"
#define OMNIUI_PYBIND_DOC_AbstractItem_computeVisibility \
"Calculate the effective visibility of this prim, as defined by its most ancestral invisible opinion, if any.\n"
#define OMNIUI_PYBIND_DOC_AbstractItem_sceneView "The current SceneView this item is parented to.\n"
#define OMNIUI_PYBIND_DOC_AbstractItem_visible "This property holds whether the item is visible.\n"
| 1,705 | C | 47.742856 | 120 | 0.610557 |
omniverse-code/kit/include/omni/ui/scene/bind/BindAbstractGesture.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "DocAbstractGesture.h"
#include <omni/ui/bind/BindUtils.h>
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::AbstractGesture, AbstractGesture);
// clang-format off
#define OMNIUI_PYBIND_INIT_AbstractGesture \
OMNIUI_PYBIND_INIT_CAST(name, setName, std::string) \
OMNIUI_PYBIND_INIT_CAST(manager, setManager, std::shared_ptr<GestureManager>)
#define OMNIUI_PYBIND_KWARGS_DOC_AbstractGesture \
"\n `name : `\n " \
OMNIUI_PYBIND_DOC_AbstractGesture_name \
"\n `manager : `\n " \
OMNIUI_PYBIND_DOC_AbstractGesture_getManager
// clang-format on
| 1,506 | C | 49.233332 | 120 | 0.518592 |
omniverse-code/kit/include/omni/ui/scene/bind/BindImage.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindRectangle.h"
#include "DocImage.h"
#include "DocImageHelper.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Image \
OMNIUI_PYBIND_INIT_Rectangle \
OMNIUI_PYBIND_INIT_CAST(source_url, setSourceUrl, std::string) \
OMNIUI_PYBIND_INIT_CAST(image_provider, setImageProvider, std::shared_ptr<ImageProvider>) \
OMNIUI_PYBIND_INIT_CAST(fill_policy, setFillPolicy, Image::FillPolicy) \
OMNIUI_PYBIND_INIT_CAST(image_width, setImageWidth, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(image_height, setImageHeight, uint32_t)
#define OMNIUI_PYBIND_KWARGS_DOC_Image \
"\n `source_url : `\n " \
OMNIUI_PYBIND_DOC_Image_sourceUrl \
"\n `image_provider : `\n " \
OMNIUI_PYBIND_DOC_Image_imageProvider \
"\n `fill_policy : `\n " \
OMNIUI_PYBIND_DOC_Image_fillPolicy \
"\n `image_width : `\n " \
OMNIUI_PYBIND_DOC_ImageHelper_imageWidth \
"\n `image_height : `\n " \
OMNIUI_PYBIND_DOC_ImageHelper_imageHeight \
OMNIUI_PYBIND_KWARGS_DOC_Rectangle
// clang-format on
| 2,770 | C | 70.05128 | 120 | 0.387004 |
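A sketch of the Image kwargs bound above. The positional URL form follows the constructor doc, the path is purely illustrative, and the `FillPolicy` member name is an assumption:

```python
from omni.ui import scene as sc

# An image-textured rectangle; source_url may be an "omni:", "file:", direct,
# or application-relative path, per the docs above.
sc.Image(
    "file:///tmp/icon.png",                               # illustrative path
    width=1.0,
    height=1.0,
    fill_policy=sc.Image.FillPolicy.PRESERVE_ASPECT_FIT,  # assumed member name
    image_width=256,                                       # rasterization resolution
    image_height=256,
)
```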
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractManipulatorModel.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel \
"\n" \
"Bridge to data.\n" \
"Operates with double and int arrays.\n" \
"No strings.\n" \
"No tree, it's a flat list of items.\n" \
"Manipulator requires the model has specific items.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_getItem "Returns the items that represents the identifier.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_getAsFloats "Returns the Float values of the item.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_getAsInts "Returns the int values of the item.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_setFloats "Sets the Float values of the item.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_setInts "Sets the int values of the item.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_subscribe \
"Subscribe ManipulatorModelHelper to the changes of the model.\n" \
"We need to use regular pointers because we subscribe in the constructor of the widget and unsubscribe in the destructor. In constructor smart pointers are not available. We also don't allow copy and move of the widget.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_unsubscribe \
"Unsubscribe the ItemModelHelper widget from the changes of the model.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_addItemChangedFn \
"Adds the function that will be called every time the value changes.\n" \
"The id of the callback that is used to remove the callback.\n"
#define OMNIUI_PYBIND_DOC_AbstractManipulatorModel_removeItemChangedFn \
"Remove the callback by its id.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `id :`\n" \
" The id that addValueChangedFn returns.\n"
| 3,651 | C | 63.070174 | 226 | 0.455218 |
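A sketch of how application code might read and react to such a model. The `get_as_floats` spelling matches the SceneView docs above (`model.get_as_floats("projection")`); `get_item`, `add_item_changed_fn`, `remove_item_changed_fn`, the callback signature, and the `"position"` identifier are assumptions used for illustration:

```python
# `model` is assumed to be an AbstractManipulatorModel provided by the application.
def on_model_changed(model, item):
    # If `item` is None the root of the model changed, per the docs above.
    print("model changed:", model.get_as_floats(item) if item else "root")

sub_id = model.add_item_changed_fn(on_model_changed)

# Look up an item by identifier and read its float values (identifier is illustrative).
position_item = model.get_item("position")
position = model.get_as_floats(position_item)

# Remove the callback with the id returned above.
model.remove_item_changed_fn(sub_id)
```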
omniverse-code/kit/include/omni/ui/scene/bind/BindCameraModel.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "DocCameraModel.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_CameraModel
#define OMNIUI_PYBIND_KWARGS_DOC_CameraModel \
"\n `projection : `\n " \
OMNIUI_PYBIND_DOC_CameraModel_getProjection \
"\n `view : `\n " \
OMNIUI_PYBIND_DOC_CameraModel_getView
// clang-format on
| 1,088 | C | 46.347824 | 120 | 0.523897 |
omniverse-code/kit/include/omni/ui/scene/bind/DocWidget.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Widget \
"The shape that contains the omni.ui widgets. It automatically creates IAppWindow and transfers its content to the texture of the rectangle. It interacts with the mouse and sends the mouse events to the underlying window, so interacting with the UI on this rectangle is smooth for the user.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Widget_getFrame "Return the main frame of the widget.\n"
#define OMNIUI_PYBIND_DOC_Widget_invalidate \
"Rebuild and recapture the widgets at the next frame. If\n" \
"frame\n" \
"build_fn\n"
#define OMNIUI_PYBIND_DOC_Widget_fillPolicy \
"Define what happens when the source image has a different size than the item.\n"
#define OMNIUI_PYBIND_DOC_Widget_updatePolicy "Define when to redraw the widget.\n"
#define OMNIUI_PYBIND_DOC_Widget_resolutionScale "The resolution scale of the widget.\n"
#define OMNIUI_PYBIND_DOC_Widget_resolutionWidth "The resolution of the widget framebuffer.\n"
#define OMNIUI_PYBIND_DOC_Widget_resolutionHeight "The resolution of the widget framebuffer.\n"
#define OMNIUI_PYBIND_DOC_Widget_Widget "Created an empty image.\n"
| 2,182 | C | 50.976189 | 299 | 0.567369 |
omniverse-code/kit/include/omni/ui/scene/bind/DocLine.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Line \
"\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Line_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_Line_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
#define OMNIUI_PYBIND_DOC_Line_getIntersectionDistance \
"The distance in pixels from mouse pointer to the shape for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Line_start "The start point of the line.\n"
#define OMNIUI_PYBIND_DOC_Line_end "The end point of the line.\n"
#define OMNIUI_PYBIND_DOC_Line_color "The line color.\n"
#define OMNIUI_PYBIND_DOC_Line_thickness "The line thickness.\n"
#define OMNIUI_PYBIND_DOC_Line_intersectionThickness "The thickness of the line for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Line_Line \
"A simple line.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `start :`\n" \
" The start point of the line\n" \
"\n" \
" `end :`\n" \
" The end point of the line\n"
| 2,855 | C | 52.886791 | 120 | 0.380385 |
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_AbstractGesture \
"The base class for the gestures to provides a way to capture mouse events in 3d scene.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_setManager "Set the Manager that controls this gesture.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_getManager "The Manager that controls this gesture.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_dispatchInput \
"Called by scene to process the mouse inputs and do intersections with shapes. It can be an entry point to simulate the mouse input.\n" \
"Todo\n" \
"We probably don't need projection-view here. We can get it from manager.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_preProcess \
"Called before the processing to determine the state of the gesture.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_process "Process the gesture and call callbacks if necessary.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_postProcess "Gestures are finished. Clean-up.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_getState "Get the internal state of the gesture.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_setState \
"Set the internal state of the gesture. It's the way to cancel, prevent, or restore the gesture.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_isStateChanged \
"Returns true if the gesture is just changed at the current frame. If the state is not changed,\n" \
"process()\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_getSender "Returns the relevant shape driving the gesture.\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_getGesturePayload \
"Shortcut for sender.get_gesturePayload.\n" \
"OMNIUI_SCENE_API const*\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_getGesturePayload01 \
"Shortcut for sender.get_gesturePayload.\n" \
"OMNIUI_SCENE_API const*\n"
#define OMNIUI_PYBIND_DOC_AbstractGesture_name "The name of the object. It's used for debugging.\n"
| 3,156 | C | 48.328124 | 141 | 0.576996 |
omniverse-code/kit/include/omni/ui/scene/bind/BindLine.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractShape.h"
#include "DocLine.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Line \
OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CALL(start, setStart, pythonToVector3) \
OMNIUI_PYBIND_INIT_CALL(end, setEnd, pythonToVector3) \
OMNIUI_PYBIND_INIT_CALL(color, setColor, pythonToColor4) \
OMNIUI_PYBIND_INIT_CAST(thickness, setThickness, float) \
OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float)
#define OMNIUI_PYBIND_KWARGS_DOC_Line \
"\n `start : `\n " \
OMNIUI_PYBIND_DOC_Line_start \
"\n `end : `\n " \
OMNIUI_PYBIND_DOC_Line_end \
"\n `color : `\n " \
OMNIUI_PYBIND_DOC_Line_color \
"\n `thickness : `\n " \
OMNIUI_PYBIND_DOC_Line_thickness \
"\n `intersection_thickness : `\n " \
OMNIUI_PYBIND_DOC_Line_intersectionThickness \
OMNIUI_PYBIND_KWARGS_DOC_AbstractShape
// clang-format on
| 2,733 | C | 70.947367 | 120 | 0.363337 |
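A sketch of the Line kwargs bound above; `start` and `end` go through `pythonToVector3`, so 3-tuples are assumed, and the positional start/end form follows the constructor doc:

```python
from omni.ui import scene as sc

sc.Line(
    (0.0, 0.0, 0.0),             # start
    (0.0, 1.0, 0.0),             # end
    color=(0.0, 1.0, 0.0, 1.0),  # RGBA via pythonToColor4
    thickness=2.0,
    intersection_thickness=6.0,  # wider hit area for gestures
)
```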
omniverse-code/kit/include/omni/ui/scene/bind/BindScrollGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindShapeGesture.h"
#include "DocScrollGesture.h"
#include <omni/ui/scene/ScrollGesture.h>
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::ScrollGesture, ScrollGesture);
// clang-format off
#define OMNIUI_PYBIND_INIT_PyScrollGesture \
OMNIUI_PYBIND_INIT_CAST(mouse_button, setMouseButton, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(modifiers, setModifiers, uint32_t) \
OMNIUI_PYBIND_INIT_CALLBACK(on_ended_fn, setOnEndedFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_ShapeGesture
#define OMNIUI_PYBIND_KWARGS_DOC_ScrollGesture \
"\n `mouse_button : `\n " \
OMNIUI_PYBIND_DOC_ScrollGesture_mouseButton \
"\n `modifiers : `\n " \
OMNIUI_PYBIND_DOC_ScrollGesture_modifiers \
"\n `on_ended_fn : `\n " \
OMNIUI_PYBIND_DOC_ScrollGesture_OnEnded \
OMNIUI_PYBIND_KWARGS_DOC_ShapeGesture
// clang-format on
| 2,082 | C | 56.86111 | 120 | 0.470701 |
omniverse-code/kit/include/omni/ui/scene/bind/BindRectangle.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractShape.h"
#include "DocRectangle.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Rectangle \
OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CAST(width, setWidth, Float) \
OMNIUI_PYBIND_INIT_CAST(height, setHeight, Float) \
OMNIUI_PYBIND_INIT_CAST(thickness, setThickness, float) \
OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \
OMNIUI_PYBIND_INIT_CALL(color, setColor, pythonToColor4) \
OMNIUI_PYBIND_INIT_CAST(axis, setAxis, uint8_t) \
OMNIUI_PYBIND_INIT_CAST(wireframe, setWireframe, bool)
#define OMNIUI_PYBIND_KWARGS_DOC_Rectangle \
"\n `width : `\n " \
OMNIUI_PYBIND_DOC_Rectangle_width \
"\n `height : `\n " \
OMNIUI_PYBIND_DOC_Rectangle_height \
"\n `thickness : `\n " \
OMNIUI_PYBIND_DOC_Rectangle_thickness \
"\n `intersection_thickness : `\n " \
OMNIUI_PYBIND_DOC_Rectangle_intersectionThickness \
"\n `color : `\n " \
OMNIUI_PYBIND_DOC_Rectangle_color \
"\n `axis : `\n " \
OMNIUI_PYBIND_DOC_Rectangle_axis \
"\n `wireframe : `\n " \
OMNIUI_PYBIND_DOC_Rectangle_wireframe \
OMNIUI_PYBIND_KWARGS_DOC_AbstractShape
// clang-format on
| 3,438 | C | 77.159089 | 120 | 0.344677 |
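A sketch of the Rectangle kwargs bound above; the `axis` value is illustrative (the binding only says it is cast to uint8_t):

```python
from omni.ui import scene as sc

sc.Rectangle(
    width=2.0,
    height=1.0,
    color=(0.2, 0.4, 1.0, 1.0),
    thickness=1.0,
    wireframe=True,   # draw only the outline
    axis=2,           # illustrative value; cast to uint8_t by the binding
)
```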
omniverse-code/kit/include/omni/ui/scene/bind/DocHoverGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_HoverGesture \
"The gesture that provides a way to capture event when mouse enters/leaves the item.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_preProcess "Called before processing to determine the state of the gesture.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_process "Process the gesture and call callbacks if necessary.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_onBegan "Called if the callback is not set and the mouse enters the item.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_onChanged \
"Called if the callback is not set and the mouse is hovering the item.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_onEnded "Called if the callback is not set and the mouse leaves the item.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_mouseButton "The mouse button this gesture is watching.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_modifiers "The modifier that should be pressed to trigger this gesture.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_triggerOnViewHover "Determines whether the gesture is triggered only when the SceneView is being hovered by the mouse.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_OnBegan "Called when the mouse enters the item.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_OnChanged "Called when the mouse is hovering the item.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_OnEnded "Called when the mouse leaves the item.\n"
#define OMNIUI_PYBIND_DOC_HoverGesture_HoverGesture \
"Constructs an gesture to track when the user clicked the mouse.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `onEnded :`\n" \
" Function that is called when the user clicked the mouse button.\n"
| 2,953 | C | 49.931034 | 160 | 0.549949 |
omniverse-code/kit/include/omni/ui/scene/bind/BindAbstractItem.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "DocAbstractItem.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_AbstractItem OMNIUI_PYBIND_INIT_CAST(visible, setVisible, bool)
#define OMNIUI_PYBIND_KWARGS_DOC_AbstractItem \
"\n `visible : `\n " \
OMNIUI_PYBIND_DOC_AbstractItem_visible
// clang-format on
| 821 | C | 36.363635 | 90 | 0.690621 |
omniverse-code/kit/include/omni/ui/scene/bind/BindPolygonMesh.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractShape.h"
#include "DocPolygonMesh.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_PolygonMesh \
OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CALL(positions, setPositions, pythonListToVector3) \
OMNIUI_PYBIND_INIT_CALL(colors, setColors, pythonListToVector4) \
OMNIUI_PYBIND_INIT_CAST(vertex_counts, setVertexCounts, std::vector<uint32_t>) \
OMNIUI_PYBIND_INIT_CAST(vertex_indices, setVertexIndices, std::vector<uint32_t>) \
OMNIUI_PYBIND_INIT_CAST(thicknesses, setThicknesses, std::vector<float>) \
OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \
OMNIUI_PYBIND_INIT_CAST(wireframe, setWireframe, bool)
#define OMNIUI_PYBIND_KWARGS_DOC_PolygonMesh \
"\n `positions : `\n " \
OMNIUI_PYBIND_DOC_PolygonMesh_positions \
"\n `colors : `\n " \
OMNIUI_PYBIND_DOC_PolygonMesh_colors \
"\n `vertex_counts : `\n " \
OMNIUI_PYBIND_DOC_PolygonMesh_vertexCounts \
"\n `vertex_indices : `\n " \
OMNIUI_PYBIND_DOC_PolygonMesh_vertexIndices \
"\n `thicknesses : `\n " \
OMNIUI_PYBIND_DOC_PolygonMesh_thicknesses \
"\n `intersection_thickness : `\n " \
OMNIUI_PYBIND_DOC_PolygonMesh_intersectionThickness \
"\n `wireframe: `\n " \
OMNIUI_PYBIND_DOC_PolygonMesh_wireframe \
OMNIUI_PYBIND_KWARGS_DOC_AbstractShape
// clang-format on
| 3,440 | C | 77.204544 | 120 | 0.390116 |
omniverse-code/kit/include/omni/ui/scene/bind/BindCurve.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractShape.h"
#include "DocCurve.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Curve \
OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CALL(positions, setPositions, pythonListToVector3) \
OMNIUI_PYBIND_INIT_CALL(colors, setColors, pythonListToVector4) \
OMNIUI_PYBIND_INIT_CAST(thicknesses, setThicknesses, std::vector<float>) \
OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \
OMNIUI_PYBIND_INIT_CAST(curve_type, setCurveType, Curve::CurveType) \
OMNIUI_PYBIND_INIT_CAST(tessellation, setTessellation, uint16_t)
#define OMNIUI_PYBIND_KWARGS_DOC_Curve \
"\n `positions : `\n " \
OMNIUI_PYBIND_DOC_Curve_positions \
"\n `colors : `\n " \
OMNIUI_PYBIND_DOC_Curve_colors \
"\n `thicknesses : `\n " \
OMNIUI_PYBIND_DOC_Curve_thicknesses \
"\n `intersection_thickness : `\n " \
OMNIUI_PYBIND_DOC_Curve_intersectionThickness \
"\n `curve_type : `\n " \
OMNIUI_PYBIND_DOC_Curve_curveType \
"\n `tessellation : `\n " \
OMNIUI_PYBIND_DOC_Curve_tessellation \
OMNIUI_PYBIND_KWARGS_DOC_AbstractShape
// clang-format on
| 3,081 | C | 74.17073 | 120 | 0.379747 |
omniverse-code/kit/include/omni/ui/scene/bind/DocArc.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Arc \
"\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Arc_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_Arc_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
#define OMNIUI_PYBIND_DOC_Arc_getIntersectionDistance \
"The distance in pixels from mouse pointer to the shape for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Arc_radius "The radius of the circle.\n"
#define OMNIUI_PYBIND_DOC_Arc_begin \
"The start angle of the arc. " \
"Angle placement and directions are (0 to 90): Y to Z, Z to X, X to Y\n"
#define OMNIUI_PYBIND_DOC_Arc_end \
"The end angle of the arc. " \
"Angle placement and directions are (0 to 90): Y to Z, Z to X, X to Y\n"
#define OMNIUI_PYBIND_DOC_Arc_thickness "The thickness of the line.\n"
#define OMNIUI_PYBIND_DOC_Arc_intersectionThickness "The thickness of the line for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Arc_color "The color of the line.\n"
#define OMNIUI_PYBIND_DOC_Arc_tesselation "Number of points on the curve.\n"
#define OMNIUI_PYBIND_DOC_Arc_wireframe "When true, it's a line. When false it's a mesh.\n"
#define OMNIUI_PYBIND_DOC_Arc_sector "Draw two radii of the circle.\n"
#define OMNIUI_PYBIND_DOC_Arc_axis "The axis the circle plane is perpendicular to.\n"
#define OMNIUI_PYBIND_DOC_Arc_culling "The face culling mode that determines which side of the shape is drawn.\n"
#define OMNIUI_PYBIND_DOC_Arc_Arc "Constructs Arc.\n"
| 2,645 | C | 39.707692 | 120 | 0.553875 |
omniverse-code/kit/include/omni/ui/scene/bind/DocManipulatorModelHelper.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_ManipulatorModelHelper \
"The ManipulatorModelHelper class provides the basic model functionality.\n"
#define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_onModelUpdated \
"Called by the model when the model value is changed. The class should react to the changes.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `item :`\n" \
" The item in the model that is changed. If it's NULL, the root is changed.\n"
#define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_getRayFromNdc "Convert NDC 2D [-1..1] coordinates to 3D ray.\n"
#define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_setModel "Set the current model.\n"
#define OMNIUI_PYBIND_DOC_ManipulatorModelHelper_getModel "Returns the current model.\n"
| 1,887 | C | 57.999998 | 120 | 0.480127 |
omniverse-code/kit/include/omni/ui/scene/bind/BindWidget.h | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindRectangle.h"
#include "DocWidget.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Widget \
OMNIUI_PYBIND_INIT_Rectangle \
OMNIUI_PYBIND_INIT_CAST(fill_policy, setFillPolicy, Widget::FillPolicy) \
OMNIUI_PYBIND_INIT_CAST(update_policy, setUpdatePolicy, Widget::UpdatePolicy) \
OMNIUI_PYBIND_INIT_CAST(resolution_scale, setResolutionScale, float) \
OMNIUI_PYBIND_INIT_CAST(resolution_width, setResolutionWidth, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(resolution_height, setResolutionHeight, uint32_t)
#define OMNIUI_PYBIND_KWARGS_DOC_Widget \
"\n `fill_policy : `\n " \
OMNIUI_PYBIND_DOC_Widget_fillPolicy \
"\n `update_policy : `\n " \
OMNIUI_PYBIND_DOC_Widget_updatePolicy \
"\n `resolution_scale : `\n " \
OMNIUI_PYBIND_DOC_Widget_resolutionScale \
"\n `resolution_width : `\n " \
OMNIUI_PYBIND_DOC_Widget_resolutionWidth \
"\n `resolution_height : `\n " \
OMNIUI_PYBIND_DOC_Widget_resolutionHeight \
OMNIUI_PYBIND_KWARGS_DOC_Rectangle
// clang-format on
| 2,720 | C | 70.605261 | 120 | 0.402941 |
omniverse-code/kit/include/omni/ui/scene/bind/BindHoverGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindShapeGesture.h"
#include "DocHoverGesture.h"
#include <omni/ui/scene/HoverGesture.h>
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::HoverGesture, HoverGesture);
// clang-format off
#define OMNIUI_PYBIND_INIT_PyHoverGesture \
OMNIUI_PYBIND_INIT_CAST(mouse_button, setMouseButton, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(modifiers, setModifiers, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(trigger_on_view_hover, setTriggerOnViewHover, bool) \
OMNIUI_PYBIND_INIT_CALLBACK(on_began_fn, setOnBeganFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_CALLBACK(on_changed_fn, setOnChangedFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_CALLBACK(on_ended_fn, setOnEndedFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_ShapeGesture
#define OMNIUI_PYBIND_KWARGS_DOC_HoverGesture \
"\n `mouse_button : `\n " \
OMNIUI_PYBIND_DOC_HoverGesture_mouseButton \
"\n `modifiers : `\n " \
OMNIUI_PYBIND_DOC_HoverGesture_modifiers \
"\n `trigger_on_view_hover : `\n " \
OMNIUI_PYBIND_DOC_HoverGesture_triggerOnViewHover \
"\n `on_began_fn : `\n " \
OMNIUI_PYBIND_DOC_HoverGesture_onBegan \
"\n `on_changed_fn : `\n " \
OMNIUI_PYBIND_DOC_HoverGesture_onChanged \
"\n `on_ended_fn : `\n " \
OMNIUI_PYBIND_DOC_HoverGesture_onEnded \
OMNIUI_PYBIND_KWARGS_DOC_ShapeGesture
// clang-format on
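//
// Illustrative sketch (not part of the generated bindings): how the C++ setters named in the
// macro above map onto a HoverGesture. The construction call and the shape it is attached to
// are assumptions for illustration; only the setter names and the callback signature
// void(AbstractShape const*) come from this file.
//
//   auto hover = std::make_shared<OMNIUI_SCENE_NS::HoverGesture>();       // creation method assumed
//   hover->setTriggerOnViewHover(true);
//   hover->setOnBeganFn([](OMNIUI_SCENE_NS::AbstractShape const*) { /* mouse entered the shape */ });
//   hover->setOnChangedFn([](OMNIUI_SCENE_NS::AbstractShape const*) { /* mouse is hovering */ });
//   hover->setOnEndedFn([](OMNIUI_SCENE_NS::AbstractShape const*) { /* mouse left the shape */ });
//   someShape->addGesture(hover);                                         // shape instance assumed
//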
| 3,167 | C | 69.399998 | 120 | 0.420272 |
omniverse-code/kit/include/omni/ui/scene/bind/DocScene.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Scene "Top level module string\n" \
"Represents the root of the scene and holds the shapes, gestures and managers.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Scene_Scene "Constructor"
#define OMNIUI_PYBIND_DOC_Scene_getDrawListBufferCount "Return the number of buffers used. Used for unit testing."
| 874 | C | 47.611108 | 120 | 0.696796 |
omniverse-code/kit/include/omni/ui/scene/bind/DocAbstractShape.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_AbstractShape \
"Base class for all the items that can be drawn and intersected with mouse pointer.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_AbstractShape_getGestures "All the gestures assigned to this shape.\n"
#define OMNIUI_PYBIND_DOC_AbstractShape_setGestures "Replace the gestures of the shape.\n"
#define OMNIUI_PYBIND_DOC_AbstractShape_addGesture "Add a single gesture to the shape.\n"
#define OMNIUI_PYBIND_DOC_AbstractShape_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_AbstractShape_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
| 1,311 | C | 42.733332 | 120 | 0.688024 |
omniverse-code/kit/include/omni/ui/scene/bind/BindTexturedMesh.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindPolygonMesh.h"
#include "DocTexturedMesh.h"
#include "DocImageHelper.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_TexturedMesh \
OMNIUI_PYBIND_INIT_PolygonMesh \
OMNIUI_PYBIND_INIT_CALL(uvs, setUvs, pythonListToVector2) \
OMNIUI_PYBIND_INIT_CAST(source_url, setSourceUrl, std::string) \
OMNIUI_PYBIND_INIT_CAST(image_provider, setImageProvider, std::shared_ptr<ImageProvider>) \
OMNIUI_PYBIND_INIT_CAST(image_width, setImageWidth, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(image_height, setImageHeight, uint32_t)
#define OMNIUI_PYBIND_KWARGS_DOC_TexturedMesh \
"\n `uvs : `\n " \
OMNIUI_PYBIND_DOC_TexturedMesh_uvs \
"\n `source_url : `\n " \
OMNIUI_PYBIND_DOC_TexturedMesh_sourceUrl \
"\n `image_provider : `\n " \
OMNIUI_PYBIND_DOC_TexturedMesh_imageProvider \
"\n `image_width : `\n " \
OMNIUI_PYBIND_DOC_ImageHelper_imageWidth \
"\n `image_height : `\n " \
OMNIUI_PYBIND_DOC_ImageHelper_imageHeight \
OMNIUI_PYBIND_KWARGS_DOC_PolygonMesh
// clang-format on
| 2,742 | C | 69.333332 | 120 | 0.399708 |
omniverse-code/kit/include/omni/ui/scene/bind/BindArc.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindAbstractShape.h"
#include "DocArc.h"
// clang-format off
#define OMNIUI_PYBIND_INIT_Arc \
OMNIUI_PYBIND_INIT_AbstractShape \
OMNIUI_PYBIND_INIT_CAST(begin, setBegin, Float) \
OMNIUI_PYBIND_INIT_CAST(end, setEnd, Float) \
OMNIUI_PYBIND_INIT_CAST(thickness, setThickness, float) \
OMNIUI_PYBIND_INIT_CAST(intersection_thickness, setIntersectionThickness, float) \
OMNIUI_PYBIND_INIT_CALL(color, setColor, pythonToColor4) \
OMNIUI_PYBIND_INIT_CAST(tesselation, setTesselation, uint16_t) \
OMNIUI_PYBIND_INIT_CAST(axis, setAxis, uint8_t) \
OMNIUI_PYBIND_INIT_CAST(sector, setSector, bool) \
OMNIUI_PYBIND_INIT_CAST(culling, setCulling, Culling) \
OMNIUI_PYBIND_INIT_CAST(wireframe, setWireframe, bool)
#define OMNIUI_PYBIND_KWARGS_DOC_Arc \
"\n `begin : `\n " \
OMNIUI_PYBIND_DOC_Arc_begin \
"\n `end : `\n " \
OMNIUI_PYBIND_DOC_Arc_end \
"\n `thickness : `\n " \
OMNIUI_PYBIND_DOC_Arc_thickness \
"\n `intersection_thickness : `\n " \
OMNIUI_PYBIND_DOC_Arc_intersectionThickness \
"\n `color : `\n " \
OMNIUI_PYBIND_DOC_Arc_color \
"\n `tesselation : `\n " \
OMNIUI_PYBIND_DOC_Arc_tesselation \
"\n `axis : `\n " \
OMNIUI_PYBIND_DOC_Arc_axis \
"\n `sector : `\n " \
OMNIUI_PYBIND_DOC_Arc_sector \
"\n `culling : `\n " \
OMNIUI_PYBIND_DOC_Arc_culling \
"\n `wireframe : `\n " \
OMNIUI_PYBIND_DOC_Arc_wireframe \
OMNIUI_PYBIND_KWARGS_DOC_AbstractShape
// clang-format on
| 4,521 | C | 84.320753 | 120 | 0.299934 |
omniverse-code/kit/include/omni/ui/scene/bind/DocGestureManager.h | // Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_GestureManager \
"The object that controls batch processing and preventing of gestures. Typically each scene has a default manager and if the user wants to have own prevention logic, he can reimplement it.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_GestureManager_GestureManager "Constructor.\n"
#define OMNIUI_PYBIND_DOC_GestureManager_setView \
"Set the camera.\n" \
"Todo\n" \
"resolution\n"
#define OMNIUI_PYBIND_DOC_GestureManager_preProcess "Process mouse inputs and do all the intersections.\n"
#define OMNIUI_PYBIND_DOC_GestureManager_prevent "Process all the prevention logic and reduce the number of gestures.\n"
#define OMNIUI_PYBIND_DOC_GestureManager_process "Process the gestures.\n"
#define OMNIUI_PYBIND_DOC_GestureManager_postProcess "Clean-up caches, save states.\n"
#define OMNIUI_PYBIND_DOC_GestureManager_canBePrevented \
"Called per gesture. Determines if the gesture can be prevented.\n"
#define OMNIUI_PYBIND_DOC_GestureManager_shouldPrevent \
"Called per gesture. Determines if the gesture should be prevented with another gesture. Useful to resolve intersections.\n"
#define OMNIUI_PYBIND_DOC_GestureManager_amendInput \
"Called once a frame. Should be overriden to inject own input to the gestures.\n"
| 2,371 | C | 49.468084 | 197 | 0.574019 |
omniverse-code/kit/include/omni/ui/scene/bind/DocPolygonMesh.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_PolygonMesh \
"Encodes a mesh.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_getIntersectionDistance \
"The distance in pixels from mouse pointer to the shape for the intersection.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_positions "The primary geometry attribute that describes points in local space.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_colors "Describes colors per vertex.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_vertexCounts \
"Provides the number of vertices in each face of the mesh, which is also the number of consecutive indices in vertex_indices that define the face. The length of this attribute is the number of faces in the mesh.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_vertexIndices \
"Flat list of the index (into the points attribute) of each vertex of each face in the mesh.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_thicknesses "When wireframe is true, it defines the thicknesses of lines.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_intersectionThickness "The thickness of the line for the intersection.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_wireframe "When true, the mesh is drawn as lines.\n"
#define OMNIUI_PYBIND_DOC_PolygonMesh_PolygonMesh \
"Construct a mesh with predefined properties.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `positions :`\n" \
" Describes points in local space.\n" \
"\n" \
" `colors :`\n" \
" Describes colors per vertex.\n" \
"\n" \
" `vertexCounts :`\n" \
" The number of vertices in each face.\n" \
"\n" \
" `vertexIndices :`\n" \
" The list of the index of each vertex of each face in the mesh.\n"
| 4,340 | C | 63.791044 | 218 | 0.404608 |
omniverse-code/kit/include/omni/ui/scene/bind/DocRectangle.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Rectangle \
"\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Rectangle_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_getIntersectionDistance \
"The distance in pixels from mouse pointer to the shape for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_width "The width of the rectangle.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_height "The height of the rectangle.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_thickness "The thickness of the line.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_intersectionThickness "The thickness of the line for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_color "The color of the line.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_wireframe "When true, it's a line. When false it's a mesh.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_axis "The axis the rectangle is perpendicular to.\n"
#define OMNIUI_PYBIND_DOC_Rectangle_Rectangle \
"Construct a rectangle with predefined size.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `width :`\n" \
" The size of the rectangle\n" \
"\n" \
" `height :`\n" \
" The size of the rectangle\n"
| 3,091 | C | 51.406779 | 120 | 0.429311 |
omniverse-code/kit/include/omni/ui/scene/bind/BindClickGesture.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BindShapeGesture.h"
#include "DocClickGesture.h"
#include <omni/ui/scene/ClickGesture.h>
OMNIUI_PROTECT_PYBIND11_OBJECT(OMNIUI_SCENE_NS::ClickGesture, ClickGesture);
// clang-format off
#define OMNIUI_PYBIND_INIT_PyClickGesture \
OMNIUI_PYBIND_INIT_CAST(mouse_button, setMouseButton, uint32_t) \
OMNIUI_PYBIND_INIT_CAST(modifiers, setModifiers, uint32_t) \
OMNIUI_PYBIND_INIT_CALLBACK(on_ended_fn, setOnEndedFn, void(AbstractShape const*)) \
OMNIUI_PYBIND_INIT_ShapeGesture
#define OMNIUI_PYBIND_KWARGS_DOC_ClickGesture \
"\n `mouse_button : `\n " \
OMNIUI_PYBIND_DOC_ClickGesture_mouseButton \
"\n `modifiers : `\n " \
OMNIUI_PYBIND_DOC_ClickGesture_modifiers \
"\n `on_ended_fn : `\n " \
OMNIUI_PYBIND_DOC_ClickGesture_OnEnded \
OMNIUI_PYBIND_KWARGS_DOC_ShapeGesture
// clang-format on
| 2,078 | C | 56.749998 | 120 | 0.467276 |
omniverse-code/kit/include/omni/ui/scene/bind/BindMath.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "DocMatrix44.h"
#include <omni/ui/scene/Math.h>
#include <omni/ui/bind/Pybind.h>
OMNIUI_SCENE_NAMESPACE_OPEN_SCOPE
pybind11::object matrix4ToPython(const Matrix44& matrix);
Matrix44 pythonToMatrix4(const pybind11::handle& obj);
pybind11::object vector2ToPython(const Vector2& vec);
Vector2 pythonToVector2(const pybind11::handle& obj);
pybind11::object vector3ToPython(const Vector3& vec);
Vector3 pythonToVector3(const pybind11::handle& obj);
pybind11::object vector4ToPython(const Vector4& vec);
Vector4 pythonToVector4(const pybind11::handle& obj);
Color4 pythonToColor4(const pybind11::handle& obj);
std::vector<Vector4> pythonListToVector4(const pybind11::handle& obj);
std::vector<Vector3> pythonListToVector3(const pybind11::handle& obj);
std::vector<Vector2> pythonListToVector2(const pybind11::handle& obj);
pybind11::object vector4ToPythonList(const std::vector<Vector4>& vec);
pybind11::object vector3ToPythonList(const std::vector<Vector3>& vec);
pybind11::object vector2ToPythonList(const std::vector<Vector2>& vec);
OMNIUI_SCENE_NAMESPACE_CLOSE_SCOPE
| 1,527 | C | 41.444443 | 77 | 0.800262 |
omniverse-code/kit/include/omni/ui/scene/bind/BindSceneView.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "DocManipulatorModelHelper.h"
#include "DocSceneView.h"
#include <omni/ui/bind/BindWidget.h>
// clang-format off
#define OMNIUI_PYBIND_INIT_SceneView \
OMNIUI_PYBIND_INIT_CAST(aspect_ratio_policy, setAspectRatioPolicy, SceneView::AspectRatioPolicy) \
OMNIUI_PYBIND_INIT_CAST(screen_aspect_ratio, setScreenAspectRatio, float) \
OMNIUI_PYBIND_INIT_CAST(child_windows_input, setChildWindowsInput, bool) \
OMNIUI_PYBIND_INIT_CAST(scene, setScene, std::shared_ptr<Scene>) \
OMNIUI_PYBIND_INIT_CAST(model, setModel, std::shared_ptr<AbstractManipulatorModel>) \
OMNIUI_PYBIND_INIT_Widget
#define OMNIUI_PYBIND_KWARGS_DOC_SceneView \
"\n `aspect_ratio_policy : `\n " \
OMNIUI_PYBIND_DOC_SceneView_aspectRatioPolicy \
"\n `model : `\n " \
OMNIUI_PYBIND_DOC_SceneView_getView \
"\n `screen_aspect_ratio : `\n " \
OMNIUI_PYBIND_DOC_SceneView_screenAspectRatio \
"\n `child_windows_input : `\n " \
OMNIUI_PYBIND_DOC_SceneView_childWindowsInput \
OMNIUI_PYBIND_KWARGS_DOC_Widget
// clang-format on
| 2,478 | C | 67.861109 | 120 | 0.454802 |
omniverse-code/kit/include/omni/ui/scene/bind/DocCurve.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#define OMNIUI_PYBIND_DOC_Curve \
"Represents the curve.\n" \
"\n"
#define OMNIUI_PYBIND_DOC_Curve_getGesturePayload "Contains all the information about the intersection.\n"
#define OMNIUI_PYBIND_DOC_Curve_getGesturePayload01 \
"Contains all the information about the intersection at the specific state.\n"
#define OMNIUI_PYBIND_DOC_Curve_getIntersectionDistance \
"The distance in pixels from mouse pointer to the shape for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Curve_positions \
"The list of positions which defines the curve. It has at least two positions. The curve has len(positions)-1\n"
#define OMNIUI_PYBIND_DOC_Curve_colors \
"The list of colors which defines color per vertex. It has the same length as positions.\n"
#define OMNIUI_PYBIND_DOC_Curve_thicknesses \
"The list of thicknesses which defines thickness per vertex. It has the same length as positions.\n"
#define OMNIUI_PYBIND_DOC_Curve_intersectionThickness "The thickness of the line for the intersection.\n"
#define OMNIUI_PYBIND_DOC_Curve_curveType "The curve interpolation type.\n"
#define OMNIUI_PYBIND_DOC_Curve_tessellation "The number of points per curve segment. It can't be less than 2.\n"
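// Worked example (illustrative, not part of the binding docs): 4 positions define a curve with
// 3 segments; with tessellation = 8 each segment is evaluated with 8 points, so the drawn curve
// uses on the order of 3 * 8 points (how segment endpoints are shared is an implementation detail).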
#define OMNIUI_PYBIND_DOC_Curve_Curve \
"Constructs Curve.\n" \
"\n" \
"\n" \
"### Arguments:\n" \
"\n" \
" `positions :`\n" \
" List of positions\n"
| 3,101 | C | 54.392856 | 120 | 0.44534 |
omniverse-code/kit/include/omni/experimental/job/IJob.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/OmniAttr.h>
#include <omni/core/Interface.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <utility>
#include <type_traits>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
/**
* Interface for providing a CPU affinity mask to the plugin. Instances of this interface can be thought of as an array
* of \c MaskType values, which allows for setting affinities on machines with more than 64 processors. Each affinity
* mask this object contains is a bitmask that represents the associated CPUs.
*
* On Linux, this object is treated as one large bitset analogous to cpu_set_t. So \c get_affinity_mask(0) represents
* CPUs 0-63, \c get_affinity_mask(1) represents CPUs 64-127, etc.
*
* On Windows, each affinity mask in this object applies to its own Processor Group, so \c get_affinity_mask(0) is for
* Processor Group 0, \c get_affinity_mask(1) for Processor Group 1, etc.
*/
template <>
class omni::core::Generated<omni::experimental::job::IAffinityMask_abi> : public omni::experimental::job::IAffinityMask_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::experimental::job::IAffinityMask")
/**
* Gets the affinity mask at \c index.
*
* @note \c index must be less than \ref get_mask_count_abi()
*
* @param index Index to get affinity mask for.
*
* @return The affinity mask at the provided index.
*/
omni::experimental::job::MaskType get_affinity_mask(size_t index) noexcept;
/**
* Gets the affinity \c mask at \c index.
*
* @note \c index must be less than \ref get_mask_count_abi()
*
* @param index Index to set affinity mask for.
* @param mask Mask to set.
*/
void set_affinity_mask(size_t index, omni::experimental::job::MaskType mask) noexcept;
/**
* Gets the current number of affinity masks stored by this object.
*
* @return The current number of affinity masks stored by this object.
*/
size_t get_mask_count() noexcept;
/**
* Gets the default number of affinity masks stored by this object.
*
* @return The default number of affinity masks stored by this object.
*/
size_t get_default_mask_count() noexcept;
/**
* Sets the number of affinity masks stored by this object to \c count.
*
     * If \c count is greater than the current size, the appended affinity masks will be set to \c 0. If \c count
* is less than the current size, then this object will only contain the first \c count elements after this call.
*
* @param count Number of affinity masks to set the size to.
*/
void set_mask_count(size_t count) noexcept;
};
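// Illustrative note (not generated code): with 64-bit masks, CPU N maps to
// get_affinity_mask(N / 64) and bit (N % 64) within that mask. For example, enabling CPU 70
// on Linux means setting bit 6 of the mask at index 1:
//
//   mask->set_mask_count(2);                                               // make sure index 1 exists
//   mask->set_affinity_mask(1, mask->get_affinity_mask(1) | (1ull << 6));
//
// On Windows the same call targets CPU 6 of Processor Group 1 instead, as described above.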
/**
* Basic interface for launching jobs on a foreign job system.
*/
template <>
class omni::core::Generated<omni::experimental::job::IJob_abi> : public omni::experimental::job::IJob_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::experimental::job::IJob")
/**
* Adds a new job to be executed.
*
* @param job_fn User provided function to be executed by a worker.
     * @param job_data User provided data for the job; the memory must not be released until it is no longer needed by the
* task.
*/
void enqueue_job(omni::experimental::job::JobFunction job_fn, void* job_data) noexcept;
};
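// Illustrative sketch (not generated code): enqueueing work through IJob. The way the IJob
// instance is obtained is environment-specific and assumed here; the call itself matches the
// interface above. The job data must stay alive until the job has finished with it.
//
//   struct MyJobData { int value; };
//   static void myJob(void* data)
//   {
//       auto* jobData = static_cast<MyJobData*>(data);
//       /* ... do work with jobData->value ... */
//   }
//
//   static MyJobData s_data{ 42 };            // outlives the job by construction
//   job->enqueue_job(&myJob, &s_data);        // 'job' is an IJob instance acquired elsewhere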
/**
* Interface for managing the number of workers in the job system.
*/
template <>
class omni::core::Generated<omni::experimental::job::IJobWorker_abi> : public omni::experimental::job::IJobWorker_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::experimental::job::IJobWorker")
/**
     * Returns the default number of workers used for creation of a new Job system.
*
* @return The default number of workers.
*/
size_t get_default_worker_count() noexcept;
/**
* Returns the number of worker threads in the job system.
*
* @returns The number of worker threads.
*/
size_t get_worker_count() noexcept;
/**
* Sets the number of workers in the job system.
*
* This function may stop all current threads and reset any previously set thread affinity.
*
* @param count The new number of workers to set in the system. A value of 0 means to use the default value returned
* by getDefaultWorkerCount()
*/
void set_worker_count(size_t count) noexcept;
};
/**
* Interface for setting CPU affinity for the job system.
*/
template <>
class omni::core::Generated<omni::experimental::job::IJobAffinity_abi> : public omni::experimental::job::IJobAffinity_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::experimental::job::IJobAffinity")
/**
* Gets the current affinity of a worker.
*
     * @param worker_id The worker to get the affinity of. If this id is larger than the current number of workers,
* a \c nullptr will be returned.
*
* @return The current affinity being used by the worker. The returned value may be \c nullptr if the worker's
* affinity could not be determined.
*/
omni::core::ObjectPtr<omni::experimental::job::IAffinityMask> get_affinity(size_t worker_id) noexcept;
/**
* Attempts to set the affinity for the specified worker.
*
* @note On Windows each thread can only belong to a single Processor Group, so the CPU Affinity will only be set
     * to the first non-zero entry. That is to say, if \c mask->get_affinity_mask(0) and
     * \c mask->get_affinity_mask(1) both have bits set, only the CPUs in \c mask->get_affinity_mask(0) will be set for
* the affinity.
*
* @param worker_id The worker to set the affinity of. If this id is larger than the current number of workers,
* false will be returned.
* @param mask The affinity values to set.
*
* @return true if the affinity was successfully set, false otherwise.
*/
bool set_affinity(size_t worker_id, omni::core::ObjectParam<omni::experimental::job::IAffinityMask> mask) noexcept;
};
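// Illustrative sketch (not generated code): pinning worker 0 to CPUs 0-3. Acquiring the
// IJobAffinity instance is environment-specific and assumed; get_affinity()/set_affinity()
// and set_affinity_mask() are the calls declared in this file.
//
//   auto mask = affinity->get_affinity(0);        // may be nullptr if it cannot be determined
//   if (mask)
//   {
//       mask->set_affinity_mask(0, 0xFull);       // CPUs 0-3 in the first 64-CPU group
//       if (!affinity->set_affinity(0, mask))
//       {
//           /* the affinity could not be applied */
//       }
//   }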
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::experimental::job::MaskType omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::get_affinity_mask(
size_t index) noexcept
{
return get_affinity_mask_abi(index);
}
inline void omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::set_affinity_mask(
size_t index, omni::experimental::job::MaskType mask) noexcept
{
set_affinity_mask_abi(index, mask);
}
inline size_t omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::get_mask_count() noexcept
{
return get_mask_count_abi();
}
inline size_t omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::get_default_mask_count() noexcept
{
return get_default_mask_count_abi();
}
inline void omni::core::Generated<omni::experimental::job::IAffinityMask_abi>::set_mask_count(size_t count) noexcept
{
set_mask_count_abi(count);
}
inline void omni::core::Generated<omni::experimental::job::IJob_abi>::enqueue_job(
omni::experimental::job::JobFunction job_fn, void* job_data) noexcept
{
enqueue_job_abi(job_fn, job_data);
}
inline size_t omni::core::Generated<omni::experimental::job::IJobWorker_abi>::get_default_worker_count() noexcept
{
return get_default_worker_count_abi();
}
inline size_t omni::core::Generated<omni::experimental::job::IJobWorker_abi>::get_worker_count() noexcept
{
return get_worker_count_abi();
}
inline void omni::core::Generated<omni::experimental::job::IJobWorker_abi>::set_worker_count(size_t count) noexcept
{
set_worker_count_abi(count);
}
inline omni::core::ObjectPtr<omni::experimental::job::IAffinityMask> omni::core::Generated<
omni::experimental::job::IJobAffinity_abi>::get_affinity(size_t worker_id) noexcept
{
return omni::core::steal(get_affinity_abi(worker_id));
}
inline bool omni::core::Generated<omni::experimental::job::IJobAffinity_abi>::set_affinity(
size_t worker_id, omni::core::ObjectParam<omni::experimental::job::IAffinityMask> mask) noexcept
{
return set_affinity_abi(worker_id, mask.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 8,693 | C | 34.056451 | 126 | 0.695962 |
omniverse-code/kit/include/omni/experimental/job/IJob.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief IJob definition file.
#pragma once
#include "../../../carb/Interface.h"
#include "../../core/IObject.h"
#include "../../../carb/IObject.h"
namespace omni
{
/** Namespace for experimental Interfaces and functionality. */
namespace experimental
{
namespace job
{
/**
* Defines the function for performing a user-provided job.
*
 * @param job_data User provided data for the job; the memory must not be released until it is no longer needed by the
* task.
*/
using JobFunction = void (*)(void* job_data);
/** Forward declaration of the IAffinityMask interface. */
OMNI_DECLARE_INTERFACE(IAffinityMask);
/**
* Alias for an affinity mask.
*/
using MaskType = uint64_t;
/**
* Interface for providing a CPU affinity mask to the plugin. Instances of this interface can be thought of as an array
* of \c MaskType values, which allows for setting affinities on machines with more than 64 processors. Each affinity
* mask this object contains is a bitmask that represents the associated CPUs.
*
* On Linux, this object is treated as one large bitset analogous to cpu_set_t. So \c get_affinity_mask(0) represents
* CPUs 0-63, \c get_affinity_mask(1) represents CPUs 64-127, etc.
*
* On Windows, each affinity mask in this object applies to its own Processor Group, so \c get_affinity_mask(0) is for
* Processor Group 0, \c get_affinity_mask(1) for Processor Group 1, etc.
*/
class IAffinityMask_abi
: public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IAffinityMask")>
{
protected:
/**
* Gets the affinity mask at \c index.
*
* @note \c index must be less than \ref get_mask_count_abi()
*
* @param index Index to get affinity mask for.
*
* @return The affinity mask at the provided index.
*/
virtual MaskType get_affinity_mask_abi(size_t index) noexcept = 0;
/**
* Gets the affinity \c mask at \c index.
*
* @note \c index must be less than \ref get_mask_count_abi()
*
* @param index Index to set affinity mask for.
* @param mask Mask to set.
*/
virtual void set_affinity_mask_abi(size_t index, MaskType mask) noexcept = 0;
/**
* Gets the current number of affinity masks stored by this object.
*
* @return The current number of affinity masks stored by this object.
*/
virtual size_t get_mask_count_abi() noexcept = 0;
/**
* Gets the default number of affinity masks stored by this object.
*
* @return The default number of affinity masks stored by this object.
*/
virtual size_t get_default_mask_count_abi() noexcept = 0;
/**
* Sets the number of affinity masks stored by this object to \c count.
*
     * If \c count is greater than the current size, the appended affinity masks will be set to \c 0. If \c count
* is less than the current size, then this object will only contain the first \c count elements after this call.
*
* @param count Number of affinity masks to set the size to.
*/
virtual void set_mask_count_abi(size_t count) noexcept = 0;
};
/** Forward declaration of the IJob interface. */
OMNI_DECLARE_INTERFACE(IJob);
/** Forward declaration of the IJobWorker interface. */
OMNI_DECLARE_INTERFACE(IJobWorker);
/** Forward declaration of the IJobAffinity interface. */
OMNI_DECLARE_INTERFACE(IJobAffinity);
/**
* Basic interface for launching jobs on a foreign job system.
*/
class IJob_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IJob")>
{
protected:
/**
* Adds a new job to be executed.
*
* @param job_fn User provided function to be executed by a worker.
     * @param job_data User provided data for the job; the memory must not be released until it is no longer needed by the
* task.
*/
virtual void enqueue_job_abi(JobFunction job_fn, OMNI_ATTR("in, out") void* job_data) noexcept = 0;
};
/**
* Interface for managing the number of workers in the job system.
*/
class IJobWorker_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IJobWorker")>
{
protected:
/**
     * Returns the default number of workers used for creation of a new Job system.
*
* @return The default number of workers.
*/
virtual size_t get_default_worker_count_abi() noexcept = 0;
/**
* Returns the number of worker threads in the job system.
*
* @returns The number of worker threads.
*/
virtual size_t get_worker_count_abi() noexcept = 0;
/**
* Sets the number of workers in the job system.
*
* This function may stop all current threads and reset any previously set thread affinity.
*
* @param count The new number of workers to set in the system. A value of 0 means to use the default value returned
* by getDefaultWorkerCount()
*/
virtual void set_worker_count_abi(size_t count) noexcept = 0;
};
/**
* Interface for setting CPU affinity for the job system.
*/
class IJobAffinity_abi
: public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.experimental.job.IJobAffinity")>
{
protected:
/**
* Gets the current affinity of a worker.
*
     * @param worker_id The worker to get the affinity of. If this id is larger than the current number of workers,
* a \c nullptr will be returned.
*
* @return The current affinity being used by the worker. The returned value may be \c nullptr if the worker's
* affinity could not be determined.
*/
virtual IAffinityMask* get_affinity_abi(size_t worker_id) noexcept = 0;
/**
* Attempts to set the affinity for the specified worker.
*
* @note On Windows each thread can only belong to a single Processor Group, so the CPU Affinity will only be set
     * to the first non-zero entry. That is to say, if \c mask->get_affinity_mask(0) and
     * \c mask->get_affinity_mask(1) both have bits set, only the CPUs in \c mask->get_affinity_mask(0) will be set for
* the affinity.
*
* @param worker_id The worker to set the affinity of. If this id is larger than the current number of workers,
* false will be returned.
* @param mask The affinity values to set.
*
* @return true if the affinity was successfully set, false otherwise.
*/
virtual bool set_affinity_abi(size_t worker_id, OMNI_ATTR("not_null") IAffinityMask* mask) noexcept = 0;
};
} // namespace job
} // namespace experimental
} // namespace omni
#include "IJob.gen.h"
| 7,005 | C | 34.20603 | 121 | 0.684511 |
omniverse-code/kit/include/omni/experimental/url/IUrl.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/OmniAttr.h>
#include <omni/core/Interface.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <utility>
#include <type_traits>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
template <>
class omni::core::Generated<omni::experimental::IUrl_abi> : public omni::experimental::IUrl_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::experimental::IUrl")
/**
* Clears this URL
*/
void clear() noexcept;
/**
* Return the string representation of this URL
*/
omni::string to_string() noexcept;
/**
* Return the string representation of this URL, but with valid UTF-8 characters
* decoded. This will leave invalid UTF-8 byte sequences and certain ASCII characters
* encoded; including control codes, and characters that are reserved by the URL
* specification as sub-delimiters.
*/
omni::string to_string_utf8() noexcept;
/**
* Sets this URL from a string
*/
void from_string(const omni::string& url_string) noexcept;
/**
* Sets this URL from a posix file path
* The scheme will be "file" and the path will be the normalized and encoded file path
* Normalization includes removing redundant path segments such as "//", "/./" and
* collapsing ".." segments if possible. For example, it will convert "a/b/../" to "a"
*/
void from_filepath_posix(const omni::string& filepath) noexcept;
/**
* Sets this URL from a windows file path
* The scheme will be "file" and the path will be the normalized and encoded file path
* Path normalization includes everything from "from_filepath_posix_abi" plus:
* - The drive letter is made uppercase
* - Path separators are converted from \\ to /
* - UNC paths such as "\\\\server\\share\path" or "\\\\?\\C:\\path" are handled correctly
*/
void from_filepath_windows(const omni::string& filepath) noexcept;
/**
* Sets this URL from a file path based on the native OS.
* This calls either from_filepath_posix_abi or from_filepath_windows_abi
*/
void from_filepath_native(const omni::string& filepath) noexcept;
/**
* Returns true if the URL has a scheme component.
* "scheme" is the part before the first colon, for example "http" or "omniverse".
* A URL without a scheme component can only be a relative reference.
*
* @see get_scheme()
* @see set_scheme()
*/
bool has_scheme() noexcept;
/**
* Returns true if the URL has an authority component.
* "authority" is the part between the // and /
* For example "user@server:port"
*
* @see get_authority_encoded()
* @see set_authority_encoded()
* @see has_userinfo()
* @see has_host()
* @see has_port()
*/
bool has_authority() noexcept;
/**
* Returns true if the URL has a userinfo sub-component.
* "userinfo" is the part of the authority before @
*
* @see get_userinfo()
* @see set_userinfo()
* @see has_authority()
*/
bool has_userinfo() noexcept;
/**
* Returns true if the URL has a host sub-component.
* "host" is the part of the authority between @ and :
*
* @see get_host()
* @see set_host()
* @see has_authority()
*/
bool has_host() noexcept;
/**
* Returns true if the URL has a port sub-component.
* "port" is the part of the authority after :
*
* @see get_port()
* @see set_port()
* @see has_authority()
*/
bool has_port() noexcept;
/**
* Returns true if the URL has a path component.
* "path" is the part after _abi(and including) /
* For example "/path/to/my/file.txt"
*
* @see get_path_encoded()
* @see set_path_encoded()
* @see set_path_decoded()
*/
bool has_path() noexcept;
/**
* Returns true if the URL has a query component.
* "query" is the part after ? but before #
*
* @see get_query_encoded()
* @see set_query_encoded()
* @see set_query_decoded()
*/
bool has_query() noexcept;
/**
* Returns true if the URL has a fragment component.
* "fragment" is the part after #
*
* @see get_fragment_encoded()
* @see set_fragment_encoded()
* @see set_fragment_decoded()
*/
bool has_fragment() noexcept;
/**
* Returns the scheme.
* The scheme will always be fully decoded and in lower case.
*
* @see has_scheme()
* @see set_scheme()
*/
omni::string get_scheme() noexcept;
/**
* Returns the authority, which may contain percent-encoded data
* For example if the 'userinfo' contains : or @ it must be percent-encoded.
*
* @see set_authority_encoded()
* @see get_userinfo()
* @see get_host()
* @see get_port()
*/
omni::string get_authority_encoded() noexcept;
/**
* Returns the userinfo, fully decoded.
*
* @see get_authority_encoded()
* @see set_userinfo()
* @see has_userinfo()
*/
omni::string get_userinfo() noexcept;
/**
* Returns the host, fully decoded.
*
* @see get_authority_encoded()
* @see set_host()
* @see has_host()
*/
omni::string get_host() noexcept;
/**
* Returns the port number
*
* @see get_authority_encoded()
* @see set_port()
* @see has_port()
*/
uint16_t get_port() noexcept;
/**
* Returns the percent-encoded path component.
*
* @see get_path_utf8()
* @see set_path_encoded()
* @see set_path_decoded()
* @see has_path()
*/
omni::string get_path_encoded() noexcept;
/**
* Returns the path component with all printable ascii and valid UTF-8 characters decoded
* Invalid UTF-8 and ASCII control codes will still be percent-encoded.
* It's generally safe to print the result of this function on screen and in log files.
*
* @see get_path_encoded()
* @see set_path_encoded()
* @see set_path_decoded()
* @see has_path()
*/
omni::string get_path_utf8() noexcept;
/**
* Returns the percent-encoded query component.
*
* @see get_query_encoded()
* @see set_query_encoded()
* @see set_query_decoded()
* @see has_query()
*/
omni::string get_query_encoded() noexcept;
/**
* Returns the percent-encoded fragment component.
*
* @see get_fragment_encoded()
* @see set_fragment_encoded()
* @see set_fragment_decoded()
* @see has_fragment()
*/
omni::string get_fragment_encoded() noexcept;
/**
* Sets the scheme.
*
* @see has_scheme()
* @see get_scheme()
*/
void set_scheme(const omni::string& scheme) noexcept;
/**
* Sets the authority, which is expected to have all the sub-components percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_authority_encoded()
* @see set_userinfo()
* @see set_host()
* @see set_port()
*/
void set_authority_encoded(const omni::string& authority) noexcept;
/**
* Sets the userinfo. This function expects the userinfo is not already percent-encoded.
*
* @see set_authority_encoded()
* @see get_userinfo()
* @see has_userinfo()
*/
void set_userinfo(const omni::string& userinfo) noexcept;
/**
* Sets the host. This function expects the host is not already percent-encoded.
*
* @see set_authority_encoded()
* @see get_host()
* @see has_host()
*/
void set_host(const omni::string& host) noexcept;
/**
* Sets the port number
*
* @see set_authority_encoded()
* @see get_port()
* @see has_port()
*/
void set_port(uint16_t port) noexcept;
/**
* Sets the path, which is already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_path_encoded()
* @see set_path_decoded()
* @see has_path()
*/
void set_path_encoded(const omni::string& path_encoded) noexcept;
/**
* Sets the path, which is NOT already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, including the percent sign
* itself
*
* @see get_path_encoded()
* @see set_path_encoded()
* @see has_path()
*/
void set_path_decoded(const omni::string& path_decoded) noexcept;
/**
* Sets the query, which is already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_query_encoded()
* @see set_query_decoded()
* @see has_query()
*/
void set_query_encoded(const omni::string& query_encoded) noexcept;
/**
* Sets the query, which is NOT already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, including the percent sign
* itself
*
* @see get_query_encoded()
* @see set_query_encoded()
* @see has_query()
*/
void set_query_decoded(const omni::string& query_decoded) noexcept;
/**
* Sets the fragment, which is already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_fragment_encoded()
* @see set_fragment_decoded()
* @see has_fragment()
*/
void set_fragment_encoded(const omni::string& fragment_encoded) noexcept;
/**
* Sets the fragment, which is NOT already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, including the percent sign
* itself
*
* @see get_fragment_encoded()
* @see set_fragment_encoded()
* @see has_fragment()
*/
void set_fragment_decoded(const omni::string& fragment_decoded) noexcept;
/**
* Create a new IUrl object that represents the shortest possible URL that makes @p other_url relative to this URL.
*
* Relative URLs are described in section 5.2 "Relative Resolution" of RFC-3986
*
* @param other_url URL to make a relative URL to.
*
* @return A new IUrl object that is the relative URL between this URL and @p other_url.
*/
omni::core::ObjectPtr<omni::experimental::IUrl> make_relative(
omni::core::ObjectParam<omni::experimental::IUrl> other_url) noexcept;
/**
* Creates a new IUrl object that is the result of resolving the provided @p relative_url with this URL as the base
* URL.
*
* The algorithm for doing the combination is described in section 5.2 "Relative Resolution" of RFC-3986.
*
* @param relative_url URL to resolve with this URL as the base URL.
*
* @return A new IUrl object that is the result of resolving @p relative_url with this URL.
*/
omni::core::ObjectPtr<omni::experimental::IUrl> resolve_relative(
omni::core::ObjectParam<omni::experimental::IUrl> relative_url) noexcept;
};
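// Illustrative sketch (not generated code): the expected effect of the documented normalization
// and relative-resolution rules. How the IUrl instances are created is assumed; the example
// output strings follow from the rules described above rather than from this file.
//
//   url->from_filepath_windows("c:\\projects\\scenes\\..\\assets\\box.usd");
//   // Drive letter uppercased, separators converted, ".." collapsed, e.g.
//   // url->to_string() -> "file:///C:/projects/assets/box.usd"
//
//   base->from_string("omniverse://server/projects/scene.usd");
//   relative->from_string("materials/wood.mdl");
//   auto resolved = base->resolve_relative(relative);
//   // RFC-3986 relative resolution against the base URL, e.g.
//   // resolved->to_string() -> "omniverse://server/projects/materials/wood.mdl"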
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void omni::core::Generated<omni::experimental::IUrl_abi>::clear() noexcept
{
clear_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::to_string() noexcept
{
return to_string_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::to_string_utf8() noexcept
{
return to_string_utf8_abi();
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_string(const omni::string& url_string) noexcept
{
from_string_abi(url_string);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_filepath_posix(const omni::string& filepath) noexcept
{
from_filepath_posix_abi(filepath);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_filepath_windows(const omni::string& filepath) noexcept
{
from_filepath_windows_abi(filepath);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::from_filepath_native(const omni::string& filepath) noexcept
{
from_filepath_native_abi(filepath);
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_scheme() noexcept
{
return has_scheme_abi();
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_authority() noexcept
{
return has_authority_abi();
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_userinfo() noexcept
{
return has_userinfo_abi();
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_host() noexcept
{
return has_host_abi();
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_port() noexcept
{
return has_port_abi();
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_path() noexcept
{
return has_path_abi();
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_query() noexcept
{
return has_query_abi();
}
inline bool omni::core::Generated<omni::experimental::IUrl_abi>::has_fragment() noexcept
{
return has_fragment_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_scheme() noexcept
{
return get_scheme_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_authority_encoded() noexcept
{
return get_authority_encoded_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_userinfo() noexcept
{
return get_userinfo_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_host() noexcept
{
return get_host_abi();
}
inline uint16_t omni::core::Generated<omni::experimental::IUrl_abi>::get_port() noexcept
{
return get_port_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_path_encoded() noexcept
{
return get_path_encoded_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_path_utf8() noexcept
{
return get_path_utf8_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_query_encoded() noexcept
{
return get_query_encoded_abi();
}
inline omni::string omni::core::Generated<omni::experimental::IUrl_abi>::get_fragment_encoded() noexcept
{
return get_fragment_encoded_abi();
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_scheme(const omni::string& scheme) noexcept
{
set_scheme_abi(scheme);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_authority_encoded(const omni::string& authority) noexcept
{
set_authority_encoded_abi(authority);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_userinfo(const omni::string& userinfo) noexcept
{
set_userinfo_abi(userinfo);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_host(const omni::string& host) noexcept
{
set_host_abi(host);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_port(uint16_t port) noexcept
{
set_port_abi(port);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_path_encoded(const omni::string& path_encoded) noexcept
{
set_path_encoded_abi(path_encoded);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_path_decoded(const omni::string& path_decoded) noexcept
{
set_path_decoded_abi(path_decoded);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_query_encoded(const omni::string& query_encoded) noexcept
{
set_query_encoded_abi(query_encoded);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_query_decoded(const omni::string& query_decoded) noexcept
{
set_query_decoded_abi(query_decoded);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_fragment_encoded(
const omni::string& fragment_encoded) noexcept
{
set_fragment_encoded_abi(fragment_encoded);
}
inline void omni::core::Generated<omni::experimental::IUrl_abi>::set_fragment_decoded(
const omni::string& fragment_decoded) noexcept
{
set_fragment_decoded_abi(fragment_decoded);
}
inline omni::core::ObjectPtr<omni::experimental::IUrl> omni::core::Generated<omni::experimental::IUrl_abi>::make_relative(
omni::core::ObjectParam<omni::experimental::IUrl> other_url) noexcept
{
return omni::core::steal(make_relative_abi(other_url.get()));
}
inline omni::core::ObjectPtr<omni::experimental::IUrl> omni::core::Generated<omni::experimental::IUrl_abi>::resolve_relative(
omni::core::ObjectParam<omni::experimental::IUrl> relative_url) noexcept
{
return omni::core::steal(resolve_relative_abi(relative_url.get()));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 17,715 | C | 29.078098 | 126 | 0.654361 |
omniverse-code/kit/include/omni/experimental/url/IUrl.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../../core/IObject.h"
#include "../../String.h"
namespace omni
{
namespace experimental
{
OMNI_DECLARE_INTERFACE(IUrl);
class IUrl_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.IUrl")>
{
protected:
/**
* Clears this URL
*/
virtual void clear_abi() noexcept = 0;
/**
* Return the string representation of this URL
*/
virtual omni::string to_string_abi() noexcept = 0;
/**
* Return the string representation of this URL, but with valid UTF-8 characters
     * decoded. This will leave invalid UTF-8 byte sequences and certain ASCII characters
     * encoded, including control codes and characters that are reserved by the URL
     * specification as sub-delimiters.
*/
virtual omni::string to_string_utf8_abi() noexcept = 0;
/**
* Sets this URL from a string
*/
virtual void from_string_abi(omni::string const& url_string) noexcept = 0;
/**
* Sets this URL from a posix file path
* The scheme will be "file" and the path will be the normalized and encoded file path
* Normalization includes removing redundant path segments such as "//", "/./" and
* collapsing ".." segments if possible. For example, it will convert "a/b/../" to "a"
*/
virtual void from_filepath_posix_abi(omni::string const& filepath) noexcept = 0;
/**
* Sets this URL from a windows file path
* The scheme will be "file" and the path will be the normalized and encoded file path
* Path normalization includes everything from "from_filepath_posix_abi" plus:
* - The drive letter is made uppercase
* - Path separators are converted from \\ to /
* - UNC paths such as "\\\\server\\share\path" or "\\\\?\\C:\\path" are handled correctly
*/
virtual void from_filepath_windows_abi(omni::string const& filepath) noexcept = 0;
/**
* Sets this URL from a file path based on the native OS.
* This calls either from_filepath_posix_abi or from_filepath_windows_abi
*/
virtual void from_filepath_native_abi(omni::string const& filepath) noexcept = 0;
/**
* Returns true if the URL has a scheme component.
* "scheme" is the part before the first colon, for example "http" or "omniverse".
* A URL without a scheme component can only be a relative reference.
*
* @see get_scheme()
* @see set_scheme()
*/
virtual bool has_scheme_abi() noexcept = 0;
/**
* Returns true if the URL has an authority component.
* "authority" is the part between the // and /
* For example "user@server:port"
*
* @see get_authority_encoded()
* @see set_authority_encoded()
* @see has_userinfo()
* @see has_host()
* @see has_port()
*/
virtual bool has_authority_abi() noexcept = 0;
/**
* Returns true if the URL has a userinfo sub-component.
* "userinfo" is the part of the authority before @
*
* @see get_userinfo()
* @see set_userinfo()
* @see has_authority()
*/
virtual bool has_userinfo_abi() noexcept = 0;
/**
* Returns true if the URL has a host sub-component.
* "host" is the part of the authority between @ and :
*
* @see get_host()
* @see set_host()
* @see has_authority()
*/
virtual bool has_host_abi() noexcept = 0;
/**
* Returns true if the URL has a port sub-component.
* "port" is the part of the authority after :
*
* @see get_port()
* @see set_port()
* @see has_authority()
*/
virtual bool has_port_abi() noexcept = 0;
/**
* Returns true if the URL has a path component.
* "path" is the part after _abi(and including) /
* For example "/path/to/my/file.txt"
*
* @see get_path_encoded()
* @see set_path_encoded()
* @see set_path_decoded()
*/
virtual bool has_path_abi() noexcept = 0;
/**
* Returns true if the URL has a query component.
* "query" is the part after ? but before #
*
* @see get_query_encoded()
* @see set_query_encoded()
* @see set_query_decoded()
*/
virtual bool has_query_abi() noexcept = 0;
/**
* Returns true if the URL has a fragment component.
* "fragment" is the part after #
*
* @see get_fragment_encoded()
* @see set_fragment_encoded()
* @see set_fragment_decoded()
*/
virtual bool has_fragment_abi() noexcept = 0;
/**
* Returns the scheme.
* The scheme will always be fully decoded and in lower case.
*
* @see has_scheme()
* @see set_scheme()
*/
virtual omni::string get_scheme_abi() noexcept = 0;
/**
     * Returns the authority, which may contain percent-encoded data.
     * For example, if the 'userinfo' contains : or @ it must be percent-encoded.
*
* @see set_authority_encoded()
* @see get_userinfo()
* @see get_host()
* @see get_port()
*/
virtual omni::string get_authority_encoded_abi() noexcept = 0;
/**
* Returns the userinfo, fully decoded.
*
* @see get_authority_encoded()
* @see set_userinfo()
* @see has_userinfo()
*/
virtual omni::string get_userinfo_abi() noexcept = 0;
/**
* Returns the host, fully decoded.
*
* @see get_authority_encoded()
* @see set_host()
* @see has_host()
*/
virtual omni::string get_host_abi() noexcept = 0;
/**
* Returns the port number
*
* @see get_authority_encoded()
* @see set_port()
* @see has_port()
*/
virtual uint16_t get_port_abi() noexcept = 0;
/**
* Returns the percent-encoded path component.
*
* @see get_path_utf8()
* @see set_path_encoded()
* @see set_path_decoded()
* @see has_path()
*/
virtual omni::string get_path_encoded_abi() noexcept = 0;
/**
* Returns the path component with all printable ascii and valid UTF-8 characters decoded
* Invalid UTF-8 and ASCII control codes will still be percent-encoded.
* It's generally safe to print the result of this function on screen and in log files.
*
* @see get_path_encoded()
* @see set_path_encoded()
* @see set_path_decoded()
* @see has_path()
*/
virtual omni::string get_path_utf8_abi() noexcept = 0;
/**
* Returns the percent-encoded query component.
*
* @see get_query_encoded()
* @see set_query_encoded()
* @see set_query_decoded()
* @see has_query()
*/
virtual omni::string get_query_encoded_abi() noexcept = 0;
/**
* Returns the percent-encoded fragment component.
*
* @see get_fragment_encoded()
* @see set_fragment_encoded()
* @see set_fragment_decoded()
* @see has_fragment()
*/
virtual omni::string get_fragment_encoded_abi() noexcept = 0;
/**
* Sets the scheme.
*
* @see has_scheme()
* @see get_scheme()
*/
virtual void set_scheme_abi(omni::string const& scheme) noexcept = 0;
/**
* Sets the authority, which is expected to have all the sub-components percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_authority_encoded()
* @see set_userinfo()
* @see set_host()
* @see set_port()
*/
virtual void set_authority_encoded_abi(omni::string const& authority) noexcept = 0;
/**
* Sets the userinfo. This function expects the userinfo is not already percent-encoded.
*
* @see set_authority_encoded()
* @see get_userinfo()
* @see has_userinfo()
*/
virtual void set_userinfo_abi(omni::string const& userinfo) noexcept = 0;
/**
* Sets the host. This function expects the host is not already percent-encoded.
*
* @see set_authority_encoded()
* @see get_host()
* @see has_host()
*/
virtual void set_host_abi(omni::string const& host) noexcept = 0;
/**
* Sets the port number
*
* @see set_authority_encoded()
* @see get_port()
* @see has_port()
*/
virtual void set_port_abi(uint16_t port) noexcept = 0;
/**
* Sets the path, which is already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_path_encoded()
* @see set_path_decoded()
* @see has_path()
*/
virtual void set_path_encoded_abi(omni::string const& path_encoded) noexcept = 0;
/**
* Sets the path, which is NOT already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, including the percent sign
* itself
*
* @see get_path_encoded()
* @see set_path_encoded()
* @see has_path()
*/
virtual void set_path_decoded_abi(omni::string const& path_decoded) noexcept = 0;
/**
* Sets the query, which is already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_query_encoded()
* @see set_query_decoded()
* @see has_query()
*/
virtual void set_query_encoded_abi(omni::string const& query_encoded) noexcept = 0;
/**
* Sets the query, which is NOT already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, including the percent sign
* itself
*
* @see get_query_encoded()
* @see set_query_encoded()
* @see has_query()
*/
virtual void set_query_decoded_abi(omni::string const& query_decoded) noexcept = 0;
/**
* Sets the fragment, which is already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, however the percent sign itself
* will _NOT_ be encoded.
*
* @see get_fragment_encoded()
* @see set_fragment_decoded()
* @see has_fragment()
*/
virtual void set_fragment_encoded_abi(omni::string const& fragment_encoded) noexcept = 0;
/**
* Sets the fragment, which is NOT already percent-encoded.
* If characters that _MUST_ be encoded are detected, they will be percent-encoded, including the percent sign
* itself
*
* @see get_fragment_encoded()
* @see set_fragment_encoded()
* @see has_fragment()
*/
virtual void set_fragment_decoded_abi(omni::string const& fragment_decoded) noexcept = 0;
/**
* Create a new IUrl object that represents the shortest possible URL that makes @p other_url relative to this URL.
*
* Relative URLs are described in section 5.2 "Relative Resolution" of RFC-3986
*
* @param other_url URL to make a relative URL to.
*
* @return A new IUrl object that is the relative URL between this URL and @p other_url.
*/
virtual IUrl* make_relative_abi(IUrl* other_url) noexcept = 0;
/**
* Creates a new IUrl object that is the result of resolving the provided @p relative_url with this URL as the base
* URL.
*
* The algorithm for doing the combination is described in section 5.2 "Relative Resolution" of RFC-3986.
*
* @param relative_url URL to resolve with this URL as the base URL.
*
* @return A new IUrl object that is the result of resolving @p relative_url with this URL.
*/
virtual IUrl* resolve_relative_abi(IUrl* relative_url) noexcept = 0;
};
} // namespace experimental
} // namespace omni
#include "IUrl.gen.h"
| 12,193 | C | 30.672727 | 119 | 0.620848 |
omniverse-code/kit/include/omni/graph/io/BundleAttrib.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#pragma message("omni/graph/io/BundleAttrib.h is deprecated. Include omni/graph/core/BundleAttrib.h instead.")
// ====================================================================================================
/* _____ _ _ _ _ _
| __ \ | \ | | | | | | | |
| | | | ___ | \| | ___ | |_ | | | |___ ___
| | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \
| |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/
|_____/ \___/ |_| \_|\___/ \__| \____/|___/\___|
This is a temporary interface that can change at any time.
*/
// ====================================================================================================
#include "IDirtyID.h"
#include <omni/graph/core/IBundle.h>
namespace omni
{
namespace graph
{
namespace io
{
class BundlePrim;
class ConstBundlePrim;
using BundleAttribSourceType OMNI_GRAPH_IO_DEPRECATED = uint8_t;
/**
* BundleAttributeSource is used to differentiate between UsdAttributes
* and UsdRelationships.
*
* TODO: Investigate why we can't use eRelationship for this purpose.
*/
enum class OMNI_GRAPH_IO_DEPRECATED BundleAttribSource : BundleAttribSourceType
{
Attribute,
Relationship,
};
/**
* Attribute in bundle primitive.
*
 * In contrast to (Const)BundlePrim and (Const)BundlePrims, BundleAttrib uses the
 * const qualifier to express constness of the attribute.
*
* TODO: Review if const qualifier is appropriate.
*/
class OMNI_GRAPH_IO_DEPRECATED BundleAttrib
{
public:
/**
* Backward compatibility alias.
*/
using SourceType = BundleAttribSourceType;
using Source = BundleAttribSource;
BundleAttrib() = default;
/**
* Read initialization.
*/
BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept;
/**
* Read-Write initialization.
*/
BundleAttrib(BundlePrim& prim,
omni::graph::core::NameToken name,
omni::graph::core::Type type,
size_t arrayElementCount,
BundleAttribSource source) noexcept;
BundleAttrib(BundleAttrib const&) = delete;
BundleAttrib(BundleAttrib&&) noexcept = delete;
BundleAttrib& operator=(BundleAttrib const&) = delete;
BundleAttrib& operator=(BundleAttrib&&) noexcept = delete;
/**
     * @return Bundle Primitive to which this attribute belongs.
*/
ConstBundlePrim* getBundlePrim() const noexcept;
/**
     * @return Bundle Primitive to which this attribute belongs.
*/
BundlePrim* getBundlePrim() noexcept;
/**
* @return Non const attribute handle of this attribute.
*/
omni::graph::core::AttributeDataHandle handle() noexcept;
/**
* @return Const attribute handle of this attribute.
*/
omni::graph::core::ConstAttributeDataHandle handle() const noexcept;
/**
* @return Name of this attribute.
*/
omni::graph::core::NameToken name() const noexcept;
/**
* @return Type of this attribute.
*/
omni::graph::core::Type type() const noexcept;
/**
* @return Interpolation of this attribute.
*/
omni::graph::core::NameToken interpolation() const noexcept;
/**
* Set interpolation for this attribute.
*
* @return True if operation successful, false otherwise.
*/
bool setInterpolation(omni::graph::core::NameToken interpolation) noexcept;
/**
* Clean interpolation information for this attribute.
*/
void clearInterpolation() noexcept;
/**
* @return Dirty Id of this attribute.
*/
DirtyIDType dirtyID() const noexcept;
/**
* Set dirty id to given value.
*
* @return True if successful, false otherwise.
*/
bool setDirtyID(DirtyIDType dirtyID) noexcept;
/**
* Bump dirty id for this attribute.
*
* @return True if successful, false otherwise.
*/
bool bumpDirtyID() noexcept;
/**
* Set source for this attribute.
*
* @return True if successful, false otherwise.
*/
bool setSource(Source source) noexcept;
/**
* Reset source to default value for this attribute.
*/
void clearSource() noexcept;
/**
* @return True if this attribute is an array attribute.
*/
bool isArray() const noexcept;
/**
* @return Size of this attribute. If attribute is not an array, then size is 1.
*/
size_t size() const noexcept;
/**
* Changes size of this attribute.
*/
void resize(size_t arrayElementCount) noexcept;
/**
* Copy attribute contents from another attribute.
* Destination name is preserved.
*/
void copyContentsFrom(BundleAttrib const& sourceAttr) noexcept;
/**
* @return Internal data as void pointer.
*/
void* getDataInternal() noexcept;
/**
* @return Internal data as void pointer.
*/
void const* getDataInternal() const noexcept;
template <typename T>
T get() const noexcept;
// NOTE: If this is not an array type attribute, this pointer may not be valid once any prim,
// even if it's not the prim containing this attribute, has an attribute added or removed,
// due to how attribute data is stored.
template <typename T>
T* getData() noexcept;
template <typename T>
T const* getData() const noexcept;
template <typename T>
T const* getConstData() const noexcept;
template <typename T>
void set(T const& value) noexcept;
template <typename T>
void set(T const* values, size_t elementCount) noexcept;
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
/**
     * @todo The first iteration of MPiB didn't use the 'eRelationship' type to describe relationships.
     * Instead, an attribute that represents a relationship is marked as such through its "source".
*/
Source source() const noexcept;
/**
* @return true if this attribute is data.
*/
bool isAttributeData() const noexcept;
/**
* @return true if this attribute is relationship.
*/
bool isRelationshipData() const noexcept;
/**
     * @deprecated The IBundle2 interface does not require prefixing; use name().
*/
omni::graph::core::NameToken prefixedName() const noexcept;
private:
/**
* Remove attribute and its internal data.
*/
void clearContents() noexcept;
omni::graph::core::IConstBundle2* getConstBundlePtr() const noexcept;
omni::graph::core::IBundle2* getBundlePtr() noexcept;
ConstBundlePrim* m_bundlePrim{ nullptr };
// Attribute Definition:
omni::graph::core::NameToken m_name = carb::flatcache::kUninitializedToken;
carb::flatcache::TypeC m_type;
// Attribute Property Cached Values:
omni::graph::core::NameToken m_interpolation = carb::flatcache::kUninitializedToken;
DirtyIDType m_dirtyID{ kInvalidDirtyID };
Source m_source { BundleAttribSource::Attribute };
friend class ConstBundlePrims;
friend class BundlePrim;
};
/**
* Do not use! Backward compatibility alias.
*/
using BundleAttributeInfo OMNI_GRAPH_IO_DEPRECATED = BundleAttrib;
} // namespace io
} // namespace graph
} // namespace omni
#include "BundleAttribImpl.h"
| 8,030 | C | 27.178947 | 110 | 0.608219 |
omniverse-code/kit/include/omni/graph/io/BundlePrims.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#pragma message("omni/graph/io/BundlePrims.h is deprecated. Include omni/graph/core/BundlePrims.h instead.")
// ====================================================================================================
/* _____ _ _ _ _ _
| __ \ | \ | | | | | | | |
| | | | ___ | \| | ___ | |_ | | | |___ ___
| | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \
| |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/
|_____/ \___/ |_| \_|\___/ \__| \____/|___/\___|
This is a temporary interface that can change at any time.
*/
// ====================================================================================================
#include "ConstBundlePrims.h"
namespace omni
{
namespace graph
{
namespace io
{
class BundlePrims;
class BundlePrimIterator;
class BundlePrimAttrIterator;
/**
* Collection of read-write attributes in a primitive.
*/
class OMNI_GRAPH_IO_DEPRECATED BundlePrim : public ConstBundlePrim
{
public:
using AttrMapIteratorType = BundleAttributeMap::iterator;
/**
* @return Parent of this bundle prim.
*/
BundlePrims* getBundlePrims() noexcept;
/**
* @return Bundle handle of this primitive.
*/
omni::graph::core::BundleHandle handle() noexcept;
/**
* Sets type of the primitive.
*/
void setType(omni::graph::core::NameToken type) noexcept;
/**
* @return Cached instance of BundleAttrib if attribute is found successfully, nullptr otherwise.
*/
BundleAttrib* getAttr(omni::graph::core::NameToken attrName) noexcept;
/**
* @return BundleAttrib if attribute is added successfully, nullptr otherwise.
*/
BundleAttrib* addAttr(omni::graph::core::NameToken attrName,
omni::graph::core::Type type,
size_t arrayElementCount = 0,
BundleAttrib::Source source = BundleAttrib::Source::Attribute) noexcept;
/**
* Convenience structure for adding attributes.
*/
struct AddAttrInfo
{
omni::graph::core::NameToken attrName;
omni::graph::core::Type type;
size_t arrayElementCount;
BundleAttrib::Source source;
};
/**
* Adds a list of attributes to this bundle prim.
*
* @param[in] attrList Vector of all the new attributes to be added to this prim
* @returns True if all (new) attributes were added successfully
*
     * @todo A weakness of this interface is that it forces the use of std::vector.
*/
bool addAttrs(std::vector<AddAttrInfo> const& attrList) noexcept;
/**
* Remove attribute with a given name from this primitive.
*/
void removeAttr(omni::graph::core::NameToken attrName) noexcept;
/**
* Recursively remove all attributes from this primitive.
*/
void clearContents() noexcept;
/**
* Copy contents from another bundle prim.
*/
void copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource = true) noexcept;
/**
* Bump dirty id for this bundle prim.
*/
void bumpDirtyID() noexcept;
/**
* Set dirty id for this bundle prim.
*/
void setDirtyID(DirtyIDType dirtyID) noexcept;
/**
* @return Attribute iterator pointing to the first attribute in this bundle.
*/
BundlePrimAttrIterator begin() noexcept;
/**
     * @return Attribute iterator pointing past the last attribute in this bundle.
*/
BundlePrimAttrIterator end() noexcept;
/**
* @return Attribute iterator pointing to the first attribute in this bundle.
*/
ConstBundlePrimAttrIterator cbegin() noexcept;
/**
     * @return Attribute iterator pointing past the last attribute in this bundle.
*/
ConstBundlePrimAttrIterator cend() noexcept;
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
/**
* Create an attribute that is a relationship type.
*/
BundleAttrib* addRelationship(omni::graph::core::NameToken name, size_t targetCount) noexcept;
/**
* @deprecated Do not use! It doesn't do anything, it's kept for backward compatibility.
*/
void setPath(omni::graph::core::NameToken path) noexcept;
/**
* @deprecated Use getBundlePrims.
*/
BundlePrims* bundlePrims() noexcept;
/**
* @deprecated Do not use!
*/
void copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource = true) noexcept;
private:
/**
* Direct initialization with IBundle interface.
*
     * ConstBundlePrim and BundlePrim take advantage of the polymorphic relationship
     * between the IConstBundle and IBundle interfaces.
     * In order to modify bundles, BundlePrim attempts to cast IConstBundle
     * to the IBundle interface. When the cast succeeds, the bundle can be modified.
*
* Only BundlePrims is allowed to create instances of BundlePrim.
*/
BundlePrim(BundlePrims& bundlePrims, BundlePrimIndex primIndex, omni::core::ObjectPtr<IBundle2> bundle);
/**
* Clear contents of IBundle.
*/
void recursiveClearContents(omni::graph::core::GraphContextObj const& context,
omni::graph::core::IBundleFactory* factory,
omni::graph::core::IBundle2* bundle) noexcept;
/**
     * @return The IBundle interface if the cast from IConstBundle succeeded, nullptr otherwise.
*/
omni::graph::core::IBundle2* getBundlePtr(omni::graph::core::IConstBundle2* constBundle) noexcept;
/**
     * @return The IBundle interface if the cast from IConstBundle succeeded, nullptr otherwise.
*/
omni::graph::core::IBundle2* getBundlePtr() noexcept;
/**
* @return True if primitive is an instance of common attributes.
*/
bool isCommonAttrs() const noexcept
{
return m_primIndex == kInvalidBundlePrimIndex;
}
friend class BundlePrimIterator;
friend class BundlePrims;
friend class BundleAttrib;
};
/**
* Collection of read-write primitives in a bundle.
*
 * Bundle Primitives is not movable and not copyable. Its lifespan is managed by the user.
*/
class OMNI_GRAPH_IO_DEPRECATED BundlePrims : public ConstBundlePrims
{
public:
/**
* Acquire access to a bundle primitives under given handle.
*/
BundlePrims(omni::graph::core::GraphContextObj const& context, omni::graph::core::BundleHandle const& bundle);
~BundlePrims() noexcept;
/**
* @return Bundle handle of this primitive.
*/
omni::graph::core::BundleHandle handle() noexcept;
/**
* @return BundlePrim under given index, or nullptr if prim is not found.
*/
BundlePrim* getPrim(BundlePrimIndex primIndex) noexcept;
/**
     * @return BundlePrim allowing access to the common attributes of this bundle primitives collection.
*/
BundlePrim& getCommonAttrs() noexcept;
/**
* Add new primitives to this bundle.
*
* @return Number of successfully added primitives.
*/
size_t addPrims(size_t primCountToAdd) noexcept;
/**
* Remove primitive under given index.
*/
bool removePrim(BundlePrimIndex primIndex) noexcept;
/**
* Cleans up this primitive bundle. Remove all primitives and attributes.
*/
void clearContents() noexcept;
/**
     * Bump the dirty id of this bundle primitives collection.
*/
DirtyIDType bumpBundleDirtyID() noexcept;
/**
* @return Primitive iterator pointing to the first primitive in this bundle.
*/
BundlePrimIterator begin() noexcept;
/**
     * @return Primitive iterator pointing past the last primitive in this bundle.
*/
BundlePrimIterator end() noexcept;
/**
* @return Primitive iterator pointing to the first primitive in this bundle.
*/
ConstBundlePrimIterator cbegin() noexcept;
/**
     * @return Primitive iterator pointing past the last primitive in this bundle.
*/
ConstBundlePrimIterator cend() noexcept;
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
/**
* @deprecated Don't use! Read attach() description.
*/
BundlePrims();
/**
* @deprecated Use appropriate constructor and heap allocate BundlePrims.
*
     * @todo: There is no benefit to using this method. The cache has to be rebuilt from scratch
     * whenever BundlePrims is attached/detached.
     * It would be better to remove the default constructor and enforce cache construction
     * through a constructor with arguments.
*/
void attach(omni::graph::core::GraphContextObj const& context, omni::graph::core::BundleHandle const& bundle) noexcept;
/**
* @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
*/
void detach() noexcept;
/**
     * @deprecated Do not use! Adding prim types has been moved to attach.
     * The ConstBundlePrims attach function will iterate over prims and collect their
     * paths and types.
*/
omni::graph::core::NameToken* addPrimTypesIfMissing() noexcept;
/**
     * @deprecated Do not use! Use removePrim with an index. This overload introduces ambiguity because an int can
     * be converted to a pointer.
     *
     * @todo: A weakness of the removePrim design is that it introduces two overloads with the following arguments:
     * * pointer
     * * integer
     * This leads to ambiguity during overload resolution. The overload taking a pointer should be avoided
     * and removed in the future.
*/
bool removePrim(ConstBundlePrim* prim) noexcept;
/**
* @deprecated Do not use! There is no need for this function to exist.
* Get the primitive and call clearContents().
*/
BundlePrim* getClearedPrim(BundlePrimIndex primIndex) noexcept;
/**
* @deprecated Responsibility to cache primitive's attributes has been moved to BundlePrim.
*/
void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept;
/**
     * This method exists for backward compatibility only. With the new interface, the bundle holds the type.
*
* @deprecated Do not use! Use getConstPrimTypes().
*/
omni::graph::core::NameToken* getPrimTypes() noexcept;
/**
     * This method exists for backward compatibility only. With the new interface, the bundle holds the path.
*
* @deprecated Do not use! Use getConstPrimPaths().
*/
omni::graph::core::NameToken* getPrimPaths() noexcept;
/**
* @deprecated Do not use! Path is a part of IBundle interface.
*/
omni::graph::core::NameToken* addPrimPathsIfMissing() noexcept;
private:
/**
* @return Returns nullptr if bundle is read only, or IBundle2 instance otherwise.
*/
omni::graph::core::IBundle2* getBundlePtr() noexcept;
/**
* @return Get prim dirty ids of this bundle.
*/
DirtyIDType* getPrimDirtyIDs() noexcept;
// cached attribute handles
using AttributeDataHandle = omni::graph::core::AttributeDataHandle;
AttributeDataHandle m_bundleDirtyIDAttr{ AttributeDataHandle::invalidValue() };
AttributeDataHandle m_primDirtyIDsAttr{ AttributeDataHandle::invalidValue() };
AttributeDataHandle m_primPathsAttr{ AttributeDataHandle::invalidValue() };
AttributeDataHandle m_primTypesAttr{ AttributeDataHandle::invalidValue() };
AttributeDataHandle m_primIndexAttr{ AttributeDataHandle::invalidValue() };
friend class BundlePrim;
friend class BundleAttrib;
};
/**
* Primitives in Bundle iterator.
*/
class OMNI_GRAPH_IO_DEPRECATED BundlePrimIterator
{
public:
BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept;
BundlePrimIterator(BundlePrimIterator const& that) noexcept = default;
BundlePrimIterator& operator=(BundlePrimIterator const& that) noexcept = default;
bool operator==(BundlePrimIterator const& that) const noexcept;
bool operator!=(BundlePrimIterator const& that) const noexcept;
BundlePrim& operator*() noexcept;
BundlePrim* operator->() noexcept;
BundlePrimIterator& operator++() noexcept;
private:
BundlePrims* m_bundlePrims;
BundlePrimIndex m_primIndex;
};
/**
* Attributes in Primitive iterator.
*/
class OMNI_GRAPH_IO_DEPRECATED BundlePrimAttrIterator
{
public:
BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept;
BundlePrimAttrIterator(BundlePrimAttrIterator const& that) noexcept = default;
BundlePrimAttrIterator& operator=(BundlePrimAttrIterator const& that) noexcept = default;
bool operator==(BundlePrimAttrIterator const& that) const noexcept;
bool operator!=(BundlePrimAttrIterator const& that) const noexcept;
BundleAttrib& operator*() noexcept;
BundleAttrib* operator->() noexcept;
BundlePrimAttrIterator& operator++() noexcept;
BundleAttrib const* getConst() noexcept;
private:
BundlePrim* m_bundlePrim;
BundlePrim::AttrMapIteratorType m_attrIter;
};
} // namespace io
} // namespace graph
} // namespace omni
#include "BundlePrimsImpl.h"
| 14,273 | C | 31.738532 | 123 | 0.637497 |
omniverse-code/kit/include/omni/graph/io/ConstBundlePrimsImpl.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "ConstBundlePrims.h"
#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/ComputeGraph.h>
#include <algorithm>
namespace omni
{
namespace graph
{
namespace io
{
// ====================================================================================================
//
// Const Bundle Primitive
//
// ====================================================================================================
inline ConstBundlePrim::ConstBundlePrim(ConstBundlePrims& bundlePrims,
BundlePrimIndex primIndex,
omni::core::ObjectPtr<omni::graph::core::IConstBundle2> bundle)
: m_bundlePrims{ &bundlePrims }, m_bundle{ std::move(bundle) }, m_primIndex{ primIndex }
{
    // Read and cache all non-internal attributes.
readAndCacheAttributes();
}
inline void ConstBundlePrim::readAndCacheAttributes() noexcept
{
using namespace omni::graph::core;
IConstBundle2* bundle = getConstBundlePtr();
GraphContextObj const& context = getConstBundlePrims()->context();
std::vector<ConstAttributeDataHandle> attrHandles(bundle->getAttributeCount());
bundle->getConstAttributes(attrHandles.data(), attrHandles.size());
auto& attrs = getAttributes();
for(ConstAttributeDataHandle& attrHandle : attrHandles)
{
if(!attrHandle.isValid())
continue;
NameToken attrName = context.iAttributeData->getName(context, attrHandle);
attrs.insert(std::make_pair(attrName, std::make_unique<BundleAttrib>(*this, attrName)));
}
}
inline BundleAttrib const* ConstBundlePrim::getConstAttr(core::NameToken attrName) noexcept
{
using namespace omni::graph::core;
// Try to find cached attributes
auto& attrMap = getAttributes();
auto it = attrMap.find(attrName);
if (it != attrMap.end())
{
return it->second.get();
}
// Try to find attribute in this bundle.
IConstBundle2* bundle = getConstBundlePtr();
ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(attrName);
if (!attributeHandle.isValid())
{
// attribute is not found, ensure entry is removed from the cache.
auto it = attrMap.find(attrName);
if (it != attrMap.end())
{
attrMap.erase(it);
}
return nullptr;
}
    // The attribute exists in the bundle but is not cached yet; create and cache a new BundleAttrib.
auto newPrimAttribute = new BundleAttrib{ *this, attrName};
std::unique_ptr<BundleAttrib> primAttributePtr{ newPrimAttribute };
attrMap.emplace(attrName, std::move(primAttributePtr));
return newPrimAttribute;
}
inline BundleAttrib const* ConstBundlePrim::getAttr(omni::graph::core::NameToken attrName) const noexcept
{
return const_cast<ConstBundlePrim*>(this)->getConstAttr(attrName);
}
inline size_t ConstBundlePrim::attrCount() noexcept
{
return getAttributes().size();
}
inline BundlePrimIndex ConstBundlePrim::primIndex() noexcept
{
return m_primIndex;
}
inline omni::graph::core::NameToken ConstBundlePrim::path() noexcept
{
using namespace omni::graph::core;
ConstBundlePrims* bundlePrims = getConstBundlePrims();
BundlePrimIndex const primIndex = this->primIndex();
if (primIndex >= bundlePrims->getPrimCount())
{
return carb::flatcache::kUninitializedToken;
}
NameToken const* paths = bundlePrims->getConstPrimPaths();
return (paths != nullptr) ? paths[primIndex] : carb::flatcache::kUninitializedToken;
}
inline omni::graph::core::NameToken ConstBundlePrim::path() const noexcept
{
return const_cast<ConstBundlePrim*>(this)->path();
}
inline omni::graph::core::NameToken ConstBundlePrim::type() noexcept
{
using namespace omni::graph::core;
ConstBundlePrims* bundlePrims = getConstBundlePrims();
BundlePrimIndex const primIndex = this->primIndex();
if (primIndex >= bundlePrims->getPrimCount())
{
return carb::flatcache::kUninitializedToken;
}
NameToken const* types = bundlePrims->getConstPrimTypes();
return (types != nullptr) ? types[primIndex] : carb::flatcache::kUninitializedToken;
}
inline omni::graph::core::NameToken ConstBundlePrim::type() const noexcept
{
return const_cast<ConstBundlePrim*>(this)->type();
}
inline DirtyIDType ConstBundlePrim::dirtyID() noexcept
{
ConstBundlePrims* bundlePrims = getConstBundlePrims();
if (primIndex() >= bundlePrims->getPrimCount())
{
return bundlePrims->getBundleDirtyID();
}
DirtyIDType const* dirtyIDs = m_bundlePrims->getPrimDirtyIDs();
return (dirtyIDs != nullptr) ? dirtyIDs[m_primIndex] : kInvalidDirtyID;
}
inline DirtyIDType ConstBundlePrim::dirtyID() const noexcept
{
return const_cast<ConstBundlePrim*>(this)->dirtyID();
}
inline ConstBundlePrims* ConstBundlePrim::getConstBundlePrims() noexcept
{
return m_bundlePrims;
}
inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() noexcept
{
return ConstBundlePrimAttrIterator(*this, getAttributes().begin());
}
inline ConstBundlePrimAttrIterator ConstBundlePrim::end() noexcept
{
return ConstBundlePrimAttrIterator(*this, getAttributes().end());
}
inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() const noexcept
{
ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().begin());
}
inline ConstBundlePrimAttrIterator ConstBundlePrim::end() const noexcept
{
ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().end());
}
inline omni::graph::core::IConstBundle2* ConstBundlePrim::getConstBundlePtr() noexcept
{
return m_bundle.get();
}
inline ConstBundlePrim::BundleAttributeMap& ConstBundlePrim::getAttributes() noexcept
{
return m_attributes;
}
// ====================================================================================================
//
// Const Bundle Primitives
//
// ====================================================================================================
inline ConstBundlePrims::ConstBundlePrims()
{
}
inline ConstBundlePrims::ConstBundlePrims(omni::graph::core::GraphContextObj const& context,
omni::graph::core::ConstBundleHandle const& bundle)
: ConstBundlePrims()
{
attach(context, bundle);
}
inline void ConstBundlePrims::detach() noexcept
{
m_bundleDirtyID = kInvalidDirtyID;
m_primTypes = nullptr;
m_primPaths = nullptr;
m_primDirtyIDs = nullptr;
m_iDirtyID = nullptr;
m_primitives.clear();
m_commonAttributes.reset();
m_context = omni::graph::core::GraphContextObj{};
m_bundle.release();
m_factory.release();
}
inline omni::graph::core::NameToken const* ConstBundlePrims::getConstPrimPaths() noexcept
{
return m_primPaths;
}
inline ConstBundlePrims::BundlePrimArray& ConstBundlePrims::getPrimitives() noexcept
{
return m_primitives;
}
inline omni::graph::core::NameToken const* ConstBundlePrims::getConstPrimTypes() noexcept
{
return m_primTypes;
}
inline omni::graph::core::ConstBundleHandle ConstBundlePrims::getConstHandle() noexcept
{
return m_bundle->getConstHandle();
}
template <typename FUNC>
ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex, FUNC createBundlePrim) noexcept
{
// Return invalid const bundle prim if out of bounds.
size_t const bundlePrimCount = getPrimCount();
if (primIndex >= bundlePrimCount)
{
return nullptr;
}
// Search and return if in cache.
auto& prims = getPrimitives();
if (prims.size() != bundlePrimCount)
{
prims.resize(bundlePrimCount);
}
if (prims[primIndex] != nullptr)
{
return prims[primIndex].get();
}
// update the cache and return the bundle prim.
std::unique_ptr<ConstBundlePrim> newBundlePrim{ createBundlePrim() };
if (!newBundlePrim)
{
return nullptr;
}
ConstBundlePrim* newBundlePrimPtr = newBundlePrim.get();
prims[primIndex] = std::move(newBundlePrim);
return newBundlePrimPtr;
}
inline ConstBundlePrim* ConstBundlePrims::getPrim(BundlePrimIndex primIndex) noexcept
{
return getConstPrim(primIndex);
}
inline ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex) noexcept
{
using namespace omni::graph::core;
auto createBundlePrim = [this, &bundlePrims = *this, &primIndex]() -> ConstBundlePrim*
{
ConstBundleHandle bundleHandle = getConstBundlePtr()->getConstChildBundle(primIndex);
if (!bundleHandle.isValid())
{
return nullptr;
}
omni::core::ObjectPtr<IConstBundle2> childBundle = getBundleFactoryPtr()->getConstBundle(context(), bundleHandle);
if (!childBundle)
{
return nullptr;
}
return new ConstBundlePrim{ bundlePrims, primIndex, childBundle };
};
return getConstPrim(primIndex, createBundlePrim);
}
inline DirtyIDType ConstBundlePrims::getBundleDirtyID() noexcept
{
return m_bundleDirtyID;
}
inline DirtyIDType const* ConstBundlePrims::getPrimDirtyIDs() noexcept
{
return m_primDirtyIDs;
}
inline ConstBundlePrim& ConstBundlePrims::getConstCommonAttrs() noexcept
{
return *m_commonAttributes;
}
inline omni::graph::core::GraphContextObj const& ConstBundlePrims::context() noexcept
{
using namespace omni::graph::core;
if (m_bundle)
{
m_context = m_bundle->getContext();
}
else
{
m_context = GraphContextObj{};
}
return m_context;
}
inline DirtyIDType ConstBundlePrims::getNextDirtyID() noexcept
{
return m_iDirtyID->getNextDirtyID();
}
inline void ConstBundlePrims::attach(omni::graph::core::GraphContextObj const& context,
omni::graph::core::ConstBundleHandle const& bundleHandle) noexcept
{
using namespace omni::graph::core;
ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>();
omni::core::ObjectPtr<IBundleFactory> factory = computeGraph->getBundleFactoryInterfacePtr();
omni::core::ObjectPtr<IConstBundle2> bundle = factory->getConstBundle(context, bundleHandle);
attach(std::move(factory), std::move(bundle));
}
inline void ConstBundlePrims::attach(omni::core::ObjectPtr<omni::graph::core::IBundleFactory>&& factoryPtr,
omni::core::ObjectPtr<omni::graph::core::IConstBundle2>&& bundlePtr) noexcept
{
using namespace omni::graph::core;
// Initialize members
m_factory = std::move(factoryPtr);
m_bundle = std::move(bundlePtr);
// Initialize common attributes to provide access to ConstBundlePrims attributes.
m_commonAttributes.reset(new ConstBundlePrim(*this, kInvalidBundlePrimIndex, m_bundle));
// Acquire IDirtyID interface
m_iDirtyID = carb::getCachedInterface<omni::graph::io::IDirtyID>();
if (!m_bundle->isValid())
{
return;
}
// TODO: Following code is necessary for backward compatibility.
IConstBundle2* bundle = getConstBundlePtr();
GraphContextObj const& context = this->context();
// Bundle DirtyID.
auto& bundleDirtyIDDef = detail::getBundleDirtyIDDefinition();
ConstAttributeDataHandle bundleDirtyIDHandle = bundle->getConstBundleMetadataByName(bundleDirtyIDDef.token);
if (bundleDirtyIDHandle.isValid())
{
setBundleDirtyID(*getDataR<DirtyIDType>(context, bundleDirtyIDHandle));
}
else
{
setBundleDirtyID(kInvalidDirtyID);
}
// Prim DirtyIDs.
auto& primDirtyIDsDef = detail::getPrimDirtyIDsDefinition();
ConstAttributeDataHandle primDirtyIDsHandle = bundle->getConstBundleMetadataByName(primDirtyIDsDef.token);
if (primDirtyIDsHandle.isValid())
{
size_t arrayLength = 0;
context.iAttributeData->getElementCount(&arrayLength, context, &primDirtyIDsHandle, 1);
setPrimDirtyIDsData(*getDataR<DirtyIDType*>(context, primDirtyIDsHandle));
}
else
{
setPrimDirtyIDsData(nullptr);
}
// Prim Paths.
auto& primPathsDef = detail::getPrimPathsDefinition();
ConstAttributeDataHandle primPathsHandle = bundle->getConstBundleMetadataByName(primPathsDef.token);
if (primPathsHandle.isValid())
{
size_t arrayLength = 0;
context.iAttributeData->getElementCount(&arrayLength, context, &primPathsHandle, 1);
setPrimPathsData(*getDataR<NameToken*>(context, primPathsHandle));
}
else
{
setPrimPathsData(nullptr);
}
// Prim Types.
auto& primTypesDef = detail::getPrimTypesDefinition();
ConstAttributeDataHandle primTypesHandle = bundle->getConstBundleMetadataByName(primTypesDef.token);
if (primTypesHandle.isValid())
{
size_t arrayLength = 0;
context.iAttributeData->getElementCount(&arrayLength, context, &primTypesHandle, 1);
setPrimTypesData(*getDataR<NameToken*>(context, primTypesHandle));
}
else
{
setPrimTypesData(nullptr);
}
}
inline void ConstBundlePrims::setBundleDirtyID(DirtyIDType bundleDirtyID) noexcept
{
m_bundleDirtyID = bundleDirtyID;
}
inline void ConstBundlePrims::setPrimDirtyIDsData(DirtyIDType const* primDirtyIDs) noexcept
{
m_primDirtyIDs = primDirtyIDs;
}
inline void ConstBundlePrims::setPrimPathsData(omni::graph::core::NameToken const* primPaths) noexcept
{
m_primPaths = primPaths;
}
inline void ConstBundlePrims::setPrimTypesData(omni::graph::core::NameToken const* primTypes) noexcept
{
m_primTypes = primTypes;
}
inline omni::graph::core::IBundleFactory* ConstBundlePrims::getBundleFactoryPtr() noexcept
{
return m_factory.get();
}
inline omni::graph::core::IConstBundle2* ConstBundlePrims::getConstBundlePtr() noexcept
{
return m_bundle.get();
}
inline size_t ConstBundlePrims::getPrimCount() noexcept
{
    if (omni::graph::core::IConstBundle2* bundle = getConstBundlePtr())
{
return bundle->getChildBundleCount();
}
return 0;
}
inline ConstBundlePrimIterator ConstBundlePrims::begin() noexcept
{
return ConstBundlePrimIterator(*this);
}
inline ConstBundlePrimIterator ConstBundlePrims::end() noexcept
{
return ConstBundlePrimIterator(*this, getPrimCount());
}
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future, but are kept for backward compatibility.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
inline ConstBundlePrim& ConstBundlePrims::getCommonAttrs() noexcept
{
return getConstCommonAttrs();
}
inline omni::graph::core::ConstBundleHandle ConstBundlePrims::handle() noexcept
{
return m_bundle->getConstHandle();
}
inline omni::graph::core::NameToken const* ConstBundlePrims::getPrimPaths() noexcept
{
return getConstPrimPaths();
}
inline void ConstBundlePrims::separateAttrs() noexcept
{
// There is nothing to separate. This function is deprecated.
}
inline void ConstBundlePrims::ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept
{
// Responsibility of caching attributes was moved to Bundle Prim.
}
// ====================================================================================================
//
// Const Bundle Primitive Iterator
//
// ====================================================================================================
inline ConstBundlePrimIterator::ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept
: m_bundlePrims(&bundlePrims), m_primIndex(primIndex)
{
}
inline bool ConstBundlePrimIterator::operator==(ConstBundlePrimIterator const& that) const noexcept
{
return m_bundlePrims == that.m_bundlePrims && m_primIndex == that.m_primIndex;
}
inline bool ConstBundlePrimIterator::operator!=(ConstBundlePrimIterator const& that) const noexcept
{
return !(*this == that);
}
inline ConstBundlePrim& ConstBundlePrimIterator::operator*() noexcept
{
return *(m_bundlePrims->getConstPrim(m_primIndex));
}
inline ConstBundlePrim* ConstBundlePrimIterator::operator->() noexcept
{
return m_bundlePrims->getConstPrim(m_primIndex);
}
inline ConstBundlePrimIterator& ConstBundlePrimIterator::operator++() noexcept
{
++m_primIndex;
return *this;
}
// ====================================================================================================
//
// Const Bundle Primitive Attribute Iterator
//
// ====================================================================================================
inline ConstBundlePrimAttrIterator::ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim, ConstBundlePrim::AttrMapIteratorType attrIter) noexcept
: m_bundlePrim(&bundlePrim), m_attrIter(attrIter)
{
}
inline bool ConstBundlePrimAttrIterator::operator==(ConstBundlePrimAttrIterator const& that) const noexcept
{
return m_bundlePrim == that.m_bundlePrim && m_attrIter == that.m_attrIter;
}
inline bool ConstBundlePrimAttrIterator::operator!=(ConstBundlePrimAttrIterator const& that) const noexcept
{
return !(*this == that);
}
inline BundleAttrib const& ConstBundlePrimAttrIterator::operator*() const noexcept
{
CARB_ASSERT(m_attrIter->second);
return *(m_attrIter->second);
}
inline BundleAttrib const* ConstBundlePrimAttrIterator::operator->() const noexcept
{
CARB_ASSERT(m_attrIter->second);
return m_attrIter->second.get();
}
inline ConstBundlePrimAttrIterator& ConstBundlePrimAttrIterator::operator++() noexcept
{
++m_attrIter;
return *this;
}
} // namespace io
} // namespace graph
} // namespace omni
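
// Usage sketch (illustrative only; the graph context and the input bundle handle are assumed to
// be provided by the surrounding node):
//
//   void inspectBundle(omni::graph::core::GraphContextObj const& context,
//                      omni::graph::core::ConstBundleHandle inBundle)
//   {
//       omni::graph::io::ConstBundlePrims bundlePrims(context, inBundle);
//       for (omni::graph::io::ConstBundlePrim& prim : bundlePrims)
//       {
//           omni::graph::core::NameToken path = prim.path();
//           omni::graph::core::NameToken type = prim.type();
//           (void)path;
//           (void)type;
//
//           // Iterate the cached attributes of this prim.
//           for (omni::graph::io::BundleAttrib const& attr : prim)
//           {
//               // attr.name(), attr.type(), attr.isArray(), attr.size(), ...
//               (void)attr;
//           }
//       }
//   }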
| 18,404 | C | 29.522388 | 148 | 0.672843 |
omniverse-code/kit/include/omni/graph/io/BundleAttribImpl.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BundleAttrib.h"
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/CppWrappers.h>
#include <omni/math/linalg/vec.h>
#include <omni/math/linalg/matrix.h>
#include <omni/math/linalg/quat.h>
#include <omni/math/linalg/half.h>
namespace omni
{
namespace math
{
namespace linalg
{
template <typename T>
struct TypeToBaseType
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUnknown;
};
template <>
struct TypeToBaseType<half>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eHalf;
};
template <>
struct TypeToBaseType<float>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eFloat;
};
template <>
struct TypeToBaseType<double>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eDouble;
};
template <>
struct TypeToBaseType<bool>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eBool;
};
template <>
struct TypeToBaseType<unsigned char>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUChar;
};
template <>
struct TypeToBaseType<int>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt;
};
template <>
struct TypeToBaseType<int64_t>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt64;
};
template <>
struct TypeToBaseType<unsigned int>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt;
};
template <>
struct TypeToBaseType<uint64_t>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt64;
};
template <>
struct TypeToBaseType<carb::flatcache::Token>
{
constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eToken;
};
template <typename T, size_t N>
struct TypeToBaseType<base_vec<T, N>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec2<T>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec3<T>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec4<T>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<quat<T>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T, size_t N>
struct TypeToBaseType<base_matrix<T, N>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix2<T>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix3<T>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix4<T>>
{
constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToComponentCount
{
constexpr static size_t count = 1;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_vec<T,N>>
{
constexpr static size_t count = N;
};
template <typename T>
struct TypeToComponentCount<vec2<T>>
{
constexpr static size_t count = 2;
};
template <typename T>
struct TypeToComponentCount<vec3<T>>
{
constexpr static size_t count = 3;
};
template <typename T>
struct TypeToComponentCount<vec4<T>>
{
constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<quat<T>>
{
constexpr static size_t count = 4;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_matrix<T,N>>
{
constexpr static size_t count = N*N;
};
template <typename T>
struct TypeToComponentCount<matrix2<T>>
{
constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<matrix3<T>>
{
constexpr static size_t count = 9;
};
template <typename T>
struct TypeToComponentCount<matrix4<T>>
{
constexpr static size_t count = 16;
};
} // namespace linalg
} // namespace math
} // namespace omni
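
// Illustrative helper (not part of this header): the trait tables above can be combined to
// derive an OmniGraph attribute Type from a C++ value type. This is only a sketch of how the
// traits compose; the attribute code later in this file uses explicit Type values instead.
//
//   template <typename T>
//   omni::graph::core::Type makeAttrType(uint8_t arrayDepth = 0)
//   {
//       return omni::graph::core::Type{ omni::math::linalg::TypeToBaseType<T>::baseType,
//                                       static_cast<uint8_t>(omni::math::linalg::TypeToComponentCount<T>::count),
//                                       arrayDepth };
//   }
//
//   // makeAttrType<omni::math::linalg::vec3<float>>(1) describes a float[3] array attribute.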
namespace omni
{
namespace graph
{
namespace io
{
namespace detail
{
//
// Non-owning string buffer with compile time size evaluation
//
class StringBuffer
{
public:
using value_type = char const*;
using size_type = std::size_t;
using const_iterator = char const*;
constexpr StringBuffer(value_type data, size_type size) noexcept : m_data{ data }, m_size{ size }
{
}
constexpr explicit StringBuffer(value_type data) noexcept : StringBuffer{ data, len(data) }
{
}
constexpr StringBuffer(StringBuffer const&) = default;
constexpr StringBuffer(StringBuffer&&) = default;
constexpr value_type data() const noexcept
{
return m_data;
}
constexpr size_type size() const noexcept
{
return m_size;
}
constexpr const_iterator begin() const noexcept
{
return m_data;
}
constexpr const_iterator end() const noexcept
{
return m_data + m_size;
}
private:
constexpr size_type len(value_type start) const noexcept
{
value_type end = start;
for (; *end != '\0'; ++end)
;
return end - start;
}
value_type m_data;
size_type m_size;
};
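
// Example (illustrative): the length of a string literal is computed at compile time, so a
// constexpr buffer can refer to an attribute name without allocating or copying it.
//
//   constexpr StringBuffer kInterpolationName{ "interpolation" };
//   static_assert(kInterpolationName.size() == 13, "length evaluated at compile time");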
// Helper class to keep name and type together.
struct AttrDefinition
{
AttrDefinition(StringBuffer _name, omni::graph::core::Type _type, omni::graph::core::NameToken _token) noexcept
: name{ _name }
, type{ _type }
, token{ _token }
{
}
AttrDefinition(carb::flatcache::IToken const* iToken, char const* _text, omni::graph::core::Type _type) noexcept
: AttrDefinition{ StringBuffer{_text}, _type, iToken->getHandle(_text) }
{
}
AttrDefinition(AttrDefinition const&) = delete;
AttrDefinition(AttrDefinition&&) = delete;
AttrDefinition& operator=(AttrDefinition const&) = delete;
AttrDefinition& operator=(AttrDefinition&&) = delete;
StringBuffer name; // Name and size of the attribute
omni::graph::core::Type type; // Type of the attribute
omni::graph::core::NameToken token; // Token representation of the name
};
// Attribute Level Definitions:
inline AttrDefinition const& getAttrInterpolationDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "interpolation", Type{ BaseDataType::eToken, 1, 0 } };
return d;
}
inline AttrDefinition const& getAttrDirtyIdDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "dirtyID", Type{ BaseDataType::eUInt64, 1, 0 } };
return d;
}
inline AttrDefinition const& getAttrSourceDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "source", Type{ BaseDataType::eUChar, 1, 0 } };
return d;
}
// Primitive Level Definitions:
inline AttrDefinition const& getPrimPathsDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primPaths", Type{ BaseDataType::eToken, 1, 1 } };
return d;
}
inline AttrDefinition const& getPrimTypesDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primTypes", Type{ BaseDataType::eToken, 1, 1 } };
return d;
}
inline AttrDefinition const& getPrimIndexDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primIndex", Type{ BaseDataType::eUInt64, 1, 0 } };
return d;
}
inline AttrDefinition const& getPrimDirtyIDsDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primDirtyIDs", Type{ BaseDataType::eUInt64, 1, 1 } };
return d;
}
// Bundle Level Definitions
inline AttrDefinition const& getBundleDirtyIDDefinition() noexcept
{
using namespace carb::flatcache;
using namespace omni::graph::core;
static AttrDefinition d{ carb::getCachedInterface<IToken>(), "bundleDirtyID", Type{ BaseDataType::eUInt64, 1, 0 } };
return d;
}
// Constant types.
constexpr omni::graph::core::Type s_relationshipType{ omni::graph::core::BaseDataType::eToken, 1, 1 };
} // namespace detail
inline bool BundleAttrib::isRelationshipData() const noexcept
{
return m_source == Source::Relationship && type() == detail::s_relationshipType;
}
inline bool BundleAttrib::setInterpolation(omni::graph::core::NameToken interpolation) noexcept
{
using namespace omni::graph::core;
if (m_interpolation == interpolation)
return true;
if (interpolation == carb::flatcache::kUninitializedToken)
{
clearInterpolation();
return true;
}
if (IBundle2* bundle = getBundlePtr())
{
auto& interpDef = detail::getAttrInterpolationDefinition();
AttributeDataHandle interpolationAttr = bundle->getAttributeMetadataByName(m_name, interpDef.token);
if (!interpolationAttr.isValid())
{
interpolationAttr = bundle->createAttributeMetadata(m_name, interpDef.token, interpDef.type);
}
m_interpolation = interpolation;
auto context = bundle->getContext();
*getDataW<NameToken>(context, interpolationAttr) = interpolation;
return true;
}
return false;
}
inline bool BundleAttrib::setDirtyID(DirtyIDType dirtyID) noexcept
{
using namespace omni::graph::core;
if(m_dirtyID == dirtyID)
return true;
if (IBundle2* bundle = getBundlePtr())
{
auto& dirtyIdDef = detail::getAttrDirtyIdDefinition();
AttributeDataHandle dirtyIDAttr = bundle->getAttributeMetadataByName(m_name, dirtyIdDef.token);
if (!dirtyIDAttr.isValid())
{
dirtyIDAttr = bundle->createAttributeMetadata(m_name, dirtyIdDef.token, dirtyIdDef.type);
}
m_dirtyID = dirtyID;
auto context = bundle->getContext();
*omni::graph::core::getDataW<DirtyIDType>(context, dirtyIDAttr) = dirtyID;
return true;
}
return false;
}
inline bool BundleAttrib::setSource(Source source) noexcept
{
using namespace omni::graph::core;
if(m_source == source)
return true;
if (IBundle2* bundle = getBundlePtr())
{
auto& sourceDef = detail::getAttrSourceDefinition();
AttributeDataHandle sourceAttr = bundle->getAttributeMetadataByName(m_name, sourceDef.token);
if(!sourceAttr.isValid())
{
sourceAttr = bundle->createAttributeMetadata(m_name, sourceDef.token, sourceDef.type);
}
m_source = source;
auto context = bundle->getContext();
*omni::graph::core::getDataW<SourceType>(context, sourceAttr) = static_cast<SourceType>(source);
return true;
}
return false;
}
inline void BundleAttrib::copyContentsFrom(BundleAttrib const& sourceAttr) noexcept
{
using namespace omni::graph::core;
if (m_dirtyID == sourceAttr.m_dirtyID)
return;
IBundle2* dstBundle = getBundlePtr();
IConstBundle2* srcBundle = sourceAttr.getConstBundlePtr();
if (!dstBundle)
{
return;
}
auto context = dstBundle->getContext();
// Copy Attribute
AttributeDataHandle dstAttrHandle = dstBundle->getAttributeByName(m_name);
ConstAttributeDataHandle srcAttrHandle = srcBundle->getConstAttributeByName(sourceAttr.m_name);
    // Sanity check: the destination attribute's existing type must match the cached type before copying.
CARB_ASSERT(context.iAttributeData->getType(context, dstAttrHandle) == Type(m_type));
context.iAttributeData->copyData(dstAttrHandle, context, srcAttrHandle);
// Copy the cached type
m_type = sourceAttr.m_type;
// Copy the interpolation (does nothing if the same; clears interpolation if none on sourceAttr)
setInterpolation(sourceAttr.interpolation());
// Copy the dirty ID
setDirtyID(sourceAttr.m_dirtyID);
// Copy source
setSource(sourceAttr.m_source);
}
inline void BundleAttrib::clearInterpolation() noexcept
{
using namespace omni::graph::core;
if (IBundle2* bundle = getBundlePtr())
{
auto context = bundle->getContext();
auto& interpDef = detail::getAttrInterpolationDefinition();
bundle->removeAttributeMetadata(m_name, interpDef.token);
m_interpolation = carb::flatcache::kUninitializedToken;
}
}
inline ConstBundlePrim* BundleAttrib::getBundlePrim() const noexcept
{
return m_bundlePrim;
}
inline void BundleAttrib::clearSource() noexcept
{
using namespace omni::graph::core;
if (IBundle2* bundle = getBundlePtr())
{
auto context = bundle->getContext();
auto& sourceDef = detail::getAttrSourceDefinition();
bundle->removeAttributeMetadata(m_name, sourceDef.token);
m_source = BundleAttribSource::Attribute;
}
}
inline omni::graph::core::NameToken BundleAttrib::name() const noexcept
{
return m_name;
}
inline omni::graph::core::NameToken BundleAttrib::interpolation() const noexcept
{
return m_interpolation;
}
inline DirtyIDType BundleAttrib::dirtyID() const noexcept
{
return m_dirtyID;
}
inline omni::graph::core::Type BundleAttrib::type() const noexcept
{
return omni::graph::core::Type(m_type);
}
inline bool BundleAttrib::isArray() const noexcept
{
omni::graph::core::Type type{ carb::flatcache::TypeC{ m_type } };
CARB_ASSERT(type.arrayDepth < 2);
return (type.arrayDepth != 0);
}
inline BundleAttrib::Source BundleAttrib::source() const noexcept
{
return m_source;
}
inline bool BundleAttrib::isAttributeData() const noexcept
{
return m_source == Source::Attribute;
}
inline omni::graph::core::NameToken BundleAttrib::prefixedName() const noexcept
{
return m_name;
}
inline size_t BundleAttrib::size() const noexcept
{
using namespace omni::graph::core;
if (!isArray())
{
return 1;
}
IConstBundle2* bundle = getConstBundlePtr();
auto context = bundle->getContext();
ConstAttributeDataHandle attr = bundle->getConstAttributeByName(m_name);
size_t count;
context.iAttributeData->getElementCount(&count, context, &attr, 1);
return count;
}
inline void BundleAttrib::resize(size_t arrayElementCount) noexcept
{
using namespace omni::graph::core;
CARB_ASSERT(isArray());
if (IBundle2* bundle = getBundlePtr())
{
auto context = bundle->getContext();
AttributeDataHandle attr = bundle->getAttributeByName(m_name);
context.iAttributeData->setElementCount(context, attr, arrayElementCount);
}
}
inline void* BundleAttrib::getDataInternal() noexcept
{
using namespace omni::graph::core;
if (IBundle2* bundle = getBundlePtr())
{
auto context = bundle->getContext();
AttributeDataHandle attr = bundle->getAttributeByName(m_name);
if (Type(m_type).arrayDepth == 0)
{
return getDataW<void>(context, attr);
}
return *getDataW<void*>(context, attr);
}
return nullptr;
}
inline void const* BundleAttrib::getDataInternal() const noexcept
{
using namespace omni::graph::core;
IConstBundle2* constBundle = getConstBundlePtr();
GraphContextObj context = constBundle->getContext();
ConstAttributeDataHandle attr = constBundle->getConstAttributeByName(m_name);
if (Type(m_type).arrayDepth == 0)
{
return getDataR<void const>(context, attr);
}
return *getDataR<void const*>(context, attr);
}
inline omni::graph::core::AttributeDataHandle BundleAttrib::handle() noexcept
{
using namespace omni::graph::core;
if (IBundle2* bundle = getBundlePtr())
{
return AttributeDataHandle(AttrKey(bundle->getHandle(), m_name.token));
}
return AttributeDataHandle{ AttributeDataHandle::invalidValue() };
}
inline omni::graph::core::ConstAttributeDataHandle BundleAttrib::handle() const noexcept
{
using namespace omni::graph::core;
if(IConstBundle2* bundle = getConstBundlePtr())
{
return ConstAttributeDataHandle{ AttrKey(bundle->getConstHandle(), m_name.token) };
}
return ConstAttributeDataHandle{ ConstAttributeDataHandle::invalidValue() };
}
template <typename T>
T* BundleAttrib::getData() noexcept
{
// It must be valid to request a pointer to type T.
// requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
using namespace omni::math::linalg;
using Type = omni::graph::core::Type;
bool const isSameBaseType = TypeToBaseType<T>::baseType == Type(m_type).baseType;
bool const isSameCount = TypeToComponentCount<T>::count == Type(m_type).componentCount;
bool const isValidCast = isSameBaseType && (TypeToComponentCount<T>::count == 1 || isSameCount);
return isValidCast ? reinterpret_cast<T*>(getDataInternal()) : nullptr;
}
template <typename T>
T const* BundleAttrib::getData() const noexcept
{
// It must be valid to request a pointer to type T.
// requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
using namespace omni::math::linalg;
using Type = omni::graph::core::Type;
bool const isValidCast =
TypeToBaseType<T>::baseType == Type(m_type).baseType &&
(TypeToComponentCount<T>::count == 1 ||
TypeToComponentCount<T>::count == Type(m_type).componentCount);
return isValidCast ? reinterpret_cast<T const*>(getDataInternal()) : nullptr;
}
template <typename T>
T const* BundleAttrib::getConstData() const noexcept
{
return getData<T>();
}
template <typename T>
T BundleAttrib::get() const noexcept
{
using namespace omni::math::linalg;
using Type = omni::graph::core::Type;
// TODO: Figure out how to support array attributes here.
CARB_ASSERT(Type(m_type).arrayDepth == 0);
// This has stronger requirements than getData, since get<float>() isn't valid
// for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
CARB_ASSERT(TypeToComponentCount<T>::count == Type(m_type).componentCount);
return *getConstData<T>();
}
template <typename T>
void BundleAttrib::set(T const& value) noexcept
{
using namespace omni::math::linalg;
using Type = omni::graph::core::Type;
CARB_ASSERT(Type(m_type).arrayDepth == 0);
// This has stronger requirements than getData, since set(1.0f) isn't valid
// for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
CARB_ASSERT(TypeToComponentCount<T>::count == Type(m_type).componentCount);
*getData<T>() = value;
}
template <typename T>
void BundleAttrib::set(T const* values, size_t elementCount) noexcept
{
using namespace omni::math::linalg;
using Type = omni::graph::core::Type;
CARB_ASSERT(Type(m_type).arrayDepth == 1);
// This has stronger requirements than getData, since set(float const*,size_t) isn't valid
// for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
CARB_ASSERT(TypeToComponentCount<T>::count == Type(m_type).componentCount);
resize(elementCount);
if (elementCount > 0)
{
T* p = getData<T>();
for (size_t i = 0; i < elementCount; ++i)
{
p[i] = values[i];
}
}
}
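// ====================================================================================================
//
// Example (illustrative sketch only; not part of the original header).
//
// The comments in getData/get/set above describe the casting rules for typed access. The block below
// shows how those rules play out for a hypothetical 3-component float ("point3f"-like) attribute.
// The attribute itself and its name are assumptions, so the block is compiled out on purpose.
//
// ====================================================================================================
#if 0
inline void bundleAttribTypedAccessExample(BundleAttrib* attr)
{
    // getData<T> only requires a matching base type:
    float* asFloats = attr->getData<float>(); // valid: float is the base type of a 3-float attribute
    double* asDoubles = attr->getData<double>(); // returns nullptr: base type mismatch
    // get<T>/set<T> additionally require a matching component count, so for a 3-component
    // attribute set(1.0f) would trip the CARB_ASSERT above; a 3-component value type is required.
    // For array attributes, set(values, count) resizes the attribute and copies the elements.
    float firstComponent = asFloats ? asFloats[0] : 0.0f;
    (void)asDoubles;
    (void)firstComponent;
}
#endif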
inline void BundleAttrib::clearContents() noexcept
{
using namespace omni::graph::core;
/**
* Remove attribute. Its metadata will be removed automatically together with it.
*/
IBundle2* bundle = getBundlePtr();
bundle->removeAttributeByName(m_name);
/**
* Invalidate data.
*/
m_source = BundleAttribSource::Attribute;
m_dirtyID = kInvalidDirtyID;
m_interpolation = carb::flatcache::kUninitializedToken;
m_type = carb::flatcache::kUnknownType;
m_name = carb::flatcache::kUninitializedToken;
m_bundlePrim = nullptr;
}
} // namespace io
} // namespace graph
} // namespace omni
| 21,248 | C | 27.483914 | 120 | 0.694324 |
omniverse-code/kit/include/omni/graph/io/IDirtyID.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#pragma message("omni/graph/io/IDirtyID.h is deprecated. Include omni/graph/core/IDirtyID.h instead.")
#define OMNI_GRAPH_IO_DEPRECATED [[deprecated("Use the counterpart in the omni::graph::core namespace instead.")]]
#include <carb/Interface.h>
#include <stddef.h>
#include <stdint.h>
namespace omni
{
namespace graph
{
namespace io
{
using DirtyIDType OMNI_GRAPH_IO_DEPRECATED = uint64_t;
OMNI_GRAPH_IO_DEPRECATED static constexpr DirtyIDType kInvalidDirtyID = ~DirtyIDType(0);
OMNI_GRAPH_IO_DEPRECATED static constexpr size_t kFunctionSize = sizeof(void (*)());
struct OMNI_GRAPH_IO_DEPRECATED IDirtyID
{
CARB_PLUGIN_INTERFACE("omni::graph::io::IDirtyID", 1, 0);
/**
* @return The next dirty ID, atomically incrementing the counter inside.
*/
DirtyIDType(CARB_ABI* getNextDirtyID)() = nullptr;
};
// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
static_assert(offsetof(IDirtyID, getNextDirtyID) == 0 * kFunctionSize,
"New IDirtyID ABI methods must be added at the end");
// DEPRECATED: the BundlePrims class caches the iDirtyID interface internally.
// This must be instantiated in every extension that uses it, similar to Token::iToken.
// The exact location doesn't matter too much, though PluginInterface.cpp is probably the best option.
OMNI_GRAPH_IO_DEPRECATED extern const IDirtyID* iDirtyID;
template <typename PREVIOUS_T>
OMNI_GRAPH_IO_DEPRECATED inline bool checkDirtyIDChanged(PREVIOUS_T& previousID, DirtyIDType newID)
{
if (newID != previousID)
{
previousID = newID;
return true;
}
// Equal, but if they're invalid, still treat them as changed
return (newID == kInvalidDirtyID);
}
}
}
}
| 2,187 | C | 33.187499 | 114 | 0.745313 |
omniverse-code/kit/include/omni/graph/io/BundlePrimsImpl.h | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "BundlePrims.h"
namespace omni
{
namespace graph
{
namespace io
{
// ====================================================================================================
//
// Bundle Attribute
//
// Because the entire Bundle Prims implementation is inlined, the definitions of these functions
// have to come after the declarations of ConstBundlePrim and ConstBundlePrims.
// ====================================================================================================
inline BundleAttrib::BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept
{
using namespace omni::graph::core;
// Get attribute handle and attribute properties
IConstBundle2* bundle = prim.getConstBundlePtr();
ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(name);
if(!attributeHandle.isValid())
{
return;
}
GraphContextObj const& context = prim.getConstBundlePrims()->context();
m_bundlePrim = &prim;
m_name = name;
m_type = carb::flatcache::TypeC(context.iAttributeData->getType(context, attributeHandle));
// Read attribute properties.
ConstAttributeDataHandle propertyAttributeHandle;
propertyAttributeHandle =
bundle->getConstAttributeMetadataByName(name, detail::getAttrInterpolationDefinition().token);
if(propertyAttributeHandle.isValid())
{
m_interpolation = *getDataR<NameToken>(context, propertyAttributeHandle);
}
propertyAttributeHandle =
bundle->getConstAttributeMetadataByName(name, detail::getAttrDirtyIdDefinition().token);
if(propertyAttributeHandle.isValid())
{
m_dirtyID = *getDataR<DirtyIDType>(context, propertyAttributeHandle);
}
propertyAttributeHandle =
bundle->getConstAttributeMetadataByName(name, detail::getAttrSourceDefinition().token);
if(propertyAttributeHandle.isValid())
{
m_source = static_cast<Source>(*getDataR<SourceType>(context, propertyAttributeHandle));
}
}
inline BundleAttrib::BundleAttrib(BundlePrim& prim, omni::graph::core::NameToken name, omni::graph::core::Type type, size_t arrayElementCount, Source source) noexcept
: BundleAttrib{ prim, name }
{
using namespace omni::graph::core;
// Attribute exists!
if (m_bundlePrim)
{
return;
}
// Attribute does not exist.
IBundle2* bundle = prim.getBundlePtr();
GraphContextObj const& context = prim.getConstBundlePrims()->context();
auto handle = bundle->createAttribute(name, type, arrayElementCount);
omni::graph::core::getDataW<void*>(context, handle); // remove after OM-50059 is merged.
m_bundlePrim = &prim;
m_name = name;
m_type = carb::flatcache::TypeC(type);
// Interpolation is optional.
    // The dirty ID is a unique id that tracks whether the attribute has changed.
    setDirtyID(prim.getConstBundlePrims()->getNextDirtyID());
    // The source of the attribute identifies whether it is "data" or a "relationship".
setSource(source);
}
inline BundlePrim* BundleAttrib::getBundlePrim() noexcept
{
IConstBundle2* constBundle = getConstBundlePtr();
if(auto bundle = omni::cast<IBundle2>(constBundle))
{
return static_cast<BundlePrim*>(m_bundlePrim);
}
return nullptr;
}
inline omni::graph::core::IConstBundle2* BundleAttrib::getConstBundlePtr() const noexcept
{
ConstBundlePrim* bundlePrim = getBundlePrim();
return bundlePrim->getConstBundlePtr();
}
inline omni::graph::core::IBundle2* BundleAttrib::getBundlePtr() noexcept
{
BundlePrim* bundlePrim = getBundlePrim();
return bundlePrim->getBundlePtr();
}
inline bool BundleAttrib::bumpDirtyID() noexcept
{
BundlePrim* bundlePrim = getBundlePrim();
bundlePrim->bumpDirtyID();
DirtyIDType nextId = bundlePrim->getConstBundlePrims()->getNextDirtyID();
return setDirtyID(nextId);
}
// ====================================================================================================
//
// Bundle Primitive
//
// ====================================================================================================
inline BundlePrim::BundlePrim(BundlePrims& bundlePrims, BundlePrimIndex primIndex, omni::core::ObjectPtr<omni::graph::core::IBundle2> bundle)
: ConstBundlePrim{ bundlePrims, primIndex, std::move(bundle) }
{
}
inline void BundlePrim::setPath(omni::graph::core::NameToken path) noexcept
{
using namespace omni::graph::core;
BundlePrims* bundlePrims = getBundlePrims();
NameToken* primPaths = bundlePrims->addPrimPathsIfMissing();
primPaths[primIndex()] = path;
}
inline void BundlePrim::setType(omni::graph::core::NameToken type) noexcept
{
using namespace omni::graph::core;
BundlePrims* bundlePrims = getBundlePrims();
NameToken* primTypes = bundlePrims->addPrimTypesIfMissing();
primTypes[primIndex()] = type;
}
inline void BundlePrim::setDirtyID(DirtyIDType dirtyID) noexcept
{
auto primDirtyIDs = getBundlePrims()->getPrimDirtyIDs();
primDirtyIDs[m_primIndex] = dirtyID;
}
inline void BundlePrim::bumpDirtyID() noexcept
{
if (isCommonAttrs())
{
BundlePrims* bundlePrims = getBundlePrims();
bundlePrims->bumpBundleDirtyID();
}
else
{
DirtyIDType nextId = getBundlePrims()->getNextDirtyID();
setDirtyID(nextId);
}
}
inline BundleAttrib* BundlePrim::addAttr(omni::graph::core::NameToken attrName,
omni::graph::core::Type type,
size_t arrayElementCount,
BundleAttribSource source) noexcept
{
using namespace omni::graph::core;
auto& attrs = getAttributes();
// Erase existing attribute.
auto it = attrs.find(attrName);
if (it != attrs.end())
{
it->second->clearContents();
attrs.erase(it);
}
auto attr = new BundleAttrib{ *this, attrName, type, arrayElementCount, source };
attrs.emplace(attrName, attr);
return attr;
}
inline BundleAttrib* BundlePrim::addRelationship(omni::graph::core::NameToken name, size_t targetCount) noexcept
{
return addAttr(name, detail::s_relationshipType, targetCount, BundleAttribSource::Relationship);
}
inline bool BundlePrim::addAttrs(std::vector<BundlePrim::AddAttrInfo> const& attrList) noexcept
{
using namespace omni::graph::core;
IBundle2* bundle = getBundlePtr();
auto& attrs = getAttributes();
    // Remove attributes that exist but whose properties differ.
std::vector<BundlePrim::AddAttrInfo> attrToCreate;
attrToCreate.reserve(attrList.size());
    for (auto const& newAttr : attrList)
    {
auto it = attrs.find(newAttr.attrName);
if (it == attrs.end())
{
attrToCreate.push_back(newAttr);
continue;
}
BundleAttrib const* attr = it->second.get();
if (attr->type() != newAttr.type ||
attr->size() != newAttr.arrayElementCount ||
attr->source() != newAttr.source)
{
it->second->clearContents();
attrs.erase(it);
attrToCreate.push_back(newAttr);
}
        // Attribute is identical; nothing to do.
}
// Create attributes that require instantiation.
for (auto const& tmp : attrToCreate)
{
auto attr = new BundleAttrib{ *this, tmp.attrName, tmp.type, tmp.arrayElementCount, tmp.source };
attrs.emplace(tmp.attrName, attr);
}
return true;
}
inline void BundlePrim::removeAttr(omni::graph::core::NameToken attrName) noexcept
{
using namespace omni::graph::core;
// Remove attribute from internal member.
auto& attrs = getAttributes();
auto it = attrs.find(attrName);
if (it != attrs.end())
{
it->second->clearContents();
attrs.erase(it);
}
}
inline void BundlePrim::clearContents() noexcept
{
using namespace omni::graph::core;
auto& attrs = getAttributes();
for (auto& attr : attrs)
{
attr.second->clearContents();
}
getAttributes().clear();
}
inline void BundlePrim::copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource /* = true*/) noexcept
{
return copyContentsFrom(const_cast<ConstBundlePrim&>(source), removeAttrsNotInSource);
}
inline void BundlePrim::copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource /* = true*/) noexcept
{
using namespace omni::graph::core;
// Nothing to do if they're already equal.
if (dirtyID() == source.dirtyID())
return;
BundlePrims* bundlePrims = getBundlePrims();
    // Add/set any attributes from source whose dirty IDs differ, being sure to copy the dirty IDs.
    // First we batch-add them, then we copy their contents.
std::vector<BundlePrim::AddAttrInfo> attrsToAdd;
attrsToAdd.reserve(source.attrCount());
for (auto const& sourceAttr : source)
{
NameToken name = sourceAttr.name();
// NOTE: Request a const attribute, to avoid bumping its dirty ID.
BundleAttrib const* constDestAttr = getConstAttr(name);
if (constDestAttr != nullptr && constDestAttr->dirtyID() == sourceAttr.dirtyID())
{
continue;
}
if (constDestAttr == nullptr)
{
attrsToAdd.push_back(
{ sourceAttr.m_name, Type(carb::flatcache::TypeC{ sourceAttr.m_type }), 0, sourceAttr.m_source });
}
}
// add the attributes
addAttrs(attrsToAdd);
// copy the data
for (auto const& sourceAttr : source)
{
NameToken name = sourceAttr.name();
// NOTE: Request a const attribute, to avoid bumping its dirty ID.
BundleAttrib const* constDestAttr = getConstAttr(name);
CARB_ASSERT(constDestAttr != nullptr);
if (constDestAttr == nullptr || constDestAttr->dirtyID() == sourceAttr.dirtyID())
{
continue;
}
const_cast<BundleAttrib*>(constDestAttr)->copyContentsFrom(sourceAttr);
}
CARB_ASSERT(attrCount() >= source.attrCount());
// If there are more attributes in this than in source, remove any that aren't in source.
auto& attrMap = getAttributes();
if (attrCount() > source.attrCount() && removeAttrsNotInSource)
{
std::vector<NameToken> attrsToRemove;
for (auto it = attrMap.begin(); it != attrMap.end();)
{
if (source.getConstAttr(it->second->name()) == nullptr)
{
it->second->clearContents();
it = attrMap.erase(it);
}
else
{
++it;
}
}
}
}
inline BundleAttrib* BundlePrim::getAttr(omni::graph::core::NameToken attrName) noexcept
{
auto& attrs = getAttributes();
auto it = attrs.find(attrName);
if (it == attrs.end())
{
return nullptr;
}
BundleAttrib* attr = it->second.get();
// TODO: Consider whether it's worth bumping the dirty ID later, when modification occurs.
attr->bumpDirtyID();
return attr;
}
inline omni::graph::core::BundleHandle BundlePrim::handle() noexcept
{
return getBundlePtr()->getHandle();
}
inline BundlePrims* BundlePrim::getBundlePrims() noexcept
{
omni::graph::core::IBundle2* bundle = getBundlePtr();
if (bundle)
{
ConstBundlePrims* bundlePrims = ConstBundlePrim::getConstBundlePrims();
return static_cast<BundlePrims*>(bundlePrims);
}
return nullptr;
}
inline BundlePrims* BundlePrim::bundlePrims() noexcept
{
return getBundlePrims();
}
inline BundlePrimAttrIterator BundlePrim::begin() noexcept
{
return BundlePrimAttrIterator(*this, getAttributes().begin());
}
inline BundlePrimAttrIterator BundlePrim::end() noexcept
{
return BundlePrimAttrIterator(*this, getAttributes().end());
}
inline ConstBundlePrimAttrIterator BundlePrim::cbegin() noexcept
{
return ConstBundlePrim::begin();
}
inline ConstBundlePrimAttrIterator BundlePrim::cend() noexcept
{
return ConstBundlePrim::end();
}
inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr(omni::graph::core::IConstBundle2* constBundle) noexcept
{
auto bundle = omni::cast<omni::graph::core::IBundle2>(constBundle);
return bundle.get();
}
inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr() noexcept
{
using namespace omni::graph::core;
IConstBundle2* constBundle = getConstBundlePtr();
IBundle2* bundle = getBundlePtr(constBundle);
return bundle;
}
// ====================================================================================================
//
// Bundle Primitives
//
// ====================================================================================================
inline BundlePrims::~BundlePrims() noexcept
{
detach();
}
inline omni::graph::core::BundleHandle BundlePrims::handle() noexcept
{
using namespace omni::graph::core;
if (IBundle2* bundle = getBundlePtr())
{
return bundle->getHandle();
}
return BundleHandle{ BundleHandle::invalidValue() };
}
inline void BundlePrims::ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept
{
}
inline BundlePrims::BundlePrims()
: ConstBundlePrims()
{
}
inline BundlePrims::BundlePrims(omni::graph::core::GraphContextObj const& context,
omni::graph::core::BundleHandle const& bundle)
: BundlePrims()
{
attach(context, bundle);
}
inline void BundlePrims::attach(omni::graph::core::GraphContextObj const& context,
omni::graph::core::BundleHandle const& bundleHandle) noexcept
{
using namespace omni::graph::core;
ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>();
omni::core::ObjectPtr<IBundleFactory> factoryPtr = computeGraph->getBundleFactoryInterfacePtr();
omni::core::ObjectPtr<IBundle2> bundlePtr = factoryPtr->getBundle(context, bundleHandle);
ConstBundlePrims::attach(std::move(factoryPtr), std::move(bundlePtr));
IBundle2* bundle = getBundlePtr();
//
// Bundle Level Attributes
//
    auto& bundleDirtyIDDef = detail::getBundleDirtyIDDefinition();
    m_bundleDirtyIDAttr = bundle->getBundleMetadataByName(bundleDirtyIDDef.token);
    if (!m_bundleDirtyIDAttr.isValid())
{
m_bundleDirtyIDAttr = bundle->createBundleMetadata(bundleDirtyIDDef.token, bundleDirtyIDDef.type);
DirtyIDType newBundleDirtyID = getNextDirtyID();
setBundleDirtyID(newBundleDirtyID);
*getDataW<DirtyIDType>(context, m_bundleDirtyIDAttr) = newBundleDirtyID;
}
else
{
setBundleDirtyID(*getDataR<DirtyIDType>(context, m_bundleDirtyIDAttr));
}
auto& primDirtyIDsDef = detail::getPrimDirtyIDsDefinition();
m_primDirtyIDsAttr = bundle->getBundleMetadataByName(primDirtyIDsDef.token);
if(!m_primDirtyIDsAttr.isValid())
{
m_primDirtyIDsAttr = bundle->createBundleMetadata(primDirtyIDsDef.token, primDirtyIDsDef.type, 0);
setPrimDirtyIDsData(*getDataW<DirtyIDType*>(context, m_primDirtyIDsAttr));
}
auto& primPathsDef = detail::getPrimPathsDefinition();
m_primPathsAttr = bundle->getBundleMetadataByName(primPathsDef.token);
auto& primTypesDef = detail::getPrimTypesDefinition();
m_primTypesAttr = bundle->getBundleMetadataByName(primTypesDef.token);
auto& primIndexDef = detail::getPrimIndexDefinition();
m_primIndexAttr = bundle->getBundleMetadataByName(primIndexDef.token);
}
inline void BundlePrims::detach() noexcept
{
using omni::graph::core::AttributeDataHandle;
if(m_bundleDirtyIDAttr.isValid())
{
auto& context = this->context();
// Assume that the bundle has changed, given that this is a non-const bundle wrapper.
*getDataW<DirtyIDType>(context, m_bundleDirtyIDAttr) = getNextDirtyID();
m_bundleDirtyIDAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
}
m_primDirtyIDsAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
m_primIndexAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
m_primTypesAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
m_primPathsAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
ConstBundlePrims::detach();
}
inline BundlePrim* BundlePrims::getPrim(BundlePrimIndex primIndex) noexcept
{
using namespace omni::graph::core;
auto createBundlePrim = [this, &bundlePrims = *this, &primIndex]() -> BundlePrim*
{
BundleHandle bundleHandle = getBundlePtr()->getChildBundle(primIndex);
if (!bundleHandle.isValid())
{
return nullptr;
}
omni::core::ObjectPtr<IBundle2> childBundle = getBundleFactoryPtr()->getBundle(context(), bundleHandle);
if (!childBundle)
{
return nullptr;
}
return new BundlePrim{ bundlePrims, primIndex, childBundle };
};
    // Since we acquire the BundlePrim instance through the BundlePrims interface,
    // we bump the dirty ID of this prim because the intention is to modify it.
    auto bundlePrim = static_cast<BundlePrim*>(ConstBundlePrims::getConstPrim(primIndex, createBundlePrim));
    if (bundlePrim)
    {
        bundlePrim->bumpDirtyID();
    }
    return bundlePrim;
}
inline BundlePrim* BundlePrims::getClearedPrim(BundlePrimIndex primIndex) noexcept
{
BundlePrim* bundlePrim = getPrim(primIndex);
if(!bundlePrim)
{
return nullptr;
}
bundlePrim->clearContents();
return bundlePrim;
}
inline omni::graph::core::NameToken* BundlePrims::addPrimPathsIfMissing() noexcept
{
using namespace omni::graph::core;
    // If the primPaths attribute already exists, just return its data.
if (m_primPathsAttr.isValid())
{
return getPrimPaths();
}
// Create a new primPaths attribute.
IBundle2* bundle = getBundlePtr();
auto& primPathsDef = detail::getPrimPathsDefinition();
size_t const primCount = getPrimCount();
m_primPathsAttr = bundle->createBundleMetadata(primPathsDef.token, primPathsDef.type, primCount);
NameToken* primPaths = *getDataW<NameToken*>(context(), m_primPathsAttr);
for(size_t i = 0; i < primCount; ++i)
{
primPaths[i] = carb::flatcache::kUninitializedToken;
}
setPrimPathsData(primPaths);
return primPaths;
}
inline omni::graph::core::NameToken* BundlePrims::addPrimTypesIfMissing() noexcept
{
using namespace omni::graph::core;
if(m_primTypesAttr.isValid())
{
return getPrimTypes();
}
// Create a new primTypes attribute.
IBundle2* bundle = getBundlePtr();
auto& primTypesDef = detail::getPrimTypesDefinition();
size_t const primCount = getPrimCount();
m_primTypesAttr = bundle->createBundleMetadata(primTypesDef.token, primTypesDef.type, primCount);
NameToken* primTypes = *getDataW<NameToken*>(context(), m_primTypesAttr);
for(size_t i = 0; i < primCount; ++i)
{
primTypes[i] = carb::flatcache::kUninitializedToken;
}
setPrimTypesData(primTypes);
return primTypes;
}
inline BundlePrim& BundlePrims::getCommonAttrs() noexcept
{
ConstBundlePrim& commonAttributes = ConstBundlePrims::getConstCommonAttrs();
return static_cast<BundlePrim&>(commonAttributes);
}
inline omni::graph::core::IBundle2* BundlePrims::getBundlePtr() noexcept
{
using namespace omni::graph::core;
auto constBundle = getConstBundlePtr();
auto bundle = omni::cast<IBundle2>(constBundle);
return bundle.get();
}
inline uint64_t BundlePrims::bumpBundleDirtyID() noexcept
{
if (m_bundleDirtyIDAttr.isValid())
{
auto& context = this->context();
DirtyIDType dirtyID = getNextDirtyID();
*getDataW<DirtyIDType>(context, m_bundleDirtyIDAttr) = dirtyID;
return dirtyID;
}
return kInvalidDirtyID;
}
inline void BundlePrims::clearContents() noexcept
{
for (BundlePrimIndex primIndex = getPrimCount(); primIndex != 0;)
{
--primIndex;
removePrim(primIndex);
}
// Delete all attributes from this bundle.
BundlePrim& thisBundle = getCommonAttrs();
thisBundle.clearContents();
// remove internal data
IBundle2* bundle = getBundlePtr();
using omni::graph::core::AttributeDataHandle;
    // Clearing the bundle prims' internal attributes (such as bundleDirtyID) causes downstream problems.
    // The initial implementation never cleared those attributes, so this block remains disabled.
#if 0
auto bundlePrimsInternalAttributes = {
std::ref(m_bundleDirtyIDAttr), //
std::ref(m_primDirtyIDsAttr), //
std::ref(m_primPathsAttr), //
std::ref(m_primTypesAttr), //
std::ref(m_primIndexAttr), //
};
for (auto& internalAttribute : bundlePrimsInternalAttributes)
{
if (internalAttribute.get().isValid())
{
bundle->removeAttribute(internalAttribute.get());
}
internalAttribute.get() = AttributeDataHandle{ AttributeDataHandle::invalidValue() };
}
#endif
}
inline bool BundlePrims::removePrim(ConstBundlePrim* prim) noexcept
{
return removePrim(prim->primIndex());
}
inline bool BundlePrims::removePrim(BundlePrimIndex primIndex) noexcept
{
using namespace omni::graph::core;
IBundle2* bundle = getBundlePtr();
auto& context = this->context();
auto& prims = getPrimitives();
// remove children and attributes
BundlePrim* childBundlePrim = getPrim(primIndex);
if (!childBundlePrim)
{
return false;
}
// clear contents and remove bundle from a map
childBundlePrim->clearContents();
bundle->removeChildBundle(childBundlePrim->handle());
    // If the removed primitive is not the last one,
    // move the last one into its slot and update its index.
size_t const newPrimCount = prims.size() - 1;
if (primIndex != newPrimCount)
{
prims[primIndex] = std::move(prims[newPrimCount]);
prims[primIndex]->m_primIndex = primIndex;
}
prims.resize(newPrimCount);
// Update contents of array attributes
if (primIndex != newPrimCount)
{
auto primDirtyIDs = getPrimDirtyIDs();
primDirtyIDs[primIndex] = primDirtyIDs[newPrimCount];
auto primPaths = getPrimPaths();
if (primPaths != nullptr)
{
primPaths[primIndex] = primPaths[newPrimCount];
}
auto primTypes = getPrimTypes();
if (primTypes != nullptr)
{
primTypes[primIndex] = primTypes[newPrimCount];
}
}
// Reduce element counts of underlying attributes and update the pointers in case they've changed.
context.iAttributeData->setElementCount(context, m_primDirtyIDsAttr, newPrimCount);
setPrimDirtyIDsData(*getDataW<DirtyIDType*>(context, m_primDirtyIDsAttr));
if (m_primPathsAttr.isValid())
{
context.iAttributeData->setElementCount(context, m_primPathsAttr, newPrimCount);
setPrimPathsData(*getDataW<NameToken*>(context, m_primPathsAttr));
}
if (m_primTypesAttr.isValid())
{
context.iAttributeData->setElementCount(context, m_primTypesAttr, newPrimCount);
setPrimTypesData(*getDataW<NameToken*>(context, m_primTypesAttr));
}
return true;
}
inline size_t BundlePrims::addPrims(size_t primCountToAdd) noexcept
{
using namespace omni::graph::core;
size_t oldPrimCount = getConstBundlePtr()->getChildBundleCount();
if (primCountToAdd == 0)
{
return oldPrimCount;
}
size_t const newPrimCount = oldPrimCount + primCountToAdd;
CARB_ASSERT(newPrimCount > oldPrimCount);
IBundle2* bundle = getBundlePtr();
IBundleFactory* factory = getBundleFactoryPtr();
auto& context = this->context();
// Create primIndex that stores last index of the primitive.
if(!m_primIndexAttr.isValid())
{
auto& primIndexDef = detail::getPrimIndexDefinition();
m_primIndexAttr = bundle->getBundleMetadataByName(primIndexDef.token);
if(!m_primIndexAttr.isValid())
{
m_primIndexAttr = bundle->createBundleMetadata(primIndexDef.token, primIndexDef.type);
*getDataW<uint64_t>(context, m_primIndexAttr) = 0;
}
}
uint64_t* primIndexData = getDataW<uint64_t>(context, m_primIndexAttr);
    // Create new child bundles.
    // All children are named 'prim' + primIndex, because the IBundle2 interface does not allow a sparse hierarchy.
    // The child paths are then stored as an attribute.
BundlePrimArray& prims = getPrimitives();
prims.resize(newPrimCount);
std::string primPathStr;
for (BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex)
{
primPathStr = "prim" + std::to_string(*primIndexData + primIndex);
NameToken primName = context.iToken->getHandle(primPathStr.data());
BundleHandle childHandle = bundle->createChildBundle(primName);
auto childBundle = factory->getBundle(context, childHandle);
prims[primIndex].reset(new BundlePrim(*this, primIndex, std::move(childBundle)));
}
// Update primDirtyIDs.
if(m_primDirtyIDsAttr.isValid())
{
context.iAttributeData->setElementCount(context, m_primDirtyIDsAttr, newPrimCount);
}
else
{
auto& primDirtyIDsDef = detail::getPrimDirtyIDsDefinition();
m_primDirtyIDsAttr = bundle->createBundleMetadata(primDirtyIDsDef.token, primDirtyIDsDef.type, newPrimCount);
}
setPrimDirtyIDsData(*getDataW<DirtyIDType*>(context, m_primDirtyIDsAttr));
auto primDirtyIDs = getPrimDirtyIDs();
for (BundlePrimIndex i = oldPrimCount; i < newPrimCount; ++i)
{
primDirtyIDs[i] = getNextDirtyID();
}
// Update primPaths.
if(m_primPathsAttr.isValid())
{
context.iAttributeData->setElementCount(context, m_primPathsAttr, newPrimCount);
NameToken* primPathsData = *getDataW<NameToken*>(context, m_primPathsAttr);
for (BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex)
{
primPathsData[primIndex] = carb::flatcache::kUninitializedToken;
}
setPrimPathsData(primPathsData);
}
// Update primTypes.
if(m_primTypesAttr.isValid())
{
context.iAttributeData->setElementCount(context, m_primTypesAttr, newPrimCount);
NameToken* primTypesData = *getDataW<NameToken*>(context, m_primTypesAttr);
for(BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex)
{
primTypesData[primIndex] = carb::flatcache::kUninitializedToken;
}
setPrimTypesData(primTypesData);
}
*primIndexData += primCountToAdd; // Update prim index offset.
return oldPrimCount;
}
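// ====================================================================================================
//
// Example (illustrative sketch only; not part of the original header).
//
// A minimal authoring flow with the wrapper above: attach to a writable bundle, append a child prim
// with addPrims, then set its path/type and create an attribute. The context/handle parameters and
// the "points" attribute name are assumptions for illustration, so the block is compiled out.
//
// ====================================================================================================
#if 0
inline void bundlePrimsAuthoringExample(omni::graph::core::GraphContextObj const& context,
                                        omni::graph::core::BundleHandle const& outputBundle)
{
    using namespace omni::graph::core;
    BundlePrims bundlePrims{ context, outputBundle };
    // addPrims returns the previous prim count, which is the index of the first newly created prim.
    size_t newIndex = bundlePrims.addPrims(1);
    if (BundlePrim* prim = bundlePrims.getPrim(newIndex))
    {
        prim->setPath(context.iToken->getHandle("/World/ExamplePrim"));
        prim->setType(context.iToken->getHandle("Mesh"));
        // Create a float[3][] array attribute with 8 elements (hypothetical name and size).
        prim->addAttr(context.iToken->getHandle("points"), Type{ BaseDataType::eFloat, 3, 1 }, 8,
                      BundleAttribSource::Attribute);
    }
}
#endif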
inline DirtyIDType* BundlePrims::getPrimDirtyIDs() noexcept
{
return const_cast<DirtyIDType*>(ConstBundlePrims::getPrimDirtyIDs());
}
inline omni::graph::core::NameToken* BundlePrims::getPrimTypes() noexcept
{
return const_cast<omni::graph::core::NameToken*>(ConstBundlePrims::getConstPrimTypes());
}
inline omni::graph::core::NameToken* BundlePrims::getPrimPaths() noexcept
{
return const_cast<omni::graph::core::NameToken*>(ConstBundlePrims::getConstPrimPaths());
}
inline BundlePrimIterator BundlePrims::begin() noexcept
{
return BundlePrimIterator(*this);
}
inline BundlePrimIterator BundlePrims::end() noexcept
{
return BundlePrimIterator(*this, getPrimCount());
}
inline ConstBundlePrimIterator BundlePrims::cbegin() noexcept
{
return ConstBundlePrims::begin();
}
inline ConstBundlePrimIterator BundlePrims::cend() noexcept
{
return ConstBundlePrims::end();
}
// ====================================================================================================
//
// Bundle Primitive Iterator
//
// ====================================================================================================
inline BundlePrimIterator::BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept
: m_bundlePrims(&bundlePrims), m_primIndex(primIndex)
{
}
inline bool BundlePrimIterator::operator==(BundlePrimIterator const& that) const noexcept
{
return m_bundlePrims == that.m_bundlePrims && m_primIndex == that.m_primIndex;
}
inline bool BundlePrimIterator::operator!=(BundlePrimIterator const& that) const noexcept
{
return !(*this == that);
}
inline BundlePrim& BundlePrimIterator::operator*() noexcept
{
return *(m_bundlePrims->getPrim(m_primIndex));
}
inline BundlePrim* BundlePrimIterator::operator->() noexcept
{
return m_bundlePrims->getPrim(m_primIndex);
}
inline BundlePrimIterator& BundlePrimIterator::operator++() noexcept
{
++m_primIndex;
return *this;
}
// ====================================================================================================
//
// Bundle Primitive Attribute Iterator
//
// ====================================================================================================
inline BundlePrimAttrIterator::BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept
: m_bundlePrim(&bundlePrim), m_attrIter(attrIter)
{
}
inline bool BundlePrimAttrIterator::operator==(BundlePrimAttrIterator const& that) const noexcept
{
return m_bundlePrim == that.m_bundlePrim && m_attrIter == that.m_attrIter;
}
inline bool BundlePrimAttrIterator::operator!=(BundlePrimAttrIterator const& that) const noexcept
{
return !(*this == that);
}
inline BundleAttrib const* BundlePrimAttrIterator::getConst() noexcept
{
CARB_ASSERT(m_bundlePrim != nullptr);
CARB_ASSERT(m_attrIter->second);
BundleAttrib* attr = m_attrIter->second.get();
// NOTE: Does not bump the dirty ID, since this is const.
return attr;
}
inline BundleAttrib& BundlePrimAttrIterator::operator*() noexcept
{
CARB_ASSERT(m_bundlePrim != nullptr);
CARB_ASSERT(m_attrIter->second);
BundleAttrib* attr = m_attrIter->second.get();
// TODO: Consider bumping the dirty ID later, when modification occurs.
attr->bumpDirtyID();
return *attr;
}
inline BundleAttrib* BundlePrimAttrIterator::operator->() noexcept
{
CARB_ASSERT(m_bundlePrim != nullptr);
CARB_ASSERT(m_attrIter->second);
BundleAttrib* attr = m_attrIter->second.get();
// TODO: Consider bumping the dirty ID later, when modification occurs.
attr->bumpDirtyID();
return attr;
}
inline BundlePrimAttrIterator& BundlePrimAttrIterator::operator++() noexcept
{
++m_attrIter;
return *this;
}
} // namespace io
} // namespace graph
} // namespace omni
| 31,032 | C | 30.601833 | 166 | 0.659029 |
omniverse-code/kit/include/omni/graph/io/ConstBundlePrims.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#pragma message("omni/graph/io/ConstBundlePrims.h is deprecated. Include omni/graph/core/ConstBundlePrims.h instead.")
// ====================================================================================================
/* _____ _ _ _ _ _
| __ \ | \ | | | | | | | |
| | | | ___ | \| | ___ | |_ | | | |___ ___
| | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \
| |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/
|_____/ \___/ |_| \_|\___/ \__| \____/|___/\___|
This is a temporary interface that can change at any time.
*/
// ====================================================================================================
#include "BundleAttrib.h"
#include <omni/graph/core/IBundleFactory.h>
#include <unordered_map>
#include <memory>
#include <vector>
namespace omni
{
namespace graph
{
namespace io
{
class ConstBundlePrims;
class ConstBundlePrimIterator;
class ConstBundlePrimAttrIterator;
/**
* Index used to identify primitives in a bundle.
*/
using BundlePrimIndex OMNI_GRAPH_IO_DEPRECATED = size_t;
OMNI_GRAPH_IO_DEPRECATED constexpr BundlePrimIndex kInvalidBundlePrimIndex = ~BundlePrimIndex(0);
/**
* Collection of read-only attributes in a primitive.
*
 * ConstBundlePrim is neither movable nor copyable. Its lifespan is managed by ConstBundlePrims.
*/
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrim
{
public:
using BundleAttributeMap = std::unordered_map<omni::graph::core::NameToken, std::unique_ptr<BundleAttrib>>;
using AttrMapIteratorType = BundleAttributeMap::const_iterator;
ConstBundlePrim(ConstBundlePrim const&) = delete;
ConstBundlePrim(ConstBundlePrim&&) = delete;
ConstBundlePrim& operator=(ConstBundlePrim const& that) = delete;
ConstBundlePrim& operator=(ConstBundlePrim&&) = delete;
/**
* @return Parent bundle prims of this primitive.
*/
ConstBundlePrims* getConstBundlePrims() noexcept;
/**
* @return Number of attributes in this primitive. Does not include internal attributes.
*/
size_t attrCount() noexcept;
/**
* @return PrimAttribute if attribute with given name is found, nullptr otherwise.
*/
BundleAttrib const* getConstAttr(omni::graph::core::NameToken attrName) noexcept;
/**
* @return Index of this primitive in parent bundle.
*/
BundlePrimIndex primIndex() noexcept;
/**
* @return Path of this primitive.
*/
omni::graph::core::NameToken path() noexcept;
/**
* @return Type of this primitive.
*/
omni::graph::core::NameToken type() noexcept;
/**
* @return Dirty id value of this primitive.
*/
DirtyIDType dirtyID() noexcept;
/**
* @return Attribute iterator pointing to the first attribute in this bundle.
*/
ConstBundlePrimAttrIterator begin() noexcept;
/**
* @return Attribute iterator pointing to the last attribute in this bundle.
*/
ConstBundlePrimAttrIterator end() noexcept;
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
/**
* @deprecated Do not use!. Use getConstAttr().
*/
[[deprecated("Use non const instead.")]]
BundleAttrib const* getAttr(omni::graph::core::NameToken attrName) const noexcept;
/**
* @deprecated Do not use!. Use non-const variant of path().
*/
[[deprecated("Use non const instead.")]]
omni::graph::core::NameToken path() const noexcept;
/**
* @deprecated Do not use!. Use non-const variant of type().
*/
[[deprecated("Use non const instead.")]]
omni::graph::core::NameToken type() const noexcept;
/**
* @deprecated Do not use!. Use non-const variant of dirtyID().
*/
[[deprecated("Use non const instead.")]]
DirtyIDType dirtyID() const noexcept;
/**
* @deprecated Do not use!. Use non-const variant of begin().
*/
[[deprecated("Use non const instead.")]]
ConstBundlePrimAttrIterator begin() const noexcept;
/**
* @deprecated Do not use!. Use non-const variant of end().
*/
[[deprecated("Use non const instead.")]]
ConstBundlePrimAttrIterator end() const noexcept;
protected:
/**
* Direct initialization with IConstBundle interface.
*
     * ConstBundlePrim and BundlePrim take advantage of the polymorphic relationship
     * between the IConstBundle and IBundle interfaces.
     * In order to modify bundles, BundlePrim attempts to down-cast IConstBundle
     * to the IBundle interface. When this succeeds, the bundle can be modified.
*
* Only ConstBundlePrims is allowed to create instances of ConstBundlePrim.
*/
ConstBundlePrim(ConstBundlePrims& bundlePrims,
BundlePrimIndex primIndex,
omni::core::ObjectPtr<omni::graph::core::IConstBundle2> bundle);
/**
* @return IConstBundle interface for this bundle primitive.
*/
omni::graph::core::IConstBundle2* getConstBundlePtr() noexcept;
/**
* @return Get attribute used by ConstBundlePrims and BundlePrims.
*/
BundleAttributeMap& getAttributes() noexcept;
/**
* Reads public attributes from the bundle and caches them as BundleAttribs.
*/
void readAndCacheAttributes() noexcept;
private:
ConstBundlePrims* m_bundlePrims{ nullptr }; // Parent of this bundle prim.
omni::core::ObjectPtr<omni::graph::core::IConstBundle2> m_bundle;
BundlePrimIndex m_primIndex{ kInvalidBundlePrimIndex }; // Index of a child bundle of this primitive.
DirtyIDType m_dirtyId{ kInvalidDirtyID };
BundleAttributeMap m_attributes; // Cached public attributes that belong to this primitive.
friend class BundleAttrib; // Required to access IConstBundle interface.
friend class BundlePrim; // Required to access primitive type.
friend class BundlePrims; // Required to update internal indices.
friend class ConstBundlePrims; // Required to call constructor.
};
/**
* Collection of read-only primitives in a bundle.
*
 * ConstBundlePrims is neither movable nor copyable. Its lifespan is managed by the user.
*/
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrims
{
public:
ConstBundlePrims();
ConstBundlePrims(omni::graph::core::GraphContextObj const& context,
omni::graph::core::ConstBundleHandle const& bundle);
ConstBundlePrims(ConstBundlePrims const&) = delete;
ConstBundlePrims(ConstBundlePrims&&) = delete;
ConstBundlePrims& operator=(ConstBundlePrims const&) = delete;
ConstBundlePrims& operator=(ConstBundlePrims&&) = delete;
/**
* @return Bundle handle of this primitive.
*/
omni::graph::core::ConstBundleHandle getConstHandle() noexcept;
/**
* @return Number of primitives in this bundle of primitives.
*/
size_t getPrimCount() noexcept;
/**
     * @return Read-only primitive at the specified index.
*/
ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex) noexcept;
/**
     * @return Dirty ID covering the bundle of primitives as a whole.
*/
DirtyIDType getBundleDirtyID() noexcept;
/**
     * @return Array of per-primitive dirty IDs.
*/
DirtyIDType const* getPrimDirtyIDs() noexcept;
/**
* Paths of all primitives in this bundle.
*
* Primitive paths are lazily computed. This operation can be slow because iteration over all
     * bundles is required. ConstBundlePrim accesses paths directly from primitives.
* Use only when necessary.
*
* @todo Paths should be represented by PathC type.
*
* @return Pointer to primitive paths array, or nullptr if there are no primitive paths.
*/
omni::graph::core::NameToken const* getConstPrimPaths() noexcept;
/**
     * Get primitive types in this bundle of primitives. Once a primitive is created its path cannot be changed.
     * Primitives can be copied but not moved.
*/
omni::graph::core::NameToken const* getConstPrimTypes() noexcept;
/**
     * Common attributes are attributes that are shared across the entire bundle.
* An example of a common attribute is "transform" attribute.
*
* @return ConstBundlePrims as ConstBundlePrim to access attributes.
*/
ConstBundlePrim& getConstCommonAttrs() noexcept;
/**
* @return Context where bundle primitives belongs to.
*/
omni::graph::core::GraphContextObj const& context() noexcept;
/**
* @return Primitive iterator pointing to the first primitive in this bundle.
*/
ConstBundlePrimIterator begin() noexcept;
/**
* @return Primitive iterator pointing to the last primitive in this bundle.
*/
ConstBundlePrimIterator end() noexcept;
/***********************************************************************************************
*
* TODO: Following methods might be deprecated in the future.
* In the next iteration when real interface starts to emerge, we can retire those methods.
*
***********************************************************************************************/
/**
* @deprecated Do not use! Use getConstPrim().
*/
ConstBundlePrim* getPrim(BundlePrimIndex primIndex) noexcept;
/**
* @deprecated Dirty id management is an internal state of this class and should be private.
*
* @return Next available id.
*/
DirtyIDType getNextDirtyID() noexcept;
/**
* @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
*
     * @todo: There is no benefit to using this method. The cache has to be rebuilt from scratch
     * whenever ConstBundlePrims is attached/detached.
     * It would be better to remove the default constructor and enforce cache construction
     * through a constructor with arguments.
*/
void attach(omni::graph::core::GraphContextObj const& context,
omni::graph::core::ConstBundleHandle const& bundle) noexcept;
/**
* @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
*/
void detach() noexcept;
/**
* @deprecated Use getConstHandle.
*/
omni::graph::core::ConstBundleHandle handle() noexcept;
/**
* @deprecated Use getConstPrimPaths.
*/
omni::graph::core::NameToken const* getPrimPaths() noexcept;
/**
* @deprecated Use getConstCommonAttrs.
*/
ConstBundlePrim& getCommonAttrs() noexcept;
/**
     * @deprecated There is no need to separate attributes. The IBundle2 interface inherently keeps them separated.
*/
void separateAttrs() noexcept;
/**
* @deprecated Caching attributes is not needed. Calling this method doesn't do anything.
*/
void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept;
protected:
using ConstBundlePrimPtr = std::unique_ptr<ConstBundlePrim>;
using BundlePrimArray = std::vector<ConstBundlePrimPtr>;
/**
* Get bundle primitives in this bundle.
*/
BundlePrimArray& getPrimitives() noexcept;
/**
     * IConstBundle2 is a polymorphic base of IBundle2, thus the bundle argument may carry the
     * mutable version of the interface when mutations are required.
*/
void attach(omni::core::ObjectPtr<omni::graph::core::IBundleFactory>&& factory,
omni::core::ObjectPtr<omni::graph::core::IConstBundle2>&& bundle) noexcept;
/**
* @return Factory to spawn instances of IBundle interface.
*/
omni::graph::core::IBundleFactory* getBundleFactoryPtr() noexcept;
/**
* @return IBundle instance of this bundle.
*/
omni::graph::core::IConstBundle2* getConstBundlePtr() noexcept;
/**
     * Instances of BundlePrim are instantiated on demand. The create argument allows
     * instantiating either the mutable or the immutable bundle interface.
*/
template<typename FUNC>
ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex, FUNC create) noexcept;
void setBundleDirtyID(DirtyIDType bundleDirtyID) noexcept;
void setPrimDirtyIDsData(DirtyIDType const* primDirtyIDs) noexcept;
void setPrimPathsData(omni::graph::core::NameToken const* primPaths) noexcept;
void setPrimTypesData(omni::graph::core::NameToken const* primTypes) noexcept;
private:
omni::core::ObjectPtr<omni::graph::core::IBundleFactory> m_factory;
omni::core::ObjectPtr<omni::graph::core::IConstBundle2> m_bundle;
omni::graph::core::GraphContextObj m_context; // Backward compatibility.
/**
* ConstBundlePrims is a bundle as well. To access attributes under this bundle we need to acquire
     * an instance of ConstBundlePrim for this bundle. The common attributes, despite the unfortunate name,
     * give us the ability to access those attributes.
*/
ConstBundlePrimPtr m_commonAttributes;
BundlePrimArray m_primitives; // Cached instances of BundlePrim.
IDirtyID* m_iDirtyID{ nullptr }; // Cached interface to manage generation of unique ids.
DirtyIDType m_bundleDirtyID;
DirtyIDType const* m_primDirtyIDs{ nullptr }; // Backward compatibility - cached prim ids.
omni::graph::core::NameToken const* m_primPaths{ nullptr }; // Backward compatibility - cached prim paths.
omni::graph::core::NameToken const* m_primTypes{ nullptr }; // Backward compatibility - cached prim types.
friend class ConstBundlePrim;
friend class BundlePrim;
friend class BundleAttrib;
};
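// ====================================================================================================
//
// Example (illustrative sketch only; not part of the original header).
//
// Read-only traversal of a bundle using the wrappers declared above: iterate the prims, then the
// attributes of each prim. The context/handle parameters are assumptions for illustration, so the
// block is compiled out on purpose.
//
// ====================================================================================================
#if 0
inline void constBundlePrimsTraversalExample(omni::graph::core::GraphContextObj const& context,
                                             omni::graph::core::ConstBundleHandle const& inputBundle)
{
    ConstBundlePrims bundlePrims{ context, inputBundle };
    for (ConstBundlePrim& prim : bundlePrims)
    {
        omni::graph::core::NameToken path = prim.path();
        for (BundleAttrib const& attr : prim)
        {
            // Typed access follows the same rules as BundleAttrib::getData<T>().
            float const* values = attr.getConstData<float>();
            (void)values;
        }
        (void)path;
    }
}
#endif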
/**
* Primitives in Bundle iterator.
*/
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrimIterator
{
public:
ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept;
ConstBundlePrimIterator(ConstBundlePrimIterator const& that) noexcept = default;
ConstBundlePrimIterator& operator=(ConstBundlePrimIterator const& that) noexcept = default;
bool operator==(ConstBundlePrimIterator const& that) const noexcept;
bool operator!=(ConstBundlePrimIterator const& that) const noexcept;
ConstBundlePrim& operator*() noexcept;
ConstBundlePrim* operator->() noexcept;
ConstBundlePrimIterator& operator++() noexcept;
private:
ConstBundlePrims* m_bundlePrims;
BundlePrimIndex m_primIndex;
};
/**
* Attributes in Primitive iterator.
*/
class OMNI_GRAPH_IO_DEPRECATED ConstBundlePrimAttrIterator
{
public:
ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim, ConstBundlePrim::AttrMapIteratorType attrIter) noexcept;
ConstBundlePrimAttrIterator(ConstBundlePrimAttrIterator const& that) noexcept = default;
ConstBundlePrimAttrIterator& operator=(ConstBundlePrimAttrIterator const& that) noexcept = default;
bool operator==(ConstBundlePrimAttrIterator const& that) const noexcept;
bool operator!=(ConstBundlePrimAttrIterator const& that) const noexcept;
BundleAttrib const& operator*() const noexcept;
BundleAttrib const* operator->() const noexcept;
ConstBundlePrimAttrIterator& operator++() noexcept;
private:
ConstBundlePrim* m_bundlePrim;
ConstBundlePrim::AttrMapIteratorType m_attrIter;
};
} // namespace io
} // namespace graph
} // namespace omni
#include "ConstBundlePrimsImpl.h"
| 15,915 | C | 33.6 | 118 | 0.660446 |
omniverse-code/kit/include/omni/graph/action/IActionGraph.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IActionGraph.h
//!
//! @brief Defines @ref omni::graph::action::IActionGraph_abi
#pragma once
#include <omni/core/IObject.h>
#include <omni/core/Omni.h>
#include <omni/graph/core/iComputeGraph.h>
namespace omni
{
namespace graph
{
namespace action
{
//! Declare the IActionGraph interface definition
OMNI_DECLARE_INTERFACE(IActionGraph);
/**
* @brief Functions for implementing nodes which are used in `Action Graph`.
*
* Nodes in `Action Graph` have special functionality which is not present in other graph types.
*/
class IActionGraph_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.action.IActionGraph")>
{
protected:
/**
* @brief Indicate that the given output attribute should be enabled, so that execution flow should continue
* along downstream networks.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] attributeName attribute on the current node to be set
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
virtual OMNI_ATTR("no_py") omni::core::Result
setExecutionEnabled_abi(omni::graph::core::NameToken attributeName,
omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;
/**
* @brief Indicate that the given output attribute should be enabled, and the current node should be pushed to the
* execution @c stack. This means that when the downstream execution flow has completed, this node will be
* @c popped from the execution stack and its @c compute function will be called again.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] attributeName attribute on the current node to be set
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
virtual OMNI_ATTR("no_py") omni::core::Result
setExecutionEnabledAndPushed_abi(omni::graph::core::NameToken attributeName,
omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;
/**
* @brief Indicate that the current execution flow should be blocked at the given node, and the node should be
* @c ticked every update of the Graph (@c compute function called), until it calls \ref endLatentState_abi.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
virtual OMNI_ATTR("no_py") omni::core::Result
startLatentState_abi(omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;
/**
* @brief Indicate that the current execution flow should be un-blocked at the given node, if it is currently in a
* latent state. It is an error to call this function before calling \ref startLatentState_abi.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
virtual OMNI_ATTR("no_py") omni::core::Result
endLatentState_abi(omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;
/**
* @brief Read the current latent state of the node. This state is set using \ref startLatentState_abi and \ref endLatentState_abi
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
* @returns true if the node is currently in a latent state
*
* @return false if the node is not in a latent state, or the call failed
*/
virtual OMNI_ATTR("no_py") bool
getLatentState_abi(omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;
/**
* @brief Read the enabled state of an input execution attribute. An input attribute is considered enabled if it is
* connected to the upstream node that was computed immediately prior to the currently computing node. Event nodes
* and nodes in latent state may not have any enabled input.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] attributeName attribute on the current node to be set
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
* @returns true if the given attribute is considered enabled.
*
* @return false if the attribute is considered disabled or the call failed
*/
virtual OMNI_ATTR("no_py") bool
getExecutionEnabled_abi(omni::graph::core::NameToken attributeName,
omni::graph::core::InstanceIndex instanceIdx) noexcept = 0;
};
//! Access the IActionGraph interface. This is more efficient than creating an instance each time it is needed.
//!
//! The returned pointer is a singleton managed by *omni.graph.action*, and does *not* have @ref
//! omni::core::IObject::acquire() called on it before being returned. The caller should *not* call @ref
//! omni::core::IObject::release() on the returned raw pointer.
//!
//! @thread_safety This method is thread safe.
inline IActionGraph* getInterface() noexcept;
} // namespace action
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include "IActionGraph.gen.h"
// additional headers needed for API implementation
#include <omni/core/ITypeFactory.h>
inline omni::graph::action::IActionGraph* omni::graph::action::getInterface() noexcept
{
// createType() always calls acquire() and returns an ObjectPtr to make sure release() is called. we don't want to
// hold a ref here to avoid static destruction issues. here we allow the returned ObjectPtr to destruct (after
// calling get()) to release our ref. we know the DLL in which the singleton was created is maintaining a ref and
// will keep the singleton alive for the lifetime of the DLL.
static auto sSingleton = omni::core::createType<omni::graph::action::IActionGraph>().get();
return sSingleton;
}
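// Illustrative sketch (not an official example): a node's compute function
// could enable its downstream execution flow roughly as follows. The token
// and instance-index parameters are hypothetical placeholders for values the
// compute function would already have.
//
// @code
//     #include <omni/graph/action/IActionGraph.h>
//
//     void enableDownstreamFlow(omni::graph::core::NameToken execOutToken,
//                               omni::graph::core::InstanceIndex instanceIdx)
//     {
//         auto* actionGraph = omni::graph::action::getInterface();
//         // Continue execution along the network connected to execOutToken.
//         actionGraph->setExecutionEnabled(execOutToken, instanceIdx);
//     }
// @endcode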
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include "IActionGraph.gen.h"
| 7,073 | C | 44.057325 | 134 | 0.709459 |
omniverse-code/kit/include/omni/graph/action/IActionGraph.gen.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
/**
* @brief Functions for implementing nodes which are used in `Action Graph`.
*
* Nodes in `Action Graph` have special functionality which is not present in other graph types.
*/
template <>
class omni::core::Generated<omni::graph::action::IActionGraph_abi> : public omni::graph::action::IActionGraph_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::action::IActionGraph")
/**
* @brief Indicate that the given output attribute should be enabled, so that execution flow should continue
* along downstream networks.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] attributeName attribute on the current node to be set
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
omni::core::Result setExecutionEnabled(omni::graph::core::NameToken attributeName,
omni::graph::core::InstanceIndex instanceIdx) noexcept;
/**
* @brief Indicate that the given output attribute should be enabled, and the current node should be pushed to the
* execution @c stack. This means that when the downstream execution flow has completed, this node will be
* @c popped from the execution stack and its @c compute function will be called again.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] attributeName attribute on the current node to be set
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
omni::core::Result setExecutionEnabledAndPushed(omni::graph::core::NameToken attributeName,
omni::graph::core::InstanceIndex instanceIdx) noexcept;
/**
* @brief Indicate that the current execution flow should be blocked at the given node, and the node should be
* @c ticked every update of the Graph (@c compute function called), until it calls \ref endLatentState_abi.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
omni::core::Result startLatentState(omni::graph::core::InstanceIndex instanceIdx) noexcept;
/**
* @brief Indicate that the current execution flow should be un-blocked at the given node, if it is currently in a
* latent state. It is an error to call this function before calling \ref startLatentState_abi.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
*
* @return Success if executed successfully
*/
omni::core::Result endLatentState(omni::graph::core::InstanceIndex instanceIdx) noexcept;
/**
* @brief Read the current latent state of the node. This state is set using \ref startLatentState_abi and \ref
* endLatentState_abi
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
* @returns true if the node is currently in a latent state
*
* @return false if the node is not in a latent state, or the call failed
*/
bool getLatentState(omni::graph::core::InstanceIndex instanceIdx) noexcept;
/**
* @brief Read the enabled state of an input execution attribute. An input attribute is considered enabled if it is
* connected to the upstream node that was computed immediately prior to the currently computing node. Event nodes
* and nodes in latent state may not have any enabled input.
*
* @note This should only be called from within a node @c compute function.
*
* @param[in] attributeName attribute on the current node to be set
* @param[in] instanceIdx In vectorized context, the instance index relative to the currently targeted graph
* @returns true if the given attribute is considered enabled.
*
* @return false if the attribute is considered disabled or the call failed
*/
bool getExecutionEnabled(omni::graph::core::NameToken attributeName,
omni::graph::core::InstanceIndex instanceIdx) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::setExecutionEnabled(
omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept
{
return setExecutionEnabled_abi(attributeName, instanceIdx);
}
inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::setExecutionEnabledAndPushed(
omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept
{
return setExecutionEnabledAndPushed_abi(attributeName, instanceIdx);
}
inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::startLatentState(
omni::graph::core::InstanceIndex instanceIdx) noexcept
{
return startLatentState_abi(instanceIdx);
}
inline omni::core::Result omni::core::Generated<omni::graph::action::IActionGraph_abi>::endLatentState(
omni::graph::core::InstanceIndex instanceIdx) noexcept
{
return endLatentState_abi(instanceIdx);
}
inline bool omni::core::Generated<omni::graph::action::IActionGraph_abi>::getLatentState(
omni::graph::core::InstanceIndex instanceIdx) noexcept
{
return getLatentState_abi(instanceIdx);
}
inline bool omni::core::Generated<omni::graph::action::IActionGraph_abi>::getExecutionEnabled(
omni::graph::core::NameToken attributeName, omni::graph::core::InstanceIndex instanceIdx) noexcept
{
return getExecutionEnabled_abi(attributeName, instanceIdx);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 7,053 | C | 41.751515 | 119 | 0.716858 |
omniverse-code/kit/include/omni/graph/action/PyIActionGraph.gen.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
auto bindIActionGraph(py::module& m)
{
// hack around pybind11 issues with C++17
// - https://github.com/pybind/pybind11/issues/2234
// - https://github.com/pybind/pybind11/issues/2666
// - https://github.com/pybind/pybind11/issues/2856
py::class_<omni::core::Generated<omni::graph::action::IActionGraph_abi>,
omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::action::IActionGraph_abi>>,
omni::core::IObject>
clsParent(m, "_IActionGraph");
py::class_<omni::graph::action::IActionGraph, omni::core::Generated<omni::graph::action::IActionGraph_abi>,
omni::python::detail::PyObjectPtr<omni::graph::action::IActionGraph>, omni::core::IObject>
cls(m, "IActionGraph", R"OMNI_BIND_RAW_(@brief Functions for implementing nodes which are used in `Action Graph`.
Nodes in `Action Graph` have special functionality which is not present in other graph types.)OMNI_BIND_RAW_");
cls.def(py::init(
[](const omni::core::ObjectPtr<omni::core::IObject>& obj)
{
auto tmp = omni::core::cast<omni::graph::action::IActionGraph>(obj.get());
if (!tmp)
{
throw std::runtime_error("invalid type conversion");
}
return tmp;
}));
cls.def(py::init(
[]()
{
auto tmp = omni::core::createType<omni::graph::action::IActionGraph>();
if (!tmp)
{
throw std::runtime_error("unable to create omni::graph::action::IActionGraph instantiation");
}
return tmp;
}));
return omni::python::PyBind<omni::graph::action::IActionGraph>::bind(cls);
}
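// Illustrative sketch (not an official example): a bindings library would
// typically call bindIActionGraph() while defining its pybind11 module; the
// module name below is hypothetical.
//
// @code
//     PYBIND11_MODULE(_exampleActionBindings, m)
//     {
//         bindIActionGraph(m);
//     }
// @endcode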
| 2,401 | C | 41.14035 | 121 | 0.640983 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPopulatePass.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPopulatePass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPopulatePass.
#pragma once
#include <omni/graph/exec/unstable/IPass.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class INode;
class IPopulatePass;
class IPopulatePass_abi;
//! Base class for populate passes.
//!
//! Register a populate pass with @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(). When registering a pass, a "name to
//! match" is also specified. This is the name of the node or definition that the registered pass should populate.
//!
//! Populate passes are typically the first pass type to run in the pass pipeline. When a node is encountered during
//! construction, only a single populate pass will get a chance to populate the newly discovered node. If no pass is
//! registered against the node's name, the node definition's name is used to find a population pass to run.
//!
//! A populate pass is allowed to attach a new definition to the node it runs on.
//!
//! Each time it runs, the pass should aim to rebuild as little of the execution graph topology as possible. The pass
//! pipeline leaves the decision of whether a pass needs to run to the implementation. At minimum, the pass can verify
//! that the topology of the @ref omni::graph::exec::unstable::NodeGraphDef it generated previously is still valid, or
//! that the @ref omni::graph::exec::unstable::NodeDef has not changed.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPopulatePass_abi : public omni::core::Inherits<IPass, OMNI_TYPE_ID("omni.graph.exec.unstable.IPopulatePass")>
{
protected:
//! Call from pass pipeline to apply graph transformations on a given node (definition or topology).
virtual OMNI_ATTR("throw_result") omni::core::Result run_abi(IGraphBuilder* builder, INode* node) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IPopulatePass.
using PopulatePassPtr = omni::core::ObjectPtr<IPopulatePass>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPopulatePass.gen.h>
//! @copydoc omni::graph::exec::unstable::IPopulatePass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPopulatePass
: public omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPopulatePass.gen.h>
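// Illustrative sketch (not an official example) of the shape of a populate
// pass: the class name, the "MyNodeType" match string, and the use of the
// Implements<> helper are assumptions, and the registration macro's exact
// arguments may differ.
//
// @code
//     class MyPopulatePass
//         : public omni::graph::exec::unstable::Implements<omni::graph::exec::unstable::IPopulatePass>
//     {
//     protected:
//         omni::core::Result run_abi(omni::graph::exec::unstable::IGraphBuilder* builder,
//                                    omni::graph::exec::unstable::INode* node) noexcept override
//         {
//             // Attach or refresh a definition on `node` via `builder` here.
//             return omni::core::kResultSuccess;
//         }
//     };
//
//     // Registered against the node or definition name it should populate, e.g.:
//     // OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(MyPopulatePass, "MyNodeType");
// @endcode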
| 3,256 | C | 37.773809 | 117 | 0.764128 |
omniverse-code/kit/include/omni/graph/exec/unstable/IGraph.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Top-level container for storing the Execution Framework's graph of graphs.
//!
//! @ref omni::graph::exec::unstable::IGraph is the top-level container used to store the graph of graphs. This
//! top-level container is referred to as the <i>execution graph</i>.
//!
//! @ref omni::graph::exec::unstable::IGraph's responsibilities include:
//!
//! - Tracking if the graph is currently being constructed. See @ref omni::graph::exec::unstable::IGraph::inBuild().
//!
//! - Tracking gross changes to the topologies of graphs within the execution graph. This is done with the <i>global
//! topology stamp</i> (see @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp()). Each time a topology
//! is invalidated, the global topology stamp is incremented. Consumers of the execution graph can use this stamp to
//! detect changes in the graph. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! - Owning and providing access to the top level graph definition (see @ref
//! omni::graph::exec::unstable::IGraph::getNodeGraphDef()). The root node of the top-level graph definition is the
//! root of execution graph. @ref omni::graph::exec::unstable::IGraph is the only container, other than @ref
//! omni::graph::exec::unstable::INode, that attaches to definitions.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for more information on how @ref omni::graph::exec::unstable::IGraph
//! fits into the Execution Framework.
//!
//! See @ref omni::graph::exec::unstable::Graph for a concrete implementation of this interface.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraph_abi> : public omni::graph::exec::unstable::IGraph_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraph")
//! Access the top-level node graph definition.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;
//! Name set on the graph during construction.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
const omni::graph::exec::unstable::ConstName& getName() noexcept;
    //! Return the global topology stamp of the graph. Useful when detecting that the graph transformation pipeline
    //! needs to run.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} to understand how this stamp is used to detect changes
//! in the graph.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph. It is up to the caller to mutate the stamp in a thread safe manner.
omni::graph::exec::unstable::Stamp* getGlobalTopologyStamp() noexcept;
//! Return @c true if a @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
//!
//! @thread_safety This method is thread safe.
bool inBuild() noexcept;
//! Mark that an @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
//!
//! Each builder should call @c _setInBuild(true) followed by @c _setInBuild(false) once building is complete. Since
//! multiple builders can be active at a time, it is safe for this method to be called multiple times.
//!
//! This method should only be called by @ref omni::graph::exec::unstable::IGraphBuilder.
//!
//! @thread_safety This method is thread safe.
void _setInBuild(bool inBuild) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
omni::graph::exec::unstable::IGraph_abi>::getNodeGraphDef() noexcept
{
return getNodeGraphDef_abi();
}
inline const omni::graph::exec::unstable::ConstName& omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>::getName() noexcept
{
return *(getName_abi());
}
inline omni::graph::exec::unstable::Stamp* omni::core::Generated<
omni::graph::exec::unstable::IGraph_abi>::getGlobalTopologyStamp() noexcept
{
return getGlobalTopologyStamp_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>::inBuild() noexcept
{
return inBuild_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>::_setInBuild(bool inBuild) noexcept
{
_setInBuild_abi(inBuild);
}
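// Illustrative sketch (not an official example): one way a consumer might
// detect graph changes with the global topology stamp. It assumes the
// Execution Framework's SyncStamp helper with a makeSync() method; the
// cached-stamp argument is a hypothetical member a consumer would keep.
//
// @code
//     bool topologyChanged(omni::graph::exec::unstable::IGraph* graph,
//                          omni::graph::exec::unstable::SyncStamp& cachedStamp)
//     {
//         // makeSync() returns true when the cached stamp was out of date,
//         // i.e. when some topology in the execution graph was invalidated.
//         return cachedStamp.makeSync(*graph->getGlobalTopologyStamp());
//     }
// @endcode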
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 5,648 | C | 42.122137 | 135 | 0.712819 |
omniverse-code/kit/include/omni/graph/exec/unstable/SmallVector.h | // Copied from USD repository: https://github.com/PixarAnimationStudios/USD
//
// Copyright 2019 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#pragma once
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Contains parts of the small vector implementation that do not depend on
//! *all* of SmallVector's template parameters.
class SmallVectorBase
{
public:
//! Size type
using size_type = std::uint32_t;
//! Difference type
using difference_type = std::uint32_t;
//! Returns the local capacity that may be used without increasing the size
//! of the SmallVector. SmallVector<T, N> will never use more local
//! capacity than is specified by N but clients that wish to maximize local
//! occupancy in a generic way can compute N using this function.
template <typename U>
static constexpr size_type ComputeSerendipitousLocalCapacity()
{
return (alignof(U) <= alignof(_Data<U, 0>)) ? sizeof(_Data<U, 0>) / sizeof(U) : 0;
}
protected:
//! Invoke std::uninitialized_copy that either moves or copies entries,
//! depending on whether the type is move constructible or not.
template <typename Iterator>
static Iterator _UninitializedMove(Iterator first, Iterator last, Iterator dest)
{
return std::uninitialized_copy(std::make_move_iterator(first), std::make_move_iterator(last), dest);
}
//! Invokes either the move or copy constructor (via placement new),
//! depending on whether U is move constructible or not.
template <typename U>
static void _MoveConstruct(U* p, U* src)
{
new (p) U(std::move(*src));
}
#ifndef DOXYGEN_BUILD
    //! The data storage, which is a union of the local storage and a pointer
    //! holding the address of the remote storage on the heap, if used.
template <typename U, size_type M>
union _Data
{
public:
//! Returns raw pointer to local storage of type @c U
U* GetLocalStorage()
{
return reinterpret_cast<U*>(_local);
}
//! Returns const raw pointer to local storage of type @c U
const U* GetLocalStorage() const
{
return reinterpret_cast<const U*>(_local);
}
//! Returns raw pointer to remote storage of type @c U
U* GetRemoteStorage()
{
return _remote;
}
//! Returns const raw pointer to remote storage of type @c U
const U* GetRemoteStorage() const
{
return _remote;
}
//! Sets remote storage to @p p
void SetRemoteStorage(U* p)
{
_remote = p;
}
private:
alignas(U) char _local[sizeof(U) * M];
U* _remote;
};
//! For N == 0 the _Data class has been specialized to elide the local
//! storage completely. This way we don't have to rely on compiler-specific
//! support for 0-sized arrays.
template <typename U>
union _Data<U, 0>
{
public:
//! Specialization for 0-sized local storage. Returns nullptr.
U* GetLocalStorage()
{
// XXX: Could assert here. Introduce dependency on tf/diagnostic.h?
return nullptr;
}
//! Specialization for 0-sized local storage. Returns nullptr.
const U* GetLocalStorage() const
{
// XXX: Could assert here. Introduce dependency on tf/diagnostic.h?
return nullptr;
}
//! Returns raw pointer to remote storage of type @c U
U* GetRemoteStorage()
{
return _remote;
}
//! Returns const raw pointer to remote storage of type @c U
const U* GetRemoteStorage() const
{
return _remote;
}
//! Sets remote storage to @p p
void SetRemoteStorage(U* p)
{
_remote = p;
}
private:
U* _remote;
};
#endif // DOXYGEN_BUILD
};
//!
//! \class SmallVector
//!
//! This is a small-vector class with local storage optimization, the local
//! storage can be specified via a template parameter, and expresses the
//! number of entries the container can store locally.
//!
//! In addition to the local storage optimization, this vector is also
//! optimized for storing a smaller number of entries on the heap: It features
//! a reduced memory footprint (minimum 16 bytes) by limiting max_size() to
//! 2^32, which should still be more than enough for most use cases where a
//! small-vector is advantageous.
//!
//! SmallVector mimics the std::vector API, and can thus be easily used as a
//! drop-in replacement where appropriate. Note, however, that not all the
//! methods on std::vector are implemented here, and that SmallVector may
//! have methods in addition to those that you would find on std::vector.
//!
//! Note that a SmallVector that has grown beyond its local storage will
//! NOT move its entries back into the local storage once it shrinks back to N.
//!
template <typename T, std::size_t N>
class SmallVector : public SmallVectorBase
{
public:
//! @{
//! Relevant type definitions
using value_type = T;
//! Relevant type definitions
using reference = T&;
//! Relevant type definitions
using const_reference = const T&;
    //! @}
//! @{
//! Iterator Support
using iterator = T*;
//! Iterator Support
using const_iterator = const T*;
//! Iterator Support
using reverse_iterator = std::reverse_iterator<iterator>;
//! Iterator Support
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
    //! @}
//! Default constructor.
//!
SmallVector() : _size(0), _capacity(N)
{
}
//! Construct a vector holding \p n value-initialized elements.
//!
explicit SmallVector(size_type n) : _capacity(N)
{
_InitStorage(n);
value_type* d = data();
for (size_type i = 0; i < n; ++i)
{
new (d + i) value_type();
}
}
//! Construct a vector holding \p n copies of \p v.
//!
SmallVector(size_type n, const value_type& v) : _capacity(N)
{
_InitStorage(n);
std::uninitialized_fill_n(data(), n, v);
}
//! Enum to disambiguate constructors
enum DefaultInitTag
{
DefaultInit
};
//! Construct a vector holding \p n default-initialized elements.
//!
SmallVector(size_type n, DefaultInitTag) : _capacity(N)
{
_InitStorage(n);
value_type* d = data();
for (size_type i = 0; i < n; ++i)
{
new (d + i) value_type;
}
}
//! Copy constructor.
//!
SmallVector(const SmallVector& rhs) : _capacity(N)
{
_InitStorage(rhs.size());
std::uninitialized_copy(rhs.begin(), rhs.end(), begin());
}
//! Move constructor.
//!
SmallVector(SmallVector&& rhs) : _size(0), _capacity(N)
{
// If rhs can not be stored locally, take rhs's remote storage and
// reset rhs to empty.
if (rhs.size() > N)
{
_data.SetRemoteStorage(rhs._data.GetRemoteStorage());
std::swap(_capacity, rhs._capacity);
}
// If rhs is stored locally, it's faster to simply move the entries
// into this vector's storage, destruct the entries at rhs, and swap
// sizes. Note that capacities will be the same in this case, so no
// need to swap those.
else
{
_UninitializedMove(rhs.begin(), rhs.end(), begin());
rhs._Destruct();
}
std::swap(_size, rhs._size);
}
//! Construct a new vector from initializer list
SmallVector(std::initializer_list<T> values) : SmallVector(values.begin(), values.end())
{
}
    //! Compile-time check used to enable a method only when a forward iterator is available
template <typename _ForwardIterator>
using _EnableIfForwardIterator =
typename std::enable_if<std::is_convertible<typename std::iterator_traits<_ForwardIterator>::iterator_category,
std::forward_iterator_tag>::value>::type;
//! Creates a new vector containing copies of the data between
//! \p first and \p last.
template <typename ForwardIterator, typename = _EnableIfForwardIterator<ForwardIterator>>
SmallVector(ForwardIterator first, ForwardIterator last) : _capacity(N)
{
_InitStorage(static_cast<difference_type>(std::distance(first, last)));
std::uninitialized_copy(first, last, begin());
}
//! Destructor.
//!
~SmallVector()
{
_Destruct();
_FreeStorage();
}
//! Assignment operator.
//!
SmallVector& operator=(const SmallVector& rhs)
{
if (this != &rhs)
{
assign(rhs.begin(), rhs.end());
}
return *this;
}
//! Move assignment operator.
//!
SmallVector& operator=(SmallVector&& rhs)
{
if (this != &rhs)
{
swap(rhs);
}
return *this;
}
//! Replace existing contents with the contents of \p ilist.
//!
SmallVector& operator=(std::initializer_list<T> ilist)
{
assign(ilist.begin(), ilist.end());
return *this;
}
//! Swap two vector instances.
//!
void swap(SmallVector& rhs)
{
// Both this vector and rhs are stored locally.
if (_IsLocal() && rhs._IsLocal())
{
SmallVector* smaller = size() < rhs.size() ? this : &rhs;
SmallVector* larger = size() < rhs.size() ? &rhs : this;
// Swap all the entries up to the size of the smaller vector.
std::swap_ranges(smaller->begin(), smaller->end(), larger->begin());
// Move the tail end of the entries, and destruct them at the
// source vector.
for (size_type i = smaller->size(); i < larger->size(); ++i)
{
_MoveConstruct(smaller->data() + i, &(*larger)[i]);
(*larger)[i].~value_type();
}
// Swap sizes. Capacities are already equal in this case.
std::swap(smaller->_size, larger->_size);
}
// Both this vector and rhs are stored remotely. Simply swap the
// pointers, as well as size and capacity.
else if (!_IsLocal() && !rhs._IsLocal())
{
value_type* tmp = _data.GetRemoteStorage();
_data.SetRemoteStorage(rhs._data.GetRemoteStorage());
rhs._data.SetRemoteStorage(tmp);
std::swap(_size, rhs._size);
std::swap(_capacity, rhs._capacity);
}
// Either this vector or rhs is stored remotely, whereas the other
// one is stored locally.
else
{
SmallVector* remote = _IsLocal() ? &rhs : this;
SmallVector* local = _IsLocal() ? this : &rhs;
// Get a pointer to the remote storage. We'll be overwriting the
// pointer value below, so gotta retain it first.
value_type* remoteStorage = remote->_GetStorage();
// Move all the entries from the vector with the local storage, to
// the other vector's local storage. This will overwrite the pointer
// to the other vectors remote storage. Note that we will have to
// also destruct the elements at the source's local storage. The
// source will become the one with the remote storage, so those
// entries will be essentially freed.
for (size_type i = 0; i < local->size(); ++i)
{
_MoveConstruct(remote->_data.GetLocalStorage() + i, &(*local)[i]);
(*local)[i].~value_type();
}
// Swap the remote storage into the vector which previously had the
// local storage. It's been properly cleaned up now.
local->_data.SetRemoteStorage(remoteStorage);
// Swap sizes and capacities. Easy peasy.
std::swap(remote->_size, local->_size);
std::swap(remote->_capacity, local->_capacity);
}
}
//! Insert an rvalue-reference entry at the given iterator position.
//!
iterator insert(const_iterator it, value_type&& v)
{
return _Insert(it, std::move(v));
}
//! Insert an entry at the given iterator.
//!
iterator insert(const_iterator it, const value_type& v)
{
return _Insert(it, v);
}
//! Erase an entry at the given iterator.
//!
iterator erase(const_iterator it)
{
return erase(it, it + 1);
}
//! Erase entries between [ \p first, \p last ) from the vector.
//!
iterator erase(const_iterator it, const_iterator last)
{
value_type* p = const_cast<value_type*>(&*it);
value_type* q = const_cast<value_type*>(&*last);
// If we're not removing anything, bail out.
if (p == q)
{
return iterator(p);
}
const difference_type num = static_cast<difference_type>(std::distance(p, q));
        // Move entries starting at last down a few slots to start at it.
value_type* e = data() + size();
std::move(q, e, p);
// Destruct all the freed up slots at the end of the vector.
for (value_type* i = (e - num); i != e; ++i)
{
i->~value_type();
}
// Bump down the size.
_size -= num;
// Return an iterator to the next entry.
return iterator(p);
}
//! Reserve storage for \p newCapacity entries.
//!
void reserve(size_type newCapacity)
{
// Only reserve storage if the new capacity would grow past the local
// storage, or the currently allocated storage. We'll grow to
// accommodate exactly newCapacity entries.
if (newCapacity > capacity())
{
_GrowStorage(newCapacity);
}
}
//! Resize the vector to \p newSize and insert copies of \p v.
//!
void resize(size_type newSize, const value_type& v = value_type())
{
// If the new size is smaller than the current size, let go of some
// entries at the tail.
if (newSize < size())
{
erase(const_iterator(data() + newSize), const_iterator(data() + size()));
}
        // Otherwise, let's grow and fill: Reserve some storage, fill the tail
// end with copies of v, and update the new size.
else if (newSize > size())
{
reserve(newSize);
std::uninitialized_fill(data() + size(), data() + newSize, v);
_size = newSize;
}
}
//! Clear the entries in the vector. Does not let go of the underpinning
//! storage.
//!
void clear()
{
_Destruct();
_size = 0;
}
//! Clears any previously held entries, and copies entries between
//! [ \p first, \p last ) to this vector.
//!
template <typename ForwardIterator, typename = _EnableIfForwardIterator<ForwardIterator>>
void assign(ForwardIterator first, ForwardIterator last)
{
clear();
const difference_type newSize = static_cast<difference_type>(std::distance(first, last));
reserve(newSize);
std::uninitialized_copy(first, last, begin());
_size = newSize;
}
//! Replace existing contents with the contents of \p ilist.
//!
void assign(std::initializer_list<T> ilist)
{
assign(ilist.begin(), ilist.end());
}
//! Emplace an entry at the back of the vector.
//!
template <typename... Args>
void emplace_back(Args&&... args)
{
if (size() == capacity())
{
_GrowStorage(_NextCapacity());
}
new (data() + size()) value_type(std::forward<Args>(args)...);
_size += 1;
}
    //! Copy an entry to the back of the vector.
//!
void push_back(const value_type& v)
{
emplace_back(v);
}
//! Move an entry to the back of the vector.
//!
void push_back(value_type&& v)
{
emplace_back(std::move(v));
}
//! Copy the range denoted by [\p first, \p last) into this vector
//! before \p pos.
//!
template <typename ForwardIterator>
void insert(iterator pos, ForwardIterator first, ForwardIterator last)
{
static_assert(std::is_convertible<typename std::iterator_traits<ForwardIterator>::iterator_category,
std::forward_iterator_tag>::value,
"Input Iterators not supported.");
// Check for the insert-at-end special case as the very first thing so
// that we give the compiler the best possible opportunity to
// eliminate the general case code.
const bool insertAtEnd = pos == end();
const difference_type numNewElems = (difference_type)std::distance(first, last);
const size_type neededCapacity = size() + numNewElems;
const size_type nextCapacity = std::max(_NextCapacity(), neededCapacity);
// Insertions at the end would be handled correctly by the code below
// without this special case. However, insert(end(), f, l) is an
// extremely common operation so we provide this fast path both to
// avoid unneeded work and to make it easier for the compiler to
// eliminate dead code when pos == end().
if (insertAtEnd)
{
// The reallocation here is not a simple reserve. We want to grow
// the storage only when there are too many new elements but the
// desired size is based on the growth factor.
if (neededCapacity > capacity())
{
_GrowStorage(nextCapacity);
}
std::uninitialized_copy(first, last, end());
_size += numNewElems;
return;
}
if (neededCapacity > capacity())
{
// Because we need to realloc, we can do the insertion by copying
// each range, [begin(), pos), [first, last), [pos, end()), into
// the new storage.
const size_type posI = (size_type)std::distance(begin(), pos);
value_type* newStorage = _Allocate(nextCapacity);
iterator newPrefixBegin = iterator(newStorage);
iterator newPos = newPrefixBegin + posI;
iterator newSuffixBegin = newPos + numNewElems;
_UninitializedMove(begin(), pos, newPrefixBegin);
std::uninitialized_copy(first, last, newPos);
_UninitializedMove(pos, end(), newSuffixBegin);
// Destroy old data and set up this new buffer.
_Destruct();
_FreeStorage();
_data.SetRemoteStorage(newStorage);
_capacity = nextCapacity;
}
else
{
// Insert in-place requires handling four ranges.
//
// For both the range-to-move [pos, end()) and the range-to-insert
// [first, last), there are two subranges: the subrange to copy
            // and the subrange to uninitialized_copy. Note that only three of
// these ranges may be non-empty: either there is a non-empty
// prefix of [pos, end()) that needs to be copied over existing
// elements or there is a non-empty suffix of [first, last) that
// needs to be placed in uninitialized storage.
const difference_type numMoveElems = (difference_type)std::distance(pos, end());
const difference_type numUninitMoves = (difference_type)std::min(numNewElems, numMoveElems);
const difference_type numInitMoves = numMoveElems - numUninitMoves;
const difference_type numUninitNews = numNewElems - numUninitMoves;
const difference_type numInitNews = numNewElems - numUninitNews;
// Move our existing elements out of the way of new elements.
iterator umSrc = pos + numInitMoves;
iterator umDst = end() + numUninitNews;
_UninitializedMove(umSrc, end(), umDst);
std::copy_backward(pos, umSrc, umDst);
// Copy new elements into place.
for (difference_type i = 0; i < numInitNews; ++i, ++first, ++pos)
{
*pos = *first;
}
std::uninitialized_copy(first, last, end());
}
_size += numNewElems;
}
//! Insert elements from \p ilist starting at position \p pos.
//!
void insert(iterator pos, std::initializer_list<T> ilist)
{
insert(pos, ilist.begin(), ilist.end());
}
//! Remove the entry at the back of the vector.
//!
void pop_back()
{
back().~value_type();
_size -= 1;
}
//! Returns the current size of the vector.
//!
size_type size() const
{
return _size;
}
//! Returns the maximum size of this vector.
//!
static constexpr size_type max_size()
{
return std::numeric_limits<size_type>::max();
}
//! Returns \c true if this vector is empty.
//!
bool empty() const
{
return size() == 0;
}
//! Returns the current capacity of this vector. Note that if the returned
//! value is <= N, it does NOT mean the storage is local. A vector that has
    //! previously grown beyond its local storage will not move entries back to
//! the local storage once it shrinks to N.
//!
size_type capacity() const
{
return _capacity;
}
//! Returns the local storage capacity. The vector uses its local storage
//! if capacity() <= internal_capacity().
//! This method mimics the boost::container::small_vector interface.
//!
static constexpr size_type internal_capacity()
{
return N;
}
//! Returns an iterator to the beginning of the vector.
//! @{
iterator begin()
{
return iterator(_GetStorage());
}
//! Returns an iterator to the beginning of the vector.
const_iterator begin() const
{
return const_iterator(_GetStorage());
}
//! Returns an iterator to the beginning of the vector.
const_iterator cbegin() const
{
return begin();
}
//! @}
//! Returns an iterator to the end of the vector.
//! @{
iterator end()
{
return iterator(_GetStorage() + size());
}
//! Returns an iterator to the end of the vector.
const_iterator end() const
{
return const_iterator(_GetStorage() + size());
}
//! Returns an iterator to the end of the vector.
const_iterator cend() const
{
return end();
}
//! @}
//! Returns a reverse iterator to the beginning of the vector.
//! @{
reverse_iterator rbegin()
{
return reverse_iterator(end());
}
//! Returns a reverse iterator to the beginning of the vector.
const_reverse_iterator rbegin() const
{
return const_reverse_iterator(end());
}
//! Returns a reverse iterator to the beginning of the vector.
const_reverse_iterator crbegin() const
{
return rbegin();
}
//! @}
//! @{
//! Returns a reverse iterator to the end of the vector.
reverse_iterator rend()
{
return reverse_iterator(begin());
}
//! Returns a reverse iterator to the end of the vector.
const_reverse_iterator rend() const
{
return const_reverse_iterator(begin());
}
//! Returns a reverse iterator to the end of the vector.
const_reverse_iterator crend() const
{
return rend();
}
//! @}
//! Returns the first element in the vector.
//!
reference front()
{
return *begin();
}
//! Returns the first element in the vector.
//!
const_reference front() const
{
return *begin();
}
//! Returns the last element in the vector.
//!
reference back()
{
return *(data() + size() - 1);
}
    //! Returns the last element in the vector.
//!
const_reference back() const
{
return *(data() + size() - 1);
}
//! Access the specified element.
//!
reference operator[](size_type i)
{
return *(data() + i);
}
//! Access the specified element.
//!
const_reference operator[](size_type i) const
{
return *(data() + i);
}
//! Direct access to the underlying array.
//!
value_type* data()
{
return _GetStorage();
}
//! Direct access to the underlying array.
//!
const value_type* data() const
{
return _GetStorage();
}
//! Lexicographically compares the elements in the vectors for equality.
//!
bool operator==(const SmallVector& rhs) const
{
return size() == rhs.size() && std::equal(begin(), end(), rhs.begin());
}
//! Lexicographically compares the elements in the vectors for inequality.
//!
bool operator!=(const SmallVector& rhs) const
{
return !operator==(rhs);
}
private:
//! Returns true if the local storage is used.
bool _IsLocal() const
{
return _capacity <= N;
}
//! Return a pointer to the storage, which is either local or remote
//! depending on the current capacity.
value_type* _GetStorage()
{
return _IsLocal() ? _data.GetLocalStorage() : _data.GetRemoteStorage();
}
//! Return a const pointer to the storage, which is either local or remote
//! depending on the current capacity.
const value_type* _GetStorage() const
{
return _IsLocal() ? _data.GetLocalStorage() : _data.GetRemoteStorage();
}
//! Free the remotely allocated storage.
void _FreeStorage()
{
if (!_IsLocal())
{
free(_data.GetRemoteStorage());
}
}
//! Destructs all the elements stored in this vector.
void _Destruct()
{
value_type* b = data();
value_type* e = b + size();
for (value_type* p = b; p != e; ++p)
{
p->~value_type();
}
}
//! Allocate a buffer on the heap.
static value_type* _Allocate(size_type size)
{
return static_cast<value_type*>(malloc(sizeof(value_type) * size));
}
//! Initialize the vector with new storage, updating the capacity and size.
void _InitStorage(size_type size)
{
if (size > capacity())
{
_data.SetRemoteStorage(_Allocate(size));
_capacity = size;
}
_size = size;
}
//! Grow the storage to be able to accommodate newCapacity entries. This
    //! always allocates remote storage.
void _GrowStorage(const size_type newCapacity)
{
value_type* newStorage = _Allocate(newCapacity);
_UninitializedMove(begin(), end(), iterator(newStorage));
_Destruct();
_FreeStorage();
_data.SetRemoteStorage(newStorage);
_capacity = newCapacity;
}
//! Returns the next capacity to use for vector growth. The growth factor
//! here is 1.5. A constant 1 is added so that we do not have to special
//! case initial capacities of 0 and 1.
size_type _NextCapacity() const
{
const size_type cap = capacity();
return cap + (cap / 2) + 1;
}
    //! Insert the value v at iterator it. We use this method, which takes a
    //! universal reference, to de-duplicate the logic required for the insert
    //! overloads, one taking an rvalue reference and the other taking a
    //! const reference. This way, we can take the most optimal code path
    //! (move, or copy without making redundant copies) based on whether v is
    //! an rvalue reference or a const reference.
template <typename U>
iterator _Insert(const_iterator it, U&& v)
{
value_type* newEntry;
// If the iterator points to the end, simply push back.
if (it == end())
{
push_back(std::forward<U>(v));
return end() - 1;
}
// Grow the remote storage, if we need to. This invalidates iterators,
// so special care must be taken in order to return a new, valid
// iterator.
else if (size() == capacity())
{
const size_type newCapacity = _NextCapacity();
value_type* newStorage = _Allocate(newCapacity);
value_type* i = const_cast<value_type*>(&*it);
value_type* curData = data();
newEntry = _UninitializedMove(curData, i, newStorage);
new (newEntry) value_type(std::forward<U>(v));
_UninitializedMove(i, curData + size(), newEntry + 1);
_Destruct();
_FreeStorage();
_data.SetRemoteStorage(newStorage);
_capacity = newCapacity;
}
// Our current capacity is big enough to allow us to simply shift
// elements up one slot and insert v at it.
else
{
// Move all the elements after it up by one slot.
newEntry = const_cast<value_type*>(&*it);
value_type* last = const_cast<value_type*>(&back());
new (data() + size()) value_type(std::move(*last));
std::move_backward(newEntry, last, last + 1);
// Move v into the slot at the supplied iterator position.
newEntry->~value_type();
new (newEntry) value_type(std::forward<U>(v));
}
// Bump size and return an iterator to the newly inserted entry.
++_size;
return iterator(newEntry);
}
//! The vector storage, which is a union of the local storage and a pointer
//! to the heap memory, if allocated.
_Data<value_type, N> _data;
//! The current size of the vector, i.e. how many entries it contains.
size_type _size;
//! The current capacity of the vector, i.e. how big the currently allocated
//! storage space is.
size_type _capacity;
};
//! Swap function for @ref SmallVector
template <typename T, std::size_t N>
void swap(SmallVector<T, N>& a, SmallVector<T, N>& b)
{
a.swap(b);
}
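// Illustrative sketch (not an official example) of typical SmallVector usage:
//
// @code
//     using omni::graph::exec::unstable::SmallVector;
//
//     SmallVector<int, 4> v{ 1, 2, 3 }; // held in the 4-entry local buffer
//     v.push_back(4);                   // still local: size() == capacity() == 4
//     v.push_back(5);                   // exceeds N, grows into remote (heap) storage
//     bool maybeLocal = v.capacity() <= v.internal_capacity(); // false after growing
// @endcode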
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 31,672 | C | 29.810311 | 119 | 0.584775 |
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilder.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Graph builder is the only class that has the ability to modify the topology of a graph.
//!
//! Topological edits of the graph are only allowed during graph transformation and should never
//! be performed during execution of the graph. Construction of the builder will automatically drop
//! all the connections between nodes.
//!
//! Methods on this class mutating a graph topology are not thread-safe (unless documented otherwise)
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>
: public omni::graph::exec::unstable::IGraphBuilder_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilder")
    //! Return the owner of all graphs this builder touches.
//!
//! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraph* getGraph() noexcept;
//! Returns the topology this builder can modify.
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::ITopology* getTopology() noexcept;
//! Returns the context in which this builder works.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderContext will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraphBuilderContext* getContext() noexcept;
//! Returns @ref omni::graph::exec::unstable::INodeGraphDef this builder can modify.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;
//! Connect two given nodes.
//!
//! It is an error if the two nodes are not in the same topology.
//!
//! Neither given node should be @c nullptr.
//!
    //! Neither @ref omni::graph::exec::unstable::INode has @ref omni::core::IObject::acquire() called
//! during the connection process.
//!
//! May throw.
void connect(omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode);
//! Disconnect two given nodes.
//!
//! It is an error if the two nodes are not in the same topology.
//!
//! Neither given node should be @c nullptr.
//!
    //! Neither @ref omni::graph::exec::unstable::INode has @ref omni::core::IObject::acquire() called
//! during the disconnection process.
//!
//! May throw.
void disconnect(omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode);
//! Remove a node from topology.
//!
//! The given node must not be @c nullptr.
//!
//! May throw.
void remove(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);
    //! Sets the definition for the given node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef().
//!
//! This method is NOT thread safe.
void setNodeDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept;
    //! Sets the definition for the given node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef().
//!
//! This method is NOT thread safe.
void setNodeGraphDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept;
//! Unsets given node's definition.
//!
//! If the definition is already @c nullptr, this method does nothing.
//!
//! This method is NOT thread safe.
void clearDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) noexcept;
    //! Replace a well-formed cluster of nodes with a single node and the given definition.
//!
//! All nodes must exist in the same and current topology, otherwise the entire operation is aborted.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! This method is NOT thread safe.
void replacePartition(const omni::graph::exec::unstable::NodePartition& partition,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition);
    //! Create a new node in the current node graph def.
//!
//! The given node name must not be @c nullptr.
//!
//! The given node def can be @c nullptr.
//!
    //! Node creation can return @c nullptr when the current node graph def doesn't allow node construction outside
//! of the pass that created it.
//!
//! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
omni::core::ObjectPtr<omni::graph::exec::unstable::INode> createNode(
const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def);
    //! Access the nodes created by this builder.
    //!
    //! The span is no longer valid when the topology of the graph changes; query it again in that case.
    //!
    //! If a node created here is later removed by another pass, the returned list will still contain it.
    //! This is safe because the underlying nodes are not deleted until the next graph population.
    //! Checking whether a node is valid in the current topology allows these cases to be filtered out.
//!
//! The pointers in the span are non owning, i.e. @ref omni::graph::exec::unstable::INode will not have
//! @ref omni::core::IObject::acquire() called on it.
omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getCreatedNodes() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::IGraph* omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::getGraph() noexcept
{
return getGraph_abi();
}
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getTopology() noexcept
{
return getTopology_abi();
}
inline omni::graph::exec::unstable::IGraphBuilderContext* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getContext() noexcept
{
return getContext_abi();
}
inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getNodeGraphDef() noexcept
{
return getNodeGraphDef_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::connect(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode)
{
OMNI_THROW_IF_ARG_NULL(upstreamNode);
OMNI_THROW_IF_ARG_NULL(downstreamNode);
OMNI_THROW_IF_FAILED(connect_abi(upstreamNode.get(), downstreamNode.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::disconnect(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode)
{
OMNI_THROW_IF_ARG_NULL(upstreamNode);
OMNI_THROW_IF_ARG_NULL(downstreamNode);
OMNI_THROW_IF_FAILED(disconnect_abi(upstreamNode.get(), downstreamNode.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::remove(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
OMNI_THROW_IF_ARG_NULL(node);
OMNI_THROW_IF_FAILED(remove_abi(node.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::setNodeDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept
{
setNodeDef_abi(node.get(), nodeDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::setNodeGraphDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept
{
setNodeGraphDef_abi(node.get(), nodeGraphDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::clearDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) noexcept
{
clearDef_abi(node.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::replacePartition(
const omni::graph::exec::unstable::NodePartition& partition,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition)
{
OMNI_THROW_IF_ARG_NULL(definition);
replacePartition_abi(&partition, definition.get());
}
inline omni::core::ObjectPtr<omni::graph::exec::unstable::INode> omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::createNode(const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def)
{
OMNI_THROW_IF_ARG_NULL(name);
auto return_ = omni::core::steal(createNode_abi(name, def.get()));
return return_;
}
inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getCreatedNodes() noexcept
{
return getCreatedNodes_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 11,158 | C | 40.63806 | 134 | 0.684083 |
omniverse-code/kit/include/omni/graph/exec/unstable/GraphBuilder.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file GraphBuilder.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilder.
#pragma once
#include <carb/Format.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/IGraphBuilderContext.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeFactory.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/INodeGraphDefDebug.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IGraphBuilder
template <typename... Bases>
class GraphBuilderT : public Implements<Bases...>
{
public:
//! Construct graph builder for a root @ref INodeGraphDef.
//!
//! Constructing a graph builder has the side effect of invalidating the underlying @c topology.
//!
//! May throw.
static omni::core::ObjectPtr<GraphBuilderT> create(omni::core::ObjectParam<IGraphBuilderContext> context)
{
OMNI_THROW_IF_ARG_NULL(context);
OMNI_GRAPH_EXEC_ASSERT(
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(context->getGraph()->getNodeGraphDef()) ||
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(context->getGraph()->getNodeGraphDef())->isExecuting());
auto builder = omni::core::steal(new GraphBuilderT(context.get(), context->getGraph()->getNodeGraphDef()));
auto topology = builder->getTopology();
topology->invalidate();
builder->_modifiedTopology(topology);
return builder;
}
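// Example (illustrative sketch; `myContext` and `myDef` are assumptions, not part of this header):
// a pass can use a builder to create and wire nodes.
//
//   auto builder = omni::graph::exec::unstable::GraphBuilder::create(myContext);
//   auto nodeA = builder->createNode("A", myDef); // may return nullptr if the def forbids construction
//   auto nodeB = builder->createNode("B", myDef);
//   if (nodeA && nodeB)
//   {
//       builder->connect(nodeA, nodeB); // nodeA becomes an upstream dependency of nodeB
//   }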
//! Construct graph builder for a given @ref INodeGraphDef.
//!
//! Constructing a graph builder has the side effect of invalidating the underlying @c topology.
//!
//! May throw.
static omni::core::ObjectPtr<GraphBuilderT> create(omni::core::ObjectParam<IGraphBuilderContext> context,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef)
{
OMNI_THROW_IF_ARG_NULL(context);
OMNI_THROW_IF_ARG_NULL(nodeGraphDef);
OMNI_GRAPH_EXEC_ASSERT(!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get()) ||
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get())->isExecuting());
auto builder = omni::core::steal(new GraphBuilderT(context.get(), nodeGraphDef.get()));
auto topology = builder->getTopology();
topology->invalidate();
builder->_modifiedTopology(topology);
return builder;
}
//! Construct graph builder for a given @ref INodeGraphDef without causing topology invalidation.
//!
//! This builder is used by the pass pipeline when operations on the graph will alter the existing topology.
//!
//! May throw.
static omni::core::ObjectPtr<GraphBuilderT> createForPass(omni::core::ObjectParam<IGraphBuilderContext> context,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef)
{
OMNI_THROW_IF_ARG_NULL(context);
OMNI_THROW_IF_ARG_NULL(nodeGraphDef);
OMNI_GRAPH_EXEC_ASSERT(!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get()) ||
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get())->isExecuting());
auto builder = omni::core::steal(new GraphBuilderT(context.get(), nodeGraphDef.get()));
// Detect when the node graph was constructed outside of the pass pipeline. Tag these defs as created
// during the current construction stamp.
//
// This usage pattern currently only occurs in tests.
auto topology = nodeGraphDef->getTopology();
if (!topology->getConstructionStamp().isValid())
{
builder->_modifiedTopology(topology);
}
return builder;
}
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getGraph_abi
IGraph* getGraph_abi() noexcept override
{
return m_context->getGraph();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getTopology_abi
ITopology* getTopology_abi() noexcept override
{
return m_nodeGraphDef->getTopology();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getContext_abi
IGraphBuilderContext* getContext_abi() noexcept override
{
return m_context;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getNodeGraphDef_abi
INodeGraphDef* getNodeGraphDef_abi() noexcept override
{
return m_nodeGraphDef;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::connect_abi
omni::core::Result connect_abi(INode* upstreamNode, INode* downstreamNode) noexcept override
{
try
{
_modifiedTopology(upstreamNode->getTopology());
IGraphBuilderNode* upstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(upstreamNode);
IGraphBuilderNode* downstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(downstreamNode);
if (_connect(upstream, downstream))
{
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail);
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::disconnect_abi
omni::core::Result disconnect_abi(INode* upstreamNode, INode* downstreamNode) noexcept override
{
try
{
_modifiedTopology(upstreamNode->getTopology());
IGraphBuilderNode* upstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(upstreamNode);
IGraphBuilderNode* downstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(downstreamNode);
if (_disconnect(upstream, downstream))
{
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail);
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::remove_abi
omni::core::Result remove_abi(INode* node) noexcept override
{
_modifiedTopology(node->getTopology());
try
{
IGraphBuilderNode* nodeToRemove = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node);
nodeToRemove->validateOrResetTopology();
// Nodes don't have an edge back to the root. Check if the removed node is a child of the root.
auto graphRoot = nodeToRemove->getRoot();
graphRoot->_removeChild(nodeToRemove); // Silently fails if node is not a root child.
// Cache these pointers to avoid virtual method overhead.
auto children = nodeToRemove->getChildren();
auto parents = nodeToRemove->getParents();
// Disconnect all parents from the node to be removed.
for (auto parent : parents)
{
parent->_removeChild(nodeToRemove);
}
// Disconnect all children from the node to be removed.
for (auto child : children)
{
child->_removeParent(nodeToRemove);
}
// Invalidate all remaining connections of the node to be removed.
nodeToRemove->_invalidateConnections();
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef_abi
void setNodeDef_abi(INode* node, INodeDef* nodeDef) noexcept override
{
_modifiedTopology(node->getTopology());
exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_setNodeDef(nodeDef);
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef_abi
void setNodeGraphDef_abi(INode* node, INodeGraphDef* nodeGraphDef) noexcept override
{
_modifiedTopology(node->getTopology());
exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_setNodeGraphDef(nodeGraphDef);
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::clearDef_abi
void clearDef_abi(INode* node) noexcept override
{
_modifiedTopology(node->getTopology());
exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_clearDef();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::replacePartition_abi
void replacePartition_abi(const NodePartition* partition, IDef* definition) noexcept override
{
if (partition->size() == 0)
return;
// validate the partition
INode* rootNode = partition->front()->getRoot();
for (auto nodeInPartition : *partition)
{
if (!nodeInPartition->isValidTopology() || nodeInPartition->getRoot() != rootNode)
{
return;
}
}
// mutate the graph
_commitPartition(m_nodeGraphDef, partition, definition);
_modifiedTopology(m_nodeGraphDef->getTopology());
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::createNode_abi
INode* createNode_abi(const char* name, IDef* def) noexcept override
{
if (auto factory = m_nodeGraphDef->getNodeFactory())
{
auto newNode = factory->createNode(name, def);
m_createdNodes.push_back(newNode.get());
return newNode.detach();
}
else
{
return nullptr;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getCreatedNodes_abi
Span<INode* const> getCreatedNodes_abi() noexcept override
{
return m_createdNodes.size() ? Span<INode* const>{ m_createdNodes.begin(), m_createdNodes.size() } :
Span<INode* const>{ nullptr, 0 };
}
//! Constructor
GraphBuilderT(IGraphBuilderContext* context, INodeGraphDef* nodeGraphDef)
: m_context{ context }, m_nodeGraphDef{ nodeGraphDef }
{
m_context->getGraph()->_setInBuild(true);
}
~GraphBuilderT()
{
m_context->getGraph()->_setInBuild(false);
}
private:
//! Notifies that this builder has modified the topology of a graph. Currently the modified topology may not be
//! the one belonging to the NodeGraphDef this builder refers to.
//!
void _modifiedTopology(ITopology* modifiedTopology)
{
modifiedTopology->_setConstructionInSync(m_context->getConstructionStamp());
}
bool _connect(IGraphBuilderNode* upstream, IGraphBuilderNode* downstream)
{
if (upstream->getTopology() == downstream->getTopology())
{
upstream->validateOrResetTopology();
downstream->validateOrResetTopology();
if (!upstream->hasChild(downstream))
{
upstream->_addChild(downstream);
if (!upstream->isRoot())
{
downstream->_addParent(upstream);
}
}
return true;
}
return false;
}
bool _disconnect(IGraphBuilderNode* upstream, IGraphBuilderNode* downstream)
{
if (upstream->getTopology() == downstream->getTopology())
{
upstream->validateOrResetTopology();
downstream->validateOrResetTopology();
if (upstream->hasChild(downstream))
{
upstream->_removeChild(downstream);
if (!upstream->isRoot())
{
downstream->_removeParent(upstream);
}
}
return true;
}
return false;
}
//! Make changes to the topology with already validated partition and definition.
void _commitPartition(INodeGraphDef* nodeGraphDef, const NodePartition* partition, IDef* definition) noexcept
{
OMNI_GRAPH_EXEC_ASSERT(nodeGraphDef && definition && (partition->size() > 0));
OMNI_GRAPH_EXEC_ASSERT(nodeGraphDef->getNodeFactory().get());
// we affect the topology, but shouldn't require any memory operation
std::vector<INode*> parents, children;
// optimization, let's assume each node has one parent and one child from outside of the partition
parents.reserve(partition->size() * 2);
children.reserve(partition->size() * 2);
// we want cost to be linear and for that we are going to avoid searches in the partition
// we achieve this by collecting all parents/children (some will be in the partition),
// then invalidating the partition and cleaning up the immediate upstream and downstream
for (auto nodeInPartition : *partition)
{
for (auto parent : nodeInPartition->getParents())
{
parents.push_back(parent);
}
for (auto child : nodeInPartition->getChildren())
{
children.push_back(child);
}
// make the node invalid without invalidating the entire topology
exec::unstable::cast<IGraphBuilderNode>(nodeInPartition)->_invalidateConnections();
}
// generate replacement node
std::string nodeName = carb::fmt::format("Partition_{}", partition->front()->getName().getString().c_str());
auto newNode = createNode_abi(nodeName.c_str(), definition);
auto newBuilderNode = exec::unstable::cast<IGraphBuilderNode>(newNode);
// in one pass: cleanup the topology and reconnect to the new node
auto rootBuilderNode = exec::unstable::cast<IGraphBuilderNode>(partition->front()->getRoot());
rootBuilderNode->_removeInvalidChildren();
for (auto parent : parents)
{
if (parent->isValidTopology())
{
auto parentBuilderNode = exec::unstable::cast<IGraphBuilderNode>(parent);
parentBuilderNode->_removeInvalidChildren();
this->_connect(parentBuilderNode, newBuilderNode);
}
}
for (auto child : children)
{
if (child->isValidTopology())
{
auto childBuilderNode = exec::unstable::cast<IGraphBuilderNode>(child);
childBuilderNode->_removeInvalidParents();
this->_connect(newBuilderNode, childBuilderNode);
}
}
// Need to make sure we are connected to the root (indirectly, or directly if this is an entry node)
if (newNode->getParents().size() == 0)
{
this->_connect(rootBuilderNode, newBuilderNode);
}
}
IGraphBuilderContext* m_context{ nullptr }; //!< All graph builders are operating within a context. We store pointer
//!< to it.
INodeGraphDef* m_nodeGraphDef{ nullptr }; //!< Graph topology this builder can modify. This is not yet enforced in
//!< code.
//! Most of the time we won't need any space. The size of 2 was chosen arbitrarily.
using NodeArray = SmallVector<INode*, 2>;
NodeArray m_createdNodes; //!< Collect nodes created dynamically to allow pass pipeline discover them.
};
//! Core GraphBuilder implementation for @ref omni::graph::exec::unstable::IGraphBuilder
using GraphBuilder = GraphBuilderT<IGraphBuilder>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 16,438 | C | 37.141531 | 123 | 0.63122 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPassPipeline.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Runs registered passes.
//!
//! The role of the pass pipeline is to populate and prepare the execution graph. The base implementation runs passes
//! based on their type and registration order. Most applications will define their own pass pipeline to control how
//! the execution graph is generated.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>
: public omni::graph::exec::unstable::IPassPipeline_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassPipeline")
//! Test if pipeline needs to rebuild (mostly for its acceleration structures).
bool needsConstruction() noexcept;
//! Build the pipeline (mostly for its acceleration structures).
void construct();
//! Test if pipeline needs to run (after topology changes in the graph).
bool needsExecute(const omni::graph::exec::unstable::Stamp& globalTopology) noexcept;
//! Execute the graph transformations pipeline
void execute(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderContext> builderContext,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::needsConstruction() noexcept
{
return needsConstruction_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::construct()
{
OMNI_THROW_IF_FAILED(construct_abi());
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::needsExecute(
const omni::graph::exec::unstable::Stamp& globalTopology) noexcept
{
return needsExecute_abi(globalTopology);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::execute(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderContext> builderContext,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef)
{
OMNI_THROW_IF_ARG_NULL(builderContext);
OMNI_THROW_IF_ARG_NULL(nodeGraphDef);
OMNI_THROW_IF_FAILED(execute_abi(builderContext.get(), nodeGraphDef.get()));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 3,149 | C | 34.795454 | 119 | 0.741505 |
omniverse-code/kit/include/omni/graph/exec/unstable/SchedulingInfo.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file SchedulingInfo.h
//!
//! @brief Defines omni::graph::exec::unstable::SchedulingInfo.
#pragma once
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Constraints to be fulfilled by the scheduler when dispatching a task.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
enum class SchedulingInfo
{
eSerial, //!< Execution of a task should be serialized globally. No other serial task should be running.
eParallel, //!< Execution of a task can be done safely in parallel. Parallel tasks can run alongside serial tasks.
eIsolate, //!< Execution of a task has to be done in isolation. No other tasks can run concurrently.
eSchedulerBypass //!< Execution of a task should bypass the scheduler. Either to avoid overhead for lightweight
//!< tasks or to serialize within a thread generating the work.
};
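// Example (minimal sketch; MyComputeDef is a hypothetical IDef implementation) of how a definition
// reports its constraint to the scheduler:
//
//   SchedulingInfo MyComputeDef::getSchedulingInfo_abi(const ExecutionTask*) noexcept
//   {
//       return SchedulingInfo::eParallel; // safe to run concurrently with other tasks
//   }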
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 1,481 | C | 35.146341 | 116 | 0.744092 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundTask.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Class representing a background task.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBackgroundTask_abi>
: public omni::graph::exec::unstable::IBackgroundTask_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundTask")
//! Returns a @c std::future like object used to check if the background task has completed.
//!
//! An error is returned if this method is called more than once.
//!
//! This method is not thread safe.
omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> getBackgroundResult();
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> omni::core::Generated<
omni::graph::exec::unstable::IBackgroundTask_abi>::getBackgroundResult()
{
omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> out;
OMNI_THROW_IF_FAILED(getBackgroundResult_abi(out.put()));
return out;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 1,874 | C | 30.77966 | 99 | 0.730523 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionCurrentThread.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IExecutionCurrentThread.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IExecutionCurrentThread.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionTask;
class IExecutionContext;
class IExecutionCurrentThread_abi;
class IExecutionCurrentThread;
class IExecutionStateInfo;
class IExecutor;
class IGraph;
//! Encapsulates the execution state for the current thread allowing callers to determine quantities like the @ref
//! omni::graph::exec::unstable::ExecutionTask currently executing on the thread.
//!
//! Because methods in this interface return thread local data, all methods in this interface are thread safe.
//!
//! This interface is usually accessed as a singleton via one of the following helper methods:
//!
//! - @ref omni::graph::exec::unstable::getCurrentTask()
//!
//! - @ref omni::graph::exec::unstable::getCurrentExecutor()
//!
//! This interface contains methods for graph and task execution. Users should not call these methods directly. See
//! the methods' docs below for the correct way to perform execution.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
class IExecutionCurrentThread_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutionCurrentThread")>
{
protected:
//! Executes the given @ref omni::graph::exec::unstable::Graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::IExecutionContext::execute().
//!
//! From an ABI point-of-view, the purpose of this method is to handle the special case of the top-level @ref
//! omni::graph::exec::unstable::INodeGraphDef being contained by @ref omni::graph::exec::unstable::IGraph rather
//! than pointed to by a node in another @ref omni::graph::exec::unstable::INodeGraphDef. Meaningful values are set
//! for the threads current task and executor (see @ref omni::graph::exec::unstable::getCurrentTask() and @ref
//! omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
virtual Status executeGraph_abi(OMNI_ATTR("not_null, throw_if_null") IGraph* graph,
OMNI_ATTR("not_null, throw_if_null") IExecutionContext* context) noexcept = 0;
//! Executes and sets the thread's "current" task to the given task.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::ExecutionTask::execute().
//!
//! This method executes the definition of the node pointed to by the given task. Importantly, this method sets
//! thread local data to track the currently running task and executor (see @ref
//! omni::graph::exec::unstable::getCurrentTask() and @ref omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
virtual Status execute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* task,
IExecutor* executor,
OMNI_ATTR("in, out, not_null, throw_if_null") Status* taskStatus) noexcept = 0;
//! Access the task currently executing on the current thread.
//!
//! Useful when needing to access execution context state without having to pass it to every function.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentTask().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
virtual ExecutionTask* getCurrentTask_abi() noexcept = 0;
//! Access the executor currently executing on the current thread.
//!
//! Useful when needing to spawn extra work within the scope of the graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentExecutor().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
virtual OMNI_ATTR("no_acquire") IExecutor* getCurrentExecutor_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref IExecutionCurrentThread.
using ExecutionCurrentThreadPtr = omni::core::ObjectPtr<IExecutionCurrentThread>;
//! Access current thread's execution state.
//!
//! The returned pointer is a singleton managed by *omni.graph.exec*, and does *not* have @ref
//! omni::core::IObject::acquire() called on it before being returned. The caller should *not* call @ref
//! omni::core::IObject::release() on the returned raw pointer.
//!
//! @thread_safety This method is thread safe.
inline IExecutionCurrentThread* getCurrentThread() noexcept;
//! Access the task currently being executed on the calling thread.
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
inline ExecutionTask* getCurrentTask() noexcept;
//! Access the executor currently used on the calling thread.
//!
//! Useful when needing to spawn extra work within the scope of the graph.
//!
//! The returned @ref IExecutor does *not* have @ref omni::core::IObject::acquire() called before being returned.
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
inline IExecutor* getCurrentExecutor() noexcept;
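// Example (illustrative sketch; assumes ExecutionTask exposes its context via getContext()): code running
// inside a definition can reach per-execution state without passing it through every call.
//
//   if (ExecutionTask* task = getCurrentTask())
//   {
//       IExecutionContext* context = task->getContext();
//       // ... read or write per-node state through the context ...
//   }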
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutionCurrentThread.gen.h>
//! @copydoc omni::graph::exec::unstable::IExecutionCurrentThread_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutionCurrentThread
: public omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>
{
};
// additional headers needed for API implementation
#include <omni/core/ITypeFactory.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/IExecutor.h>
#include <omni/graph/exec/unstable/IGraph.h>
inline omni::graph::exec::unstable::IExecutionCurrentThread* omni::graph::exec::unstable::getCurrentThread() noexcept
{
// createType() always calls acquire() and returns an ObjectPtr to make sure release() is called. we don't want to
// hold a ref here to avoid static destruction issues. here we allow the returned ObjectPtr to destruct (after
// calling get()) to release our ref. we know the DLL in which the singleton was created is maintaining a ref and
// will keep the singleton alive for the lifetime of the DLL.
static auto sSingleton = omni::core::createType<IExecutionCurrentThread>().get();
return sSingleton;
}
inline omni::graph::exec::unstable::ExecutionTask* omni::graph::exec::unstable::getCurrentTask() noexcept
{
return getCurrentThread()->getCurrentTask();
}
inline omni::graph::exec::unstable::IExecutor* omni::graph::exec::unstable::getCurrentExecutor() noexcept
{
return getCurrentThread()->getCurrentExecutor();
}
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutionCurrentThread.gen.h>
| 7,916 | C | 41.794594 | 120 | 0.721071 |
omniverse-code/kit/include/omni/graph/exec/unstable/INodeFactory.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Factory interface for creating @ref omni::graph::exec::unstable::INode objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! See @ref omni::graph::exec::unstable::createNodeFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeFactory_abi>
: public omni::graph::exec::unstable::INodeFactory_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeFactory")
//! Creates and returns a new node within the topology this factory came from.
//!
//! It is legal to pass @c nullptr as the definition; otherwise the definition may be either an
//! @ref omni::graph::exec::unstable::INodeDef or an @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
omni::core::ObjectPtr<omni::graph::exec::unstable::INode> createNode(
const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::core::ObjectPtr<omni::graph::exec::unstable::INode> omni::core::Generated<
omni::graph::exec::unstable::INodeFactory_abi>::createNode(const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def)
{
OMNI_THROW_IF_ARG_NULL(name);
omni::core::ObjectPtr<omni::graph::exec::unstable::INode> out;
OMNI_THROW_IF_FAILED(createNode_abi(name, def.get(), out.put()));
return out;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 2,523 | C | 36.117647 | 126 | 0.696393 |
omniverse-code/kit/include/omni/graph/exec/unstable/IInvalidationForwarder.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Interface wrapping a function (possibly with storage) to forward topology invalidation notices.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi>
: public omni::graph::exec::unstable::IInvalidationForwarder_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IInvalidationForwarder")
//! Invokes the wrapped function.
//!
//! The given topology must not be @c nullptr.
void invoke(omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi>::invoke(
omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology)
{
OMNI_THROW_IF_ARG_NULL(topology);
invoke_abi(topology.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 1,731 | C | 29.928571 | 99 | 0.737724 |
omniverse-code/kit/include/omni/graph/exec/unstable/Graph.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Graph.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraph.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/ExecutorFactory.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/NodeGraphDef.h>
#include <memory>
#include <string>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IGraph
template <typename... Bases>
class GraphT : public Implements<Bases...>
{
public:
//! Construct a graph with a default executor attached to an empty node graph.
//!
//! May throw.
static omni::core::ObjectPtr<GraphT> create(const char* name)
{
OMNI_THROW_IF_ARG_NULL(name);
return omni::core::steal(new GraphT(name));
}
//! Construct a graph with a given executor and an empty node graph.
//!
//! May throw.
static omni::core::ObjectPtr<GraphT> create(const ExecutorFactory& executorFactory, const char* name)
{
OMNI_THROW_IF_ARG_NULL(name);
return omni::core::steal(new GraphT(executorFactory, name));
}
//! Construct a graph with the given node graph.
//!
//! The signature of @p nodeGraphDefFactory must be equivalent to `NodeGraphDefPtr(IGraph*)`.
//!
//! May throw.
template <typename Fn>
static omni::core::ObjectPtr<GraphT> create(const char* name, Fn&& nodeGraphDefFactory)
{
OMNI_THROW_IF_ARG_NULL(name);
return omni::core::steal(new GraphT(name, std::forward<Fn>(nodeGraphDefFactory)));
}
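// Example (illustrative sketch; the names "my.graph" and "my.root" are placeholders): constructing a
// graph whose top-level definition is produced by a custom factory returning a NodeGraphDefPtr.
//
//   auto graph = Graph::create("my.graph",
//                              [](IGraph* owner) { return NodeGraphDef::create(owner, "my.root"); });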
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::getNodeGraphDef_abi
INodeGraphDef* getNodeGraphDef_abi() noexcept override
{
return m_nodeGraphDef.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::getName_abi
const ConstName* getName_abi() noexcept override
{
return &m_name;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp_abi
Stamp* getGlobalTopologyStamp_abi() noexcept override
{
return &m_globalTopologyStamp;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::inBuild_abi
virtual bool inBuild_abi() noexcept override
{
return (m_inBuild > 0);
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::_setInBuild_abi
virtual void _setInBuild_abi(bool inBuild) noexcept override
{
if (inBuild)
{
++m_inBuild;
}
else
{
--m_inBuild;
OMNI_GRAPH_EXEC_ASSERT(m_inBuild > -1);
}
}
//! Constructor
//!
//! Construct with a default top level graph definition
GraphT(const char* name) : m_name(name)
{
m_globalTopologyStamp.next();
m_nodeGraphDef = NodeGraphDef::create(this, "NODE-ROOT"); // may throw
}
//! Constructor
//!
//! Construct with a custom executor for a top level graph definition
GraphT(const ExecutorFactory& executorFactory, const char* name) : m_name(name)
{
m_globalTopologyStamp.next();
m_nodeGraphDef = NodeGraphDef::create(this, executorFactory, "NODE-ROOT"); // may throw
}
//! Constructor
//!
//! Construct with a custom top level graph factory
template <typename Fn>
GraphT(const char* name, Fn&& nodeGraphDefFactory) : m_name(name)
{
m_globalTopologyStamp.next();
m_nodeGraphDef = nodeGraphDefFactory(this); // may throw
}
private:
Stamp m_globalTopologyStamp; //!< Global graph topology stamp. Incremented every time any nested topology changes.
omni::core::ObjectPtr<INodeGraphDef> m_nodeGraphDef; //!< Top level node graph definition
ConstName m_name; //!< Name of the execution graph
//! How many builders are active. Atomic since multiple builders may be running in parallel.
std::atomic<int> m_inBuild{ 0 };
};
//! Core Graph implementation for @ref omni::graph::exec::unstable::IGraph
using Graph = GraphT<IGraph>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,640 | C | 29.94 | 113 | 0.667457 |
omniverse-code/kit/include/omni/graph/exec/unstable/NodeGraphDef.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file NodeGraphDef.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::NodeGraphDef
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/Executor.h>
#include <omni/graph/exec/unstable/ExecutorFactory.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/INodeGraphDefDebug.h>
#include <omni/graph/exec/unstable/Topology.h>
#include <atomic>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::INodeGraphDef
template <typename... Bases>
class NodeGraphDefT : public Implements<Bases...>
{
public:
//! Construct graph node definition with default executor
//!
//! @param owner Execution graph having this graph as part of the global topology
//! @param definitionName The definition name is treated as a token that transformation passes can register against
//!
//! May throw.
static omni::core::ObjectPtr<NodeGraphDefT> create(omni::core::ObjectParam<IGraph> owner, const char* definitionName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(definitionName);
return omni::core::steal(new NodeGraphDefT(owner, definitionName));
}
//! Construct graph node definition with a given executor factory
//!
//! @param owner Execution graph having this graph as part of the global topology
//! @param executorFactory Factory returning executor for this graph
//! @param definitionName The definition name is treated as a token that transformation passes can register against
//!
//! May throw.
static omni::core::ObjectPtr<NodeGraphDefT> create(omni::core::ObjectParam<IGraph> owner,
const ExecutorFactory& executorFactory,
const char* definitionName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(definitionName);
return omni::core::steal(new NodeGraphDefT(owner, executorFactory, definitionName));
}
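// Example (illustrative sketch; MyExecutor is a hypothetical executor type): supplying a custom executor
// factory so that this definition is executed by MyExecutor instead of the fallback executor.
//
//   auto def = NodeGraphDef::create(
//       graph,
//       [](auto topology, auto& task) { return MyExecutor::create(topology, task); },
//       "my.definition");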
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeGraphDef
//!
//! Execution is delegated to @ref omni::graph::exec::unstable::IExecutor. The lifetime of an executor is only for a
//! single execution and any state that needs to persist longer than a single execution must be written with @ref
//! omni::graph::exec::unstable::IExecutionContext::setNodeData_abi()
Status execute_abi(ExecutionTask* info) noexcept override
{
// ef-docs nodegraphdef-execute-begin
omni::core::ObjectPtr<IExecutor> executor;
if (m_executorFactory)
{
executor = m_executorFactory(m_topology, *info);
}
else
{
executor = ExecutorFallback::create(m_topology, *info);
}
return executor->execute(); // execute the node specified by info->getNode()
// ef-docs nodegraphdef-execute-end
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeGraphDef
SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override
{
return SchedulingInfo::eSerial;
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::getName_abi for @ref NodeGraphDef
const ConstName* getName_abi() noexcept override
{
return &m_name;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::getTopology_abi
ITopology* getTopology_abi() noexcept override
{
return m_topology.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::initializeState_abi
omni::core::Result initializeState_abi(ExecutionTask* rootTask) noexcept override
{
return omni::core::kResultSuccess;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::preExecute_abi
Status preExecute_abi(ExecutionTask* info) noexcept override
{
return Status::eSuccess;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::postExecute_abi
Status postExecute_abi(ExecutionTask* info) noexcept override
{
return Status::eSuccess;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::getNodeFactory_abi
INodeFactory* getNodeFactory_abi() noexcept override
{
return nullptr;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::getExecutionCount_abi
uint64_t getExecutionCount_abi() noexcept override
{
return m_executionCount;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::incrementExecutionCount_abi
void incrementExecutionCount_abi() noexcept override
{
++m_executionCount;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::decrementExecutionCount_abi
void decrementExecutionCount_abi() noexcept override
{
--m_executionCount;
}
//! Constructor with a default executor
NodeGraphDefT(omni::core::ObjectParam<IGraph> owner, const char* definitionName) // may throw
: m_topology{ Topology::create(definitionName) }, m_name{ definitionName }
{
_addInvalidationForwarder(owner);
}
//! Constructor with a custom executor
NodeGraphDefT(omni::core::ObjectParam<IGraph> owner,
ExecutorFactory executorFactory,
const char* definitionName) // may throw
: m_topology{ Topology::create(definitionName) },
m_executorFactory(std::move(executorFactory)),
m_name{ definitionName }
{
_addInvalidationForwarder(owner);
}
private:
//! Private method that forwards topology invalidation to the execution graph.
//! An invalidated global topology allows the pass pipeline to run and discover invalidated definitions.
void _addInvalidationForwarder(omni::core::ObjectParam<IGraph> owner) // may throw
{
m_topology->addInvalidationForwarder(reinterpret_cast<InvalidationForwarderId>(owner.get()),
[global = owner->getGlobalTopologyStamp()](ITopology*) -> void
{ global->next(); });
}
omni::core::ObjectPtr<ITopology> m_topology; //!< Graphs topology
ExecutorFactory m_executorFactory; //!< Executor factory (if empty, default executor will be used)
std::atomic<std::size_t> m_executionCount{ 0 }; //!< Debugging counter to detect illegal executions.
ConstName m_name; //!< Definition name
};
//! Core NodeGraphDef implementation for @ref omni::graph::exec::unstable::INodeGraphDef
using NodeGraphDef = NodeGraphDefT<INodeGraphDef, INodeGraphDefDebug>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 7,478 | C | 37.953125 | 121 | 0.684006 |
omniverse-code/kit/include/omni/graph/exec/unstable/INodeFactory.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INodeFactory.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeFactory.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class IDef;
class INode;
class INodeFactory;
class INodeFactory_abi;
//! Factory interface for creating @ref omni::graph::exec::unstable::INode objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! See @ref omni::graph::exec::unstable::createNodeFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
class INodeFactory_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeFactory")>
{
protected:
//! Creates and returns a new node within the topology this factory came from.
//!
//! It is legal to pass @c nullptr as the definition; otherwise the definition may be either an
//! @ref omni::graph::exec::unstable::INodeDef or an @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
virtual OMNI_ATTR("throw_result") omni::core::Result
createNode_abi(OMNI_ATTR("in, not_null, throw_if_null, c_str") const char* name,
IDef* def,
OMNI_ATTR("not_null, throw_if_null, out, *return") INode** out) noexcept = 0;
};
//! Smart pointer managing an instance of @ref INodeFactory.
using NodeFactoryPtr = omni::core::ObjectPtr<INodeFactory>;
//! Generates an @ref INodeFactory from an invocable object such as a function pointer, functor, etc.
//!
//! The given function should have the signature `omni::core::ObjectPtr<INode>(const char*, IDef*)`.
template <typename Fn>
NodeFactoryPtr createNodeFactory(Fn&& fn);
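// Example (illustrative sketch; MyNode is a hypothetical INode implementation and the captured `topology`
// is an assumption): wrapping a lambda that instantiates nodes of a concrete type.
//
//   auto factory = createNodeFactory(
//       [topology](const char* name, IDef* def) -> omni::core::ObjectPtr<INode>
//       {
//           return MyNode::create(topology, name, def); // may throw
//       });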
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeFactory.gen.h>
//! @copydoc omni::graph::exec::unstable::INodeFactory_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeFactory
: public omni::core::Generated<omni::graph::exec::unstable::INodeFactory_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>
#ifndef DOXYGEN_BUILD
template <typename Fn>
omni::graph::exec::unstable::NodeFactoryPtr omni::graph::exec::unstable::createNodeFactory(Fn&& fn)
{
class FactoryImpl : public Implements<INodeFactory>
{
public:
FactoryImpl(Fn&& fn) : m_fn(std::move(fn))
{
}
protected:
omni::core::Result createNode_abi(const char* name, IDef* def, INode** out) noexcept override
{
try
{
NodePtr newNode = m_fn(name, def); // may throw
*out = newNode.detach();
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
Fn m_fn;
};
return omni::core::steal(new FactoryImpl(std::forward<Fn>(fn)));
}
#endif // DOXYGEN_BUILD
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeFactory.gen.h>
| 3,941 | C | 31.578512 | 124 | 0.694748 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResultWriter.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Functor interface used to write the result of a background task.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi>
: public omni::graph::exec::unstable::IBackgroundResultWriter_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundResultWriter")
//! Write the result.
omni::graph::exec::unstable::Status write(omni::graph::exec::unstable::ExecutionTask& info) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi>::write(
omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
return write_abi(&info);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 1,627 | C | 29.716981 | 130 | 0.741856 |
omniverse-code/kit/include/omni/graph/exec/unstable/BackgroundTask.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file BackgroundTask.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::BackgroundTask.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBackgroundTask.h>
#include <future>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IBackgroundTask
class BackgroundTask : public Implements<IBackgroundTask>
{
public:
//! Creates a new @ref BackgroundTask.
//!
//! May throw
static omni::core::ObjectPtr<BackgroundTask> create()
{
return omni::core::steal(new BackgroundTask);
}
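// Example (sketch of intended usage; scheduling details are omitted and methods outside this header are
// assumptions): the task hands out a single result object which a worker thread later fulfills.
//
//   auto task = BackgroundTask::create();
//   auto result = task->getBackgroundResult(); // valid to call only once
//   // ... a worker thread eventually fulfills the promise; poll or wait on `result`
//   // (e.g. via isReady()/waitFor()) before consuming the value ...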
protected:
//! Allows access to the result of an async operation.
using Future = std::future<omni::core::ObjectPtr<IBackgroundResultWriter>>;
//! Allows setting the result of an async operation.
using Promise = std::promise<omni::core::ObjectPtr<IBackgroundResultWriter>>;
//! @copydoc IBackgroundTask_abi::getBackgroundResult_abi
omni::core::Result getBackgroundResult_abi(IBackgroundResult** out) noexcept override
{
class Result : public Implements<IBackgroundResult>
{
public:
Result(Future&& future) : m_future(std::move(future))
{
}
protected:
omni::core::Result isReady_abi(bool* out) noexcept override
{
try
{
*out = (m_future.wait_for(std::chrono::milliseconds(0)) == std::future_status::ready); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
omni::core::Result cancel_abi(bool blocking) noexcept override
{
try
{
if (blocking)
{
m_future.wait(); // may throw
}
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
omni::core::Result write_abi(ExecutionTask* info, Status* out) noexcept override
{
try
{
*out = Status::eUnknown;
*out = m_future.get()->write(*info); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
omni::core::Result waitFor_abi(uint64_t nanoseconds, BackgroundResultStatus* out) noexcept override
{
try
{
auto result = m_future.wait_for(std::chrono::nanoseconds(nanoseconds)); // may throw
switch (result)
{
case std::future_status::deferred: // ?
case std::future_status::ready:
*out = BackgroundResultStatus::eReady;
break;
case std::future_status::timeout:
*out = BackgroundResultStatus::eTimeout;
break;
default:
throw std::logic_error("unknown future state");
}
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
private:
Future m_future;
};
try
{
*out = new Result(m_promise.get_future());
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! @copydoc IBackgroundTask_abi::setResultWriter_abi
omni::core::Result setResultWriter_abi(IBackgroundResultWriter* writer) noexcept override
{
try
{
m_promise.set_value(omni::core::borrow(writer));
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
private:
Promise m_promise;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,566 | C | 30.280822 | 119 | 0.552781 |
omniverse-code/kit/include/omni/graph/exec/unstable/Types.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Types.h
//!
//! @brief Defines typedefs used by interfaces.
//!
//! Because interface bindings are inlined and sometimes coupled, you sometimes need to break out typedefs into their
//! own file so that you can get the include order correct in interface .h files.
#pragma once
#include <omni/graph/exec/unstable/EnumBitops.h>
#include <cstdint>
#include <limits>
namespace omni
{
namespace graph
{
//! Omniverse Execution Framework (EF)
//!
//! The Execution Framework has no dependencies on OmniGraph and is designed to be front-end agnostic. It could very
//! much live in its own namespace, but we decided to make it part of the @ref omni::graph namespace. There is no
//! runtime without an authoring front-end, and we consider the OmniGraph everyone knows to be the front-end to
//! runtime execution. EF then sits at the backend, orchestrating execution of computation defined by one or many
//! front-ends.
//!
//! OmniGraph is becoming an umbrella for authoring front-end and execution backend.
namespace exec
{
//! Unstable features currently in development. Do not depend on any API or ABI in this namespace, as it will change
//! without notice.
namespace unstable
{
//! Each node in an @ref ITopology is given a unique index (via @ref ITopology::acquireNodeIndex()).
using NodeIndexInTopology = uint64_t;
//! Type which store a unique identifier for a node or definition.
using NameHash = uint64_t;
//! Hash of each node's topology index in a path.
using ExecutionPathHash = uint64_t;
//! Key for a piece of data attached to a node.
using NodeDataKey = uint64_t;
//! Pass priority used by @ref IPassPipeline to resolve conflicts between passes.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
using PassPriority = uint32_t;
//! Constant to denote an @ref INode has not been assigned an index in an @ref ITopology.
constexpr const uint64_t kInvalidNodeIndexInTopology = std::numeric_limits<uint64_t>::max();
static_assert(std::numeric_limits<uint64_t>::max() == 0xFFFFFFFFFFFFFFFF, "unexpected uin64_t max value");
//! Grouping type for different passes.
//!
//! Graph transformation pass is registered with a given type and type can't be changed after.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
//!
//! @note We are not yet using all these states...expect changes.
//!
//! @ingroup groupOmniGraphExecPassRegistration
enum class PassType
{
ePopulate, //!< open-up graph types
ePartitioning, //!< change granularity of executions (including executors)
eGlobal, //!< pass is running over entire graph. no other passes can run now
eTypeInference, //!< resolve types
eOverrideExecution, //!< override compute methods, executors, etc
eScheduling, //!< memory access, pipeline stages, etc
eCount //!< total number of known pass types
};
//! Current execution status of pass pipeline a @ref omni::graph::exec::unstable::IPassPipeline.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
enum class PassPipelineStatus : uint32_t
{
eNone = 0, //!< Pipeline is not executing.
eExecuting = 1 << 0, //!< Pipeline is running
eTopologyChangesAllowed = 1 << 1, //!< Pipeline is allowing mutating changes to topology
};
//! Enable bitwise operations on PassPipelineStatus state.
template <>
struct EnumBitops<PassPipelineStatus> : EnumBitops<>::allow_bitops
{
};
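// A minimal usage sketch (not part of this header): with the EnumBitops specialization above,
// PassPipelineStatus values can be combined and tested using the operators defined in EnumBitops.h, e.g.:
//
//   using omni::graph::exec::unstable::PassPipelineStatus;
//
//   PassPipelineStatus status =
//       PassPipelineStatus::eExecuting | PassPipelineStatus::eTopologyChangesAllowed;
//   if (to_bool(status & PassPipelineStatus::eTopologyChangesAllowed)) // to_bool is found via ADL
//   {
//       // topology mutations are currently allowed
//   }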
//! Result of waiting for the result of a @ref omni::graph::exec::unstable::IBackgroundResult.
enum class BackgroundResultStatus
{
eReady, //!< The result is ready.
    eTimeout, //!< The result did not become ready in the specified wait time.
};
//! Type specific function for deleting context specific execution data associated with a node.
//!
//! The function is expected to know the type given as the first arg and handle the deletion of the type in an
//! appropriate manner. Usually, this means casting the `void*` pointer to the proper type and calling `delete`.
using NodeDataDeleterFn = void(void*);
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,544 | C | 37.516949 | 117 | 0.746039 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBase.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for all @ref omni::graph::exec objects.
//!
//! Defines an interface for casting between objects without calling @ref omni::core::IObject::acquire().
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBase_abi> : public omni::graph::exec::unstable::IBase_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBase")
//! Casts this object to the type described the the given id.
//!
//! Returns @c nullptr if the cast was not successful.
//!
//! Unlike @ref omni::core::IObject::cast(), this casting method does not call @ref omni::core::IObject::acquire().
//!
//! @thread_safety This method is thread safe.
void* castWithoutAcquire(omni::core::TypeId id) noexcept;
//! Returns the number of different instances (this included) referencing the current object.
//!
//! @thread_safety This method is thread safe.
uint32_t getUseCount() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void* omni::core::Generated<omni::graph::exec::unstable::IBase_abi>::castWithoutAcquire(omni::core::TypeId id) noexcept
{
return castWithoutAcquire_abi(id);
}
inline uint32_t omni::core::Generated<omni::graph::exec::unstable::IBase_abi>::getUseCount() noexcept
{
return getUseCount_abi();
}
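// A minimal usage sketch (not part of this header; the object name is illustrative): castWithoutAcquire()
// lets a caller that already holds a reference perform a cheap downcast without an extra
// acquire()/release() pair:
//
//   void* raw = obj->castWithoutAcquire(OMNI_TYPE_ID("omni.graph.exec.unstable.IGraph"));
//   if (raw)
//   {
//       auto* graph = static_cast<omni::graph::exec::unstable::IGraph*>(raw);
//       (void)graph; // use it without calling release(); the original reference still owns the object
//   }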
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 2,190 | C | 30.753623 | 126 | 0.715982 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPassFactory.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Factory interface for creating @ref omni::graph::exec::unstable::IPass objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::IPassRegistry.
//!
//! See @ref omni::graph::exec::unstable::createPassFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassFactory_abi>
: public omni::graph::exec::unstable::IPassFactory_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassFactory")
//! Creates and returns a pass.
//!
//! The returned @ref omni::graph::exec::unstable::IPass will have @ref omni::core::IObject::acquire() called on it.
omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> createPass(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> omni::core::
Generated<omni::graph::exec::unstable::IPassFactory_abi>::createPass(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder)
{
OMNI_THROW_IF_ARG_NULL(builder);
omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> out;
OMNI_THROW_IF_FAILED(createPass_abi(builder.get(), out.put()));
return out;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 2,425 | C | 34.15942 | 120 | 0.722887 |
omniverse-code/kit/include/omni/graph/exec/unstable/ITopology.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! The Topology of a graph is stored in this class.
//!
//! @ref omni::graph::exec::unstable::ITopology is a helper interface used to quickly invalidate the topology, quickly
//! determine if the topology has been invalidated, assign each node in the topology a unique index (suitable for access
//! in contiguous memory), and provide access to the root node.
//!
//! Topologies play a large role in graph invalidation. See @rstref{Graph Invalidation <ef_graph_invalidation>} for
//! details.
//!
//! To better understand how this object relates to other objects in the Execution Framework, see
//! @rstref{Graph Concepts <ef_graph_concepts>}.
//!
//! See @ref omni::graph::exec::unstable::Topology for a concrete implementation of this interface.
template <>
class omni::core::Generated<omni::graph::exec::unstable::ITopology_abi> : public omni::graph::exec::unstable::ITopology_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::ITopology")
//! Returns how many nodes are alive in this topology. Some of the counted nodes may not be connected and
//! discoverable from the root node.
//!
//! @thread_safety This method is thread safe.
uint64_t getNodeCount() noexcept;
//! Returns the topology's root node that allows reaching all of the valid nodes in the topology.
//!
//! The returned @ref omni::graph::exec::unstable::INode will *not* have @ref omni::core::IObject::acquire() called
//! before being returned.
//!
//! The returned pointer will remain valid for the lifetime of this object.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::INode* getRoot() noexcept;
//! Returns the topology stamp. This stamp is updated each time the topology is invalidated.
//!
//! See omni::graph::exec::unstable::ITopology::invalidate() to invalidate the topology (and thereby update this
//! Stamp).
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::Stamp getStamp() noexcept;
//! Invalidate topology. All edges of the graph will be dropped (lazily), nodes remain valid and can be used to
//! build new topology.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during
//! invalidation.
//!
//! It is not recommended to call this method during graph execution. Rather, defer invalidation until after
//! execution.
//!
//! @thread_safety This method is thread safe.
void invalidate() noexcept;
//! Returns a unique index for a node in this topology.
//!
//! Users should not call this method. Only the constructors of implementations of @ref
//! omni::graph::exec::unstable::INode should call this method.
//!
//! Returns an error if an index could not be acquired.
//!
//! See @ref omni::graph::exec::unstable::ITopology::releaseNodeIndex().
//!
//! @thread_safety This method is not thread safe.
omni::graph::exec::unstable::NodeIndexInTopology acquireNodeIndex();
    //! Release unique index of a node in this topology. Shouldn't be used by anything other than a node's destructor.
//!
//! See @ref omni::graph::exec::unstable::ITopology::acquireNodeIndex().
//!
//! @thread_safety This method is not thread safe.
void releaseNodeIndex(omni::graph::exec::unstable::NodeIndexInTopology index) noexcept;
//! Add a callback to forward invalidation to other entities.
//!
//! At a minimum, the top-level @ref omni::graph::exec::unstable::IGraph will register a invalidation callback with
//! all topologies created within a pass pipeline. This allows tracking invalidation and triggering minimal graph
//! rebuild.
//!
//! In the future, override passes can generate new graphs and still track authoring invalidation by registering to
//! the original graph topologies invalidation.
//!
//! The given @ref omni::graph::exec::unstable::IInvalidationForwarder will be stored and have @ref
//! omni::core::IObject::acquire() called.
//!
//! If @p owner has a current forwarder, it will be replaced with the given forwarder.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during
//! invalidation.
//!
//! See @ref omni::graph::exec::unstable::ITopology::removeInvalidationForwarder().
//!
//! @thread_safety This method is not thread safe.
omni::core::Result addInvalidationForwarder(
omni::graph::exec::unstable::InvalidationForwarderId owner,
omni::core::ObjectParam<omni::graph::exec::unstable::IInvalidationForwarder> callback);
//! Remove invalidation forwarding for a given owner.
//!
//! If the given owner is not known, this method does nothing.
//!
//! See @ref omni::graph::exec::unstable::ITopology::addInvalidationForwarder().
//!
//! @thread_safety This method is not thread safe.
void removeInvalidationForwarder(omni::graph::exec::unstable::InvalidationForwarderId owner) noexcept;
//! Get construction version this topology is synchronized with.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::SyncStamp getConstructionStamp() noexcept;
//! Private method only for IGraphBuilder, used to tag construction version.
//!
//! @thread_safety Calling this method concurrently is not recommended.
void _setConstructionInSync(const omni::graph::exec::unstable::Stamp& toSync) noexcept;
};
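// A minimal usage sketch (not part of this header; the pointer name is illustrative): given an
// ITopology* named topology, the methods above are typically used as follows:
//
//   auto* root = topology->getRoot();           // entry point for traversal
//   uint64_t count = topology->getNodeCount();  // nodes currently alive in the topology
//   topology->invalidate();                     // lazily drops all edges; avoid calling during execution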
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getNodeCount() noexcept
{
return getNodeCount_abi();
}
inline omni::graph::exec::unstable::INode* omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getRoot() noexcept
{
return getRoot_abi();
}
inline omni::graph::exec::unstable::Stamp omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getStamp() noexcept
{
return getStamp_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::invalidate() noexcept
{
invalidate_abi();
}
inline omni::graph::exec::unstable::NodeIndexInTopology omni::core::Generated<
omni::graph::exec::unstable::ITopology_abi>::acquireNodeIndex()
{
omni::graph::exec::unstable::NodeIndexInTopology out;
OMNI_THROW_IF_FAILED(acquireNodeIndex_abi(&out));
return out;
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::releaseNodeIndex(
omni::graph::exec::unstable::NodeIndexInTopology index) noexcept
{
releaseNodeIndex_abi(index);
}
inline omni::core::Result omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::addInvalidationForwarder(
omni::graph::exec::unstable::InvalidationForwarderId owner,
omni::core::ObjectParam<omni::graph::exec::unstable::IInvalidationForwarder> callback)
{
OMNI_THROW_IF_ARG_NULL(callback);
auto return_ = addInvalidationForwarder_abi(owner, callback.get());
return return_;
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::removeInvalidationForwarder(
omni::graph::exec::unstable::InvalidationForwarderId owner) noexcept
{
removeInvalidationForwarder_abi(owner);
}
inline omni::graph::exec::unstable::SyncStamp omni::core::Generated<
omni::graph::exec::unstable::ITopology_abi>::getConstructionStamp() noexcept
{
return getConstructionStamp_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::_setConstructionInSync(
const omni::graph::exec::unstable::Stamp& toSync) noexcept
{
_setConstructionInSync_abi(toSync);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 8,640 | C | 39.378504 | 128 | 0.709838 |
omniverse-code/kit/include/omni/graph/exec/unstable/EnumBitops.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file EnumBitops.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::EnumBitops.
#pragma once
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Enable bitwise operations on enum classes. Templates save on writing boiler plate code to allow this.
template <class T = void>
struct EnumBitops
{
};
#ifndef DOXYGEN_BUILD
template <>
struct EnumBitops<void>
{
struct _allow_bitops
{
static constexpr bool allow_bitops = true;
};
using allow_bitops = _allow_bitops;
template <class T, class R = T>
using t = typename std::enable_if<std::is_enum<T>::value && EnumBitops<T>::allow_bitops, R>::type;
template <class T>
using u = typename std::underlying_type<T>::type;
};
template <class T>
constexpr EnumBitops<>::t<T> operator~(T a)
{
return static_cast<T>(~static_cast<EnumBitops<>::u<T>>(a));
}
template <class T>
constexpr EnumBitops<>::t<T> operator|(T a, T b)
{
return static_cast<T>(static_cast<EnumBitops<>::u<T>>(a) | static_cast<EnumBitops<>::u<T>>(b));
}
template <class T>
constexpr EnumBitops<>::t<T> operator&(T a, T b)
{
return static_cast<T>(static_cast<EnumBitops<>::u<T>>(a) & static_cast<EnumBitops<>::u<T>>(b));
}
template <class T>
constexpr EnumBitops<>::t<T> operator^(T a, T b)
{
return static_cast<T>(static_cast<EnumBitops<>::u<T>>(a) ^ static_cast<EnumBitops<>::u<T>>(b));
}
template <class T>
constexpr EnumBitops<>::t<T, T&> operator|=(T& a, T b)
{
a = a | b;
return a;
}
template <class T>
constexpr EnumBitops<>::t<T, T&> operator&=(T& a, T b)
{
a = a & b;
return a;
}
template <class T>
constexpr EnumBitops<>::t<T, T&> operator^=(T& a, T b)
{
a = a ^ b;
return a;
}
template <class T, typename = EnumBitops<>::t<T>>
constexpr bool to_bool(T a)
{
return static_cast<EnumBitops<>::u<T>>(a) != 0;
}
#endif // DOXYGEN_BUILD
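// A minimal opt-in sketch (not part of this header; MyFlags is an illustrative name): a user-defined
// enum class enables these operators by specializing EnumBitops with allow_bitops. The specialization
// must live in this namespace (omni::graph::exec::unstable), e.g.:
//
//   enum class MyFlags : uint32_t
//   {
//       eNone = 0,
//       eFirst = 1 << 0,
//       eSecond = 1 << 1,
//   };
//
//   template <>
//   struct EnumBitops<MyFlags> : EnumBitops<>::allow_bitops
//   {
//   };
//
//   MyFlags flags = MyFlags::eFirst | MyFlags::eSecond; // now well-formed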
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 2,425 | C | 23.26 | 105 | 0.669278 |
omniverse-code/kit/include/omni/graph/exec/unstable/IApplyOnEachFunction.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IApplyOnEachFunction.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IApplyOnEachFunction.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IApplyOnEachFunction_abi;
class IApplyOnEachFunction;
class ExecutionPath;
//! Interface wrapping a function (possibly with storage) to apply on all instantiations of a given definition.
class IApplyOnEachFunction_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IApplyOnEachFunction")>
{
protected:
//! Invokes the wrapped function.
virtual void invoke_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IApplyOnEachFunction.
using ApplyOnEachFunctionPtr = omni::core::ObjectPtr<IApplyOnEachFunction>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IApplyOnEachFunction.gen.h>
//! @copydoc omni::graph::exec::unstable::IApplyOnEachFunction_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IApplyOnEachFunction
: public omni::core::Generated<omni::graph::exec::unstable::IApplyOnEachFunction_abi>
{
};
#include <omni/graph/exec/unstable/ExecutionPath.h>
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IApplyOnEachFunction.gen.h>
| 2,006 | C | 30.857142 | 114 | 0.759721 |
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionTask.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutionTask.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ExecutionTask.
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/Status.h>
#include <limits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionPath;
class IExecutionContext;
class IExecutionCurrentThread;
class IExecutor;
class INode;
//! A task tag can be used by other entities (such as @ref Executor) to group tasks together.
using ExecutionTaskTag = uint64_t;
//! Represents work item generated by an @ref IExecutor and passed to a scheduler for dispatch.
//!
//! ExecutionTask is a utility class that describes a task to be potentially executed on behalf of a @ref INode in a
//! given @ref IExecutionContext.
//!
//! @rst
//!
//! .. image:: /../docs/ef-execution-path-point-k.svg
//! :align: center
//!
//! @endrst
//!
//! @ref ExecutionTask stores four key pieces of information:
//!
//! - *A pointer to the Node to be executed*. The pointed to @ref INode contains a pointer to either an @ref INodeDef or
//! @ref NodeGraphDef which contains the computation definition. See @ref ExecutionTask::getNode().
//!
//! - *The unique path to the node*. In addition to the @ref INode to be executed, an @ref ExecutionPath to the node's
//! upstream (i.e. containing) node is stored. Combined, these two pieces of information form a unique id for the
//! node.
//!
//! Above, if an @ref ExecutionTask is describing the *k* node pointed to by the yellow arrow, @ref
//! ExecutionTask::getNode() would point to *k* and @ref ExecutionTask::getUpstreamPath() would return */f/p*. Note,
//! the @ref ExecutionTask::getUpstreamPath() *does not* store */f/p/k*, just */f/p*. This is a micro-optimization
//! that allows the same |ExecutionPath| to be reused while visiting nodes within the same
//! @ref INodeGraphDef.
//!
//! - *A pointer to the current execution's* @ref IExecutionContext. Execution always happen in a given context. It's
//! this context, @ref IExecutionContext, that stores the state of the execution. Multiple entities can be executing a
//! given @rstref{execution graph <ef_execution_graph>}, each execution using its own @ref IExecutionContext. In order
//! to understand which of these potentially many executions a task is a part, @ref ExecutionTask stores a reference
//! to the execution's @ref IExecutionContext. This @ref ExecutionTask::getContext() combined with @ref
//! ExecutionTask::getUpstreamPath() and @ref ExecutionTask::getNode() can be used to access the per-execution state
//! for the node (see @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() and @ref OMNI_GRAPH_EXEC_SET_NODE_DATA()).
//!
//! - *A "tag" to identify a task when multiple tasks are associated with a node.* If an @ref INode generates many
//! tasks during execution, @ref ExecutionTask::getTag() can be used to uniquely identify each of the node's tasks.
//! The semantic meaning of @ref ExecutionTask::getTag() is @ref IExecutor dependent and can be used for purposes
//! other than unique identification.
//!
//! This struct is ABI-safe.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
// @note Node definition needs to stay unchanged while there are executions to complete. In the future we should fully
// decouple description of compute from compute symbol library. This would allow mutating the authoring side
// without the need to synchronize with execution.
class ExecutionTask
{
public:
enum : ExecutionTaskTag
{
kEmptyTag = std::numeric_limits<ExecutionTaskTag>::max() //!< Special value to represent an empty tag.
};
static_assert(std::numeric_limits<ExecutionTaskTag>::max() == 0xFFFFFFFFFFFFFFFF,
"unexpected ExecutionTaskTag max value");
//! Constructor for execution task
//!
//! @param context Context in which execution task is created. Task can only access state from this context.
//! @ref omni::core::IObject::acquire() is not called on this context. It is up to the calling
//! code to ensure the context remains valid for the lifetime of the ExecutionTask.
//!
    //! @param node    Node holding the execution definition. omni::core::IObject::acquire() is not called on this
    //!                node. It is up to the calling code to ensure the node remains valid for the lifetime of
    //!                the ExecutionTask.
//!
//! @param upPath Execution path to the graph owning the node. Node can be executed multiple times with different
//! paths when graph definition is shared.
//!
//! @param tag Used to identify dynamically generated work items that node can compute.
ExecutionTask(IExecutionContext* context, INode* node, const ExecutionPath& upPath, ExecutionTaskTag tag = kEmptyTag) noexcept
: m_context(context), m_node(node), m_upstreamPath(&upPath), m_tag(tag)
{
OMNI_GRAPH_EXEC_ASSERT(context);
OMNI_GRAPH_EXEC_ASSERT(node);
static_assert(std::is_standard_layout<ExecutionTask>::value, "ExecutionTask is expected to be abi safe");
static_assert(offsetof(ExecutionTask, m_context) == 0, "unexpected context offset");
static_assert(offsetof(ExecutionTask, m_node) == 8, "unexpected node offset");
static_assert(offsetof(ExecutionTask, m_upstreamPath) == 16, "unexpected upstream path offset");
static_assert(offsetof(ExecutionTask, m_tag) == 24, "unexpected tag offset");
        static_assert(offsetof(ExecutionTask, m_userIndex) == 32, "unexpected userIndex offset");
static_assert(offsetof(ExecutionTask, m_status) == 40, "unexpected status offset");
static_assert(48 == sizeof(ExecutionTask), "ExecutionTask is an unexpected size");
}
//! Return context for this task.
//!
//! The returned @ref IExecutionContext will *not* have @ref omni::core::IObject::acquire() called before being
//! returned.
IExecutionContext* getContext() const noexcept
{
return m_context;
}
//! Return node for this task.
//!
//! The returned @ref INode will *not* have @ref omni::core::IObject::acquire() called before being returned.
INode* getNode() const noexcept
{
return m_node;
}
//! Return execution path to graph owning the node.
const ExecutionPath& getUpstreamPath() const noexcept
{
return *m_upstreamPath;
}
//! Return tag.
ExecutionTaskTag getTag() const noexcept
{
return m_tag;
}
//! Check if this task has a valid tag set. This will mean that a node generates more than one task.
bool hasValidTag() const noexcept
{
return (m_tag != kEmptyTag);
}
//! Return execution status for this task
Status getExecutionStatus() const noexcept
{
return m_status;
}
//! Execute the task. Will be called by the scheduler when task is dispatched for execution.
inline Status execute(omni::core::ObjectParam<IExecutor> executor) noexcept;
//! This index will never be used by the framework, but is a way to pass something into
//! user code via generated task. Mutating this value is allowed as long as it is done
    //! via the only legal way to access the task, i.e. getCurrentTask()
//!
//! Setter for user index
void setUserIndex(uint64_t userIndex) noexcept
{
m_userIndex = userIndex;
}
//! This index will never be used by the framework, but is a way to pass something into
//! user code via generated task. Mutating this value is allowed as long as it is done
    //! via the only legal way to access the task, i.e. getCurrentTask()
//!
//! Getter for user index
uint64_t getUserIndex() const noexcept
{
return m_userIndex;
}
//! Sets the status of the task.
//!
//! This is an internal method and should not be called by users.
void setExecutionStatus(Status status) noexcept
{
m_status = status;
}
private:
//! Context in which this task was created. This context needs to live as long as there are still executions to
//! complete.
IExecutionContext* m_context;
//! Node holding the execution definition.
INode* m_node;
//! Execution path to the graph owning the node.
const ExecutionPath* m_upstreamPath;
//! Used to identify dynamically generated work items that node can compute.
ExecutionTaskTag m_tag;
//! User index help with passing data into user code.
uint64_t m_userIndex{ 0 };
//! Execution status.
Status m_status{ Status::eUnknown };
//! Reserved padding space.
uint32_t m_reserved;
};
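// A minimal usage sketch (not part of this header; the function name is illustrative): executor or
// scheduler code typically inspects a task it was handed roughly as follows:
//
//   void inspectTask(ExecutionTask& task)
//   {
//       INode* node = task.getNode();                        // node owning the definition to compute
//       IExecutionContext* context = task.getContext();      // per-execution state lives here
//       const ExecutionPath& path = task.getUpstreamPath();  // path to the graph owning the node
//       Status status = task.getExecutionStatus();           // Status::eUnknown until the task executes
//       (void)node; (void)context; (void)path; (void)status;
//   }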
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#include <omni/graph/exec/unstable/IExecutionCurrentThread.h>
//! Execute the task. Will be called by the scheduler when task is dispatched for execution.
inline omni::graph::exec::unstable::Status omni::graph::exec::unstable::ExecutionTask::execute(
omni::core::ObjectParam<IExecutor> executor) noexcept
{
if (Status::eUnknown != m_status)
{
return m_status;
}
return getCurrentThread()->execute(*this, executor, &m_status);
}
| 9,905 | C | 39.765432 | 130 | 0.691974 |
omniverse-code/kit/include/omni/graph/exec/unstable/IGraph.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGraph.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraph.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IExecutionContext;
class IGraph_abi;
class IGraph;
class INode;
class INodeGraphDef;
class ITopology;
//! Top-level container for storing the Execution Framework's graph of graphs.
//!
//! @ref omni::graph::exec::unstable::IGraph is the top-level container used to store the graph of graphs. This
//! top-level container is referred to as the <i>execution graph</i>.
//!
//! @ref omni::graph::exec::unstable::IGraph's responsibilities include:
//!
//! - Tracking if the graph is currently being constructed. See @ref omni::graph::exec::unstable::IGraph::inBuild().
//!
//! - Tracking gross changes to the topologies of graphs within the execution graph. This is done with the <i>global
//! topology stamp</i> (see @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp()). Each time a topology
//! is invalidated, the global topology stamp is incremented. Consumers of the execution graph can use this stamp to
//! detect changes in the graph. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! - Owning and providing access to the top level graph definition (see @ref
//! omni::graph::exec::unstable::IGraph::getNodeGraphDef()). The root node of the top-level graph definition is the
//! root of execution graph. @ref omni::graph::exec::unstable::IGraph is the only container, other than @ref
//! omni::graph::exec::unstable::INode, that attaches to definitions.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for more information on how @ref omni::graph::exec::unstable::IGraph
//! fits into the Execution Framework.
//!
//! See @ref omni::graph::exec::unstable::Graph for a concrete implementation of this interface.
class IGraph_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IGraph")>
{
protected:
//! Access the top-level node graph definition.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0;
//! Name set on the graph during construction.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0;
//! Return global topology of the graph. Useful when detecting that graph transformation pipeline needs to run.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} to understand how this stamp is used to detect changes
//! in the graph.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph. It is up to the caller to mutate the stamp in a thread safe manner.
virtual Stamp* getGlobalTopologyStamp_abi() noexcept = 0;
//! Return @c true if a @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
//!
//! @thread_safety This method is thread safe.
virtual bool inBuild_abi() noexcept = 0;
//! Mark that an @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
//!
//! Each builder should call @c _setInBuild(true) followed by @c _setInBuild(false) once building is complete. Since
//! multiple builders can be active at a time, it is safe for this method to be called multiple times.
//!
//! This method should only be called by @ref omni::graph::exec::unstable::IGraphBuilder.
//!
//! @thread_safety This method is thread safe.
virtual void _setInBuild_abi(bool inBuild) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGraph.
using GraphPtr = omni::core::ObjectPtr<IGraph>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraph.gen.h>
//! @copydoc omni::graph::exec::unstable::IGraph_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraph : public omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>
{
public:
//! Access topology of the graph.
//!
//! The returned @ref ITopology does *not* have @ref omni::core::IObject::acquire() called before being returned.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
inline ITopology* getTopology() noexcept;
//! Access root of the graph.
//!
//! The returned @ref INode does *not* have @ref omni::core::IObject::acquire() called before being returned.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
inline INode* getRoot() noexcept;
};
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
inline omni::graph::exec::unstable::ITopology* omni::graph::exec::unstable::IGraph::getTopology() noexcept
{
return getNodeGraphDef()->getTopology();
}
inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::IGraph::getRoot() noexcept
{
return getNodeGraphDef()->getRoot();
}
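// A minimal usage sketch (not part of this header; the function name is illustrative): consumers commonly
// grab the root node and the global topology stamp to detect when the graph of graphs has changed:
//
//   void inspectGraph(omni::graph::exec::unstable::IGraph* graph)
//   {
//       auto* root = graph->getRoot();                  // root node of the execution graph
//       auto* stamp = graph->getGlobalTopologyStamp();  // incremented on each topology invalidation
//       (void)root; (void)stamp;
//   }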
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraph.gen.h>
| 6,646 | C | 41.883871 | 120 | 0.715769 |
omniverse-code/kit/include/omni/graph/exec/unstable/SmallStack.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Memory.h>
#include <omni/core/Assert.h>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
namespace detail
{
//! ABI-safe stack with inline memory to avoid heap allocation.
//!
//! Reserved memory within the stack will be used until it is exceeded, at which point heap memory will be used.
//!
//! It is assumed the items stored are `sizeof(uint64_t)` in size.
template <typename T = uint64_t>
class SmallStack
{
public:
//! Type of the item in the stack.
using ItemType = T;
//! Constructor.
SmallStack() noexcept
{
static_assert(8 == sizeof(ItemType), "unexpected item size");
static_assert(std::is_trivially_destructible<ItemType>::value, "items stored must be trivially destructible");
static_assert(offsetof(SmallStack, m_external.data) == 0, "unexpected external data offset");
static_assert(offsetof(SmallStack, m_external.count) == 8, "unexpected external count offset");
static_assert(offsetof(SmallStack, m_external.maxCount) == 12, "unexpected external maxCount offset");
static_assert(offsetof(SmallStack, m_internal.data) == 0, "unexpected data offset");
static_assert(offsetof(SmallStack, m_internal.count) == 56, "unexpected count offset");
static_assert(offsetof(SmallStack, m_internal.isInternal) == 60, "unexpected internal flag offset ");
m_internal.count = 0;
m_internal.isInternal = 1;
}
//! Constructor with a single item.
SmallStack(ItemType item) noexcept
{
m_internal.count = 0;
m_internal.isInternal = 1;
push(item); // may throw, but wont in this case.
}
//! Copy constructor.
//!
//! May throw.
SmallStack(const SmallStack& other) // may throw
{
m_internal.isInternal = 1;
_copy(other);
}
//! Construct from a range.
//!
    //! @p end must be equal to or greater than @p begin.
//!
//! May throw.
SmallStack(ItemType* begin, ItemType* end) // may throw
{
OMNI_ASSERT(end >= begin);
m_internal.isInternal = 1;
uint32_t count = static_cast<uint32_t>(end - begin);
_copy(begin, count, count);
}
//! Copies the contents of the given stack and pushes the given item.
//!
//! May throw.
SmallStack(const SmallStack& other, ItemType item) // may throw
{
uint32_t otherCount = other.count();
uint32_t count = otherCount + 1;
ItemType* p;
if (count > kMaxInternalDataItemCount)
{
p = _allocate(count);
m_internal.isInternal = 0;
m_external.data = p;
m_external.count = count;
m_external.maxCount = count;
}
else
{
m_internal.isInternal = 1;
p = m_internal.data;
m_internal.count = count;
}
std::memcpy(p, other.begin(), sizeof(ItemType) * otherCount);
p[otherCount] = item;
}
//! Move constructor.
SmallStack(SmallStack&& other) noexcept
{
m_internal.isInternal = 1;
_move(std::move(other));
}
//! Destructor
~SmallStack() noexcept
{
_free();
}
//! Assignment operator.
//!
//! May throw.
SmallStack& operator=(const SmallStack& other) // may throw
{
if (this != &other)
{
_copy(other);
}
return *this;
}
//! Assignment operator.
SmallStack& operator=(SmallStack&& other) noexcept
{
if (this != &other)
{
_move(std::move(other));
}
return *this;
}
//! Compares two stacks, returning either a negative number, positive number, or zero.
//!
//! Works similar to @c std::memcmp.
//!
//! Returns a negative value if this stack less than @p other.
//!
//! Returns a positive value if this stack greater than @p other.
//!
//! Returns zero if the stacks are equal.
//!
//! The returned negative or positive values are not guaranteed to be exactly -1 or 1.
int compare(const SmallStack& other) const noexcept
{
int thisCount = count();
int otherCount = other.count();
if (thisCount == otherCount)
{
return std::memcmp(begin(), other.begin(), sizeof(ItemType) * otherCount);
}
else
{
return (thisCount - otherCount);
}
}
//! Return @c true if the stack is empty.
inline bool empty() const noexcept
{
return (0 == count());
}
//! Returns the top of the stack.
//!
//! Reading the top of an empty stack is undefined behavior.
inline ItemType top() const noexcept
{
if (_isInternal())
{
OMNI_ASSERT(0 != m_internal.count);
return m_internal.data[m_internal.count - 1];
}
else
{
            OMNI_ASSERT(0 != m_external.count);
return m_external.data[m_external.count - 1];
}
}
//! Push the given item to the top of the stack.
//!
//! May throw.
inline void push(ItemType elem) // may throw
{
if (_isInternal())
{
if (m_internal.count == kMaxInternalDataItemCount)
{
// we've ran out of internal space
_allocExternalAndCopyInternal();
m_external.data[m_external.count++] = elem;
}
else
{
m_internal.data[m_internal.count++] = elem;
}
}
else
{
if (m_external.count == m_external.maxCount)
{
_grow();
}
m_external.data[m_external.count++] = elem;
}
}
//! Removes the top of the stack.
//!
//! Popping an empty stack is undefined behavior.
inline void pop() noexcept
{
if (_isInternal())
{
OMNI_ASSERT(m_internal.count > 0);
m_internal.count--;
}
else
{
OMNI_ASSERT(m_external.count > 0);
m_external.count--;
}
}
//! Returns the number of items in the stack.
inline uint32_t count() const noexcept
{
if (_isInternal())
{
return m_internal.count;
}
else
{
return m_external.count;
}
}
//! Returns the number of items in the stack.
inline uint32_t size() const noexcept
{
return count();
}
//! Returns a pointer to the oldest item in the stack.
//!
    //! If the stack is empty, the returned pointer should not be read or written, though it can be compared to
    //! @ref end().
inline const ItemType* begin() const noexcept
{
if (_isInternal())
{
return m_internal.data;
}
else
{
return m_external.data;
}
}
//! Returns a pointer to one past the top of the stack.
//!
    //! If the stack is empty, the returned pointer should not be read or written, though it can be compared to
    //! @ref begin().
inline const ItemType* end() const noexcept
{
if (_isInternal())
{
return m_internal.data + m_internal.count;
}
else
{
return m_external.data + m_external.count;
}
}
//! Returns a pointer to the oldest item in the stack.
//!
    //! Results are undefined if the stack is empty.
inline const ItemType* data() const noexcept
{
return begin();
}
private:
inline bool _isInternal() const noexcept
{
return m_internal.isInternal;
}
inline uint32_t _maxCount() const noexcept
{
if (_isInternal())
{
return kMaxInternalDataItemCount;
}
else
{
return m_external.maxCount;
}
}
inline void _free() noexcept
{
if (!_isInternal())
{
carb::deallocate(m_external.data);
m_internal.count = 0;
m_internal.isInternal = 0;
}
}
// assumes _free() has already been called (when needed)
inline void _copy(const SmallStack& other)
{
_copy(const_cast<ItemType*>(other.begin()), other.count(), other._maxCount());
}
// assumes _free() has already been called (when needed)
inline void _copy(ItemType* data, uint32_t count, uint32_t maxCount)
{
if (_maxCount() < count)
{
// not enough storage for the copy. we'll have to allocate more.
OMNI_ASSERT(maxCount >= count);
_free();
m_external.data = reinterpret_cast<ItemType*>(carb::allocate(sizeof(ItemType) * maxCount));
if (!m_external.data)
{
throw std::bad_alloc();
}
std::memcpy(m_external.data, data, sizeof(ItemType) * count);
m_external.count = count;
m_external.maxCount = maxCount;
m_internal.isInternal = 0;
}
else
{
// data fits in our storage. simply copy it.
if (_isInternal())
{
std::memcpy(m_internal.data, data, sizeof(ItemType) * count);
m_internal.count = count;
}
else
{
std::memcpy(m_external.data, data, sizeof(ItemType) * count);
m_external.count = count;
}
}
}
// assumes _free() has already been called (when needed)
inline void _move(SmallStack&& other) noexcept
{
if (other._isInternal())
{
// since other is using its internal storage, we have to copy the data
_copy(other);
other.m_internal.count = 0;
}
else
{
// other is using external storage
_free();
m_internal.isInternal = 0;
m_external.data = other.m_external.data;
m_external.count = other.m_external.count;
m_external.maxCount = other.m_external.maxCount;
other.m_internal.count = 0;
other.m_internal.isInternal = 1;
}
}
inline ItemType* _allocate(uint32_t maxCount)
{
auto data = reinterpret_cast<ItemType*>(carb::allocate(sizeof(ItemType) * maxCount));
if (!data)
{
throw std::bad_alloc();
}
return data;
}
inline void _allocExternalAndCopyInternal()
{
OMNI_ASSERT(_isInternal());
constexpr uint32_t newMaxCount = kMaxInternalDataItemCount * 2;
ItemType* data = _allocate(newMaxCount);
        std::memcpy(data, m_internal.data, sizeof(ItemType) * kMaxInternalDataItemCount); // only the inline items exist
m_external.data = data;
m_external.count = kMaxInternalDataItemCount;
m_external.maxCount = newMaxCount;
m_internal.isInternal = 0;
}
inline void _grow()
{
OMNI_ASSERT(!_isInternal());
OMNI_ASSERT(m_external.maxCount > 0);
m_external.maxCount *= 2;
ItemType* data = _allocate(m_external.maxCount);
std::memcpy(data, m_external.data, sizeof(ItemType) * m_external.count);
carb::deallocate(m_external.data);
m_external.data = data;
}
constexpr static uint32_t kMaxInternalDataItemCount = 7;
struct ExternalData
{
ItemType* data;
uint32_t count;
uint32_t maxCount;
};
static_assert(sizeof(ExternalData) == 16, "ExternalData is unexpected size");
struct InternalData
{
ItemType data[kMaxInternalDataItemCount];
uint32_t count;
uint32_t isInternal;
};
static_assert(sizeof(InternalData) == 64, "InternalData is unexpected size");
private:
union
{
ExternalData m_external;
InternalData m_internal;
};
};
static_assert(sizeof(SmallStack<uint64_t>) == 64, "SmallStack has unexpected size");
static_assert(std::is_standard_layout<SmallStack<uint64_t>>::value, "SmallStack is not ABI-safe");
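// A minimal usage sketch (not part of this header): SmallStack lives in the detail namespace, keeps up to
// seven items in inline storage, and only allocates from the heap once that capacity is exceeded.
//
//   SmallStack<uint64_t> stack;
//   stack.push(1);
//   stack.push(2);
//   uint64_t topItem = stack.top(); // 2
//   stack.pop();                    // back to a single item
//   bool isEmpty = stack.empty();   // false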
} // namespace detail
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 12,761 | C | 25.980972 | 119 | 0.561633 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutor.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Executes the node in a graph definition.
//!
//! The purpose of an executor is to generate work for the nodes in a graph definition. @ref
//! omni::graph::exec::unstable::IExecutor is a minimal interface that defines enough methods to accomplish just that.
//!
//! However, @ref omni::graph::exec::unstable::IExecutor's minimal nature is not what most users want when customizing
//! execution for their graph definitions. Rather, they want something useful. @ref
//! omni::graph::exec::unstable::Executor is a useful implementation of @ref omni::graph::exec::unstable::IExecutor
//! designed for graph definition authors to extend. See
//! @ref omni::graph::exec::unstable::Executor's documentation to better understand the purpose, duties, and
//! capabilities of an executor.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See @rstref{Creating an Executor <ef_executor_creation>} for a guide on creating a customized executor for your graph
//! definition.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi> : public omni::graph::exec::unstable::IExecutor_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutor")
//! Main execute method. Returning status of the execution.
omni::graph::exec::unstable::Status execute() noexcept;
//! Request for scheduling of additional work after the given task has executed but before it has completed.
//!
//! @param task The current task
omni::graph::exec::unstable::Status continueExecute(omni::graph::exec::unstable::ExecutionTask& task) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>::execute() noexcept
{
return execute_abi();
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>::continueExecute(
omni::graph::exec::unstable::ExecutionTask& task) noexcept
{
return continueExecute_abi(&task);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 3,011 | C | 37.615384 | 128 | 0.736964 |
omniverse-code/kit/include/omni/graph/exec/unstable/Module.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Module.h
//!
//! @brief Helpers for writing modules/plugins based on @ref omni::graph::exec.
#pragma once
#include <omni/graph/exec/unstable/PassRegistry.h>
//! Helper macro to ensure EF features are enabled in the current module/plugin.
//!
//! This macro should be called from either @c carbOnPluginStartup or @c onStarted.
//!
//! If your module/plugin registers EF nodes or passes, you must call this macro.
//!
//! For Kit-based extensions, rather than calling this macro, call OMNI_KIT_EXEC_CORE_ON_MODULE_STARTED(), which will
//! call this macro on your behalf.
#define OMNI_GRAPH_EXEC_ON_MODULE_STARTED(moduleName_) \
try \
{ \
omni::graph::exec::unstable::registerModulePasses(); \
} \
catch (std::exception & e) \
{ \
CARB_LOG_ERROR("failed to register %s's passes: %s", moduleName_, e.what()); \
}
//! Helper macro to ensure EF features are safely disabled when the current module/plugin unloads.
//!
//! This macro should be called from either @c carbOnPluginShutdown or @c onUnload.
//!
//! If your module/plugin registers EF nodes or passes, you must call this macro.
//!
//! For Kit-based extensions, rather than calling this macro, call OMNI_KIT_EXEC_CORE_ON_MODULE_UNLOAD(), which will
//! call this macro on your behalf.
#define OMNI_GRAPH_EXEC_ON_MODULE_UNLOAD() \
do \
{ \
omni::graph::exec::unstable::deregisterModulePasses(); \
} while (0)
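// A minimal usage sketch (assumes a Carbonite plugin; the plugin name and startup hooks shown are
// illustrative, not prescribed by this header):
//
//   CARB_EXPORT void carbOnPluginStartup()
//   {
//       OMNI_GRAPH_EXEC_ON_MODULE_STARTED("omni.example.plugin");
//   }
//
//   CARB_EXPORT void carbOnPluginShutdown()
//   {
//       OMNI_GRAPH_EXEC_ON_MODULE_UNLOAD();
//   }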
| 2,927 | C | 59.999999 | 120 | 0.457123 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionCurrentThread.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Encapsulates the execution state for the current thread allowing callers to determine quantities like the @ref
//! omni::graph::exec::unstable::ExecutionTask currently executing on the thread.
//!
//! Because methods in this interface return thread local data, all methods in this interface are thread safe.
//!
//! This interface is usually accessed as a singleton via one of the following helper methods:
//!
//! - @ref omni::graph::exec::unstable::getCurrentTask()
//!
//! - @ref omni::graph::exec::unstable::getCurrentExecutor()
//!
//! This interface contains methods for graph and task execution. Users should not call these methods directly. See
//! the methods' docs below for the correct way to perform execution.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>
: public omni::graph::exec::unstable::IExecutionCurrentThread_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionCurrentThread")
//! Executes the given @ref omni::graph::exec::unstable::Graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::IExecutionContext::execute().
//!
//! From an ABI point-of-view, the purpose of this method is to handle the special case of the top-level @ref
//! omni::graph::exec::unstable::INodeGraphDef being contained by @ref omni::graph::exec::unstable::IGraph rather
//! than pointed to by a node in another @ref omni::graph::exec::unstable::INodeGraphDef. Meaningful values are set
//! for the threads current task and executor (see @ref omni::graph::exec::unstable::getCurrentTask() and @ref
//! omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::Status executeGraph(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraph> graph,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutionContext> context);
//! Executes and sets the thread's "current" task to the given task.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::ExecutionTask::execute().
//!
//! This method executes the definition of the node pointed to by the given task. Importantly, this method sets
//! thread local data to track the currently running task and executor (see @ref
//! omni::graph::exec::unstable::getCurrentTask() and @ref omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::Status execute(omni::graph::exec::unstable::ExecutionTask& task,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutor> executor,
omni::graph::exec::unstable::Status* taskStatus);
//! Access the task currently executing on the current thread.
//!
//! Useful when needing to access execution context state without having to pass it to every function.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentTask().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::ExecutionTask* getCurrentTask() noexcept;
//! Access the executor currently executing on the current thread.
//!
//! Useful when needing to spawn extra work within the scope of the graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentExecutor().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::IExecutor* getCurrentExecutor() noexcept;
};
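// A minimal usage sketch (not part of this header): user code normally reaches this interface through the
// free helpers documented above rather than calling the methods directly, e.g.:
//
//   auto* task = omni::graph::exec::unstable::getCurrentTask();         // may be nullptr
//   auto* executor = omni::graph::exec::unstable::getCurrentExecutor(); // may be nullptr
//   if (task)
//   {
//       auto* context = task->getContext(); // state for the current execution
//       (void)context; (void)executor;
//   }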
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>::executeGraph(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraph> graph,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutionContext> context)
{
OMNI_THROW_IF_ARG_NULL(graph);
OMNI_THROW_IF_ARG_NULL(context);
auto return_ = executeGraph_abi(graph.get(), context.get());
return return_;
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>::execute(
omni::graph::exec::unstable::ExecutionTask& task,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutor> executor,
omni::graph::exec::unstable::Status* taskStatus)
{
OMNI_THROW_IF_ARG_NULL(taskStatus);
auto return_ = execute_abi(&task, executor.get(), taskStatus);
return return_;
}
inline omni::graph::exec::unstable::ExecutionTask* omni::core::Generated<
omni::graph::exec::unstable::IExecutionCurrentThread_abi>::getCurrentTask() noexcept
{
return getCurrentTask_abi();
}
inline omni::graph::exec::unstable::IExecutor* omni::core::Generated<
omni::graph::exec::unstable::IExecutionCurrentThread_abi>::getCurrentExecutor() noexcept
{
return getCurrentExecutor_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 6,214 | C | 43.078014 | 137 | 0.702124 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPassTypeRegistry.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPassTypeRegistry.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassTypeRegistry.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/ElementAt.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Types.h>
#include <cstring>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IPassFactory;
class IPassTypeRegistry;
class IPassTypeRegistry_abi;
//! ABI-safe struct to hold registered @ref omni::graph::exec::unstable::IPassFactory objects.
struct PassTypeRegistryEntry
{
//! The name of the pass type.
const char* name;
//! Factory interface for creating an instance of the pass.
//!
//! This struct does not acquire this pointer.
//!
//! This pointer is never @c nullptr.
IPassFactory* factory;
    //! Some passes (e.g. populate passes) wish to affect only a subset of the nodes and/or definitions in a graph.
    //! This field is used to specify the name of the nodes/definitions the pass wishes to affect.
//!
//! The meaning of this field is pass type dependent. Many passes ignore this field.
//!
//! This pointer is never @c nullptr.
const ConstName* nameToMatch;
//! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When
//! multiple passes wish to affect an entity, this priority value can be used to resolve the conflict. The meaning
//! of the priority value is pass type specific. Many passes ignore this value.
PassPriority priority;
//! Reserved padding space.
uint32_t reserved;
};
static_assert(std::is_standard_layout<PassTypeRegistryEntry>::value, "PassTypeRegistryEntry is expected to be abi safe");
static_assert(offsetof(PassTypeRegistryEntry, name) == 0, "unexpected name offset");
static_assert(offsetof(PassTypeRegistryEntry, factory) == 8, "unexpected factory offset");
static_assert(offsetof(PassTypeRegistryEntry, nameToMatch) == 16, "unexpected nameToMatch offset");
static_assert(offsetof(PassTypeRegistryEntry, priority) == 24, "unexpected priority offset");
static_assert(32 == sizeof(PassTypeRegistryEntry), "PassTypeRegistryEntry is an unexpected size");
//! @ref omni::graph::exec::unstable::IPassFactory registry for a particular @ref omni::graph::exec::unstable::PassType.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPassTypeRegistry_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IPassTypeRegistry")>
{
protected:
//! Returns the number of registered passes.
virtual uint64_t getPassCount_abi() noexcept = 0;
//! Returns the pass at the given index.
//!
    //! If the index is greater than or equal to the count, an error is returned.
//!
//! The returned @ref omni::graph::exec::unstable::PassTypeRegistryEntry is valid as long as this pass type registry
//! is not mutated (e.g. a pass is added or removed from the registry).
virtual OMNI_ATTR("throw_result") omni::core::Result
getPassAt_abi(uint64_t index,
OMNI_ATTR("out, not_null, throw_if_null") PassTypeRegistryEntry* outEntry) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IPassTypeRegistry.
using PassTypeRegistryPtr = omni::core::ObjectPtr<IPassTypeRegistry>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPassTypeRegistry.gen.h>
//! @copydoc omni::graph::exec::unstable::IPassTypeRegistry_abi
//!
//! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPassTypeRegistry
: public omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>
{
public:
//! Implementation detail to access registry ABI
struct GetPass
{
//! Access element at a given index
static void getAt(IPassTypeRegistry* owner, uint64_t index, PassTypeRegistryEntry* out)
{
owner->getPassAt(index, out);
}
//! Returns element count
static uint64_t getCount(IPassTypeRegistry* owner)
{
return owner->getPassCount();
}
};
    //! Implementation detail that wraps index-based pass access with iterators.
using Passes = detail::ElementAt<IPassTypeRegistry, PassTypeRegistryEntry, GetPass>;
//! Returns an object that allows the list of passes to be iterated over (i.e. using range-based for loops).
//!
//! The returned iterator is valid as long as this pass type registry is not mutated (e.g. a pass is added or
//! removed from the registry).
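    //!
    //! A minimal iteration sketch (assuming a valid @c IPassTypeRegistry* named @c registry; the entry fields
    //! are described by @ref omni::graph::exec::unstable::PassTypeRegistryEntry):
    //!
    //! @code{.cpp}
    //!     for (auto&& entry : registry->getPasses())
    //!     {
    //!         // entry.name, entry.factory, entry.nameToMatch, and entry.priority describe a registered pass
    //!     }
    //! @endcode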
Passes getPasses() noexcept
{
return Passes(this);
}
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IPassFactory.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPassTypeRegistry.gen.h>
| 5,759 | C | 37.4 | 121 | 0.718875 |
omniverse-code/kit/include/omni/graph/exec/unstable/INode.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Represents work in a graph. Nodes point to a shared execution definition that describes the actual work.
//!
//! @ref omni::graph::exec::unstable::INode is the main structural component used to build a graph's topology. @ref
//! omni::graph::exec::unstable::INode stores edges to *parents* (i.e. predecessors) and *children* (i.e. successors).
//! These edges set an ordering between nodes. See @ref omni::graph::exec::unstable::INode::getParents() and @ref
//! omni::graph::exec::unstable::INode::getChildren() respectively.
//!
//! A node represents work to be performed. The description of the work to be performed is stored in a *definition*
//! (i.e. @ref omni::graph::exec::unstable::IDef). Each node wishing to perform work points to a definition (see @ref
//! omni::graph::exec::unstable::INode::getDef()).
//!
//! The definition to which a node points can be one of two types. The first type, @ref
//! omni::graph::exec::unstable::INodeDef, defines work opaquely (i.e. EF is unable to view the work definition and
//! potentially optimize it). The second type, @ref omni::graph::exec::unstable::INodeGraphDef, defines work with a
//! graph. This last representation is the most powerful as it allows for both *extensibility* and *composability* in EF.
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//! :align: center
//!
//! @endrst
//!
//! Above, we see that nodes point to graph definitions, which contain other nodes that point to other graph
//! definitions. This structure of graphs pointing to other graphs is where EF gets its *graph of graphs* name.
//!
//! Not all nodes will point to a definition. For example, the @rstref{root node <ef_root_node>} in each graph
//! definition will not point to a definition.
//!
//! A node is always part of a graph definition, and the graph definition's executor is responsible for orchestrating and
//! generating work for the scheduler.
//!
//! Nodes within a graph definition are assigned a unique index, between zero and the number of nodes in the
//! definition. This index is often used as a lookup into transient arrays used to store state during graph traversals.
//! See @ref omni::graph::exec::unstable::INode::getIndexInTopology().
//!
//! Nodes have a notion of validity. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! @ref omni::graph::exec::unstable::INode does not contain methods for either settings the node's definition or
//! connecting nodes to each other. This functionality is reserved for @ref omni::graph::exec::unstable::IGraphBuilder.
//! See @rstref{Graph Construction <ef_pass_concepts>} for details.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for a guide on how this object relates to other objects in the
//! Execution Framework.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Users may wish to implement this interface to store meaningful authoring-level data in EF. For example, OmniGraph
//! uses an implementation of this interface to store graph instancing information. See @ref
//! omni::graph::exec::unstable::Node for a concrete implementation of this interface suitable for sub-classing.
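//!
//! A minimal sketch of inspecting a node's definition (assuming a valid @c INode* named @c node; as documented
//! below, both accessors may return @c nullptr):
//!
//! @code{.cpp}
//!     if (auto* nodeGraphDef = node->getNodeGraphDef())
//!     {
//!         // the node's work is itself described by a graph definition
//!     }
//!     else if (auto* nodeDef = node->getNodeDef())
//!     {
//!         // the node's work is opaque to EF
//!     }
//!     else
//!     {
//!         // no definition is attached (e.g. the root node of a graph definition)
//!     }
//! @endcode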
template <>
class omni::core::Generated<omni::graph::exec::unstable::INode_abi> : public omni::graph::exec::unstable::INode_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INode")
//! Access topology owning this node
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
omni::graph::exec::unstable::ITopology* getTopology() noexcept;
//! Access node's unique identifier name.
const omni::graph::exec::unstable::ConstName& getName() noexcept;
    //! Access the node's unique index within the owning topology. The index will always be smaller than the topology size.
omni::graph::exec::unstable::NodeIndexInTopology getIndexInTopology() noexcept;
//! Access parents.
omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getParents() noexcept;
//! Access children.
omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getChildren() noexcept;
    //! Return the number of parents that cause cycles within the graph during traversal over this node.
uint32_t getCycleParentCount() noexcept;
    //! Check if the topology/connectivity of this node is valid within the current topology version.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
bool isValidTopology() noexcept;
    //! Make the topology valid for the current topology version. Drop all connections if the topology changed.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
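    //!
    //! A typical usage sketch (assuming a valid @c INode* named @c node):
    //!
    //! @code{.cpp}
    //!     if (!node->isValidTopology())
    //!     {
    //!         node->validateOrResetTopology(); // connections are dropped if the topology changed
    //!     }
    //! @endcode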
void validateOrResetTopology() noexcept;
//! Access base node definition (can be empty).
//!
//! When you wish to determine if the attached definition is either opaque or a graph, consider calling @ref
//! omni::graph::exec::unstable::INode::getNodeDef() or @ref omni::graph::exec::unstable::INode::getNodeGraphDef()
//! rather than this method.
//!
//! The returned @ref omni::graph::exec::unstable::IDef will *not* have @ref omni::core::IObject::acquire() called
//! before being returned.
omni::graph::exec::unstable::IDef* getDef() noexcept;
//! Access node definition (can be empty).
//!
//! If the returned pointer is @c nullptr, either the definition does not implement @ref
//! omni::graph::exec::unstable::INodeDef or there is no definition attached to the node.
//!
//! The returned @ref omni::graph::exec::unstable::INodeDef will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
//!
//! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
//! omni::graph::exec::unstable::INode::getNodeGraphDef().
omni::graph::exec::unstable::INodeDef* getNodeDef() noexcept;
//! Access node's graph definition (can be empty)
//!
//! The returned graph definition pointer is the graph definition which defines the work this node represents. The
//! returned pointer **is not** the graph definition that contains this node.
//!
//! If the returned pointer is @c nullptr, either the definition does not implement @ref
//! omni::graph::exec::unstable::INodeGraphDef or there is no definition attached to the node.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
//!
//! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
//! omni::graph::exec::unstable::INode::getNodeDef().
omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getTopology() noexcept
{
return getTopology_abi();
}
inline const omni::graph::exec::unstable::ConstName& omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getName() noexcept
{
return *(getName_abi());
}
inline omni::graph::exec::unstable::NodeIndexInTopology omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getIndexInTopology() noexcept
{
return getIndexInTopology_abi();
}
inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getParents() noexcept
{
return getParents_abi();
}
inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getChildren() noexcept
{
return getChildren_abi();
}
inline uint32_t omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getCycleParentCount() noexcept
{
return getCycleParentCount_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::INode_abi>::isValidTopology() noexcept
{
return isValidTopology_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::INode_abi>::validateOrResetTopology() noexcept
{
validateOrResetTopology_abi();
}
inline omni::graph::exec::unstable::IDef* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getDef() noexcept
{
return getDef_abi();
}
inline omni::graph::exec::unstable::INodeDef* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getNodeDef() noexcept
{
return getNodeDef_abi();
}
inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getNodeGraphDef() noexcept
{
return getNodeGraphDef_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 9,758 | C | 43.359091 | 134 | 0.713158 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPass.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPass.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IPass;
class IPass_abi;
//! @defgroup groupOmniGraphExecPasses Passes
//!
//! @brief Interfaces, classes, and helpers related to graph transformation passes.
//!
//! Passes are user definable objects that populate, transform, and optimize the execution graph.
//!
//! Passes are registered using one of the @ref groupOmniGraphExecPassRegistration helpers.
//!
//! Passes are executed during graph construction via a @ref omni::graph::exec::unstable::PassPipeline.
//! Base class for graph transformation passes.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPass_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPass")>
{
};
//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IPass.
using PassPtr = omni::core::ObjectPtr<IPass>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPass.gen.h>
//! @copydoc omni::graph::exec::unstable::IPass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPass : public omni::core::Generated<omni::graph::exec::unstable::IPass_abi>
{
};
// additional headers needed for API implementation
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPass.gen.h>
| 2,247 | C | 29.79452 | 117 | 0.757899 |
omniverse-code/kit/include/omni/graph/exec/unstable/Node.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Node.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Node.
#pragma once
#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilderNode.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/ITopology.h>
#include <omni/graph/exec/unstable/SmallVector.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::INode
template <typename... Bases>
class NodeT : public Implements<Bases...>
{
public:
//! Constructor of a node with an empty definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> topology, const char* idName)
{
OMNI_THROW_IF_ARG_NULL(topology);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(topology.get(), idName));
}
//! Constructor of a node with an empty definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner, const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner->getTopology(), idName));
}
//! Constructor of a node with a node graph definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner->getTopology(), nodeGraphDef.get(), idName));
}
    //! Constructor of a node with a node graph definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> owner,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner.get(), nodeGraphDef.get(), idName));
}
    //! Constructor of a node with an opaque node definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner,
omni::core::ObjectParam<INodeDef> nodeDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner->getTopology(), nodeDef.get(), idName));
}
    //! Constructor of a node with an opaque node definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> topology,
omni::core::ObjectParam<INodeDef> nodeDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(topology);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(topology.get(), nodeDef.get(), idName));
}
    //! Constructor of a node with a base definition (can be nullptr, NodeDef, or NodeGraphDef).
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> createForDef(omni::core::ObjectParam<ITopology> topology,
omni::core::ObjectParam<IDef> def,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(topology);
OMNI_THROW_IF_ARG_NULL(idName);
if (!def)
return omni::core::steal(new NodeT(topology.get(), idName));
else if (auto* nodeDef = omni::graph::exec::unstable::cast<INodeDef>(def))
return omni::core::steal(new NodeT(topology.get(), nodeDef, idName));
else if (auto* nodeGraphDef = omni::graph::exec::unstable::cast<INodeGraphDef>(def))
return omni::core::steal(new NodeT(topology.get(), nodeGraphDef, idName));
else
return nullptr;
}
//! Destructor
virtual ~NodeT()
{
// in case we decide to implement move constructor
if (m_indexInTopology != kInvalidNodeIndexInTopology)
{
m_topology->releaseNodeIndex(m_indexInTopology);
if (isValidTopology_abi())
{
m_topology->invalidate();
}
}
}
// disambiguate between INode and IGraphBuilderNode
using INode::getChildren;
using INode::getParents;
using INode::getTopology;
protected:
//! Core implementation of @ref omni::graph::exec::unstable::INode::getTopology_abi
ITopology* getTopology_abi() noexcept override
{
return m_topology;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getName_abi
const ConstName* getName_abi() noexcept override
{
return &m_name;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getIndexInTopology_abi
NodeIndexInTopology getIndexInTopology_abi() noexcept override
{
return m_indexInTopology;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getParents_abi
Span<INode* const> getParents_abi() noexcept override
{
return isValidTopology_abi() ? Span<INode* const>{ m_parents.begin(), m_parents.size() } :
Span<INode* const>{ nullptr, 0 };
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getChildren_abi
Span<INode* const> getChildren_abi() noexcept override
{
return isValidTopology_abi() ? Span<INode* const>{ m_children.begin(), m_children.size() } :
Span<INode* const>{ nullptr, 0 };
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getCycleParentCount_abi
uint32_t getCycleParentCount_abi() noexcept override
{
return isValidTopology_abi() ? m_cycleParentCount : 0;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::isValidTopology_abi
//!
//! @note This method is called in the destructor and therefore must be marked as final
bool isValidTopology_abi() noexcept final override
{
return m_topologyStamp.inSync(m_topology->getStamp());
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::validateOrResetTopology_abi
virtual void validateOrResetTopology_abi() noexcept
{
if (m_topologyStamp.makeSync(m_topology->getStamp()))
{
// topology changed, let's clear the old one
m_parents.clear();
m_children.clear();
m_cycleParentCount = 0;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getDef_abi
IDef* getDef_abi() noexcept override
{
if (m_nodeDef.get())
{
return m_nodeDef.get();
}
else
{
return m_nodeGraphDef.get();
}
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getNodeDef_abi
INodeDef* getNodeDef_abi() noexcept override
{
return m_nodeDef.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getNodeGraphDef_abi
INodeGraphDef* getNodeGraphDef_abi() noexcept override
{
return m_nodeGraphDef.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_addParent_abi
omni::core::Result _addParent_abi(IGraphBuilderNode* parent) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, parent);
m_parents.push_back(asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeParent_abi
omni::core::Result _removeParent_abi(IGraphBuilderNode* parent) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, parent);
_eraseRemove(m_parents, asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_addChild_abi
omni::core::Result _addChild_abi(IGraphBuilderNode* child) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, child);
m_children.push_back(asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeChild_abi
omni::core::Result _removeChild_abi(IGraphBuilderNode* child) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, child);
_eraseRemove(m_children, asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeInvalidParents_abi
void _removeInvalidParents_abi() noexcept override
{
if (isValidTopology_abi())
{
m_parents.erase(
std::remove_if(m_parents.begin(), m_parents.end(), [](INode* n) { return !n->isValidTopology(); }),
m_parents.end());
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeInvalidChildren_abi
void _removeInvalidChildren_abi() noexcept override
{
if (isValidTopology_abi())
{
m_children.erase(
std::remove_if(m_children.begin(), m_children.end(), [](INode* n) { return !n->isValidTopology(); }),
m_children.end());
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_invalidateConnections_abi
//!
//! @warning This only removes connections on a single node. The topology has bi-directional connections
//! for every node with the exception of the connection with the root node.
void _invalidateConnections_abi() noexcept override
{
m_topologyStamp.invalidate();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::setCycleParentCount_abi
void setCycleParentCount_abi(uint32_t count) noexcept override
{
m_cycleParentCount = count;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef_abi
void _setNodeDef_abi(INodeDef* nodeDef) noexcept override
{
m_nodeDef.borrow(nodeDef);
m_nodeGraphDef.release();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef_abi
void _setNodeGraphDef_abi(INodeGraphDef* nodeGraphDef) noexcept override
{
m_nodeGraphDef.borrow(nodeGraphDef);
m_nodeDef.release();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_clearDef_abi
void _clearDef_abi() noexcept override
{
m_nodeDef.release();
m_nodeGraphDef.release();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentAt_abi
omni::core::Result getParentAt_abi(uint64_t index, IGraphBuilderNode** out) noexcept override
{
*out = nullptr;
if (!isValidTopology_abi() || index >= m_parents.size())
{
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultInvalidIndex);
}
else
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(
asGraphBuilderNode, IGraphBuilderNode, m_parents[static_cast<uint32_t>(index)]);
*out = asGraphBuilderNode; // explicitly does not acquire
return omni::core::kResultSuccess;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount_abi
uint64_t getParentCount_abi() noexcept override
{
return isValidTopology_abi() ? m_parents.size() : 0;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildAt_abi
omni::core::Result getChildAt_abi(uint64_t index, IGraphBuilderNode** out) noexcept override
{
*out = nullptr;
if (!isValidTopology_abi() || index >= m_children.size())
{
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultInvalidIndex);
}
else
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(
asGraphBuilderNode, IGraphBuilderNode, m_children[static_cast<uint32_t>(index)]);
*out = asGraphBuilderNode; // explicitly does not acquire
return omni::core::kResultSuccess;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount_abi
uint64_t getChildCount_abi() noexcept override
{
return isValidTopology_abi() ? m_children.size() : 0;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::hasChild_abi
bool hasChild_abi(IGraphBuilderNode* node) noexcept override
{
if (!isValidTopology_abi())
return false;
auto asNode = omni::graph::exec::unstable::cast<INode>(node);
if (!asNode)
{
return false;
}
return std::find(m_children.begin(), m_children.end(), asNode) != m_children.end();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::isRoot_abi
bool isRoot_abi() noexcept override
{
return (m_topology->getRoot() == static_cast<INode*>(this));
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getRoot_abi
omni::core::Result getRoot_abi(IGraphBuilderNode** out) noexcept override
{
*out = nullptr;
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asGraphBuilderNode, IGraphBuilderNode, m_topology->getRoot());
*out = asGraphBuilderNode; // explicitly does not acquire
return omni::core::kResultSuccess;
}
//! Constructor
NodeT(ITopology* topology, const char* idName) // may throw
: m_topology{ topology }, m_indexInTopology{ m_topology->acquireNodeIndex() }, m_name{ idName }
{
}
//! Constructor
NodeT(ITopology* topology,
INodeGraphDef* nodeGraphDef,
const char* idName) // may throw
: m_topology{ topology },
m_indexInTopology{ m_topology->acquireNodeIndex() },
m_nodeGraphDef{ nodeGraphDef, omni::core::kBorrow },
m_name{ idName }
{
}
//! Constructor
NodeT(ITopology* topology,
INodeDef* nodeDef,
const char* idName) // may throw
: m_topology{ topology },
m_indexInTopology{ m_topology->acquireNodeIndex() },
m_nodeDef{ nodeDef, omni::core::kBorrow },
m_name{ idName }
{
}
private:
//! Container for connections.
//!
//! Using @ref omni::graph::exec::unstable::SmallVector with local storage space for two nodes.
    //! The local storage size was hand-picked based on the observation that most graph nodes
    //! have very few downstream nodes.
using NodeArray = SmallVector<INode*, 2>;
    //! Erase-remove idiom helper to remove a node from the container
template <typename T>
void _eraseRemove(T& v, INode* n) // may throw
{
v.erase(std::remove(v.begin(), v.end(), n), v.end());
};
ITopology* m_topology; //!< Topology owning this node
//! Acquired local index
NodeIndexInTopology m_indexInTopology{ kInvalidNodeIndexInTopology };
NodeArray m_parents; //!< Edges to parents
NodeArray m_children; //!< Edges to children
uint32_t m_cycleParentCount{ 0 }; //!< Cycling parents (used by the graph traversal)
SyncStamp m_topologyStamp; //!< Validity check for edges
omni::core::ObjectPtr<INodeDef> m_nodeDef; //!< Node definition
omni::core::ObjectPtr<INodeGraphDef> m_nodeGraphDef; //!< Node graph definition
ConstName m_name; //!< Identifier name
};
//! Core Node implementation for @ref omni::graph::exec::unstable::INode
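//!
//! A minimal creation sketch (assuming a valid @c ITopology* named @c topology; the node name is an arbitrary
//! example and the returned @c ObjectPtr manages the node's lifetime):
//!
//! @code{.cpp}
//!     auto node = omni::graph::exec::unstable::Node::create(topology, "exampleNode");
//! @endcode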
using Node = NodeT<INode, IGraphBuilderNode>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 17,448 | C | 34.756147 | 117 | 0.621848 |