file_path
stringlengths
21
207
content
stringlengths
5
1.02M
size
int64
5
1.02M
lang
stringclasses
9 values
avg_line_length
float64
1.33
100
max_line_length
int64
4
993
alphanum_fraction
float64
0.27
0.93
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDefDebug.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Interface containing debugging methods for @ref omni::graph::exec::unstable::INodeGraphDef. //! //! Implementation of this interface is optional. template <> class omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi> : public omni::graph::exec::unstable::INodeGraphDefDebug_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeGraphDefDebug") //! Returns the current execution count. A value of 0 means the graph is not executing. uint64_t getExecutionCount() noexcept; //! Increments the execution count. void incrementExecutionCount() noexcept; //! Decrements the execution count. It is undefined behavior for call decrement more than increment. 
void decrementExecutionCount() noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline uint64_t omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::getExecutionCount() noexcept { return getExecutionCount_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::incrementExecutionCount() noexcept { incrementExecutionCount_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::decrementExecutionCount() noexcept { decrementExecutionCount_abi(); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
2,190
C
30.3
122
0.750228
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutorFactory.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file ExecutorFactory.h //! //! @brief Declares @ref omni::graph::exec::unstable::ExecutorFactory #pragma once #include <omni/graph/exec/unstable/IExecutor.h> #include <functional> namespace omni { namespace graph { namespace exec { namespace unstable { class ExecutionTask; class ITopology; //! Factory owned by node graph definition used to instantiate executor to generate the work //! //! May throw. using ExecutorFactory = std::function<omni::core::ObjectPtr<IExecutor>(omni::core::ObjectParam<ITopology>, const ExecutionTask&)>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
1,074
C
25.219512
110
0.76257
omniverse-code/kit/include/omni/graph/exec/unstable/INode.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file INode.h //! //! @brief Defines @ref omni::graph::exec::unstable::INode. #pragma once #include <omni/graph/exec/unstable/ConstName.h> #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/Span.h> #include <omni/graph/exec/unstable/Types.h> namespace omni { namespace graph { namespace exec { namespace unstable { class IDef; class INode_abi; class INode; class INodeDef; class INodeGraphDef; class ITopology; //! Represents work in a graph. Nodes point to a shared execution definition to state the actual work. //! //! @ref omni::graph::exec::unstable::INode is the main structural component used to build a graph's topology. @ref //! omni::graph::exec::unstable::INode stores edges to *parents* (i.e. predecessors) and *children* (i.e. successors). //! These edges set an ordering between nodes. See @ref omni::graph::exec::unstable::INode::getParents() and @ref //! omni::graph::exec::unstable::INode::getChildren() respectively. //! //! A node represents work to be performed. The description of the work to be performed is stored in a *definition* //! (i.e. @ref omni::graph::exec::unstable::IDef). Each node wishing to perform work points to a definition (see @ref //! omni::graph::exec::unstable::INode::getDef()). //! //! The definition to which a node points can be one of two types. The first type, @ref //! omni::graph::exec::unstable::INodeDef, defines work opaquely (i.e. EF is unable to view the work definition and //! potentially optimize it). 
The second type, @ref omni::graph::exec::unstable::INodeGraphDef, defines work with a //! graph. This last representation is the most power as it allows for both *extensibilty* and *composibility* in EF. //! //! @rst //! //! .. image:: /../docs/ef-simple-w-defs.svg //! :align: center //! //! @endrst //! //! Above, we see that nodes point to graph definitions, which contain other nodes that point to other graph //! definitions. This structure of graphs pointing to other graphs is where EF gets its *graph of graphs* name. //! //! Not all nodes will point to a definition. For example, the @rstref{root node <ef_root_node>} in each graph //! definition will not point to a definition. //! //! A node is always part of a graph definition and the graph definition's executor is responsible for orchestrating and //! generating work to the scheduler. //! //! Node's within a graph definition are assigned a unique index, between zero and the number of nodes in the //! definition. This index is often used as a lookup into transient arrays used to store state during graph traversals. //! See @ref omni::graph::exec::unstable::INode::getIndexInTopology(). //! //! Nodes have a notion of validity. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details. //! //! @ref omni::graph::exec::unstable::INode does not contain methods for either settings the node's definition or //! connecting nodes to each other. This functionality is reserved for @ref omni::graph::exec::unstable::IGraphBuilder. //! See @rstref{Graph Construction <ef_pass_concepts>} for details. //! //! See @rstref{Graph Concepts <ef_graph_concepts>} for a guide on how this object relates to other objects in the //! Execution Framework. //! //! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during //! execution. //! //! Users may wish to implement this interface to store meaningful authoring level data in EF. For example, OmniGraph //! 
uses an implementation of this node to store graph instancing information. See @ref //! omni::graph::exec::unstable::Node for a concrete implementation of this interface suitable for sub-classing. class INode_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.INode")> { public: using NodeArray = Span<INode* const>; //!< Stores the list of parents and children. protected: //! Access topology owning this node //! //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire() //! called before being returned. virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0; //! Access node's unique identifier name. virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0; //! Access nodes unique index withing owning topology. Index will be always smaller than topology size. virtual NodeIndexInTopology getIndexInTopology_abi() noexcept = 0; //! Access parents. virtual Span<INode* const> getParents_abi() noexcept = 0; //! Access children. virtual Span<INode* const> getChildren_abi() noexcept = 0; //! Return number of parents that cause cycles within the graph during traversal over this node. virtual uint32_t getCycleParentCount_abi() noexcept = 0; //! Check if topology/connectivity of nodes is valid within current topology version. //! //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation. virtual bool isValidTopology_abi() noexcept = 0; //! Make topology valid for current topology version. Drop all the connections if topology changed. //! //! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation. virtual void validateOrResetTopology_abi() noexcept = 0; //! Access base node definition (can be empty). //! //! When you wish to determine if the attached definition is either opaque or a graph, consider calling @ref //! 
omni::graph::exec::unstable::INode::getNodeDef() or @ref omni::graph::exec::unstable::INode::getNodeGraphDef() //! rather than this method. //! //! The returned @ref omni::graph::exec::unstable::IDef will *not* have @ref omni::core::IObject::acquire() called //! before being returned. virtual OMNI_ATTR("no_acquire") IDef* getDef_abi() noexcept = 0; //! Access node definition (can be empty). //! //! If the returned pointer is @c nullptr, either the definition does not implement @ref //! omni::graph::exec::unstable::INodeDef or there is no definition attached to the node. //! //! The returned @ref omni::graph::exec::unstable::INodeDef will *not* have @ref omni::core::IObject::acquire() //! called before being returned. //! //! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref //! omni::graph::exec::unstable::INode::getNodeGraphDef(). virtual OMNI_ATTR("no_acquire") INodeDef* getNodeDef_abi() noexcept = 0; //! Access node's graph definition (can be empty) //! //! The returned graph definition pointer is the graph definition which defines the work this node represents. The //! returned pointer **is not** the graph definition that contains this node. //! //! If the returned pointer is @c nullptr, either the definition does not implement @ref //! omni::graph::exec::unstable::INodeGraphDef or there is no definition attached to the node. //! //! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have @ref omni::core::IObject::acquire() //! called before being returned. //! //! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref //! omni::graph::exec::unstable::INode::getNodeDef(). virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0; }; //! Smart pointer managing an instance of @ref INode. 
using NodePtr = omni::core::ObjectPtr<INode>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/INode.gen.h> //! @copydoc omni::graph::exec::unstable::INode_abi //! //! @ingroup groupOmniGraphExecInterfaces class omni::graph::exec::unstable::INode : public omni::core::Generated<omni::graph::exec::unstable::INode_abi> { public: //! Returns the root of the graph definition of which this node is a part. inline INode* getRoot() noexcept; //! Check if this node is the root of the graph/topology. inline bool isRoot() noexcept; //! Check if a given node is a parent of this node. inline bool hasParent(omni::core::ObjectParam<INode> parent) noexcept; //! Check if a given node is a child of this node. inline bool hasChild(omni::core::ObjectParam<INode> child) noexcept; }; #include <omni/graph/exec/unstable/INodeDef.h> #include <omni/graph/exec/unstable/INodeGraphDef.h> #include <omni/graph/exec/unstable/ITopology.h> inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::INode::getRoot() noexcept { return getTopology()->getRoot(); } inline bool omni::graph::exec::unstable::INode::isRoot() noexcept { return (getRoot() == this); } inline bool omni::graph::exec::unstable::INode::hasParent(omni::core::ObjectParam<INode> parent) noexcept { auto parents = getParents(); return std::find(parents.begin(), parents.end(), parent.get()) != parents.end(); } inline bool omni::graph::exec::unstable::INode::hasChild(omni::core::ObjectParam<INode> child) noexcept { auto children = getChildren(); return std::find(children.begin(), children.end(), child.get()) != children.end(); } #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/INode.gen.h>
9,771
C
43.217194
120
0.712721
omniverse-code/kit/include/omni/graph/exec/unstable/GraphBuilderContext.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file GraphBuilderContext.h //! //! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderContext. #pragma once #include <omni/graph/exec/unstable/Assert.h> #include <omni/graph/exec/unstable/IGraph.h> #include <omni/graph/exec/unstable/IGraphBuilder.h> #include <omni/graph/exec/unstable/IGraphBuilderContext.h> #include <omni/graph/exec/unstable/IPassPipeline.h> namespace omni { namespace graph { namespace exec { namespace unstable { //! @copydoc omni::graph::exec::unstable::IGraphBuilderContext template <typename... Bases> class GraphBuilderContextT : public Implements<Bases...> { public: //! Construct graph builder context for a given @ref IGraph with a given pass transformation pipeline. //! //! May throw. static omni::core::ObjectPtr<GraphBuilderContextT> create(omni::core::ObjectParam<IGraph> graph, omni::core::ObjectParam<IPassPipeline> passPipeline) { OMNI_THROW_IF_ARG_NULL(graph); OMNI_THROW_IF_ARG_NULL(passPipeline); return omni::core::steal(new GraphBuilderContextT(graph.get(), passPipeline.get())); } protected: //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::getConstructionStamp_abi Stamp getConstructionStamp_abi() noexcept override { return m_constructionStamp; } //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::getGraph_abi IGraph* getGraph_abi() noexcept override { return m_owner; } //! 
Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::report_abi void report_abi(const char* diagnose) noexcept override { // Default implementation doesn't report anything } //! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::runTransformations_abi void runTransformations_abi(INodeGraphDef* nodeGraphDef) noexcept override { m_pipeline->execute(this, nodeGraphDef); m_constructionStamp.next(); } //! Constructor GraphBuilderContextT(IGraph* graph, IPassPipeline* pipeline) : m_owner{ graph }, m_pipeline{ pipeline, omni::core::kBorrow } { } private: IGraph* m_owner; //!< Owner of all graphs this context touches PassPipelinePtr m_pipeline; //!< Graph transformations pipeline used in this context Stamp m_constructionStamp; //!< Construction version incremented after pipeline run. }; //! Core GraphBuilderContext implementation for @ref omni::graph::exec::unstable::IGraphBuilderContext using GraphBuilderContext = GraphBuilderContextT<IGraphBuilderContext>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
3,207
C
34.252747
114
0.718117
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionContext.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file ExecutionContext.h //! //! @brief Defines @ref omni::graph::exec::unstable::ExecutionContext. #pragma once #include <carb/thread/RecursiveSharedMutex.h> #include <carb/thread/SharedMutex.h> #include <carb/thread/Spinlock.h> #include <omni/graph/exec/unstable/Assert.h> #include <omni/graph/exec/unstable/ExecutionPath.h> #include <omni/graph/exec/unstable/Executor.h> #include <omni/graph/exec/unstable/IExecutionContext.h> #include <omni/graph/exec/unstable/IGraph.h> #include <omni/graph/exec/unstable/INodeGraphDefDebug.h> #include <omni/graph/exec/unstable/SmallVector.h> #include <omni/graph/exec/unstable/Traversal.h> #include <thread> #include <unordered_map> namespace omni { namespace graph { namespace exec { namespace unstable { //! Implementation details for omni::graph::exec. Items in this namespace should not be relied on outside of the API. namespace detail { //! Utility class for discovering all execution paths for a given definition //! //! Searches are cached until topology of execution graph changes. //! Invalidation of the cache happens lazy upon request. //! //! This class is thread-safe and can be utilized recurrently. class ExecutionPathCache { public: //! Default constructor is removed ExecutionPathCache() = delete; //! Constructor explicit ExecutionPathCache(IGraph& graph) noexcept : m_graph(graph) { } //! Call given function for every execution path that points to given node or node graph definition //! //! 
Function should have the signature of `void(const ExecutionPath&)` template <typename Key> void applyOnEach(const Key& key, IApplyOnEachFunction& applyFn) { if (m_graph.inBuild()) { // traversing the entire graph while building it is isn't allowed since multiple threads may be building it OMNI_GRAPH_EXEC_ASSERT(!m_graph.inBuild()); return; } if (!m_graph.getTopology()->isValid()) { return; } auto discoverAndApplyOnNodesWithDefinitionFn = [this, &key, &applyFn]( const ExecutionPath& upstreamPath, INodeGraphDef& graph, Paths& collectedPaths, auto recursionFn) -> void { traversal_dfs<VisitFirst>( graph.getRoot(), [this, &upstreamPath, &key, &recursionFn, &applyFn, &collectedPaths](auto info, INode* prev, INode* curr) { auto currNodeGraph = curr->getNodeGraphDef(); if (currNodeGraph) { ExecutionPath newUpstreamPath(upstreamPath, curr); recursionFn(newUpstreamPath, *currNodeGraph, collectedPaths, recursionFn); } auto def = curr->getDef(); if (def && _isMatch(key, def)) { collectedPaths.emplace_back(upstreamPath, curr); applyFn.invoke(collectedPaths.back()); } info.continueVisit(curr); }); }; // check if the this cache is in-sync with the current topology. since we can run this method in parallel, we // need a read lock to m_mutex to safely read m_topologyStamp std::shared_lock<MutexType> readLock(m_mutex); auto topologyStamp = *m_graph.getGlobalTopologyStamp(); if (!m_topologyStamp.inSync(topologyStamp)) { // cache is out-of-sync. upgrade to a write lock. readLock.unlock(); { // here we once again check to see if the cache is in-sync since another thread may have beat this // thread to the write lock and brought the cache into sync. std::lock_guard<MutexType> writeLock(m_mutex); if (m_topologyStamp.makeSync(topologyStamp)) { // we're the thread that got to the write lock first. its our job to clear the cache. 
m_defCache.clear(); m_nameCache.clear(); } } // grab the read lock again so we can safely read the cache readLock.lock(); } auto& cache = _getCache(key); auto findIt = cache.find(key); if (findIt != cache.end()) { // We've seen this name before. Make a copy of the paths so we can release the readLock. This is // required because an invocation can result in re-entering and taking the writeLock. auto pathsCopy = findIt->second; readLock.unlock(); for (ExecutionPath& path : pathsCopy) { applyFn.invoke(path); } } else { // Release readLock because apply below can result in re-entry of this function readLock.unlock(); // either the key wasn't found or we're building the graph Paths paths; discoverAndApplyOnNodesWithDefinitionFn( ExecutionPath::getEmpty(), *m_graph.getNodeGraphDef(), paths, discoverAndApplyOnNodesWithDefinitionFn); // Insert only once we collected all the paths. Some other thread may be looking for this definition at // the same time. std::lock_guard<MutexType> writeLock(m_mutex); cache.emplace(key, std::move(paths)); } } private: bool _isMatch(const ConstName& desired, IDef* candidate) { return (desired == candidate->getName()); } bool _isMatch(IDef* desired, IDef* candidate) { return (desired == candidate); } auto& _getCache(const ConstName&) { return m_nameCache; } auto& _getCache(IDef*) { return m_defCache; } using Paths = SmallVector<ExecutionPath, 2>; using DefCache = std::unordered_map<IDef*, Paths>; using NameCache = std::unordered_map<ConstName, Paths>; using MutexType = carb::thread::recursive_shared_mutex; IGraph& m_graph; //!< Execution graph to search for execution paths DefCache m_defCache; //!< Storage for already discovered paths (keyed on def ptr) NameCache m_nameCache; //!< Storage for already discovered paths (keyed on def name) MutexType m_mutex; //!< Mutex to allow concurrent utilization of cache and serialized insertion SyncStamp m_topologyStamp; //!< Topology of execution graph this cache is valid for }; } // namespace detail //! 
@copydoc omni::graph::exec::unstable::IExecutionContext template <typename StorageType, typename ParentInterface = IExecutionContext> class ExecutionContext : public Implements<ParentInterface> { protected: //! Helper RAII object controlling in execution flag. class ScopedInExecute { public: //! Constructor ScopedInExecute(ExecutionContext& context) : m_context(context) { std::lock_guard<carb::thread::Spinlock> lock(m_context.m_threadIdSpinlock); ++m_context.m_contextThreadIds[std::this_thread::get_id()]; } //! Destructor ~ScopedInExecute() { std::lock_guard<carb::thread::Spinlock> lock(m_context.m_threadIdSpinlock); --m_context.m_contextThreadIds[std::this_thread::get_id()]; if (m_context.m_contextThreadIds[std::this_thread::get_id()] == 0) { m_context.m_contextThreadIds.erase(std::this_thread::get_id()); } } private: ExecutionContext& m_context; //!< Context in execution }; //! @copydoc omni::graph::exec::unstable::IExecutionContext::getExecutionStamp_abi Stamp getExecutionStamp_abi() noexcept override { return m_executionStamp; } //! @copydoc omni::graph::exec::unstable::IExecutionContext::inExecute_abi bool inExecute_abi() noexcept override { std::lock_guard<carb::thread::Spinlock> lock(m_threadIdSpinlock); return !m_contextThreadIds.empty(); } //! @copydoc omni::graph::exec::unstable::IExecutionContext::isExecutingThread_abi bool isExecutingThread_abi() noexcept override { std::lock_guard<carb::thread::Spinlock> lock(m_threadIdSpinlock); return m_contextThreadIds.find(std::this_thread::get_id()) != m_contextThreadIds.end(); } //! @copydoc omni::graph::exec::unstable::IExecutionContext::execute_abi Status execute_abi() noexcept override { if (!m_initStamp.inSync(m_graph->getTopology()->getStamp())) { this->initialize(); } m_executionStamp = _getNextGlobalExecutionStamp(); ScopedInExecute scopedInExecute(*this); ScopedExecutionDebug scopedDebug{ m_graph->getNodeGraphDef() }; return getCurrentThread()->executeGraph(m_graph, this); } //! 
@copydoc omni::graph::exec::unstable::IExecutionContext::executeNode_abi Status executeNode_abi(const ExecutionPath* path, INode* node) noexcept override { if (!m_initStamp.inSync(m_graph->getTopology()->getStamp())) { this->initialize(); } m_executionStamp = _getNextGlobalExecutionStamp(); ScopedInExecute scopedInExecute(*this); ScopedExecutionDebug scopedDebug{ m_graph->getNodeGraphDef() }; auto def = node->getDef(); if (def) { ExecutionTask newTask{ this, node, *path }; auto tmpExecutor = ExecutorFallback::create(node->getTopology(), newTask); return newTask.execute(tmpExecutor); } else { return Status::eFailure; } } //! @copydoc omni::graph::exec::unstable::IExecutionContext::initialize_abi omni::core::Result initialize_abi() noexcept override { try { if (!m_initStamp.makeSync(m_graph->getTopology()->getStamp())) { return omni::core::kResultSuccess; } auto traversalFn = [this](INodeGraphDef* nodeGraphDef, const ExecutionPath& path, auto& recursionFn) -> void { ExecutionTask info(this, nodeGraphDef->getRoot(), path); nodeGraphDef->initializeState(info); // may throw traversal_dfs<VisitFirst>(nodeGraphDef->getRoot(), [&path, &recursionFn, nodeGraphDef](auto info, INode* prev, INode* curr) { auto currNodeGraphDef = curr->getNodeGraphDef(); if (currNodeGraphDef) { ExecutionPath newPath{ path, curr }; // may throw recursionFn(currNodeGraphDef, newPath, recursionFn); } info.continueVisit(curr); }); }; ExecutionPath path; traversalFn(m_graph->getNodeGraphDef(), path, traversalFn); // may throw return omni::core::kResultSuccess; } OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() } //! @copydoc omni::graph::exec::unstable::IExecutionContext::getStateInfo_abi virtual omni::core::Result getStateInfo_abi(const ExecutionPath* path, INode* node, IExecutionStateInfo** out) noexcept override { try { *out = m_storage.getStateInfo(*path, node); // may throw return omni::core::kResultSuccess; } OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() } //! 
@copydoc omni::graph::exec::unstable::IExecutionContext::getNodeData_abi virtual omni::core::Result getNodeData_abi(const ExecutionPath* path, INode* node, NodeDataKey key, omni::core::TypeId* outTypeId, void** outPtr, uint64_t* outItemSize, uint64_t* outBufferSize) noexcept override { try { m_storage.getNodeData(*path, node, key, outTypeId, outPtr, outItemSize, outBufferSize); // may throw return omni::core::kResultSuccess; } OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() } //! @copydoc omni::graph::exec::unstable::IExecutionContext::setNodeData_abi virtual omni::core::Result setNodeData_abi(const ExecutionPath* path, INode* node, NodeDataKey key, omni::core::TypeId typeId, void* data, uint64_t dataByteCount, uint64_t dataItemCount, NodeDataDeleterFn* deleter) noexcept override { try { m_storage.setNodeData(*path, node, key, typeId, data, dataByteCount, dataItemCount, deleter); // may throw return omni::core::kResultSuccess; } OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() } //! @copydoc omni::graph::exec::unstable::IExecutionContext::applyOnEachDef_abi void applyOnEachDef_abi(IDef* def, IApplyOnEachFunction* callback) noexcept override { m_pathCache.applyOnEach(def, *callback); } //! @copydoc omni::graph::exec::unstable::IExecutionContext::applyOnEachDefWithName_abi void applyOnEachDefWithName_abi(const ConstName* name, IApplyOnEachFunction* callback) noexcept override { m_pathCache.applyOnEach(*name, *callback); } //! Constructor ExecutionContext(IGraph* graph) noexcept : m_graph(graph), m_executionStamp(_getNextGlobalExecutionStamp()), m_pathCache(*graph) { } StorageType m_storage; //!< Data store. private: static Stamp _getNextGlobalExecutionStamp() noexcept { // since this is private, and will only be accessed indirectly via virtual methods, declaring this inline static // should be ok static Stamp gExecutionStamp; gExecutionStamp.next(); return gExecutionStamp; } IGraph* m_graph{ nullptr }; //!< Graph associated with this context. 
Stamp m_executionStamp; //!< Execution version incremented with each execution. SyncStamp m_initStamp; //!< State initialization version. Synchronized with graph topology. detail::ExecutionPathCache m_pathCache; //!< Cache of execution paths for a given definition. Populated lazily and //!< thread-safe. std::unordered_map<std::thread::id, size_t> m_contextThreadIds; //!< Unordered map of thread ids that kickstarted //!< context execution, along with a counter that //!< tracks the number of times that //!< nested/recursive execution has been triggered //!< by those context-starting threads. carb::thread::Spinlock m_threadIdSpinlock; //!< Mutex to protect m_contextThreadIds from concurrent write //!< operations. }; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
16,459
C
38.284009
121
0.575734
omniverse-code/kit/include/omni/graph/exec/unstable/NodePartition.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file NodePartition.h //! //! @brief Defines omni::graph::exec::unstable::NodePartition. #pragma once #include <omni/graph/exec/unstable/Span.h> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations class INode; //! Type definition used to pass node partitions in the ABI. using NodePartition = omni::graph::exec::unstable::Span<INode* const>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
921
C
24.61111
77
0.758958
omniverse-code/kit/include/omni/graph/exec/unstable/IGlobalPass.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IGlobalPass.h //! //! @brief Defines @ref omni::graph::exec::unstable::IGlobalPass. #pragma once #include <omni/graph/exec/unstable/IPass.h> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class IGraphBuilder; class IGlobalPass; class IGlobalPass_abi; //! Base class for global passes. //! //! The purpose of a global pass is to perform global transformations on the graph. //! //! This transformation category should be considered as a last resort given its global impact on the topology which //! prevents threading at the pass pipeline level. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. class IGlobalPass_abi : public omni::core::Inherits<IPass, OMNI_TYPE_ID("omni.graph.exec.unstable.IGlobalPass")> { protected: //! Call from pass pipeline to apply global graph transformations. virtual OMNI_ATTR("throw_result") omni::core::Result run_abi(IGraphBuilder* builder) noexcept = 0; }; //! Smart pointer managing an instance of @ref IGlobalPass. using GlobalPassPtr = omni::core::ObjectPtr<IGlobalPass>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni // generated API declaration #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IGlobalPass.gen.h> //! @copydoc omni::graph::exec::unstable::IGlobalPass_abi //! //! 
@ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IGlobalPass : public omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi> { }; // additional headers needed for API implementation #include <omni/graph/exec/unstable/IGraphBuilder.h> // generated API implementation #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IGlobalPass.gen.h>
2,291
C
30.833333
116
0.766478
omniverse-code/kit/include/omni/graph/exec/unstable/CompactUniqueIndex.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file CompactUniqueIndex.h //! //! @brief Defines @ref omni::graph::exec::unstable::CompactUniqueIndex. #pragma once #include <omni/graph/exec/unstable/Assert.h> #include <omni/graph/exec/unstable/Types.h> #include <vector> namespace omni { namespace graph { namespace exec { namespace unstable { //! Registry of unique indexes with recycling of released indexes. //! //! Call @ref acquireUniqueIndex() to retrieve a unique index. Indexes are "compact", meaning abandoned indices will be //! reused. This means that if @ref releaseUniqueIndex() is called with a value of 6, the next call to @ref //! acquireUniqueIndex() will return 6. //! //! This class is useful for assigning a stable unique index to a set of dynamic items. //! //! Methods are not thread safe unless otherwise stated. class CompactUniqueIndex { public: //! Invalid index is used when no free indexes are available, and as well //! as a value for reserved elements of the allocation array (an implementation detail) enum : std::size_t { kInvalidIndex = kInvalidNodeIndexInTopology }; //! Constructor CompactUniqueIndex() noexcept = default; //! Destructor ~CompactUniqueIndex() noexcept = default; //! Returns a unique index. //! //! If @ref releaseUniqueIndex() was previously called, the value passed to it will be returned (i.e. the index will //! be recycled). Otherwise, a new index is allocated that is one greater than the current max index. //! //! May throw. inline std::size_t acquireUniqueIndex(); //! Marks an index as no longer used. //! //! 
A subsequent call to @ref acquireUniqueIndex() will prefer reusing the index given to this method. //! //! If @p indexToFree was not previously returned by @ref acquireUniqueIndex, undefined behavior will result. inline void releaseUniqueIndex(std::size_t indexToFree); //! Returns the size of the registry. //! //! The maximum number of indices is returned, not the current number of "active" indices. Said differently, if //! @ref acquireUniqueIndex() is called followed by @ref releaseUniqueIndex(), @ref size() would return 1 not 0. std::size_t size() const { return m_allocatedIndexes.size(); } private: //! Index registry. Holds acquired and released indexes. std::vector<std::size_t> m_allocatedIndexes; //! All released indexes will form a list and m_lastFree points to the last released / first item of the list. std::size_t m_lastFree{ kInvalidIndex }; }; inline std::size_t CompactUniqueIndex::acquireUniqueIndex() { // no free index to recycle, allocate a new one if (m_lastFree == kInvalidIndex) { m_allocatedIndexes.emplace_back(kInvalidIndex); OMNI_GRAPH_EXEC_ASSERT(m_allocatedIndexes.size() > 0); return m_allocatedIndexes.size() - 1; } // recycle existing index else { OMNI_GRAPH_EXEC_ASSERT(m_lastFree < m_allocatedIndexes.size()); std::size_t recycledIndex = m_lastFree; m_lastFree = m_allocatedIndexes[recycledIndex]; m_allocatedIndexes[recycledIndex] = kInvalidIndex; return recycledIndex; } } inline void CompactUniqueIndex::releaseUniqueIndex(std::size_t indexToFree) { OMNI_GRAPH_EXEC_ASSERT(indexToFree < m_allocatedIndexes.size()); OMNI_GRAPH_EXEC_ASSERT(m_allocatedIndexes[indexToFree] == kInvalidIndex); if (indexToFree < m_allocatedIndexes.size() && m_allocatedIndexes[indexToFree] == kInvalidIndex) { if (m_lastFree == kInvalidIndex) m_lastFree = indexToFree; else { m_allocatedIndexes[indexToFree] = m_lastFree; m_lastFree = indexToFree; } } } } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
4,278
C
32.960317
120
0.695185
omniverse-code/kit/include/omni/graph/exec/unstable/PassRegistry.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file PassRegistry.h //! //! @brief Defines @ref omni::graph::exec::unstable::IPassRegistry. #pragma once #include <omni/graph/exec/unstable/Assert.h> #include <omni/graph/exec/unstable/IGlobalPass.h> #include <omni/graph/exec/unstable/IPartitionPass.h> #include <omni/graph/exec/unstable/IPassFactory.h> #include <omni/graph/exec/unstable/IPassRegistry.h> #include <omni/graph/exec/unstable/IPopulatePass.h> #include <omni/graph/exec/unstable/Types.h> #include <memory> #include <string> #include <vector> namespace omni { namespace graph { namespace exec { namespace unstable { //! Scoped object that registers a pass factory in its constructor and deregisters in the objects destructor. //! //! Useful for temporarily registering @ref IPassFactory, for example, in a unit test. //! //! When registering a pass in a plugin, rather than using this object, prefer using one of the pass registration macros //! (e.g. @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS()). See @ref groupOmniGraphExecPassRegistration for a list of //! registration macros. class ScopedPassRegistration { public: //! Constructor. Calls @ref IPassRegistry::registerPass(). //! //! May throw. ScopedPassRegistration(PassType type, const char* name, omni::core::ObjectParam<IPassFactory> factory, const ConstName& nameToMatch = ConstName(), PassPriority priority = 0) : m_type(type), m_name(name) { OMNI_THROW_IF_ARG_NULL(name); _register(factory.get(), nameToMatch, priority); } //! Constructor. Calls @ref IPassRegistry::registerPass(). //! //! 
The given function should have the signature `IPass*(IGraphBuilder*)`. //! //! May throw. template <typename Fn> ScopedPassRegistration( PassType type, const char* name, Fn&& fn, const ConstName& nameToMatch = ConstName(), PassPriority priority = 0) : m_type(type), m_name(name) { OMNI_THROW_IF_ARG_NULL(name); _register(createPassFactory(std::forward<Fn>(fn)).get(), nameToMatch, priority); } //! Constructor. Calls @ref IPassRegistry::registerPass(). //! //! May throw. ScopedPassRegistration(PassType type, std::string&& name, omni::core::ObjectParam<IPassFactory> factory, const ConstName& nameToMatch = ConstName(), PassPriority priority = 0) : m_type(type), m_name(std::move(name)) { _register(factory.get(), nameToMatch, priority); } //! Destructor. Calls @ref IPassRegistry::deregisterPass(). ~ScopedPassRegistration() noexcept { if (m_registry) { m_registry->deregisterPass(m_type, m_name.c_str()); } } private: CARB_PREVENT_COPY_AND_MOVE(ScopedPassRegistration); void _register(IPassFactory* factory, const ConstName& nameToMatch, PassPriority priority) { OMNI_THROW_IF_ARG_NULL(factory); m_registry = getPassRegistry(); if (m_registry) { getPassRegistry()->registerPass(m_type, m_name.c_str(), factory, nameToMatch, priority); } } IPassRegistry* m_registry; PassType m_type; std::string m_name; }; #ifndef DOXYGEN_BUILD namespace detail { struct PassRegistrationInfo { PassType type; std::string name; PassFactoryPtr factory; ConstName nameToMatch; PassPriority priority; PassRegistrationInfo( PassType type_, const char* name_, PassFactoryPtr&& factory_, ConstName&& nameToMatch_, PassPriority priority_) : type(type_), name(name_), factory(std::move(factory_)), nameToMatch(std::move(nameToMatch_)), priority(priority_) { } }; //! Return the per module (e.g. DLL) list of passes that should be registered. //! //! This function is an implementation detail and should not be directly used. Rather, populate this list with one of //! the following macros: //! //! 
- @ref OMNI_GRAPH_EXEC_REGISTER_PASS() //! //! - @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS() //! //! This list is cleared after the module developer calls @ref registerModulePasses(). inline std::vector<PassRegistrationInfo>& getModulePassesToRegister() { static std::vector<PassRegistrationInfo> sPasses; return sPasses; } //! Return the per module (e.g. DLL) list of passes that should be deregistered. //! //! This function is an implementation detail and should not be directly used. //! //! This list is populated by @ref registerModulePasses(). inline std::vector<std::unique_ptr<ScopedPassRegistration>>& getModulePassesToDeregister() { static std::vector<std::unique_ptr<ScopedPassRegistration>> sPasses; return sPasses; } } // namespace detail # define OMNI_GRAPH_EXEC_CONCAT_(a_, b_) a_##b_ # define OMNI_GRAPH_EXEC_CONCAT(a_, b_) OMNI_GRAPH_EXEC_CONCAT_(a_, b_) # define OMNI_GRAPH_EXEC_REGISTER_PASS_(type_, class_, var_, nameToMatch, priority_) \ static auto var_ = []() \ { \ omni::graph::exec::unstable::detail::getModulePassesToRegister().emplace_back( \ type_, #class_, \ omni::graph::exec::unstable::createPassFactory([](omni::graph::exec::unstable::IGraphBuilder* b) \ { return class_::create(b); }), \ omni::graph::exec::unstable::ConstName(nameToMatch), priority_); \ return 0; \ }() #endif //! @defgroup groupOmniGraphExecPassRegistration Pass Registration //! //! @brief Macros to register a plugin's passes. //! //! Pass registration macros should be called at global scope (not within a function/method). //! //! In order to avoid accidentally registering a pass twice, it is recommended to call registration macros from a *.cpp* //! file rather than a *.h* file. //! //! Registration macros only add the pass to a list of passes to register. This is useful if you have passes defined in //! several **.cpp** files in your module. It is up to the module developer to call @ref registerModulePasses() and //! 
@ref deregisterModulePasses() to perform the actual registration. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. //! //! @ingroup groupOmniGraphExecPasses //! Adds an @ref omni::graph::exec::unstable::IPass to a list to be registered at the module's (i.e.g DLL) startup. //! //! @param type_ A @ref omni::graph::exec::unstable::PassType. //! //! @param class_ An implementation of @ref omni::graph::exec::unstable::IPass. //! //! This macro should be called at global scope (not within a function/method). //! //! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file //! rather than a *.h* file. //! //! This macro only adds the pass to a list of passes to register. This is useful if you have passes defined in several //! **.cpp** files in your module. It is up to the module developer to call @ref //! omni::graph::exec::unstable::registerModulePasses() and //! @ref omni::graph::exec::unstable::deregisterModulePasses() to perform the actual registration. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! @ingroup groupOmniGraphExecPassRegistration #define OMNI_GRAPH_EXEC_REGISTER_PASS(type_, class_) \ OMNI_GRAPH_EXEC_REGISTER_PASS_(type_, class_, OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", 0) //! Adds an @ref omni::graph::exec::unstable::IPopulatePass to a list to be registered as type //! @ref omni::graph::exec::unstable::PassType::ePopulate at the module's (i.e.g DLL) startup. //! //! @param class_ An implementation of @ref omni::graph::exec::unstable::IPopulatePass. //! //! @param defNameToPopulate_ The name of the definition, @ref omni::graph::exec::unstable::IDef, this pass should //! populate. An example would be "OmniGraph". //! //! This macro should be called at global scope (not within a function/method). //! //! 
In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file //! rather than a *.h* file. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! @ingroup groupOmniGraphExecPassRegistration #define OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(class_, defNameToPopulate_) \ static_assert(std::is_base_of<omni::graph::exec::unstable::IPopulatePass, class_>::value, \ "Registering a class that doesn't implement IPopulatePass"); \ OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::ePopulate, class_, \ OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), defNameToPopulate_, 0) //! Adds an @ref omni::graph::exec::unstable::IPartitionPass to a list to be registered as type //! @ref omni::graph::exec::unstable::PassType::ePartitioning at the module's (i.e.g DLL) startup. //! //! @param class_ An implementation of @ref omni::graph::exec::unstable::IPartitionPass. //! //! @param priority_ @ref omni::graph::exec::unstable::PassPriority used to resolve conflicts between passes //! //! This macro should be called at global scope (not within a function/method). //! //! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file //! rather than a *.h* file. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! @ingroup groupOmniGraphExecPassRegistration #define OMNI_GRAPH_EXEC_REGISTER_PARTITION_PASS(class_, priority_) \ static_assert(std::is_base_of<omni::graph::exec::unstable::IPartitionPass, class_>::value, \ "Registering a class that doesn't implement IPartitionPass"); \ OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::ePartitioning, class_, \ OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", priority_) //! Adds an @ref omni::graph::exec::unstable::IGlobalPass to a list to be registered as type //! 
@ref omni::graph::exec::unstable::PassType::eGlobal at the module's (i.e.g DLL) startup. //! //! @param class_ An implementation of @ref omni::graph::exec::unstable::IGlobalPass. //! //! This macro should be called at global scope (not within a function/method). //! //! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file //! rather than a *.h* file. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! @ingroup groupOmniGraphExecPassRegistration #define OMNI_GRAPH_EXEC_REGISTER_GLOBAL_PASS(class_) \ static_assert(std::is_base_of<omni::graph::exec::unstable::IGlobalPass, class_>::value, \ "Registering a class that doesn't implement IGlobalPass"); \ OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::eGlobal, class_, \ OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", 0) //! Registers the module's @ref omni::graph::exec::unstable::IPass factories with @ref //! omni::graph::exec::unstable::IPassRegistry. //! //! This function should be called in the module's function registered with omni::core::OMNI_MODULE_ON_MODULE_STARTED(). //! This is usually called @c onStarted(). //! //! When developing a Kit extension, prefer calling @c OMNI_KIT_EXEC_CORE_ON_MODULE_STARTED() rather than this function. //! //! May throw. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! @ingroup groupOmniGraphExecPassRegistration inline void registerModulePasses() { auto& toRegister = detail::getModulePassesToRegister(); auto& toDeregister = detail::getModulePassesToDeregister(); for (auto& pass : toRegister) { toDeregister.emplace_back(std::make_unique<ScopedPassRegistration>( pass.type, std::move(pass.name), std::move(pass.factory), std::move(pass.nameToMatch), pass.priority)); } toRegister.clear(); } //! Deregisters the module's @ref omni::graph::exec::unstable::IPass factories with @ref IPassRegistry. //! 
//! Failure to call this function may lead to crashes during program shutdown. //! //! This function should be called in the module's function registered with omni::core::OMNI_MODULE_ON_MODULE_UNLOAD(). //! This is usually called @c onUnload(). //! //! When developing a Kit extension, prefer calling @c OMNI_KIT_EXEC_CORE_ON_MODULE_UNLOAD() rather than this function. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! @ingroup groupOmniGraphExecPassRegistration inline void deregisterModulePasses() noexcept { detail::getModulePassesToDeregister().clear(); } } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
14,452
C
43.74613
123
0.637559
omniverse-code/kit/include/omni/graph/exec/unstable/AtomicBackoff.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file AtomicBackoff.h //! //! @brief Defines @ref omni::graph::exec::unstable::AtomicBackoff. #pragma once #include <carb/Defines.h> #if CARB_X86_64 # include <immintrin.h> #endif #include <thread> namespace omni { namespace graph { namespace exec { namespace unstable { //! Exponential backoff pattern for waiting with a cap number of pauses //! //! This class implements exponential backoff, where each call to pause will //! cause busy waiting and increment the number of iterations for next pause call. //! All of this is cap with a maximum limit of pause calls after which waiting //! is considered long and switches to yield. //! //! This class is useful when we expect short waits and would like to prevent //! yielding the compute resources for this short period of time. //! //! Methods are not thread safe unless otherwise noted. class AtomicBackoff { public: //! Default constructor constexpr AtomicBackoff() noexcept { } // No use in allowing copy and assignment operators for this class AtomicBackoff(const AtomicBackoff&) = delete; AtomicBackoff& operator=(const AtomicBackoff&) = delete; //! Pause execution for a short period of time. //! //! Use exponential backoff pattern and a upper wait cap to select between busy waiting and yielding. 
void pause() noexcept { if (m_loopCount <= kLoopBeforeYield) { auto loop = m_loopCount; while (loop-- > 0) { #if CARB_X86_64 _mm_pause(); #elif CARB_AARCH64 // based on TBB machine_pause and BOOST pause.hpp __asm__ __volatile__("yield" ::: "memory"); #endif } m_loopCount *= 2; } else { std::this_thread::yield(); } } //! Clear wait counter void reset() noexcept { m_loopCount = 1; } private: //! Upper cap for busy waiting. Pass this count the pause call will always yield until reset method is called. //! //! The number must be power of two and is approximately equal to number of pause instructions it would take //! to do a context switch. enum : int { kLoopBeforeYield = 16 }; //! Next number of busy loop iterations. Incremented exponentially and cap at kLoopBeforeYield int m_loopCount{ 1 }; }; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
2,892
C
26.552381
114
0.664592
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionStateInfo.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! State associated with a given execution task //! //! @note We separated execution state from the execution graph to allow concurrent and/or nested execution template <> class omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi> : public omni::graph::exec::unstable::IExecutionStateInfo_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionStateInfo") //! Store a "future" result for this state. The actual computation is running asynchronously outside of execution //! frame //! //! @return \c true if execution state accepts "future" results. bool storeBackgroundResult(omni::core::ObjectParam<omni::graph::exec::unstable::IBackgroundResult> result); //! Query used by some executors to determine if computation of a node is necessary bool needsCompute(const omni::graph::exec::unstable::Stamp& execVersion) noexcept; //! Set to request computation void requestCompute() noexcept; //! Reset request to compute after computation was performed void setComputed() noexcept; //! Get current/last exec version set for this node during execution omni::graph::exec::unstable::SyncStamp getExecutionStamp() noexcept; //! Set current exec version for this node. 
Returns true if version wasn't in sync. bool setExecutionStamp(const omni::graph::exec::unstable::Stamp& execVersion) noexcept; //! Returns a value from a node's key/value datastore. //! //! The key is used as a look-up in the node's key/value datastore. //! //! The type of each data item is returned in @p outTypeId. //! //! @p outPtr will be updated with a pointer to the actual data. //! //! @p outItemSize store the size of each item in the returned array. //! //! @p outItemCount contains the number of items returned (i.e. the number //! of items @p outPtr points to). For an array, this will be greater than //! 1. //! //! If the key is not found, @p outPtr is set to @c nullptr and @p //! outItemCount is set to 0. //! //! Accessing the node's key/value datastore is not thread safe. //! //! An exception is thrown on all other errors. void getNodeData(omni::graph::exec::unstable::NodeDataKey key, omni::core::TypeId* outTypeId, void** outPtr, uint64_t* outItemSize, uint64_t* outItemCount); //! Sets a value in a node's key/value datastore. //! //! The key is used as a look-up in the node's key/value datastore. //! //! The type of each data item is set with @p typeId. //! //! @p data points to an array of data items. //! //! @p itemSize is the size of each item in the given array. //! //! @p itemCount contains the number of items pointed to by @p data. For an //! array, this will be greater than 1. //! //! @p deleter is a function used to delete @p data when either a new value //! is set at the key or the context is invalidated. If @p deleter is @c //! nullptr, it is up to the calling code to manage the lifetime of the @p //! data. //! //! Accessing the node's key/value datastore is not thread safe. //! //! An exception is thrown on all other errors. 
void setNodeData(omni::graph::exec::unstable::NodeDataKey key, omni::core::TypeId typeId, void* data, uint64_t itemSize, uint64_t itemCount, omni::graph::exec::unstable::NodeDataDeleterFn* deleter); }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::storeBackgroundResult( omni::core::ObjectParam<omni::graph::exec::unstable::IBackgroundResult> result) { OMNI_THROW_IF_ARG_NULL(result); auto return_ = storeBackgroundResult_abi(result.get()); return return_; } inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::needsCompute( const omni::graph::exec::unstable::Stamp& execVersion) noexcept { return needsCompute_abi(execVersion); } inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::requestCompute() noexcept { requestCompute_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setComputed() noexcept { setComputed_abi(); } inline omni::graph::exec::unstable::SyncStamp omni::core::Generated< omni::graph::exec::unstable::IExecutionStateInfo_abi>::getExecutionStamp() noexcept { return getExecutionStamp_abi(); } inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setExecutionStamp( const omni::graph::exec::unstable::Stamp& execVersion) noexcept { return setExecutionStamp_abi(execVersion); } inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::getNodeData( omni::graph::exec::unstable::NodeDataKey key, omni::core::TypeId* outTypeId, void** outPtr, uint64_t* outItemSize, uint64_t* outItemCount) { OMNI_THROW_IF_ARG_NULL(outTypeId); OMNI_THROW_IF_ARG_NULL(outPtr); OMNI_THROW_IF_ARG_NULL(outItemSize); OMNI_THROW_IF_ARG_NULL(outItemCount); OMNI_THROW_IF_FAILED(getNodeData_abi(key, outTypeId, outPtr, outItemSize, outItemCount)); } inline void 
omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setNodeData( omni::graph::exec::unstable::NodeDataKey key, omni::core::TypeId typeId, void* data, uint64_t itemSize, uint64_t itemCount, omni::graph::exec::unstable::NodeDataDeleterFn* deleter) { OMNI_THROW_IF_ARG_NULL(data); OMNI_THROW_IF_FAILED(setNodeData_abi(key, typeId, data, itemSize, itemCount, deleter)); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
6,760
C
35.945355
117
0.684911
omniverse-code/kit/include/omni/graph/exec/unstable/ConstName.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file ConstName.h //! //! @brief Defines @ref omni::graph::exec::unstable::ConstName. #pragma once #include <carb/Defines.h> #include <carb/cpp/StringView.h> #include <omni/String.h> #include <omni/graph/exec/unstable/Types.h> #include <type_traits> namespace omni { namespace graph { namespace exec { namespace unstable { //! An immutable name with fast hash based comparisons. //! //! @ref ConstName is a hash of a given string. This hash is used for all comparisons. The original string is also //! stored in this object. //! //! Prefer using the overloaded comparison operators (e.g. ==, <, !=) rather than performing comparison operators with //! the result of @ref toString() or @ref getString(). Using the comparison operators is considerably faster. //! //! Comparison of @ref ConstName with `const char*`, @c omni::string, or @c std::string is potentially slow and as such //! no comparison operators exist to do so. To compare a @ref ConstName with either a `const char*`, @c omni::string, //! or @c std::string, you must first explicitly create a @ref ConstName from the string. //! //! Classes like `carb::RString` and `pxr::TfToken` also utilize a hash of an original string for fast string //! comparison. In these classes, the hash is simply passed around but the string is stored in a global lookup table. //! When the original string is needed, the hash is used as a lookup in the global table. //! //! Unlike `carb::RString` and `pxr::TfToken`, @ref ConstName avoids the global table. Rather, the original string is //! 
stored alongside the hash. The benefit of avoiding the global table is speed. Performance testing revealed that //! when constructing names of objects during graph traversal, the cost of multiple threads reading and writing to the //! global tables storing `carb::RString` and `pxr::TfToken`'s strings was a bottleneck. //! //! If you need speed in threaded code, use @ref ConstName. If you want to save space, use `carb::RString` or //! `pxr::TfToken`. //! //! The object is ABI-safe. When returning a @ref ConstName or passing a @ref ConstName to/from an ABI method, prefer //! using a const pointer rather than passing by value. class ConstName { public: //! Construct from a static compile time string. template <std::size_t N> explicit ConstName(const char (&s)[N]) : m_hash(carb::fnv1aHash(s, N - 1)), m_name(s, N - 1) { // N-1 so that we don't hash the terminating \0. } //! Construct from a @ref carb::cpp::string_view. This constructor also accepts `const char *`. explicit ConstName(const carb::cpp::string_view& sv) : m_hash(carb::fnv1aHash(sv.data(), sv.size())), m_name(sv) { } //! Construct from a @ref carb::cpp::string_view with an already computed hash. explicit ConstName(const carb::cpp::string_view& sv, NameHash hash) : m_hash(hash), m_name(sv) { } //! Construct from empty string. ConstName() noexcept : m_hash(CARB_HASH_STRING("")) { } //! Implementation detail. Ignore. struct BogusArg { }; //! Construct from a @c std::string. template <typename T> explicit ConstName(const T& s, std::enable_if_t<std::is_same<std::string, T>::value, BogusArg> = {}) : m_hash(carb::fnv1aHash(s.data(), s.size())), m_name(s) { // the enable_if disambiguates which constructor a const char* arg should use. // the BogusArg type keeps this constructor from being confused with the one with a hash. } //! Construct from a @c std::string and a pre-computed hash. 
template <typename T> explicit ConstName(const T& s, NameHash hash, std::enable_if_t<std::is_same<std::string, T>::value, BogusArg> = {}) : m_hash(hash), m_name(s) { // the enable_if disambiguates which constructor a const char* arg should use. } //! Returns the string used to generate the hash. For debugging purposes only. const omni::string& getString() const noexcept { return m_name; } //! Returns the hash used for comparison. //! //! Prefer using the overloaded comparison operators (e.g. <, ==) rather than directly calling this method. constexpr uint64_t getHash() const noexcept { return m_hash; } //! Converts to a @c std::string. For debugging purposes only. //! //! Prefer using @ref getString() over this method, as @ref getString() does not copy any data. std::string toString() const { return std::string(m_name.data(), m_name.size()); } //! Returns the name as a null-terminated `const char*`. const char* c_str() const noexcept { return m_name.c_str(); } private: uint64_t m_hash; omni::string m_name; }; //! Compares two @ref ConstName objects. //! //! Returns @c true if the hashes are the same. constexpr bool operator==(const ConstName& lhs, const ConstName& rhs) noexcept { return (lhs.getHash() == rhs.getHash()); } //! Compares a @ref ConstName with a hash. //! //! Returns @c true if the hashes are the same. constexpr bool operator==(const ConstName& lhs, NameHash rhs) noexcept { return (lhs.getHash() == rhs); } //! Compares a hash with a @ref ConstName //! //! Returns @c true if the hashes are the same. constexpr bool operator==(NameHash lhs, const ConstName& rhs) noexcept { return (lhs == rhs.getHash()); } //! Compares two @ref ConstName objects. //! //! Returns @c true if the hashes are not the same. constexpr bool operator!=(const ConstName& lhs, const ConstName& rhs) noexcept { return (lhs.getHash() != rhs.getHash()); } //! Compares a @ref ConstName with a hash. //! //! Returns @c true if the hashes are not the same. 
constexpr bool operator!=(const ConstName& lhs, NameHash rhs) noexcept { return (lhs.getHash() != rhs); } //! Compares a hash with a @ref ConstName //! //! Returns @c true if the hashes are not the same. constexpr bool operator!=(NameHash lhs, const ConstName& rhs) noexcept { return (lhs != rhs.getHash()); } //! Compares two @ref ConstName objects. //! //! Returns @c true if the first hash's value is less than the seconds. constexpr bool operator<(const ConstName& lhs, const ConstName& rhs) noexcept { return (lhs.getHash() < rhs.getHash()); } //! Compares a @ref ConstName with a hash. //! //! Returns @c true if the first hash's value is less than the seconds. constexpr bool operator<(const ConstName& lhs, NameHash rhs) noexcept { return (lhs.getHash() < rhs); } //! Compares a hash with a @ref ConstName //! //! Returns @c true if the first hash's value is less than the seconds. constexpr bool operator<(NameHash lhs, const ConstName& rhs) noexcept { return (lhs < rhs.getHash()); } //! Output stream operator overload. Outputs the contents of @p str to the stream @p os. //! //! @param os Stream to output the string to. //! @param str The string to output. //! //! @return @p os. //! //! @throws std::ios_base::failure if an exception is thrown during output. inline std::ostream& operator<<(std::ostream& os, const ConstName& str) { return (os << str.getString()); } //! Concatenates @c std::string with a @ref ConstName. Returns a @c omni::string. inline auto operator+(const std::string& lhs, const ConstName& rhs) { return lhs + rhs.getString(); } //! Concatenates @c std::string with a @ref ConstName. Returns a @c omni::string. //! //! Concatenates strings. inline auto operator+(const ConstName& lhs, const std::string& rhs) { return lhs.getString() + rhs; } } // namespace unstable } // namespace exec } // namespace graph } // namespace omni namespace std { //! Hash specialization for std::string template <> struct hash<omni::graph::exec::unstable::ConstName> { //! 
Argument type alias. using argument_type = omni::graph::exec::unstable::ConstName; //! Result type alias. using result_type = std::size_t; //! Hash operator result_type operator()(const argument_type& x) const noexcept { return x.getHash(); } }; } // namespace std
8,494
C
31.30038
119
0.678479
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderContext.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Common state for graph builders. //! //! *TODO* We will use this class to introduce reporting from graph transformation pipeline back to the application. template <> class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi> : public omni::graph::exec::unstable::IGraphBuilderContext_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilderContext") //! Current construction version. //! //! Incremented after each pass pipeline run to include definitions that were created before the run. omni::graph::exec::unstable::Stamp getConstructionStamp() noexcept; //! Return owner of all graphs this builder touches //! //! The returned @ref omni::graph::exec::unstable::IGraph will *not* have //! @ref omni::core::IObject::acquire() called before being returned. omni::graph::exec::unstable::IGraph* getGraph() noexcept; //! To be overriden by application specific class to enable reporting from transformation pipeline. void report(const char* diagnose) noexcept; //! 
Run transformation pipeline void runTransformations(omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::graph::exec::unstable::Stamp omni::core::Generated< omni::graph::exec::unstable::IGraphBuilderContext_abi>::getConstructionStamp() noexcept { return getConstructionStamp_abi(); } inline omni::graph::exec::unstable::IGraph* omni::core::Generated< omni::graph::exec::unstable::IGraphBuilderContext_abi>::getGraph() noexcept { return getGraph_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>::report(const char* diagnose) noexcept { report_abi(diagnose); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>::runTransformations( omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept { runTransformations_abi(nodeGraphDef.get()); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
2,952
C
33.337209
127
0.738821
omniverse-code/kit/include/omni/graph/exec/unstable/IInvalidationForwarder.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IInvalidationForwarder.h //! //! @brief Defines @ref omni::graph::exec::unstable::IInvalidationForwarder. #pragma once #include <omni/graph/exec/unstable/IBase.h> namespace omni { namespace graph { namespace exec { namespace unstable { class IInvalidationForwarder_abi; class IInvalidationForwarder; class ITopology; //! Interface wrapping a function (possibly with storage) to forward topology invalidation notices. class IInvalidationForwarder_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IInvalidationForwarder")> { protected: //! Invokes the wrapped function. //! //! The given topology must not be @c nullptr. virtual void invoke_abi(OMNI_ATTR("not_null, throw_if_null") ITopology* topology) noexcept = 0; }; //! Smart pointer managing an instance of @ref IInvalidationForwarder. using InvalidationForwarderPtr = omni::core::ObjectPtr<IInvalidationForwarder>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IInvalidationForwarder.gen.h> //! @copydoc omni::graph::exec::unstable::IInvalidationForwarder_abi //! //! @ingroup groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IInvalidationForwarder : public omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi> { }; #include <omni/graph/exec/unstable/ITopology.h> #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IInvalidationForwarder.gen.h>
2,058
C
30.676923
99
0.757532
omniverse-code/kit/include/omni/graph/exec/unstable/IPassRegistry.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Registry (database) of known @ref omni::graph::exec::unstable::IPass objects. //! //! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly accessing //! methods on this interface. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. template <> class omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi> : public omni::graph::exec::unstable::IPassRegistry_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassRegistry") //! Registers a new pass. //! //! @p type is th type of pass being registered (e.g. populate, partition, etc). //! //! @p name is the name of the pass. This name is used to deregister the pass (see @ref //! omni::graph::exec::unstable::IPassRegistry::deregisterPass) so the name should be unique within this registry. //! Must not be `nullptr`. //! //! @p factory is the interface that will instantiate the pass. Must not be `nullptr`. //! //! Some pass types (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a //! graph. @p nameToMatch is used to specify the name of the node/definitions the pass wishes to affect. The meaning //! 
of this field is pass type dependent. Many pass types ignore @p nameToMatch. Must not be `nullptr`. This method //! copies the name. //! //! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When //! multiple passes wish to affect an entity, @p priority can be used to resolve the conflict. The meaning of the //! priority value is pass type specific. Many pass types ignore @p priority. //! //! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly //! accessing this method. bool registerPass(omni::graph::exec::unstable::PassType passType, const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IPassFactory> factory, const omni::graph::exec::unstable::ConstName& nameToMatch, omni::graph::exec::unstable::PassPriority priority); //! Deregisters a pass. //! //! Returns @c true if the pass was found and removed. Returns @c false if the pass could not be found. //! //! If multiple passes were registered with the same name, this method will only remove one of them. //! //! When deregistering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly //! accessing this method. bool deregisterPass(omni::graph::exec::unstable::PassType passType, const char* name); //! Returns a sub-registry containing pass of the specified type. //! //! The returned @ref omni::graph::exec::unstable::IPassTypeRegistry will *not* have //! @ref omni::core::IObject::acquire() called before being returned. omni::graph::exec::unstable::IPassTypeRegistry* getPassTypeRegistry(omni::graph::exec::unstable::PassType type) noexcept; //! Returns version stamp for the registry. //! //! Version is incremented each time the content of registry changes, i.e. pass is added or removed. 
omni::graph::exec::unstable::Stamp getStamp() noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline bool omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::registerPass( omni::graph::exec::unstable::PassType passType, const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IPassFactory> factory, const omni::graph::exec::unstable::ConstName& nameToMatch, omni::graph::exec::unstable::PassPriority priority) { OMNI_THROW_IF_ARG_NULL(name); OMNI_THROW_IF_ARG_NULL(factory); auto return_ = registerPass_abi(passType, name, factory.get(), &nameToMatch, priority); return return_; } inline bool omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::deregisterPass( omni::graph::exec::unstable::PassType passType, const char* name) { OMNI_THROW_IF_ARG_NULL(name); auto return_ = deregisterPass_abi(passType, name); return return_; } inline omni::graph::exec::unstable::IPassTypeRegistry* omni::core::Generated< omni::graph::exec::unstable::IPassRegistry_abi>::getPassTypeRegistry(omni::graph::exec::unstable::PassType type) noexcept { return getPassTypeRegistry_abi(type); } inline omni::graph::exec::unstable::Stamp omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::getStamp() noexcept { return getStamp_abi(); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
5,545
C
41.992248
132
0.707124
omniverse-code/kit/include/omni/graph/exec/unstable/IDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IDef.h //! //! @brief Defines @ref omni::graph::exec::unstable::IDef. #pragma once #include <omni/graph/exec/unstable/ConstName.h> #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/SchedulingInfo.h> #include <omni/graph/exec/unstable/Status.h> #include <omni/graph/exec/unstable/Types.h> namespace omni { namespace graph { namespace exec { namespace unstable { class IDef; class IDef_abi; class ExecutionTask; //! Base class for all node definitions //! //! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during //! execution. //! //! Since definitions can be shared by multiple nodes, and nodes can be executed in parallel, implementations of //! this interface should expect its methods to be called in parallel. class IDef_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IDef")> { protected: //! Execute the node definition. //! //! See thread safety information in interface description. virtual Status execute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0; //! Provide runtime information about scheduling constraints particular task have //! //! The provided @ref omni::graph::exec::unstable::ExecutionTask can be used to determine the path of the current //! definition. //! //! The given task must not be @c nullptr. //! //! See thread safety information in interface description. 
virtual SchedulingInfo getSchedulingInfo_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionTask* info) noexcept = 0; //! Return unique definition identifier. //! //! See thread safety information in interface description. virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0; }; //! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IDef. using DefPtr = omni::core::ObjectPtr<IDef>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IDef.gen.h> //! @copydoc omni::graph::exec::unstable::IDef_abi //! //! @ingroup groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IDef : public omni::core::Generated<omni::graph::exec::unstable::IDef_abi> { }; #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IDef.gen.h>
2,975
C
33.206896
117
0.718655
omniverse-code/kit/include/omni/graph/exec/unstable/IPassRegistry.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IPassRegistry.h //! //! @brief Defines @ref omni::graph::exec::unstable::IPassRegistry. #pragma once #include <omni/graph/exec/unstable/ConstName.h> #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/Stamp.h> #include <omni/graph/exec/unstable/Types.h> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class IPassFactory; class IPassRegistry; class IPassRegistry_abi; class IPassTypeRegistry; //! Registry (database) of known @ref omni::graph::exec::unstable::IPass objects. //! //! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly accessing //! methods on this interface. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. class IPassRegistry_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassRegistry")> { protected: //! Registers a new pass. //! //! @p type is th type of pass being registered (e.g. populate, partition, etc). //! //! @p name is the name of the pass. This name is used to deregister the pass (see @ref //! omni::graph::exec::unstable::IPassRegistry::deregisterPass) so the name should be unique within this registry. //! Must not be `nullptr`. //! //! @p factory is the interface that will instantiate the pass. Must not be `nullptr`. //! //! Some pass types (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a //! graph. 
@p nameToMatch is used to specify the name of the node/definitions the pass wishes to affect. The meaning //! of this field is pass type dependent. Many pass types ignore @p nameToMatch. Must not be `nullptr`. This method //! copies the name. //! //! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When //! multiple passes wish to affect an entity, @p priority can be used to resolve the conflict. The meaning of the //! priority value is pass type specific. Many pass types ignore @p priority. //! //! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly //! accessing this method. virtual bool registerPass_abi(PassType passType, OMNI_ATTR("c_str, not_null, throw_if_null") const char* name, OMNI_ATTR("not_null, throw_if_null") IPassFactory* factory, OMNI_ATTR("in, not_null, throw_if_null, ref") const ConstName* nameToMatch, PassPriority priority) noexcept = 0; //! Deregisters a pass. //! //! Returns @c true if the pass was found and removed. Returns @c false if the pass could not be found. //! //! If multiple passes were registered with the same name, this method will only remove one of them. //! //! When deregistering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly //! accessing this method. virtual bool deregisterPass_abi(PassType passType, OMNI_ATTR("c_str, not_null, throw_if_null") const char* name) noexcept = 0; //! Returns a sub-registry containing pass of the specified type. //! //! The returned @ref omni::graph::exec::unstable::IPassTypeRegistry will *not* have //! @ref omni::core::IObject::acquire() called before being returned. virtual OMNI_ATTR("no_acquire") IPassTypeRegistry* getPassTypeRegistry_abi(PassType type) noexcept = 0; //! Returns version stamp for the registry. //! //! Version is incremented each time the content of registry changes, i.e. pass is added or removed. 
virtual Stamp getStamp_abi() noexcept = 0; }; //! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IPassRegistry. using PassRegistryPtr = omni::core::ObjectPtr<IPassRegistry>; //! Returns the singleton pass registry. inline IPassRegistry* getPassRegistry() noexcept; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni // generated API declaration #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IPassRegistry.gen.h> //! @copydoc omni::graph::exec::unstable::IPassRegistry_abi //! //! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IPassRegistry : public omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi> { }; // additional headers needed for API implementation #include <omni/graph/exec/unstable/IPassFactory.h> #include <omni/graph/exec/unstable/IPassTypeRegistry.h> //! Returns a singleton containing the pass registry. //! //! See @ref groupOmniGraphExecPassRegistration for more information about pass registration. //! //! @ingroup groupOmniGraphExecPassRegistration inline omni::graph::exec::unstable::IPassRegistry* omni::graph::exec::unstable::getPassRegistry() noexcept { // createType() always calls acquire() and returns an ObjectPtr to make sure release() is called. we don't want to // hold a ref here to avoid static destruction issues. here we allow the returned ObjectPtr to destruct (after // calling get()) to release our ref. we know the DLL in which the singleton was created is maintaining a ref and // will keep the singleton alive for the lifetime of the DLL. static auto sSingleton = omni::core::createType<IPassRegistry>().get(); return sSingleton; } // generated API implementation #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IPassRegistry.gen.h>
6,207
C
43.342857
125
0.716449
omniverse-code/kit/include/omni/graph/exec/unstable/Stamp.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file Stamp.h //! //! @brief Defines omni::graph::exec::unstable::Stamp class. #pragma once #include <limits> #include <string> namespace omni { namespace graph { namespace exec { namespace unstable { //! Low-level ABI type to represent @ref Stamp. using Stamp_abi = int16_t; //! Lazy, light-weight change notification system. //! //! The heart of the EF's invalidation system is @ref Stamp and @ref SyncStamp. //! //! Stamps track the state/version of a resource. Stamps are implemented as an unsigned number. If the state of a //! resource changes, the stamp is incremented. //! //! Stamps are broken into two parts. //! //! The first part is implemented by the @ref Stamp class. As a resource changes, @ref Stamp::next()| s called to //! denote the new state of the resource. @ref Stamp objects are owned by the resource they track. //! //! The second part of stamps is implemented by the @ref SyncStamp class. @ref SyncStamp tracks/synchronizes to the //! state of a @ref Stamp. @ref SyncStamp objects are owned by the entities that wish to utilize the mutating resource. //! //! For example, consider the following example showing how a consumer of a resource can uses stamps to detect when a //! resource has changed and update cached data: //! #ifdef OMNI_GRAPH_EXEC_DOC_BUILD //! @snippet "../tests.cpp/TestStamp.cpp" ef-docs-stamp-example #endif //! //! By default constructed @ref Stamp and @ref SyncStamp are never in sync, meaning reconstruction will always happen at //! least once. //! //! 
Stamps are a lazy, light-weight alternative to heavier change notification systems such as callbacks. //! //! Stamps use unsigned numbers to detect changes in the tracked resource, relying on overflow behavior to wrap the //! unsigned number. A check for @ref Stamp::kInvalidStamp is performed during this overflow. //! //! Because of the limited bit depth of @ref Stamp, it is possible, though improbable, that a resource at stamp *X*, //! wraps all the way back to *X* before a @ref SyncStamp tries to synchronize with the stamp. In such a case, the @ref //! SyncStamp will erroneously believe it is in sync with the resource. Again, this is unlikely, though possible. //! //! EF makes extensive use of stamps to detect changes in pass registration, graph topology, and graph construction. See //! @rstref{Graph Invalidation <ef_graph_invalidation>} to understand how @ref Stamp is used for invalidation in EF. //! //! This object is ABI-safe. class Stamp { public: //! Anonymous structure to define kInvalidStamp. enum : Stamp_abi { kInvalidStamp = -1 //!< Value for an invalid stamp. }; //! Bump the stamp void next() noexcept { // depending on usage, we may have to turn this into atomic operator // for now we don't expect this to be needed m_generation = (m_generation == std::numeric_limits<decltype(m_generation)>::max()) ? 0 : m_generation + 1; static_assert(offsetof(Stamp, m_generation) == 0, "unexpected offset"); } //! Make stamp invalid void invalidate() noexcept { m_generation = kInvalidStamp; } //! Check if stamp is valid bool isValid() const noexcept { return m_generation != Stamp::kInvalidStamp; } //! Equal operator bool operator==(const Stamp& rhs) const noexcept { return (m_generation == rhs.m_generation); } //! Not equal operator bool operator!=(const Stamp& rhs) const noexcept { return (m_generation != rhs.m_generation); } //! 
Convert to string for debugging std::string toString() const // may throw { std::string str; if (isValid()) { str = std::to_string(m_generation); } else { str = "INV"; } return str; } private: Stamp_abi m_generation{ kInvalidStamp }; //!< Stamp storage friend class SyncStamp; }; static_assert(std::is_standard_layout<Stamp>::value, "Stamp is expected to be abi safe"); static_assert(2 == sizeof(Stamp), "Stamp is an unexpected size"); //! Watcher of a mutating resource. Observes a resources @ref Stamp and detects if it has changed. //! //! Used always in pair with @ref Stamp class to detect changes in a resource. See @ref Stamp's docs for an //! explanation on how this object is used during invalidation. class SyncStamp { public: enum { kInvalidStamp = Stamp::kInvalidStamp - 1 }; //! Constructor SyncStamp() noexcept = default; //! Construct in sync with given stamp SyncStamp(Stamp id) noexcept : m_syncStamp(id.m_generation) { static_assert(offsetof(SyncStamp, m_syncStamp) == 0, "unexpected offset"); } //! Check if two classes are in sync. Always return false if this instance is having invalid stamp stored. bool inSync(const Stamp& id) const noexcept { if (m_syncStamp == SyncStamp::kInvalidStamp) return false; return (m_syncStamp == id.m_generation); } //! Check if two sync stamp are in sync bool inSync(const SyncStamp& syncStamp) const noexcept { return (m_syncStamp == syncStamp.m_syncStamp); } //! Synchronize this instance with given stamp void sync(const Stamp& id) noexcept { m_syncStamp = id.m_generation; } //! Synchronize this instance with given sync stamp void sync(const SyncStamp& syncStamp) noexcept { m_syncStamp = syncStamp.m_syncStamp; } //! In one call test and synchronize the stamp. After this call this instance is guaranteed to be in sync with //! given id. //! //! @return False if stamps were in sync and true otherwise. bool makeSync(const Stamp& id) noexcept { if (inSync(id)) return false; sync(id); return true; } //! 
Is this sync stamp valid bool isValid() const noexcept { return m_syncStamp != SyncStamp::kInvalidStamp; } //! Invalidate stored stamp void invalidate() noexcept { m_syncStamp = SyncStamp::kInvalidStamp; } //! Explicit call to convert to Stamp class Stamp toStamp() const noexcept { Stamp id; if (isValid()) id.m_generation = m_syncStamp; return id; } //! Convert to string for debugging std::string toString() const // may throw { std::string str; if (isValid()) { str = std::to_string(m_syncStamp); } else { str = "INV"; } return str; } private: Stamp_abi m_syncStamp{ kInvalidStamp }; //!< Stamp storage }; static_assert(std::is_standard_layout<SyncStamp>::value, "SyncStamp is expected to be abi safe"); static_assert(2 == sizeof(SyncStamp), "SyncStamp is an unexpected size"); } // namespace unstable } // namespace exec } // namespace graph } // namespace omni
7,400
C
29.331967
120
0.659054
omniverse-code/kit/include/omni/graph/exec/unstable/ScheduleFunction.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file ScheduleFunction.h
//!
//! @brief Helpers for @ref omni::graph::exec::unstable::IScheduleFunction.
#pragma once

#include <omni/graph/exec/unstable/IScheduleFunction.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
namespace detail
{

#ifndef DOXYGEN_BUILD

//! Primary template: handles arbitrary invocable objects (e.g. lambdas, std::function, plain
//! function pointers). Invocation calls the object directly; capture moves it into the caller's
//! storage (e.g. a lambda capture).
template <typename Fn>
struct ScheduleFunctionHelper
{
    static omni::graph::exec::unstable::Status invoke(Fn&& fn)
    {
        return fn();
    }

    static auto capture(Fn&& fn)
    {
        return std::move(fn);
    }
};

//! Specialization for raw IScheduleFunction pointers (deduced as `IScheduleFunction*&` from an
//! lvalue argument). Capture acquires a reference via omni::core::borrow() so the object stays
//! alive until the scheduler runs it.
template <>
struct ScheduleFunctionHelper<IScheduleFunction*&>
{
    static omni::graph::exec::unstable::Status invoke(IScheduleFunction* fn)
    {
        return fn->invoke();
    }

    static auto capture(IScheduleFunction* fn)
    {
        return omni::core::borrow(fn);
    }
};

//! Specialization for smart pointers to IScheduleFunction. Capture transfers ownership by moving
//! the ObjectPtr (no extra acquire/release pair).
template <>
struct ScheduleFunctionHelper<omni::core::ObjectPtr<IScheduleFunction>&>
{
    static omni::graph::exec::unstable::Status invoke(omni::core::ObjectPtr<IScheduleFunction>& fn)
    {
        return fn->invoke();
    }

    static omni::core::ObjectPtr<IScheduleFunction> capture(omni::core::ObjectPtr<IScheduleFunction>& fn)
    {
        return std::move(fn);
    }
};

#endif

} // namespace detail

//! Helper function to efficiently call an invocable object (i.e. std::function, function ptr, IScheduleFunction*).
template <typename Fn>
omni::graph::exec::unstable::Status invokeScheduleFunction(Fn&& fn)
{
    // Dispatch on the deduced reference type so IScheduleFunction pointers/smart pointers get the
    // specialized handling above.
    return detail::ScheduleFunctionHelper<Fn>::invoke(std::forward<Fn>(fn));
}

//! Helper function to efficiently capture an invocable object (i.e. std::function, function ptr, IScheduleFunction*).
//!
//! Suitable when capturing the invocable object in a lambda to be passed to a scheduler.
//!
//! Use this function when an @ref IScheduleFunction will be invoked at a later time by a scheduler. This function will
//! call @ref omni::core::IObject::acquire() on the @ref IScheduleFunction.
//!
//! If an invocable object that is not a @ref IScheduleFunction is passed to this function, @c std::move() will be
//! called.
template <typename Fn>
auto captureScheduleFunction(Fn&& fn) -> decltype(detail::ScheduleFunctionHelper<Fn>::capture(std::forward<Fn>(fn)))
{
    return detail::ScheduleFunctionHelper<Fn>::capture(std::forward<Fn>(fn));
}

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
2,807
C
26.529412
120
0.714286
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file INodeGraphDef.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeGraphDef.
#pragma once

#include <omni/graph/exec/unstable/IDef.h>
#include <omni/graph/exec/unstable/INodeFactory.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

// Forward declarations needed by the interface declaration below.
class INode;
class INodeGraphDef_abi;
class INodeGraphDef;
class ITopology;

//! Graph definition. Defines work to be done as a graph.
//!
//! Nodes within a graph represent work to be done. The actual work to be performed is described in a
//! @rstref{definition <ef_definition>}. Each node wanting to perform work points to a definition.
//!
//! This interface is a subclass of the work definition interface (i.e. @ref omni::graph::exec::unstable::IDef) and
//! extends @ref omni::graph::exec::unstable::IDef with methods to describe work as a graph.
//!
//! Visually:
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//!    :align: center
//!
//! @endrst
//!
//! Above, you can see the two types of definitions: opaque definitions (described by @ref
//! omni::graph::exec::unstable::INodeDef) and graph definitions (described by this interface).
//!
//! Nodes within a graph definition can point to other graph definitions. This composability is where EF gets its
//! *graph of graphs* moniker.
//!
//! Multiple nodes in the execution graph can point to the same instance of a graph definition. This saves both space
//! and graph construction time. However, since each graph definition can be shared, its pointer value cannot be used
//! to uniquely identify its location in the graph. To solve this, when traversing/executing a graph definition, an
//! @ref omni::graph::exec::unstable::ExecutionPath is passed (usually via @ref
//! omni::graph::exec::unstable::ExecutionTask::getUpstreamPath()).
//!
//! When defining new graph types, it is common to create a new implementation of this interface. See @ref
//! omni::graph::exec::unstable::NodeGraphDef for an implementation of this interface that can be easily inherited
//! from. See @rstref{Definition Creation <ef_definition_creation>} for a guide on creating your own graph definition.
//!
//! How a graph definition's nodes are traversed during execution is defined by the definition's @ref
//! omni::graph::exec::unstable::IExecutor. See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth
//! guide on how executors and graph definitions work together during execution.
//!
//! See also @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::IExecutor, and @ref
//! omni::graph::exec::unstable::ExecutionTask.
class INodeGraphDef_abi : public omni::core::Inherits<IDef, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeGraphDef")>
{
protected:
    //! Return this graph's topology object.
    //!
    //! Each @ref omni::graph::exec::unstable::INodeGraphDef owns a @ref omni::graph::exec::unstable::ITopology.
    //!
    //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
    //! called before being returned.
    virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;

    //! Initialize the state of the graph.
    //!
    //! It is up to the implementation of the graph type to decide whether this call needs to be propagated over all
    //! nodes within the graph or a single shared state is owned by the graph.
    //!
    //! @param rootTask State will be initialized for every instance of this graph. Root task will provide a path to
    //!                 allow discovery of the state. Must not be @c nullptr.
    virtual OMNI_ATTR("throw_result") omni::core::Result initializeState_abi(
        OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* rootTask) noexcept = 0;

    //! Pre-execution call can be used to setup the graph state prior to execution or skip entirely the execution.
    //!
    //! The given task must not be @c nullptr.
    virtual Status preExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;

    //! Post-execution call can be used to finalize the execution, e.g. transfer computation results to consumers.
    //!
    //! The given task must not be @c nullptr.
    virtual Status postExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;

    //! Acquire factory object allowing for allocating new node instances for this node graph def.
    //!
    //! Provided factory may be empty when graph def doesn't allow allocating new nodes outside of pass that constructed
    //! the definition in the first place.
    //!
    //! Accessing node factory is thread-safe but mutating graphs topology is not. This includes node creation.
    virtual INodeFactory* getNodeFactory_abi() noexcept = 0;
};

//! Smart pointer managing an instance of @ref INodeGraphDef.
using NodeGraphDefPtr = omni::core::ObjectPtr<INodeGraphDef>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// Pull in the omni.bind generated declaration for the API-facing wrapper class.
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeGraphDef.gen.h>

//! @copydoc omni::graph::exec::unstable::INodeGraphDef_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeGraphDef
    : public omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>
{
public:
    //! Access graph's root node.
    //!
    //! The returned @ref INode will *not* have @ref omni::core::IObject::acquire() called before being returned.
    inline INode* getRoot() noexcept;
};

// Included after the class declaration because getRoot() needs the full ITopology definition.
#include <omni/graph/exec/unstable/ITopology.h>

inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::INodeGraphDef::getRoot() noexcept
{
    return getTopology()->getRoot();
}

// Pull in the omni.bind generated implementation of the wrapper methods.
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeGraphDef.gen.h>
6,314
C
42.854166
120
0.721571
omniverse-code/kit/include/omni/graph/exec/unstable/NodeDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file NodeDef.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::NodeDefT, a reusable implementation of
//!        @ref omni::graph::exec::unstable::INodeDef.
#pragma once

#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/INodeDef.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

//! @copydoc omni::graph::exec::unstable::INodeDef
template <typename... Bases>
class NodeDefT : public Implements<Bases...>
{
public:
    //! Construct node definition
    //!
    //! @param definitionName Definition name is considered as a token that transformation passes can register against
    //!
    //! *TODO* Replace runtime hashing of node definition name to id with a compile time hash generation.
    //!
    //! May throw.
    static omni::core::ObjectPtr<NodeDefT> create(const char* definitionName)
    {
        OMNI_THROW_IF_ARG_NULL(definitionName);
        return omni::core::steal(new NodeDefT(definitionName));
    }

protected:
    //! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeDef
    //!
    //! Returns success without executing anything (opaque definitions carry no executable body here).
    Status execute_abi(ExecutionTask* info) noexcept override
    {
        return Status::eSuccess;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeDef
    //!
    //! Returns serial scheduling
    SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override
    {
        return SchedulingInfo::eSerial;
    }

    //! Core implementation of @ref omni::graph::exec::unstable::IDef::getName_abi for @ref NodeDef
    const ConstName* getName_abi() noexcept override
    {
        return &m_name;
    }

    //! Constructor. Protected: instances are created via @ref create().
    NodeDefT(const char* definitionName) : m_name{ definitionName }
    {
    }

private:
    ConstName m_name; //!< We associate a name with each opaque definition. This is where we store it.
};

//! Core NodeDef implementation for @ref omni::graph::exec::unstable::INodeDef
using NodeDef = NodeDefT<INodeDef>;

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
2,528
C
29.107143
120
0.705301
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionPath.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//

//! @file ExecutionPath.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ExecutionPath.
#pragma once

#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/SmallStack.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Types.h>

#include <atomic>
#include <initializer_list>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

class INode;

//! Path representing a unique location of an instantiated node during execution.
//!
//! The @ref omni::graph::exec::unstable::ExecutionPath class is an efficient utility class used to store the *execution
//! path* of an @ref omni::graph::exec::unstable::INode. There's subtlety to what an execution path is. That subtlety is
//! best explained with a diagram:
//!
//! @rst
//!
//! .. image:: /../docs/ef-execution-path-point-k.svg
//!    :align: center
//!
//! @endrst
//!
//! Above, nodes are labelled with lower-case letters (e.g. *a*, *b*, etc.). Node can point to either an
//! @ref omni::graph::exec::unstable::INodeDef (which defines opaque computation) or an @ref
//! omni::graph::exec::unstable::INodeGraphDef (which defines its computation with a subgraph). In the diagram above,
//! @ref omni::graph::exec::unstable::INodeGraphDef objects are labelled with upper-case letters (e.g. *X*, *Y*).
//!
//! Observe that @ref omni::graph::exec::unstable::INodeGraphDef *X* is used by both nodes *e* and *f*. This illustrates
//! that @ref omni::graph::exec::unstable::INodeGraphDef objects can be reused within the graph. This makes sense
//! because @ref omni::graph::exec::unstable::INodeGraphDef is defining computational logic and that logic may be needed
//! in multiple places in the graph (e.g. instancing). Likewise, though not illustrated above, @ref
//! omni::graph::exec::unstable::INodeDef objects can be reused.
//!
//! Consider node *k* above (pointed to by the yellow arrow). When *k* is executing, what is its execution path? One
//! way to describe the path is to store the nodes traversed to get to the node. For instance, */a/c/m/n/h/i/k* could be
//! a likely path. Lets call this representation of a path the *traversal path*.
//!
//! EF (via @ref omni::graph::exec::unstable::ExecutionPath) does not store *traversal paths*. Rather, it uses a much
//! more compact representation called the *execution path*. In the diagram above, the execution path for *k* is
//! */f/p/k*.
//!
//! @ref omni::graph::exec::unstable::ExecutionPath stores enough information to **uniquely identify the node**. That's
//! important, since *k* is being shared in the diagram above. The execution path */e/k* points to the same *k* node's
//! memory but logically */e/k* and */f/p/k* are different nodes. This illustrates the main purpose of this object:
//! @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::INodeDef, *and* @ref
//! omni::graph::exec::unstable::INodeGraphDef *objects can not be uniquely identified by their raw pointer value.*
//! @ref omni::graph::exec::unstable::ExecutionPath *must be used to uniquely identify a node.*
//!
//! @ref omni::graph::exec::unstable::ExecutionPath is often used as a key in a key/value store to access a node's state
//! data.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Unless otherwise noted, methods in this class are not thread-safe.
//!
//! To reduce the amount of new paths we create, we only create a new path when entering a node graph definition
//! execution. All tasks generated for computing nodes within the same node graph will point to the same path.
class ExecutionPath
{
    enum : ExecutionPathHash
    {
        kEmptyPathHash = 0 //!< Hash value reserved for the empty path; also used as the "not cached" sentinel.
    };

public:
    //! Default constructor for an empty path. Consider using sEmpty if you need one.
    ExecutionPath() noexcept
    {
    }

    //! Copy constructor
    //!
    //! std::atomic is not copyable, so the cached hash is transferred via an explicit load.
    ExecutionPath(const ExecutionPath& src) : m_path{ src.m_path }, m_cachedHash(src.m_cachedHash.load())
    {
    }

    //! Assignment operator
    ExecutionPath& operator=(const ExecutionPath& rhs)
    {
        m_path = rhs.m_path;
        m_cachedHash.store(rhs.m_cachedHash.load());
        return *this;
    }

    //! Construct a path for a node (used only at the beginning of the execution).
    explicit ExecutionPath(omni::core::ObjectParam<INode> node) noexcept : m_path{ node.get() }
    {
        OMNI_GRAPH_EXEC_ASSERT(node.get());
    }

    //! Construct a path from an upstream path and a node. Mostly used when entering a node graph definition.
    //!
    //! May throw.
    ExecutionPath(const ExecutionPath& upPath, omni::core::ObjectParam<INode> node)
        : m_path{ upPath.m_path, node.get() }
    {
        OMNI_GRAPH_EXEC_ASSERT(node.get());
    }

    //! Convenience method for constructing paths from initializer list.
    //!
    //! May throw.
    explicit ExecutionPath(std::initializer_list<omni::core::ObjectParam<INode>> path)
        // ObjectParam<INode> is layout-compatible with a raw INode* (checked below), so the
        // initializer list can be reinterpreted as a contiguous range of raw pointers.
        : m_path{ const_cast<INode**>(reinterpret_cast<INode* const*>(path.begin())),
                  const_cast<INode**>(reinterpret_cast<const INode* const*>(path.end())) }
    {
        static_assert(sizeof(INode*) == sizeof(omni::core::ObjectParam<INode>), "unexpected ObjectParam size");
    }

private:
    // Raw-range constructor used internally (e.g. by copyWithoutTop()).
    ExecutionPath(INode** begin, INode** end) : m_path{ begin, end }
    {
    }

public:
    //! Append a node to the path.
    //!
    //! The given node is not internally acquired and it is up to the calling code to ensure the node remains alive
    //! while in use by this object.
    //!
    //! May throw.
    void push(INode* node)
    {
        OMNI_GRAPH_EXEC_ASSERT(node);
        m_path.push(node);
        m_cachedHash = kEmptyPathHash; // path changed -> drop the cached hash
    }

    //! Return a new path with a last node removed
    //!
    //! May throw.
    ExecutionPath copyWithoutTop() const
    {
        int delta = (m_path.empty() ? 0 : -1); // empty path stays empty
        return ExecutionPath{ const_cast<INode**>(m_path.begin()), const_cast<INode**>(m_path.end() + delta) };
    }

    //! Compute unique index using pairing function and unique indexes of nodes (within owning topology)
    //!
    //! This is one strategy to generate a hash for a path. Other hashing strategies can be built outside of the class
    //! and used for example when retrieving state from execution context.
    //!
    //! The result is cached and method is thread-safe.
    inline ExecutionPathHash getHash() const noexcept;

    //! Compute unique index using pairing function and unique indexes of nodes (within owning topology)
    //!
    //! @param node Include given node as the last node in the path. This allows us to avoid creating a new path
    //!             when fetching a state for an execution task.
    //!
    //! This method is thread-safe.
    inline ExecutionPathHash getHashWith(omni::core::ObjectParam<INode> node) const noexcept;

    //! Check if path is empty.
    bool isEmpty() const noexcept
    {
        return m_path.empty();
    }

    //! Access to underlying path container
    Span<INode* const> getData() const noexcept
    {
        return Span<INode* const>{ m_path.begin(), m_path.size() };
    }

    //! Return the node at the top of the stack.
    //!
    //! Undefined behavior if the stack is empty.
    INode* getTop() const noexcept
    {
        return m_path.top();
    }

    //! An instance of an empty path.
    //!
    //! @warning A different empty path may be returned over multiple calls of this method. Do not rely on using a
    //!          pointer to the returned object to check if another path is the empty path. Rather, use the @ref
    //!          isEmpty() method to check if a path is empty.
    static const ExecutionPath& getEmpty() noexcept
    {
        static ExecutionPath sPath; // unique per DLL
        return sPath;
    }

    //! Pairing function used by the hashing algorithm.
    //!
    //! This is the Cantor pairing function ((a + b) * (a + b + 1) / 2 + b), computed via a double
    //! multiply by 0.5 and truncated back to ExecutionPathHash.
    static ExecutionPathHash pairingFunction(ExecutionPathHash a, ExecutionPathHash b)
    {
        return static_cast<ExecutionPathHash>(((a + b) * (a + b + 1) * 0.5) + b);
    }

private:
    // Container for nodes forming the path
    //
    // We use a small stack to reduce the need of heap allocations.
    using PathStorage = detail::SmallStack<INode*>;

    PathStorage m_path; //!< Path is defined by a series of nodes that we visit during task generation

    //! Cache used to accelerate getHash method. We decided to go with mutable version, since we want to preserve
    //! the const correctness in places that receive the path, i.e. we don't want anyone to attempt mutating the
    //! path from these places. The alternative would be to not provide caching which means we give up performance
    //! and that is not acceptable.
    mutable std::atomic<ExecutionPathHash> m_cachedHash{ kEmptyPathHash };
};

// Layout/size are part of the ABI contract; these asserts lock it in.
static_assert(std::is_standard_layout<ExecutionPath>::value, "ExecutionPath is expected to be abi safe");
static_assert(72 == sizeof(ExecutionPath), "ExecutionPath is an unexpected size");

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// includes needed for method implementations
#include <omni/graph/exec/unstable/INode.h>

namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{

inline ExecutionPathHash ExecutionPath::getHash() const noexcept
{
    if (m_path.empty())
    {
        return kEmptyPathHash;
    }

    if (m_cachedHash != kEmptyPathHash)
        return m_cachedHash;

    // Fold each node's topology-unique index into the hash, left to right.
    ExecutionPathHash result = kEmptyPathHash;
    for (INode* node : m_path)
    {
        result = pairingFunction(result, node->getIndexInTopology());
    }

    // no need for compare and exchange...all threads that potentially computed this cache will generate the same result
    // and since write is atomic, all reads will see correct value
    m_cachedHash = result;

    return result;
}

inline ExecutionPathHash ExecutionPath::getHashWith(omni::core::ObjectParam<INode> node) const noexcept
{
    OMNI_GRAPH_EXEC_ASSERT(node.get());

    // Extend the (cached) path hash by one node without materializing a new path.
    ExecutionPathHash result = getHash();
    return pairingFunction(result, node->getIndexInTopology());
}

} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
10,670
C
36.442105
120
0.68463
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderNode.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Describes a node @ref omni::graph::exec::unstable::IGraphBuilder can manipulate. //! //! Only @ref omni::graph::exec::unstable::IGraphBuilder should use @ref omni::graph::exec::unstable::IGraphBuilderNode. //! One way to think about this interface is that it is a private interface used by //! @ref omni::graph::exec::unstable::IGraphBuilder to connect instances of @ref omni::graph::exec::unstable::INode. template <> class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi> : public omni::graph::exec::unstable::IGraphBuilderNode_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilderNode") //! Adds the given node as a parent (i.e. upstream) of this node. //! //! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the //! node persists while in use by this interface. //! //! @p parent must not be @c nullptr. //! //! It is undefined behavior to add a parent multiple times to a node. //! //! This method is not thread safe. //! //! May throw. void _addParent(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent); //! Removes the given node as a parent. //! //! 
If given node is not a parent, this method returns success. //! //! This method is not thread safe. //! //! May throw. void _removeParent(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent); //! Adds the given node as a child (i.e. downstream) of this node. //! //! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the //! node persists while in use by this interface. //! //! @p child must not be @c nullptr. //! //! It is undefined behavior to add a child multiple times to a node. //! //! This method is not thread safe. //! //! May throw. void _addChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child); //! Removes the given node as a child. //! //! If given node is not a child, this method returns success. //! //! This method is not thread safe. //! //! May throw. void _removeChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child); //! Remove from the container parent nodes that no longer exist in current topology, i.e are invalid. //! //! @ref omni::core::IObject::release() is not called on the invalid nodes. //! //! This method is not thread safe. void _removeInvalidParents() noexcept; //! Remove from the container child nodes that no longer exist in current topology, i.e are invalid. //! //! @ref omni::core::IObject::release() is not called on the invalid nodes. //! //! This method is not thread safe. void _removeInvalidChildren() noexcept; //! Invalidate all children and parents connections by invalidating the topology this node is sync with. //! //! This method is thread safe. void _invalidateConnections() noexcept; //! Sets the number of parents who are a part of cycle. //! //! This method is not thread safe. void setCycleParentCount(uint32_t count) noexcept; //! Sets the definition for this node. //! //! If a definition is already set, it will be replaced by the given definition. //! //! 
The given definition may be @c nullptr. //! //! @ref omni::core::IObject::acquire() is called on the given pointer. //! //! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef(). //! //! This method is not thread safe. void _setNodeDef(omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept; //! Sets the definition for this node. //! //! If a definition is already set, it will be replaced by the given definition. //! //! The given definition may be @c nullptr. //! //! @ref omni::core::IObject::acquire() is called on the given pointer. //! //! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef(). //! //! This method is not thread safe. void _setNodeGraphDef(omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept; //! Unsets this node's definition. //! //! If the definition is already @c nullptr, this method does nothing. //! //! This method is not thread safe. void _clearDef() noexcept; //! Access the topology owning this node. //! //! The returned @ref omni::graph::exec::unstable::ITopology will *not* have //! @ref omni::core::IObject::acquire() called before being returned. //! //! This method is not thread safe. omni::graph::exec::unstable::ITopology* getTopology() noexcept; //! Make topology valid for current topology version. Drop all the connections if topology changed. //! //! This method is not thread safe. void validateOrResetTopology() noexcept; //! Access parent at the given index. //! //! If the given index is greater than the parent count, an error is returned. //! //! This method is not thread safe. //! //! May throw due to internal casting. //! //! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount(). //! //! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getParents() //! for a modern C++ wrapper to this method. //! //! 
The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have //! @ref omni::core::IObject::acquire() called before being returned. omni::graph::exec::unstable::IGraphBuilderNode* getParentAt(uint64_t index); //! Returns the number of parents. //! //! This method is not thread safe. uint64_t getParentCount() noexcept; //! Access child at the given index. //! //! If the given index is greater than the parent count, an error is returned. //! //! This method is not thread safe. //! //! May throw due to internal casting. //! //! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount(). //! //! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildren() //! for a modern C++ wrapper to this method. //! //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have //! @ref omni::core::IObject::acquire() called before being returned. omni::graph::exec::unstable::IGraphBuilderNode* getChildAt(uint64_t index); //! Returns the number of children. //! //! This method is not thread safe. uint64_t getChildCount() noexcept; //! Returns @c true if the given node is an immediate child of this node. //! //! @p node may be @c nullptr. //! //! This method is not thread safe. bool hasChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> node) noexcept; //! Returns @c true if this node is the root of the topology. //! //! This method is not thread safe. bool isRoot() noexcept; //! Returns the root node of the topology of which this node is a part. //! //! This method is not thread safe. //! //! May throw due to internal casting. //! //! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have //! @ref omni::core::IObject::acquire() called before being returned. 
omni::core::Result getRoot(omni::graph::exec::unstable::IGraphBuilderNode** out); }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_addParent( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent) { OMNI_THROW_IF_ARG_NULL(parent); OMNI_THROW_IF_FAILED(_addParent_abi(parent.get())); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeParent( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent) { OMNI_THROW_IF_ARG_NULL(parent); OMNI_THROW_IF_FAILED(_removeParent_abi(parent.get())); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_addChild( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child) { OMNI_THROW_IF_ARG_NULL(child); OMNI_THROW_IF_FAILED(_addChild_abi(child.get())); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeChild( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child) { OMNI_THROW_IF_ARG_NULL(child); OMNI_THROW_IF_FAILED(_removeChild_abi(child.get())); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeInvalidParents() noexcept { _removeInvalidParents_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeInvalidChildren() noexcept { _removeInvalidChildren_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_invalidateConnections() noexcept { _invalidateConnections_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::setCycleParentCount(uint32_t count) noexcept { setCycleParentCount_abi(count); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_setNodeDef( omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> 
nodeDef) noexcept { _setNodeDef_abi(nodeDef.get()); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_setNodeGraphDef( omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept { _setNodeGraphDef_abi(nodeGraphDef.get()); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_clearDef() noexcept { _clearDef_abi(); } inline omni::graph::exec::unstable::ITopology* omni::core::Generated< omni::graph::exec::unstable::IGraphBuilderNode_abi>::getTopology() noexcept { return getTopology_abi(); } inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::validateOrResetTopology() noexcept { validateOrResetTopology_abi(); } inline omni::graph::exec::unstable::IGraphBuilderNode* omni::core::Generated< omni::graph::exec::unstable::IGraphBuilderNode_abi>::getParentAt(uint64_t index) { omni::graph::exec::unstable::IGraphBuilderNode* out; OMNI_THROW_IF_FAILED(getParentAt_abi(index, &out)); return out; } inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getParentCount() noexcept { return getParentCount_abi(); } inline omni::graph::exec::unstable::IGraphBuilderNode* omni::core::Generated< omni::graph::exec::unstable::IGraphBuilderNode_abi>::getChildAt(uint64_t index) { omni::graph::exec::unstable::IGraphBuilderNode* out; OMNI_THROW_IF_FAILED(getChildAt_abi(index, &out)); return out; } inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getChildCount() noexcept { return getChildCount_abi(); } inline bool omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::hasChild( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> node) noexcept { return hasChild_abi(node.get()); } inline bool omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::isRoot() noexcept { return isRoot_abi(); } inline omni::core::Result 
omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getRoot( omni::graph::exec::unstable::IGraphBuilderNode** out) { OMNI_THROW_IF_ARG_NULL(out); auto return_ = getRoot_abi(out); return return_; } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
12,837
C
35.575498
131
0.683493
omniverse-code/kit/include/omni/graph/exec/unstable/IGlobalPass.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Base class for global passes. //! //! The purpose of a global pass is to perform global transformations on the graph. //! //! This transformation category should be considered as a last resort given its global impact on the topology which //! prevents threading at the pass pipeline level. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. template <> class omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi> : public omni::graph::exec::unstable::IGlobalPass_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGlobalPass") //! Call from pass pipeline to apply global graph transformations. void run(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder); }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline void omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi>::run( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder) { OMNI_THROW_IF_FAILED(run_abi(builder.get())); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
1,920
C
31.016666
116
0.740625
omniverse-code/kit/include/omni/graph/exec/unstable/IPopulatePass.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Base class for populate passes. //! //! Register a populate pass with @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(). When registering a pass, a "name to //! match" is also specified. This name is the name of a node or definition on which the registered pass should //! populate. //! //! Populate passes are typically the first pass type to run in the pass pipeline. When a node is encountered during //! construction, only a single populate pass will get a chance to populate the newly discovered node. If no pass is //! registered against the node's name, the node definition's name is used to find a population pass to run. //! //! Populate pass is allowed to attach a new definition to a node it runs on. //! //! Minimal rebuild of the execution graph topology should be considered by the pass each time it runs. Pass pipeline //! leaves the responsibility of deciding if pass needs to run to the implementation. At minimum it can rely on //! verifying that topology of @ref omni::graph::exec::unstable::NodeGraphDef it generated before is still valid or //! @ref omni::graph::exec::unstable::NodeDef has not changed. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. 
template <> class omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi> : public omni::graph::exec::unstable::IPopulatePass_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPopulatePass") //! Call from pass pipeline to apply graph transformations on a given node (definition or topology). void run(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder, omni::core::ObjectParam<omni::graph::exec::unstable::INode> node); }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline void omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi>::run( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder, omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) { OMNI_THROW_IF_FAILED(run_abi(builder.get(), node.get())); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
2,956
C
40.069444
117
0.739513
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderContext.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // //! @file IGraphBuilderContext.h //! //! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderContext. #pragma once #include <omni/graph/exec/unstable/IBase.h> #include <omni/graph/exec/unstable/Stamp.h> namespace omni { namespace graph { namespace exec { namespace unstable { // forward declarations needed by interface declaration class IGraph; class IGraphBuilder; class IGraphBuilderContext; class IGraphBuilderContext_abi; class INodeGraphDef; //! Common state for graph builders. //! //! *TODO* We will use this class to introduce reporting from graph transformation pipeline back to the application. class IGraphBuilderContext_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IGraphBuilderContext")> { protected: //! Current construction version. //! //! Incremented after each pass pipeline run to include definitions that were created before the run. virtual Stamp getConstructionStamp_abi() noexcept = 0; //! Return owner of all graphs this builder touches //! //! The returned @ref omni::graph::exec::unstable::IGraph will *not* have //! @ref omni::core::IObject::acquire() called before being returned. virtual OMNI_ATTR("no_acquire") IGraph* getGraph_abi() noexcept = 0; //! To be overriden by application specific class to enable reporting from transformation pipeline. virtual void report_abi(OMNI_ATTR("in, c_str, not_null") const char* diagnose) noexcept = 0; //! 
Run transformation pipeline virtual void runTransformations_abi(OMNI_ATTR("not_null") INodeGraphDef* nodeGraphDef) noexcept = 0; }; //! Smart pointer managing an instance of @ref IGraphBuilderContext. using GraphBuilderContextPtr = omni::core::ObjectPtr<IGraphBuilderContext>; } // namespace unstable } // namespace exec } // namespace graph } // namespace omni // generated API declaration #define OMNI_BIND_INCLUDE_INTERFACE_DECL #include <omni/graph/exec/unstable/IGraphBuilderContext.gen.h> //! @copydoc omni::graph::exec::unstable::IGraphBuilderContext_abi //! //! @ingroup groupOmniGraphExecInterfaces class omni::graph::exec::unstable::IGraphBuilderContext : public omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi> { }; // additional headers needed for API implementation #include <omni/graph/exec/unstable/IGraph.h> #include <omni/graph/exec/unstable/IGraphBuilder.h> #include <omni/graph/exec/unstable/INodeGraphDef.h> // generated API implementation #define OMNI_BIND_INCLUDE_INTERFACE_IMPL #include <omni/graph/exec/unstable/IGraphBuilderContext.gen.h>
3,089
C
34.113636
116
0.752994
omniverse-code/kit/include/omni/graph/exec/unstable/IPartitionPass.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Base class for graph partitioning passes. //! //! Partition passes are typically run just after population passes and only on newly modified //! @ref omni::graph::exec::unstable::INodeGraphDef objects. The job of a partition pass is to recognize patterns in the //! newly populated graph and replace them with a new definition or augment existing one. //! //! Partition passes can only mutate the graph from the @ref omni::graph::exec::unstable::IPartitionPass::commit method //! using provided @ref omni::graph::exec::unstable::IGraphBuilder. This will guarantee that the rest of the pipeline //! is aware of changes made to the graph and avoid potential threading issues. //! //! See @ref groupOmniGraphExecPasses for more pass related functionality. template <> class omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi> : public omni::graph::exec::unstable::IPartitionPass_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPartitionPass") //! Call from pass pipeline to initialize the pass for @p topology. //! //! This interface method implementation can't mutate given @p topology. Multiple passes can run concurrently on it. //! //! 
Returns True if initialization was successful and pipeline should issue calls to run and commit. //! Otherwise this pass will be destroyed and won't participate in partitioning @p topology. bool initialize(omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology); //! Call from pass pipeline to discover nodes requiring partitioning. //! //! No topology changes are permitted at this point. Multiple passes will get a chance to receive this //! notification. //! //! Call to this method comes from graph traversal that may run multiple passes concurrently. void run(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node); //! Call to verify generated partitions and commit new definition/s replacing discovered partitions. //! //! Commit of partitions is done serially and in the priority order of the pass. Passes with higher order will get //! the chance first. This is the only partition pass method that can mutate the graph. void commit(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder); }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline bool omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::initialize( omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology) { OMNI_THROW_IF_ARG_NULL(topology); auto return_ = initialize_abi(topology.get()); return return_; } inline void omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::run( omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) { OMNI_THROW_IF_ARG_NULL(node); run_abi(node.get()); } inline void omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::commit( omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder) { OMNI_THROW_IF_ARG_NULL(builder); commit_abi(builder.get()); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
4,009
C
39.918367
120
0.733599
omniverse-code/kit/include/omni/graph/ui/IOmniGraphUi.h
// Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/core/IObject.h> namespace omni { namespace graph { namespace ui { OMNI_DECLARE_INTERFACE(IOmniGraphUi); class IOmniGraphUi_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.ui.IOmniGraphUi")> { // No functionality but exists so that we can provide C++ nodes }; } // namespace ui } // namespace graph } // namespace omni #include "IOmniGraphUi.gen.h" // generated file
881
C
25.727272
98
0.754824
omniverse-code/kit/include/omni/graph/ui/PyIOmniGraphUI.gen.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindIOmniGraphUi(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi>, omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi>>, omni::core::IObject> clsParent(m, "_IOmniGraphUi"); py::class_<omni::graph::ui::IOmniGraphUi, omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi>, omni::python::detail::PyObjectPtr<omni::graph::ui::IOmniGraphUi>, omni::core::IObject> cls(m, "IOmniGraphUi"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::ui::IOmniGraphUi>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::ui::IOmniGraphUi>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::ui::IOmniGraphUi instantiation"); } return tmp; })); return omni::python::PyBind<omni::graph::ui::IOmniGraphUi>::bind(cls); }
2,152
C
38.87037
128
0.626859
omniverse-code/kit/include/omni/graph/ui/IOmniGraphUi.gen.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL template <> class omni::core::Generated<omni::graph::ui::IOmniGraphUi_abi> : public omni::graph::ui::IOmniGraphUi_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::ui::IOmniGraphUi") }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
1,124
C
25.785714
105
0.741103
omniverse-code/kit/include/omni/graph/image/unstable/any.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <typeinfo> #include <type_traits> #include <utility> namespace omni { namespace graph { namespace image { namespace unstable { namespace cpp17 { // The class describes a type-safe container of a single value of any copy-constructible type. // This class is a not quite standards conformant implementation of std::any. // It does not support everything std::any supports, and the API is limited to // a subset that is actually used currently in the project. // For example, it is missing constructors using std::in_place_type_t<ValueType> // disambiguation tags and std::make_any. Additionally, this implementation *does not throw exceptions*. // Instead, it asserts and logs errors when casts fail. // The long term intention is we will move to a C++17 compiler, and import the std // version of this class, removing this code from our codebase. Therefore it is very important that this class // doesn't do anything that the std can't, though the opposite is permissible. 
// Type-erased value container (subset of std::any; see the file comment above).
// Small, nothrow-movable types are stored in-place (SBO); everything else is
// heap-allocated. Dispatch goes through a hand-rolled static vtable per type.
class any final
{
public:
    // Empty container: a null vtable means "no value".
    any() : vtable(nullptr)
    {
    }

    // Copy: delegates to the stored type's copy entry (deep copy).
    any(const any& rhs) : vtable(rhs.vtable)
    {
        if (rhs.has_value())
        {
            rhs.vtable->copy(rhs.storage, this->storage);
        }
    }

    // Move: steals the payload and leaves rhs empty.
    any(any&& rhs) noexcept : vtable(rhs.vtable)
    {
        if (rhs.has_value())
        {
            rhs.vtable->move(rhs.storage, this->storage);
            rhs.vtable = nullptr;
        }
    }

    ~any()
    {
        this->reset();
    }

    // Converting constructor from any copy-constructible type except `any`
    // itself (the enable_if keeps this from hijacking the copy/move ctors).
    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
    any(ValueType&& value)
    {
        static_assert(std::is_copy_constructible<typename std::decay<ValueType>::type>::value,
            "T needs to be copy constructible");
        this->construct(std::forward<ValueType>(value));
    }

    // Copy-and-swap assignment: strong guarantee, self-assignment safe.
    any& operator=(const any& rhs)
    {
        any(rhs).swap(*this);
        return *this;
    }

    any& operator=(any&& rhs) noexcept
    {
        any(std::move(rhs)).swap(*this);
        return *this;
    }

    // Converting assignment (same decay guard as the converting constructor).
    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
    any& operator=(ValueType&& value)
    {
        static_assert(std::is_copy_constructible<typename std::decay<ValueType>::type>::value,
            "T needs to be copy constructible");
        any(std::forward<ValueType>(value)).swap(*this);
        return *this;
    }

    // Destroys the contained value (if any) and returns to the empty state.
    void reset() noexcept
    {
        if (has_value())
        {
            this->vtable->destroy(storage);
            this->vtable = nullptr;
        }
    }

    bool has_value() const noexcept
    {
        return this->vtable != nullptr;
    }

    // typeid of the contained value, or typeid(void) when empty.
    const std::type_info& type() const noexcept
    {
        return !has_value() ? typeid(void) : this->vtable->type();
    }

    // Exchanges contents with rhs. The mixed-vtable branch routes both
    // payloads through a temporary because the two storages may use
    // different representations (stack vs heap).
    void swap(any& rhs) noexcept
    {
        if (this->vtable != rhs.vtable)
        {
            any tmp(std::move(rhs));
            rhs.vtable = this->vtable;
            if (this->vtable != nullptr)
            {
                this->vtable->move(this->storage, rhs.storage);
            }
            this->vtable = tmp.vtable;
            if (tmp.vtable != nullptr)
            {
                tmp.vtable->move(tmp.storage, this->storage);
                tmp.vtable = nullptr;
            }
        }
        else
        {
            if (this->vtable != nullptr)
            {
                this->vtable->swap(this->storage, rhs.storage);
            }
        }
    }

private:
    // Either a pointer to a heap allocation or 2*sizeof(void*) bytes of
    // in-place storage, pointer-aligned. Which member is active is decided
    // per-type by requires_allocation<T> below.
    union storage_union
    {
        using stack_storage_t = typename std::aligned_storage<2 * sizeof(void*), std::alignment_of<void*>::value>::type;

        void* dynamic;
        stack_storage_t stack;
    };

    // Manual vtable: one function-pointer set per contained type, shared by
    // every `any` currently holding that type.
    struct vtable_type
    {
        const std::type_info& (*type)() noexcept;
        void (*destroy)(storage_union&) noexcept;
        void (*copy)(const storage_union& src, storage_union& dest);
        void (*move)(storage_union& src, storage_union& dest) noexcept;
        void (*swap)(storage_union& lhs, storage_union& rhs) noexcept;
    };

    // vtable entries for heap-allocated payloads (storage.dynamic owns a T*).
    template<typename T>
    struct vtable_dynamic
    {
        static const std::type_info& type() noexcept
        {
            return typeid(T);
        }
        static void destroy(storage_union& storage) noexcept
        {
            delete reinterpret_cast<T*>(storage.dynamic);
        }
        static void copy(const storage_union& src, storage_union& dest)
        {
            dest.dynamic = new T(*reinterpret_cast<const T*>(src.dynamic));
        }
        static void move(storage_union& src, storage_union& dest) noexcept
        {
            // Pointer steal; no T move constructor involved.
            dest.dynamic = src.dynamic;
            src.dynamic = nullptr;
        }
        static void swap(storage_union& lhs, storage_union& rhs) noexcept
        {
            std::swap(lhs.dynamic, rhs.dynamic);
        }
    };

    // vtable entries for in-place payloads (T constructed inside storage.stack).
    template<typename T>
    struct vtable_stack
    {
        static const std::type_info& type() noexcept
        {
            return typeid(T);
        }
        static void destroy(storage_union& storage) noexcept
        {
            reinterpret_cast<T*>(&storage.stack)->~T();
        }
        static void copy(const storage_union& src, storage_union& dest)
        {
            new (&dest.stack) T(reinterpret_cast<const T&>(src.stack));
        }
        static void move(storage_union& src, storage_union& dest) noexcept
        {
            // Move-construct into dest, then destroy the moved-from source so
            // the source storage can be reused immediately.
            new (&dest.stack) T(std::move(reinterpret_cast<T&>(src.stack)));
            destroy(src);
        }
        static void swap(storage_union& lhs, storage_union& rhs) noexcept
        {
            storage_union tmp_storage;
            move(rhs, tmp_storage);
            move(lhs, rhs);
            move(tmp_storage, lhs);
        }
    };

    // True when T cannot use the in-place buffer: too big, over-aligned, or
    // its move constructor may throw (swap/move above are noexcept).
    template<typename T>
    struct requires_allocation : std::integral_constant<bool, !(std::is_nothrow_move_constructible<T>::value && sizeof(T) <= sizeof(storage_union::stack) && std::alignment_of<T>::value <= std::alignment_of<storage_union::stack_storage_t>::value)>
    {};

    // One static vtable instance per contained type; its address doubles as
    // the type identity used by swap's same-type fast path.
    template<typename T>
    static vtable_type* vtable_for_type()
    {
        using VTableType = typename std::conditional<requires_allocation<T>::value, vtable_dynamic<T>, vtable_stack<T>>::type;
        static vtable_type table = {
            VTableType::type, VTableType::destroy, VTableType::copy, VTableType::move, VTableType::swap,
        };
        return &table;
    }

protected:
    template<typename T>
    friend const T* any_cast(const any* operand) noexcept;
    template<typename T>
    friend T* any_cast(any* operand) noexcept;

    // True when the contained type's typeid matches t.
    bool is_typed(const std::type_info& t) const
    {
        return is_same(this->type(), t);
    }

    static bool is_same(const std::type_info& a, const std::type_info& b)
    {
        return a == b;
    }

    // Unchecked access to the payload; callers must have verified the type.
    template<typename T>
    const T* cast() const noexcept
    {
        return requires_allocation<typename std::decay<T>::type>::value ? reinterpret_cast<const T*>(storage.dynamic) : reinterpret_cast<const T*>(&storage.stack);
    }

    template<typename T>
    T* cast() noexcept
    {
        return requires_allocation<typename std::decay<T>::type>::value ? reinterpret_cast<T*>(storage.dynamic) : reinterpret_cast<T*>(&storage.stack);
    }

private:
    storage_union storage;
    vtable_type* vtable;

    // Heap-path construction (SFINAE-selected when requires_allocation<T>).
    template<typename ValueType, typename T>
    typename std::enable_if<requires_allocation<T>::value>::type do_construct(ValueType&& value)
    {
        storage.dynamic = new T(std::forward<ValueType>(value));
    }

    // In-place construction for small nothrow-movable types.
    template<typename ValueType, typename T>
    typename std::enable_if<!requires_allocation<T>::value>::type do_construct(ValueType&& value)
    {
        new (&storage.stack) T(std::forward<ValueType>(value));
    }

    template<typename ValueType>
    void construct(ValueType&& value)
    {
        using T = typename std::decay<ValueType>::type;
        this->vtable = vtable_for_type<T>();
        do_construct<ValueType, T>(std::forward<ValueType>(value));
    }
};

namespace detail
{
// Dispatch helpers for the rvalue any_cast overload: move out of the payload
// only when ValueType is movable and not an lvalue reference.
template<typename ValueType>
inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::true_type)
{
    return std::move(*p);
}

template<typename ValueType>
inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::false_type)
{
    return *p;
}
}

// Value/reference cast from a const any.
// NOTE(review): unlike std::any_cast, a failed cast does not throw — it logs
// and then dereferences a null pointer (`*p`), which is undefined behavior.
// Callers must guarantee the contained type matches (or use the pointer
// overloads / ComputeParams::tryGet, which return nullptr).
template<typename ValueType>
inline ValueType any_cast(const any& operand)
{
    using T = typename std::add_const<typename std::remove_reference<ValueType>::type>::type;
    auto p = any_cast<T>(&operand);
    if (p == nullptr)
    {
        CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
    }
    return *p;
}

// Value/reference cast from a mutable any. Same UB caveat as above on failure.
template<typename ValueType>
inline ValueType any_cast(any& operand)
{
    using T = typename std::remove_reference<ValueType>::type;
    auto p = any_cast<T>(&operand);
    if (p == nullptr)
    {
        CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
    }
    return *p;
}

// Cast from an rvalue any; moves the payload out when ValueType permits.
// Same UB caveat as above on failure.
template<typename ValueType>
inline ValueType any_cast(any&& operand)
{
    using can_move = std::integral_constant<bool, std::is_move_constructible<ValueType>::value && !std::is_lvalue_reference<ValueType>::value>;
    using T = typename std::remove_reference<ValueType>::type;
    auto p = any_cast<T>(&operand);
    if (p == nullptr)
    {
        CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
    }
    return detail::any_cast_move_if_true<ValueType>(p, can_move());
}

// Pointer cast: returns nullptr on type mismatch or null operand (safe form).
template<typename ValueType>
inline const ValueType* any_cast(const any* operand) noexcept
{
    using T = typename std::decay<ValueType>::type;
    if (operand && operand->is_typed(typeid(T)))
        return operand->cast<ValueType>();
    return nullptr;
}

template<typename ValueType>
inline ValueType* any_cast(any* operand) noexcept
{
    using T = typename std::decay<ValueType>::type;
    if (operand && operand->is_typed(typeid(T)))
        return operand->cast<ValueType>();
    return nullptr;
}

} // namespace cpp17
} // namespace unstable
} // namespace image
} // namespace graph
} // namespace omni
11,046
C
26.826196
139
0.597954
omniverse-code/kit/include/omni/graph/image/unstable/ComputeParamsBuilder.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // // This ABI is unstable and subject to change /* _ _ _____ ______ _______ __ ______ _ _ _____ ______ ___ _ _____ _____ _____ _ __ | | | |/ ____| ____| /\|__ __| \ \ / / __ \| | | | __ \ / __ \ \ / / \ | | | __ \|_ _|/ ____| |/ / | | | | (___ | |__ / \ | | \ \_/ / | | | | | | |__) | | | | \ \ /\ / /| \| | | |__) | | | | (___ | ' / | | | |\___ \| __| / /\ \ | | \ /| | | | | | | _ / | | | |\ \/ \/ / | . ` | | _ / | | \___ \| < | |__| |____) | |____ / ____ \| | | | | |__| | |__| | | \ \ | |__| | \ /\ / | |\ | | | \ \ _| |_ ____) | . \ \____/|_____/|______| /_/ \_\_| |_| \____/ \____/|_| \_\ \____/ \/ \/ |_| \_| |_| \_\_____|_____/|_|\_| */ #pragma once #include <carb/cudainterop/CudaInterop.h> #include <carb/renderer/Renderer.h> #include <omni/fabric/IToken.h> #include <omni/graph/core/GpuInteropEntryUserData.h> #include <omni/graph/core/ogn/Database.h> #include <omni/kit/renderer/IGpuFoundation.h> #include <rtx/rendergraph/RenderGraphBuilder.h> #include <rtx/rendergraph/RenderGraphTypes.h> #include <rtx/resourcemanager/ResourceManager.h> #include <rtx/resourcemanager/ResourceManagerTypes.h> #include <rtx/hydra/HydraRenderResults.h> #include <vector> #include <tuple> #include <unordered_map> #include "any.h" namespace omni { namespace graph { namespace image { namespace unstable { /** * @brief Structure for holding arbitrary parameters. * * The ComputeParams class is used to hold and access arbitrary parameters of various types. * It allows adding parameters with a specified key and retrieving parameters by their key and type. 
* * Example usage: * ComputeParams<std::string> params; * params.add("param1", 42); * params.add("param2", "hello"); * params.add("param3", 3.14); * * int param1Value = params.get<int>("param1"); * std::string param2Value = params.get<std::string>("param2"); * double param3Value = params.get<double>("param3"); */ template <typename TKey> class ComputeParams { public: /** * @brief Constructor. * * @param[in] initialCapacity The initial capacity of the container where the parameters are stored. */ explicit ComputeParams(std::size_t initialCapacity = 32) { m_data.reserve(initialCapacity); } /** * @brief Adds a new entry in the parameter map. * If an element with the given key is already in the container, it is replaced. * * @param[in] key The unique identifier of a parameter value. * @param[in] value The value of the parameter. */ template <typename T> void add(TKey const& key, T&& value) { m_data[key] = std::forward<T>(value); } /** * @brief Gets a value from the parameter map. * * The return type must match the type of the value stored for that key. If the type of the stored value does not match the requested type, * the function logs an error and terminates the program. * * @param[in] key The unique identifier of the parameter. * @return Returns the value of the specified type. * @exception std::out_of_range if there is no data for the given key */ template <typename T> T const& get(TKey const& key) const { return cpp17::any_cast<T const&>(m_data.at(key)); } /** * @brief Gets a value from the parameter map. * * If there is no value for the given key, or if the value type is different from the requested type, returns nullptr. * * @param[in] key The unique identifier of the parameter. * @return Returns the value of the specified type. 
*/ template<typename T> T const* tryGet(TKey const& key) const noexcept { if (m_data.find(key) != m_data.end()) { auto const& a = m_data.at(key); return cpp17::any_cast<T const>(&a); } return nullptr; } /** * @brief Checks if a key is present in the container. * * @param[in] key The unique identifier of the parameter. * @return Returns true if the key is found, otherwise returns false. */ bool hasKey(TKey const& key) const noexcept { return m_data.find(key) != m_data.end(); } private: std::unordered_map<TKey, cpp17::any> m_data; }; /** * @brief A builder class for constructing instances of the ComputeParams class. * * The ComputeParamsBuilder provides a fluent interface for building ComputeParams objects. * It allows setting multiple parameters of different types and creates a ComputeParams object * with the provided parameter values. The object is intended to be used from Omnigraph nodes. * * Example usage: * ComputeParams<std::string> params = ComputeParamsBuilder<std::string>{gpu, rp, db} * .addValue("param1", 42) * .addValue("param2", "hello") * .addValue("param3", 3.14) * .build(); * * The main purpose for this builder is to facilitate the sdheduling of CUDA tasks. For this purpose, * the builder provides some specialized APIs for adding input AOVs, for allocating new AOVs and for * scheduling the work on the GPU. * * Once built, the ComputeParams instance can be passed to the CUDA task using the scheduleCudaTask function. * * Alternatively, the builder can directly build the params and schedule the CUDA task in the same chain of method calls. 
 *
 * Example usage for scheduling CUDA tasks:
 * ComputeParamsBuilder<std::string>{ gpu, rp, db }
 *     .addValue("multiplier", db.inputs.multiplier())
 *     .addInputTexture("inputAOV", db.inputs.inputAOV(),
 *         [](cudaMipmappedArray_t cudaPtr, carb::graphics::TextureDesc const* desc, ComputeParams<std::string>& params)
 *         {
 *             params.add("width", desc->width);
 *             params.add("height", desc->height);
 *         })
 *     .addOutputTexture("outputAOV", db.inputs.outputAOV(), db.inputs.width(), db.inputs.height(), "TestTexture")
 *     .scheduleCudaTask("TestCudaTask",
 *         [](ComputeParams<std::string>* data, cudaStream_t stream)
 *         {
 *             auto multiplier = data->get<float>("multiplier");
 *             auto inputAOV = data->get<cudaMipmappedArray_t>("inputAOV");
 *             auto outputAOV = data->get<cudaMipmappedArray_t>("outputAOV");
 *             auto width = data->get<uint32_t>("width");
 *             auto height = data->get<uint32_t>("height");
 *
 *             // ... call CUDA kernel
 *         });
 *
 * Note: after building the ComputeParams or scheduling the CUDA task, the ComputeParamsBuilder instance cannot be modified anymore.
 * This restriction is imposed in order to provide the guarantee that the AOV pointers built by the builder are not invalidated by further
 * modifications of the render product through the builder API.
 */
template<typename TKey>
class ComputeParamsBuilder
{
public:
    /**
     * @brief Callback invoked after extracting a texture AOV with a given token from the Render Product.
     * Allows adding additional parameters from the TextureDesc of the AOV, such as the width, height, etc.
     */
    using TextureDescCallback = std::function<void(cudaMipmappedArray_t, carb::graphics::TextureDesc const*, ComputeParams<TKey>&)>;

    /**
     * @brief Callback invoked after extracting a buffer AOV with a given token from the Render Product.
     * Allows adding additional parameters from the BufferDesc of the AOV, such as the bufferSize.
     */
    using BufferDescCallback = std::function<void(cudaMipmappedArray_t, carb::graphics::BufferDesc const*, ComputeParams<TKey>&)>;

    /**
     * @brief Callback invoked by the builder to explicitly allocate an AOV.
     * Allows explicit control over the parameters of the new AOV.
     */
    using AllocateAOVCallback = std::function<cudaMipmappedArray_t(ComputeParams<TKey> const&, omni::graph::core::GpuFoundationsInterfaces*, omni::usd::hydra::HydraRenderProduct*, rtx::resourcemanager::SyncScopeId, uint32_t)>;

    /**
     * @brief Callback invoked by the builder after the allocation of a new AOV.
     * Allows setting fabric attributes of the node.
     */
    using PostAllocateAOVCallback = std::function<void(cudaMipmappedArray_t)>;

private:
    // Discriminator for the AOVParams union below.
    enum class AOVType
    {
        Buffer,
        Texture
    };

    // One pending input or output AOV request recorded by the add* methods
    // and consumed later by build().
    template <typename T>
    struct AOVParams
    {
        AOVType aovType;
        T key;
        omni::fabric::TokenC aovToken;
        // Active member is selected by aovType; only filled for outputs that
        // use the standard (non-callback) allocation path.
        // NOTE(review): assumes TextureDesc/BufferDesc are trivially
        // copyable so a raw union is legal here — confirm against carb.
        union
        {
            carb::graphics::TextureDesc textureDesc;
            carb::graphics::BufferDesc bufferDesc;
        };
        //cpp17::any callback; // cpp17::any does not seem to work with lambdas. std::any should work, but can't use it for now
        // TODO: find a better way to define the callbacks
        TextureDescCallback inputTextureCb;
        BufferDescCallback inputBufferCb;
        AllocateAOVCallback allocateAOVCb;
        PostAllocateAOVCallback postAllocateCb;
    };

public:
    /**
     * @brief Constructor.
     *
     * @param[in] gpu The GPU interface.
     * @param[in] rp The render product on which the CUDA computation is applied.
     * @param[in] db The node database.
     * @param[in] initialCapacity The initial capacity of the container where the parameters are stored.
*/ ComputeParamsBuilder( omni::graph::core::GpuFoundationsInterfaces* gpu, omni::usd::hydra::HydraRenderProduct* rp, omni::graph::core::ogn::OmniGraphDatabase& db, std::size_t initialCapacity = 32) : m_gpu(gpu) , m_rp(rp) , m_db(db) , m_data(initialCapacity) , m_deviceIndex(s_invalidDeviceIndex) , m_buildError(BuildError::NoError) { m_inputAOVs.reserve(initialCapacity); m_outputAOVs.reserve(initialCapacity); m_outputAOVTokens.reserve(initialCapacity); } /** * @brief Set a parameter value of type T with the specified key. * * @param[in] key The unique identifier of the parameter value. * @param[in] value The parameter value. * @return Returns an r-value reference of the ComputeParamsBuilder. */ template<typename TValue> ComputeParamsBuilder&& addValue(TKey const& key, TValue&& value) && { m_data.add(key, value); return std::move(*this); } /** * @brief Add a texture AOV. The AOV is expected to be already allocated. * * @param[in] key The unique identifier of the parameter value. * @param[in] aovToken The token used to extract the AOV from the Render Product. * @param[in] cb Callback which can be used to add additional parameters from the texture description of the AOV. * @return Returns an r-value reference of the ComputeParamsBuilder. */ ComputeParamsBuilder&& addInputTexture(TKey const& key, omni::fabric::TokenC aovToken, TextureDescCallback const& cb = nullptr) && { m_inputAOVs.emplace_back(AOVParams<TKey> { /*.aovType =*/ AOVType::Texture, /*.key =*/ key, /*.aovToken =*/ aovToken }); m_inputAOVs.back().inputTextureCb = cb; return std::move(*this); } /** * @brief Add a buffer AOV. The AOV is expected to be already allocated. * * @param[in] key The unique identifier of the parameter value. * @param[in] aovToken The token used to extract the AOV from the Render Product. * @param[in] cb Callback which can be used to add additional parameters from the buffer description of the AOV. * @return Returns an r-value reference of the ComputeParamsBuilder. 
 */
    ComputeParamsBuilder&& addInputBuffer(TKey const& key, omni::fabric::TokenC aovToken, BufferDescCallback const& cb = nullptr) &&
    {
        // Record the request; build() resolves the token to a CUDA pointer later.
        m_inputAOVs.emplace_back(AOVParams<TKey> { /*.aovType =*/ AOVType::Buffer, /*.key =*/ key, /*.aovToken =*/ aovToken });
        m_inputAOVs.back().inputBufferCb = cb;
        return std::move(*this);
    }

    /**
     * @brief Allocates a new texture AOV which will be filled in the CUDA task.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] width The width of the allocated texture.
     * @param[in] height The height of the allocated texture.
     * @param[in] format The texture format.
     * @param[in] debugName A string used to identify the new AOV in the debugger.
     * @param[in] postAllocateCb A callback which allows the binding of the new AOV to an output attribute of a node.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addOutputTexture(
        TKey const& key,
        omni::fabric::TokenC aovToken,
        uint32_t width,
        uint32_t height,
        carb::graphics::Format format,
        const char* debugName,
        PostAllocateAOVCallback postAllocateCb = nullptr) &&
    {
        m_outputAOVTokens.emplace_back(aovToken);
        m_outputAOVs.emplace_back(AOVParams<TKey> { /*.aovType =*/ AOVType::Texture, /*.key =*/ key, /*.aovToken =*/ aovToken });
        // 2D single-mip texture, shareable with CUDA (kTextureUsageFlagExportShared)
        // and usable as a storage image from shaders.
        m_outputAOVs.back().textureDesc = carb::graphics::TextureDesc{
            carb::graphics::TextureType::e2D,
            carb::graphics::kTextureUsageFlagShaderResourceStorage | carb::graphics::kTextureUsageFlagShaderResource | carb::graphics::kTextureUsageFlagExportShared,
            width, height, 1, 1, format, carb::graphics::SampleCount::e1x, { { 0, 0, 0, 0 }, nullptr }, debugName, nullptr };
        m_outputAOVs.back().postAllocateCb = postAllocateCb;
        return std::move(*this);
    }

    /**
     * @brief Allocates a new texture AOV which will be filled in the CUDA task.
     *
     * Allows explicit definition and initialization of the AOV.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] callback The callback where the initialization of the AOV must be done.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addOutputTexture(
        TKey const& key,
        omni::fabric::TokenC aovToken,
        AllocateAOVCallback callback) &&
    {
        m_outputAOVTokens.emplace_back(aovToken);
        m_outputAOVs.emplace_back(AOVParams<TKey> { /*.aovType =*/ AOVType::Texture, /*.key =*/ key, /*.aovToken =*/ aovToken });
        m_outputAOVs.back().allocateAOVCb = callback;
        return std::move(*this);
    }

    /**
     * @brief Allocates a new buffer AOV which will be filled in the CUDA task.
     *
     * @param[in] key The unique identifier of the parameter value.
     * @param[in] aovToken The token used to extract the AOV from the Render Product.
     * @param[in] bufferSize The size of the allocated buffer.
     * @param[in] debugName A string used to identify the new AOV in the debugger.
     * @param[in] postAllocateCb A callback which allows the binding of the new AOV to an output attribute of a node.
     * @return Returns an r-value reference of the ComputeParamsBuilder.
     */
    ComputeParamsBuilder&& addOutputBuffer(
        TKey const& key,
        omni::fabric::TokenC aovToken,
        uint32_t bufferSize,
        const char* debugName,
        PostAllocateAOVCallback postAllocateCb = nullptr) &&
    {
        m_outputAOVTokens.emplace_back(aovToken);
        m_outputAOVs.emplace_back(AOVParams<TKey> { /*.aovType =*/ AOVType::Buffer, /*.key =*/ key, /*.aovToken =*/ aovToken, });
        // CUDA-shareable buffer of the requested size.
        m_outputAOVs.back().bufferDesc = carb::graphics::BufferDesc { carb::graphics::kBufferUsageFlagExportShared, bufferSize, debugName, nullptr };
        m_outputAOVs.back().postAllocateCb = postAllocateCb;
        return std::move(*this);
    }

    /**
     * @brief Allocates a new buffer AOV which will be filled in the CUDA task.
     *
     * Allows explicit definition and initialization of the AOV.
     *
     * @param[in] key The unique identifier of the parameter value.
*/ ComputeParams<TKey> build() { if (!isValid()) return m_data; // append the outputs first to avoid further structural changes which will invalidate the AOV pointers if (!m_outputAOVTokens.empty()) appendUninitializedRenderVars(m_rp, m_outputAOVTokens); auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(m_gpu->resourceManagerContext); auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(m_gpu->resourceManager); for (auto const& aovParams : m_inputAOVs) { auto aovPtr = omni::usd::hydra::getRenderVarFromProduct(m_rp, aovParams.aovToken.token); if (aovPtr == nullptr) { m_db.logWarning("Missing RenderVar %s", m_db.tokenToString(aovParams.aovToken)); continue; } if (aovPtr->resource == nullptr) { m_db.logWarning("RenderVar %s is an invalid resource.", m_db.tokenToString(aovParams.aovToken)); continue; } const uint32_t deviceIndex = rm->getFirstDeviceIndex(*rmCtx, *aovPtr->resource); if (m_deviceIndex != deviceIndex) { if (m_deviceIndex == s_invalidDeviceIndex) { m_deviceIndex = deviceIndex; } else { m_db.logWarning("RenderVar %s has an inconsistend device index (%lu/%lu).", m_db.tokenToString(aovParams.aovToken), static_cast<unsigned long>(deviceIndex), static_cast<unsigned long>(m_deviceIndex)); m_buildError = BuildError::InconsistentDeviceIndex; break; } } switch (aovParams.aovType) { case AOVType::Texture: { auto cudaPtr = (cudaMipmappedArray_t)rm->getCudaMipmappedArray(*aovPtr->resource, m_deviceIndex); if (aovParams.inputTextureCb) { const auto textureDesc = rm->getTextureDesc(*rmCtx, aovPtr->resource); aovParams.inputTextureCb(cudaPtr, textureDesc, m_data); } m_data.add(aovParams.key, cudaPtr); break; } case AOVType::Buffer: { auto cudaPtr = (cudaMipmappedArray_t)rm->getCudaMipmappedArray(*aovPtr->resource, m_deviceIndex); if (aovParams.inputBufferCb) { const auto bufferDesc = rm->getBufferDesc(aovPtr->resource); aovParams.inputBufferCb(cudaPtr, bufferDesc, m_data); } m_data.add(aovParams.key, cudaPtr); break; } } } if (m_buildError != 
BuildError::NoError) { return m_data; } // the device index is not set, get the index of the first device render variable if (m_deviceIndex == s_invalidDeviceIndex) { for (uint32_t i = 0; i < m_rp->renderVarCnt; i++) { if (m_rp->vars[i].isRpResource) { m_deviceIndex = rm->getFirstDeviceIndex(*rmCtx, *m_rp->vars[i].resource); break; } } // the render product has no device render variable, use the render product device index if (m_deviceIndex == s_invalidDeviceIndex) { m_deviceIndex = carb::graphics::DeviceMask(m_rp->deviceMask).getFirstIndex(); } } auto iRenderGraph = reinterpret_cast<gpu::rendergraph::IRenderGraph*>(m_gpu->renderGraph); auto rgBuilder = reinterpret_cast<rtx::rendergraph::RenderGraphBuilder*>(m_gpu->renderGraphBuilder); auto renderGraph = iRenderGraph->getRenderGraph(m_deviceIndex); const auto syncScopeId = rgBuilder->getRenderGraphDesc(*renderGraph).syncScopeId; for (auto const& aovParams : m_outputAOVs) { cudaMipmappedArray_t ptr = 0; if (aovParams.allocateAOVCb != nullptr) { // custom AOV allocation auto cb = aovParams.allocateAOVCb; ptr = cb(m_data, m_gpu, m_rp, syncScopeId, m_deviceIndex); } else { // standard AOV allocation switch (aovParams.aovType) { case AOVType::Texture: ptr = (cudaMipmappedArray_t)allocateRenderVarTexture( m_gpu, m_rp, syncScopeId, m_deviceIndex, aovParams.aovToken, aovParams.textureDesc); break; case AOVType::Buffer: ptr = (cudaMipmappedArray_t)allocateRenderVarBuffer( m_gpu, m_rp, syncScopeId, m_deviceIndex, aovParams.aovToken, aovParams.bufferDesc); break; default: break; } } m_data.add(aovParams.key, ptr); if (aovParams.postAllocateCb != nullptr) aovParams.postAllocateCb(ptr); } return m_data; } /** * @brief Builds the final ComputeParams structure and schedules the CUDA task. * * No further values can be added to the builder after this call. * * @param[in] renderOpName The name of the render op in the render graph. * @param[in] computeCuda The entry point to the CUDA computation kernel. 
     * @return Returns true if the builder is valid and the CUDA task was scheduled, otherwise returns false.
     */
    bool scheduleCudaTask(const char* renderOpName, void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream)) &&;

    /**
     * @brief Builds the final ComputeParams structure and schedules the CUDA task.
     *
     * Allows validation of the ComputeParams before scheduling the CUDA task.
     * No further values can be added to the builder after this call.
     *
     * @param[in] renderOpName The name of the render op in the render graph.
     * @param[in] computeCuda The entry point to the CUDA computation kernel.
     * @param[in] validateCb A callback to validate the parameters before scheduling the CUDA task.
     * @return Returns true if the builder is valid, the params are validated by the user callback
     * and the CUDA task was scheduled, otherwise returns false.
     */
    bool scheduleCudaTask(
        const char* renderOpName,
        void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream),
        bool (*validateCb)(ComputeParams<TKey> const& params)
    ) &&;

private:
    // True when the builder has usable gpu/rp pointers and no build error
    // has been recorded so far.
    bool isValid() const
    {
        if (!m_gpu || !m_rp || (m_buildError != BuildError::NoError))
        {
            CARB_LOG_WARN_ONCE("ComputeParamsBuilder: invalid RenderProduct inputs");
            return false;
        }
        return true;
    }

    // Grows the render product's render-var array by one uninitialized entry
    // per requested token (skipping tokens that already exist). The resources
    // themselves are allocated later by the allocateRenderVar* helpers.
    static void appendUninitializedRenderVars(omni::usd::hydra::HydraRenderProduct* rp, const std::vector<omni::fabric::TokenC>& renderVarTokens)
    {
        using TokenC = omni::fabric::TokenC;

        // filter already existing aovs
        std::vector<TokenC> filteredRenderVarTokens;
        filteredRenderVarTokens.reserve(renderVarTokens.size());
        for (const auto token : renderVarTokens)
        {
            if (!omni::usd::hydra::getRenderVarFromProduct(rp, token.token))
            {
                filteredRenderVarTokens.emplace_back(token);
            }
        }

        using namespace omni::usd::hydra;
        const size_t numRenderVars = filteredRenderVarTokens.size();
        // Manual grow-and-swap of the raw render-var array owned by rp.
        HydraRenderVar* newVars = new HydraRenderVar[rp->renderVarCnt + numRenderVars];
        const size_t varArraySize = sizeof(HydraRenderVar) * rp->renderVarCnt;
        std::memcpy(newVars, rp->vars, varArraySize);
        for (size_t i = 0; i < numRenderVars; ++i)
        {
            newVars[rp->renderVarCnt + i].aov = filteredRenderVarTokens[i].token;
            newVars[rp->renderVarCnt + i].isRpResource = true;
            // New entries start unallocated; isBufferRpResource is flipped to
            // false later if the var turns out to be a texture (see
            // allocateRenderVarTexture).
            newVars[rp->renderVarCnt + i].resource = nullptr;
            newVars[rp->renderVarCnt + i].isBufferRpResource = true;
            newVars[rp->renderVarCnt + i].isFrameLifetimeRsrc = false;
        }

        delete[] rp->vars;
        rp->vars = newVars;
        rp->renderVarCnt += static_cast<uint32_t>(numRenderVars);
    }

    // Allocates the backing buffer resource for a render var appended by
    // appendUninitializedRenderVars and returns its CUDA device pointer
    // (0 on failure or when the var is missing/already typed differently).
    static uint64_t allocateRenderVarBuffer(omni::graph::core::GpuFoundationsInterfaces* gpu, omni::usd::hydra::HydraRenderProduct* rp, rtx::resourcemanager::SyncScopeId syncScopeId, uint32_t deviceIndex, omni::fabric::TokenC deviceRenderVarToken, carb::graphics::BufferDesc const& buffDesc)
    {
        auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(gpu->resourceManagerContext);
        auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(gpu->resourceManager);
        using namespace carb::graphics;
        auto deviceRenderVar = omni::usd::hydra::getRenderVarFromProduct(rp, deviceRenderVarToken.token);
        CARB_ASSERT(deviceRenderVar && deviceRenderVar->isRpResource && deviceRenderVar->isBufferRpResource);
        if (!deviceRenderVar || !deviceRenderVar->isRpResource || !deviceRenderVar->isBufferRpResource)
        {
            return 0;
        }

        const rtx::resourcemanager::ResourceDesc resourceDesc = { rtx::resourcemanager::ResourceMode::ePooled, MemoryLocation::eDevice, rtx::resourcemanager::ResourceCategory::eOtherBuffer, rtx::resourcemanager::kResourceUsageFlagCudaShared, DeviceMask::getDeviceMaskFromIndex(deviceIndex), deviceIndex, syncScopeId };

        CARB_ASSERT(!deviceRenderVar->resource);
        if (!deviceRenderVar->resource)
        {
            // Zero-sized requests allocate nothing and yield a null pointer.
            deviceRenderVar->resource = buffDesc.size > 0 ? rm->getResourceFromBufferDesc(*rmCtx, buffDesc, resourceDesc) : nullptr;
            deviceRenderVar->isFrameLifetimeRsrc = true;
        }

        auto cudaDevicePointer = deviceRenderVar->resource ? rm->getCudaDevicePointer(*deviceRenderVar->resource, deviceIndex) : nullptr;
        return reinterpret_cast<uint64_t>(cudaDevicePointer);
    }

    // Allocates the backing texture resource for a render var appended by
    // appendUninitializedRenderVars and returns its CUDA device pointer.
    // NOTE(review): unlike the buffer variant this overwrites resource
    // unconditionally (no CARB_ASSERT(!resource) guard) — confirm intended.
    static uint64_t allocateRenderVarTexture(omni::graph::core::GpuFoundationsInterfaces* gpu, omni::usd::hydra::HydraRenderProduct* rp, rtx::resourcemanager::SyncScopeId syncScopeId, uint32_t deviceIndex, omni::fabric::TokenC deviceRenderVarToken, carb::graphics::TextureDesc const& texDesc)
    {
        auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(gpu->resourceManagerContext);
        auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(gpu->resourceManager);
        using namespace carb::graphics;
        auto deviceRenderVar = omni::usd::hydra::getRenderVarFromProduct(rp, deviceRenderVarToken.token);
        CARB_ASSERT(deviceRenderVar && deviceRenderVar->isRpResource && deviceRenderVar->isBufferRpResource);
        if (!deviceRenderVar || !deviceRenderVar->isRpResource || !deviceRenderVar->isBufferRpResource)
        {
            return 0;
        }

        const rtx::resourcemanager::ResourceDesc resDesc = { rtx::resourcemanager::ResourceMode::ePooled, carb::graphics::MemoryLocation::eDevice, rtx::resourcemanager::ResourceCategory::eOtherTexture, rtx::resourcemanager::kResourceUsageFlagCudaShared, carb::graphics::DeviceMask::getDeviceMaskFromIndex(deviceIndex), deviceIndex, syncScopeId };

        deviceRenderVar->resource = rm->getResourceFromTextureDesc(*rmCtx, texDesc, resDesc);
        // The var was appended as a buffer placeholder; mark it a texture now.
        deviceRenderVar->isBufferRpResource = false;
        deviceRenderVar->isFrameLifetimeRsrc = true;

        auto cudaDevicePointer = deviceRenderVar->resource ? rm->getCudaDevicePointer(*deviceRenderVar->resource, deviceIndex) : nullptr;
        return reinterpret_cast<uint64_t>(cudaDevicePointer);
    }

protected:
    omni::graph::core::GpuFoundationsInterfaces* m_gpu;   // GPU foundation interfaces (not owned)
    omni::usd::hydra::HydraRenderProduct* m_rp;           // target render product (not owned)
    omni::graph::core::ogn::OmniGraphDatabase& m_db;      // node database, used for logging/token lookup
    std::vector<AOVParams<TKey>> m_inputAOVs;             // pending input AOV requests
    std::vector<AOVParams<TKey>> m_outputAOVs;            // pending output AOV requests
    std::vector<omni::fabric::TokenC> m_outputAOVTokens;  // tokens of AOVs to append to the render product
    ComputeParams<TKey> m_data;                           // the parameter map being built
    uint32_t m_deviceIndex;                               // resolved GPU device index (s_invalidDeviceIndex until known)
    enum class BuildError
    {
        NoError,
        InconsistentDeviceIndex
    } m_buildError;

    static constexpr uint32_t s_invalidDeviceIndex = std::numeric_limits<uint32_t>::max();
};

// NOTE(review): an anonymous namespace in a header gives every translation
// unit its own copy of UserData — consider a named detail namespace.
namespace
{
// Temporary structure for passing the params and the computeCuda callback to the cudaInterop lambda.
template <typename TParams>
struct UserData
{
    ComputeParams<TParams>* params;
    void (*computeCuda)(ComputeParams<TParams>* data, cudaStream_t stream);
};
} // namespace

/**
 * @brief Schedule a CUDA task on the post render graph.
 *
 * @param[in] gpu The GPU interface.
 * @param[in] rp The render product on which the CUDA computation is applied.
 * @param[in] deviceIndex The index of the GPU device the task is scheduled on.
 * @param[in] computeParams The parameters of the computation.
 * @param[in] renderOpName The name of the render op in the render graph.
 * @param[in] computeCuda The CUDA computation entry point.
*/
template <typename TParams>
inline void scheduleCudaTask(omni::graph::core::GpuFoundationsInterfaces* gpu,
                             omni::usd::hydra::HydraRenderProduct* rp,
                             uint32_t deviceIndex,
                             ComputeParams<TParams> const& computeParams,
                             const char* renderOpName,
                             void (*computeCuda)(ComputeParams<TParams>* data, cudaStream_t stream))
{
    CARB_ASSERT(gpu);
    CARB_ASSERT(rp);
    CARB_ASSERT(computeCuda);

    auto iRenderGraph = reinterpret_cast<gpu::rendergraph::IRenderGraph*>(gpu->renderGraph);
    auto rgBuilder = reinterpret_cast<rtx::rendergraph::RenderGraphBuilder*>(gpu->renderGraphBuilder);
    auto renderGraph = iRenderGraph->getRenderGraph(deviceIndex);

    // Copy the parameters to the heap so they outlive this call. Ownership is
    // transferred to the cudaInterop callback below, which deletes both
    // allocations once the CUDA work has run.
    // NOTE(review): the previous code wrapped `computeParams` in std::move(),
    // but moving from a const reference silently degrades to this exact copy
    // (clang-tidy: performance-move-const-arg) - the copy is now explicit.
    auto computeParamsPtr = new ComputeParams<TParams>(computeParams);
    auto cudaData = new UserData<TParams>{ computeParamsPtr, computeCuda };

    const rtx::rendergraph::ParamBlockRefs paramBlockRefs{ 0, {} };
    rtx::rendergraph::RenderOpParams* renderOpParams = rgBuilder->createParams(*renderGraph, paramBlockRefs);
    rtx::rendergraph::addRenderOpLambdaEx(
        *rgBuilder, *renderGraph, renderOpName, renderOpParams, rtx::rendergraph::kRenderOpFlagNoAnnotation,
        // Only `cudaData` is used by the render op; the previous capture list
        // also captured `rgBuilder` and `computeCuda`, which were never read.
        [cudaData](rtx::rendergraph::RenderOpInputCp renderOpInput)
        {
            renderOpInput->graphicsMux->cmdCudaInterop(
                renderOpInput->commandList,
                [](cudaStream_t cudaStream, void* userData) -> void
                {
                    // Recover ownership of the task data, invoke the user's CUDA
                    // entry point, then free everything scheduleCudaTask allocated.
                    auto cudaData = reinterpret_cast<UserData<TParams>*>(userData);
                    auto params = cudaData->params;
                    auto computeCuda = cudaData->computeCuda;
                    computeCuda(params, cudaStream);
                    delete params;
                    delete cudaData;
                },
                cudaData, carb::graphicsmux::CudaInteropFlags::eNone);
        });
}

//! Build the parameters and schedule `computeCuda` on the post render graph.
//! Consumes the builder (&&-qualified). Returns false if the builder is invalid.
template <typename TKey>
inline bool ComputeParamsBuilder<TKey>::scheduleCudaTask(
    const char* renderOpName, void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream)) &&
{
    if (!isValid())
        return false;
    auto computeParams = build();
    omni::graph::image::unstable::scheduleCudaTask(m_gpu, m_rp, m_deviceIndex, computeParams, renderOpName, computeCuda);
    return true;
}

//! Same as above, but the task is scheduled only when `validateCb` accepts the
//! built parameters. Returns false if the builder is invalid or validation fails.
template <typename TKey>
inline bool ComputeParamsBuilder<TKey>::scheduleCudaTask(
    const char* renderOpName,
    void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream),
    bool (*validateCb)(ComputeParams<TKey> const& params)) &&
{
    if (!isValid())
        return false;
    auto computeParams = build();
    if (validateCb && validateCb(computeParams))
    {
        omni::graph::image::unstable::scheduleCudaTask(
            m_gpu, m_rp, m_deviceIndex, computeParams, renderOpName, computeCuda);
        return true;
    }
    return false;
}

} // namespace unstable
} // namespace image
} // namespace omni
36,245
C
40.329532
154
0.597186
omniverse-code/kit/include/omni/graph/core/IVariable.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL /** * Object that contains a value that is local to a graph, available from anywhere in the graph */ template <> class omni::core::Generated<omni::graph::core::IVariable_abi> : public omni::graph::core::IVariable_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::IVariable") /** * Returns the name of the variable object. The name is derived by * removing any variable specific prefixes from the underlying attribute. * * @return The name of the variable. */ const char* getName() noexcept; /** * Returns the full path to the variables underlying attribute * * @return The full usd path of the variable */ const char* getSourcePath() noexcept; /** * Returns the type of the variable * * @return The type of the variable */ omni::graph::core::Type getType() noexcept; /** * Returns the category of the variable * * @return The category of the variable, or an empty string if it is not set. */ const char* getCategory() noexcept; /** * Sets the category of the variable * * @param[in] category A string representing the variable category */ void setCategory(const char* category) noexcept; /** * Gets the display name of the variable. By default the display name is the same * as the variable name. 
* * @return The display name of the variable, or an empty string if it is not set. */ const char* getDisplayName() noexcept; /** * Set the display name of the variable. * * @param[in] displayName A string to set the display name to */ void setDisplayName(const char* displayName) noexcept; /** * Get the tooltip used for the variable. * * @return The tooltip of the variable, or an emtpy string if none is set. */ const char* getTooltip() noexcept; /** * Set the tooltip used for the variable * * @param[in] toolTip A description used as a tooltip. */ void setTooltip(const char* toolTip) noexcept; /** * Get the scope of the variable. The scope determines which graphs can read and write the value. * * @return The scope of the variable. */ omni::graph::core::eVariableScope getScope() noexcept; /** * Sets the scope of the variable. * * @param[in] scope The scope to set on the variable. */ void setScope(omni::graph::core::eVariableScope scope) noexcept; /** * Returns whether this variable is valid * * @return True if the variable is valid, false otherwise */ bool isValid() noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getName() noexcept { return getName_abi(); } inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getSourcePath() noexcept { return getSourcePath_abi(); } inline omni::graph::core::Type omni::core::Generated<omni::graph::core::IVariable_abi>::getType() noexcept { return getType_abi(); } inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getCategory() noexcept { return getCategory_abi(); } inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setCategory(const char* category) noexcept { setCategory_abi(category); } inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getDisplayName() noexcept { return getDisplayName_abi(); } inline void 
omni::core::Generated<omni::graph::core::IVariable_abi>::setDisplayName(const char* displayName) noexcept { setDisplayName_abi(displayName); } inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getTooltip() noexcept { return getTooltip_abi(); } inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setTooltip(const char* toolTip) noexcept { setTooltip_abi(toolTip); } inline omni::graph::core::eVariableScope omni::core::Generated<omni::graph::core::IVariable_abi>::getScope() noexcept { return getScope_abi(); } inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setScope(omni::graph::core::eVariableScope scope) noexcept { setScope_abi(scope); } inline bool omni::core::Generated<omni::graph::core::IVariable_abi>::isValid() noexcept { return isValid_abi(); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
5,283
C
26.664921
127
0.680485
omniverse-code/kit/include/omni/graph/core/BundleAttrib.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// ====================================================================================================
//
// DO NOT USE
//
// This is a temporary interface that can change at any time.
//
// ====================================================================================================
#include "IDirtyID.h"

#include <omni/graph/core/IBundle.h>

namespace omni
{
namespace graph
{
namespace core
{

class BundlePrim;
class ConstBundlePrim;

// Underlying integral type used to store a BundleAttribSource value.
using BundleAttribSourceType = uint8_t;

/**
 * BundleAttributeSource is used to differentiate between UsdAttributes
 * and UsdRelationships.
 *
 * TODO: Investigate why we can't use eRelationship for this purpose.
 */
enum class BundleAttribSource : BundleAttribSourceType
{
    Attribute,
    Relationship,
};

/**
 * Attribute in bundle primitive.
 *
 * In contrast to (Const)BundlePrim and (Const)BundlePrims, PrimAttribute uses
 * const qualifier to express constness of the attribute.
 *
 * Instances are non-copyable and non-movable; they act as lightweight views
 * into attribute data owned by the bundle, so their lifetime must not exceed
 * the owning bundle primitive.
 *
 * TODO: Review if const qualifier is appropriate.
 */
class BundleAttrib
{
public:
    /**
     * Backward compatibility alias.
     */
    using SourceType = BundleAttribSourceType;
    using Source = BundleAttribSource;

    BundleAttrib() = default;

    /**
     * Read initialization.
     */
    BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept;

    /**
     * Read-Write initialization.
     */
    BundleAttrib(BundlePrim& prim,
                 omni::graph::core::NameToken name,
                 omni::graph::core::Type type,
                 size_t arrayElementCount,
                 BundleAttribSource source) noexcept;

    // Non-copyable and non-movable by design (view semantics).
    BundleAttrib(BundleAttrib const&) = delete;
    BundleAttrib(BundleAttrib&&) noexcept = delete;
    BundleAttrib& operator=(BundleAttrib const&) = delete;
    BundleAttrib& operator=(BundleAttrib&&) noexcept = delete;

    /**
     * @return Bundle Primitive where this attribute belongs to.
     */
    ConstBundlePrim* getBundlePrim() const noexcept;

    /**
     * @return Bundle Primitive where this attribute belongs to.
     */
    BundlePrim* getBundlePrim() noexcept;

    /**
     * @return Non const attribute handle of this attribute.
     */
    omni::graph::core::AttributeDataHandle handle() noexcept;

    /**
     * @return Const attribute handle of this attribute.
     */
    omni::graph::core::ConstAttributeDataHandle handle() const noexcept;

    /**
     * @return Name of this attribute.
     */
    omni::graph::core::NameToken name() const noexcept;

    /**
     * @return Type of this attribute.
     */
    omni::graph::core::Type type() const noexcept;

    /**
     * @return Interpolation of this attribute.
     */
    omni::graph::core::NameToken interpolation() const noexcept;

    /**
     * Set interpolation for this attribute.
     *
     * @return True if operation successful, false otherwise.
     */
    bool setInterpolation(omni::graph::core::NameToken interpolation) noexcept;

    /**
     * Clean interpolation information for this attribute.
     */
    void clearInterpolation() noexcept;

    // Dirty-ID tracking was moved into core; these members remain only for
    // source compatibility. The setters are deliberate no-ops returning false.
    [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]] DirtyIDType dirtyID() const noexcept;

    [[deprecated("Setting DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]] bool setDirtyID(DirtyIDType dirtyID) noexcept
    {
        return false;
    }

    [[deprecated("Bumping DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]] bool bumpDirtyID() noexcept
    {
        return false;
    }

    /**
     * Set source for this attribute.
     *
     * @return True if successful, false otherwise.
     */
    bool setSource(Source source) noexcept;

    /**
     * Reset source to default value for this attribute.
     */
    void clearSource() noexcept;

    /**
     * @return True if this attribute is an array attribute.
     */
    bool isArray() const noexcept;

    /**
     * @return Size of this attribute. If attribute is not an array, then size is 1.
     */
    size_t size() const noexcept;

    /**
     * Changes size of this attribute.
     */
    void resize(size_t arrayElementCount) noexcept;

    /**
     * Copy attribute contents from another attribute.
     * Destination name is preserved.
     */
    void copyContentsFrom(BundleAttrib const& sourceAttr) noexcept;

    /**
     * @return Internal data as void pointer.
     */
    void* getDataInternal() noexcept;

    /**
     * @return Internal data as void pointer.
     */
    void const* getDataInternal() const noexcept;

    // Typed accessors. T must match the attribute's stored type; no conversion
    // is performed here - see BundleAttribImpl.h for the implementations.
    template <typename T>
    T get() const noexcept;

    // NOTE: If this is not an array type attribute, this pointer may not be valid once any prim,
    // even if it's not the prim containing this attribute, has an attribute added or removed,
    // due to how attribute data is stored.
    template <typename T>
    T* getData() noexcept;
    template <typename T>
    T const* getData() const noexcept;
    template <typename T>
    T const* getConstData() const noexcept;

    template <typename T>
    void set(T const& value) noexcept;
    template <typename T>
    void set(T const* values, size_t elementCount) noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     * In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @todo First iteration of MPiB didn't use 'eRelationship' type to describe relationships.
     *       Thus, strange approach was created to treat attribute, that is a relationship as a "source".
     */
    Source source() const noexcept;

    /**
     * @return true if this attribute is data.
     */
    bool isAttributeData() const noexcept;

    /**
     * @return true if this attribute is relationship.
     */
    bool isRelationshipData() const noexcept;

    /**
     * @deprecated IBundle2 interface does not require prefixing, use getName().
     */
    omni::graph::core::NameToken prefixedName() const noexcept;

private:
    /**
     * Remove attribute and its internal data.
     */
    void clearContents() noexcept;

    omni::graph::core::IConstBundle2* getConstBundlePtr() const noexcept;
    omni::graph::core::IBundle2* getBundlePtr() noexcept;

    // Owning bundle primitive; nullptr for a default-constructed (invalid) attribute.
    ConstBundlePrim* m_bundlePrim{ nullptr };

    // Attribute Definition:
    omni::graph::core::NameToken m_name = omni::fabric::kUninitializedToken;
    // NOTE(review): m_type is left uninitialized by the default constructor -
    // presumably callers only read it after one of the initializing
    // constructors ran; confirm against BundleAttribImpl.h.
    omni::fabric::TypeC m_type;

    // Attribute Property Cached Values:
    omni::graph::core::NameToken m_interpolation = omni::fabric::kUninitializedToken;
    Source m_source { BundleAttribSource::Attribute };

    friend class ConstBundlePrims;
    friend class BundlePrim;
};

/**
 * Do not use! Backward compatibility alias.
 */
using BundleAttributeInfo = BundleAttrib;

} // namespace core
} // namespace graph
} // namespace omni

#include "BundleAttribImpl.h"
7,866
C
27.400722
116
0.607806
omniverse-code/kit/include/omni/graph/core/GpuInteropEntryUserData.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <unordered_map> #include <carb/graphics/GraphicsTypes.h> namespace omni { namespace usd { namespace hydra { struct HydraRenderProduct; } // namespace hydra } // namespace usd namespace graph { namespace core { // Less than ideal, but GpuInteropCudaEntryUserData + GpuInteropRpEntryUserData // are filled out by RenderGraphScheduler.cpp and passed to the top level GpuInterop // CudaEntry or RenderProductEntry nodes marking the head of post-processing chain // for RTX Hydra Renderer struct GpuInteropCudaResourceData { void* cudaResource; uint32_t width; uint32_t height; uint32_t depthOrArraySize; uint16_t mipCount; carb::graphics::Format format; bool isBuffer; uint32_t deviceIndex; }; typedef std::unordered_map<std::string, GpuInteropCudaResourceData> GpuInteropCudaResourceMap; struct GpuInteropCudaEntryUserData { void* cudaStream; double simTime; double hydraTime; int64_t frameId; int64_t externalTimeOfSimFrame; GpuInteropCudaResourceMap cudaRsrcMap; }; // Gpu Foundations initialization inside Kit remains a trainwreck, since unresolved // we pass struct GpuFoundationsInterfaces { void* graphics; void* graphicsMux; void* deviceGroup; void* renderGraphBuilder; void* resourceManager; void* resourceManagerContext; void* renderGraph; }; struct GpuInteropRpEntryUserData { double simTime; double hydraTime; GpuFoundationsInterfaces* gpu; omni::usd::hydra::HydraRenderProduct* rp; }; } // namespace core } // namespace graph } // namespace omni
2,028
C
23.154762
94
0.757396
omniverse-code/kit/include/omni/graph/core/OgnWrappers.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #pragma message ("OgnWrappers.h is deprecated - include the specific omni/graph/core/ogn/ file you require") // This file contains simple interface classes which wrap data in the OGN database for easier use // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // #include <omni/graph/core/CppWrappers.h> #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/TemplateUtils.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/ogn/StringAttribute.h> #include <omni/graph/core/ogn/ArrayAttribute.h> #include <omni/graph/core/ogn/SimpleAttribute.h>
1,181
C
44.461537
115
0.777307
omniverse-code/kit/include/omni/graph/core/StringUtils.h
// Copyright (c) 2021-2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // This file contains helpful string utilities that can be implemented entirely as inlines, preventing the // need for a bunch of tiny little extensions. #include <carb/logging/Log.h> #include <omni/graph/core/Type.h> #include <omni/graph/core/PreUsdInclude.h> #include <pxr/base/tf/token.h> #include <omni/graph/core/PostUsdInclude.h> #include <string> #include <vector> // snprintf becomes _snprintf on Windows, but we want to use std::snprintf #ifdef HAVE_SNPRINTF # undef snprintf #endif // The namespace is merely to ensure uniqueness. There's nothing inherently associated with OmniGraph in here namespace omni { namespace graph { namespace core { // ============================================================================================================== inline void tokenizeString(const char* input, const std::string& separator, std::vector<pxr::TfToken> & output) { std::string remainder = input; size_t separatorLocation = remainder.find(separator); while (separatorLocation != std::string::npos) { std::string tokenStr = remainder.substr(0, separatorLocation); output.emplace_back(tokenStr); remainder = remainder.substr(separatorLocation + separator.size()); separatorLocation = remainder.find(separator); } if (!remainder.empty()) { output.emplace_back(remainder); } } // ============================================================================================================== // This is like tokenizeString, except returns a vector of strings, not tokens inline std::vector<std::string> splitString(const char* string, 
char delimiter) { std::vector<std::string> strings; const char* prev_pos = string; while (*string++) { char ch = *string; if (ch == delimiter) { strings.push_back(std::string(prev_pos, string)); prev_pos = string + 1; } } if (prev_pos != string) strings.push_back(std::string(prev_pos, string - 1)); return strings; } // ============================================================================================================== // Return a formatted string. // On error will return an empty string. template <typename... Args> std::string formatString(const char* format, Args&&... args) { int fmtSize = std::snprintf(nullptr, 0, format, args...) + 1; // Extra space for '\0' if (fmtSize <= 0) { CARB_LOG_ERROR("Error formating string %s", format); return {}; } auto size = static_cast<size_t>(fmtSize); auto buf = std::make_unique<char[]>(size); std::snprintf(buf.get(), size, format, args...); return std::string(buf.get(), buf.get() + size - 1); // We don't want the '\0' inside } //early version of GCC emit a warning if the "format" string passed to "std::snprintf" does not contain any formatting character // Specialize the function for this use case and prevent that warning inline std::string formatString(const char* format) { return std::string(format); } } // namespace core } // namespace graph } // namespace omni
3,565
C
33.288461
128
0.62216
omniverse-code/kit/include/omni/graph/core/IConstBundle.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Provide read only access to recursive bundles. //! template <> class omni::core::Generated<omni::graph::core::IConstBundle2_abi> : public omni::graph::core::IConstBundle2_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::IConstBundle2") //! Return true if this bundle is valid, false otherwise. bool isValid() noexcept; //! Return the context of this bundle. omni::graph::core::GraphContextObj getContext() noexcept; //! Return Handle to this bundle. Invalid handle is returned if this bundle is invalid. omni::graph::core::ConstBundleHandle getConstHandle() noexcept; //! Return full path of this bundle. carb::flatcache::PathC getPath() noexcept; //! Return name of this bundle omni::graph::core::NameToken getName() noexcept; //! Return handle to the parent of this bundle. Invalid handle is returned if bundle has no parent. omni::graph::core::ConstBundleHandle getConstParentBundle() noexcept; //! @brief Get the names and types of all attributes in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when names and types are `nullptr`. When in this mode, *nameAndTypeCount //! will be populated with the number of attributes in the bundle. //! //! 
**Get mode** is enabled when names or types is not `nullptr`. Upon entering the function, *nameAndTypeCount //! stores the number of entries in names and types. In **Get mode** names are not nullptr, names array is populated //! with attribute names. In **Get mode** types are not nullptr, types array is populated with attribute types. //! //! @param names The names of the attributes. //! @param types The types of the attributes. //! @param nameAndTypeCount must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getAttributeNamesAndTypes(omni::graph::core::NameToken* const names, omni::graph::core::Type* const types, size_t* const nameAndTypeCount) noexcept; //! @brief Get read only handles to all attributes in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when attributes is `nullptr`. When in this mode, *attributeCount //! will be populated with the number of attributes in the bundle. //! //! **Get mode** is enabled when attributes is not `nullptr`. Upon entering the function, *attributeCount //! stores the number of entries in attributes. //! In **Get mode** attributes are not nullptr, attributes array is populated with attribute handles in the bundle. //! //! @param attributes The buffer to store handles of the attributes in this bundle. //! @param attributeCount Size of attributes buffer. Must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstAttributes(omni::graph::core::ConstAttributeDataHandle* const attributes, size_t* const attributeCount) noexcept; //! @brief Search for read only handles of the attribute in this bundle by using attribute names. //! //! @param names The name of the attributes to be searched for. //! 
@param nameCount Size of names buffer. //! @param attributes The buffer to store handles of the attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstAttributesByName(const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept; //! @brief Get read only handles to all child bundles in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when bundles is `nullptr`. When in this mode, *bundleCount //! will be populated with the number of bundles in the bundle. //! //! **Get mode** is enabled when bundles is not `nullptr`. Upon entering the function, *bundleCount //! stores the number of entries in bundles. //! In **Get mode** bundles are not nullptr, bundles array is populated with bundle handles in the bundle. //! //! @param bundles The buffer to save child bundle handles. //! @param bundleCount Size of the bundles buffer. Must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstChildBundles(omni::graph::core::ConstBundleHandle* const bundles, size_t* const bundleCount) noexcept; //! @brief Get read only handle to child bundle by index. //! //! @param bundleIndex Bundle index in range [0, childBundleCount). //! @param bundle Handle under the index. If bundle index is out of range, then invalid handle is returned. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstChildBundle(size_t bundleIndex, omni::graph::core::ConstBundleHandle* const bundle) noexcept; //! @brief Lookup for read only handles to child bundles under specified names. //! //! 
For children that are not found invalid handles are returned. //! //! @param names The names of the child bundles in this bundle. //! @param nameCount The number of child bundles to be searched. //! @param foundBundles Output handles to the found bundles. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstChildBundlesByName(const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstBundleHandle* const foundBundles) noexcept; //! Return Const Bundle Handle to Metadata Storage omni::graph::core::ConstBundleHandle getConstMetadataStorage() noexcept; //! @brief Get the names and types of all bundle metadata fields in this bundle. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when fieldNames and fieldTypes are `nullptr`. When in this mode, *fieldCount //! will be populated with the number of metadata fields in this bundle. //! //! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function, //! *fieldCount stores the number of entries in fieldNames and @p fieldTypes. //! //! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names. //! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types. //! //! @param fieldNames Output field names in this bundle. //! @param fieldTypes Output field types in this bundle. //! @param fieldCount must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getBundleMetadataNamesAndTypes(omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept; //! @brief Search for field handles in this bundle by using field names. //! 
//!@param fieldNames Name of bundle metadata fields to be searched for. //!@param fieldCount Size of fieldNames and bundleMetadata arrays. //!@param bundleMetadata Handle to metadata fields in this bundle. //!@return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstBundleMetadataByName(const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept; //! @brief Get the names and types of all attribute metadata fields in the attribute. //! //! This method operates in two modes: **query mode** or **get mode**. //! //! **Query mode** is enabled when fieldNames and @p fieldTypes are `nullptr`. When in this mode, *fieldCount //! will be populated with the number of metadata fields in the attribute. //! //! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function, //! *fieldCount stores the number of entries in fieldNames and fieldTypes. //! //! In **Get mode** fieldNames are not `nullptr`, fieldNames array is populated with field names. //! In **Get mode** fieldTypes are not `nullptr`, fieldTypes array is populated with field types. //! //! @param attribute Name of the attribute. //! @param fieldNames Output field names in the attribute. //! @param fieldTypes Output field types in the attribute. //! @param fieldCount must not be `nullptr` in both modes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getAttributeMetadataNamesAndTypes(omni::graph::core::NameToken attribute, omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept; //! @brief Search for read only field handles in the attribute by using field names. //! //! @param attribute The name of the attribute. //! 
@param fieldNames The names of attribute metadata fields to be searched for. //! @param fieldCount Size of fieldNames and attributeMetadata arrays. //! @param attributeMetadata Handles to attribute metadata fields in the attribute. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getConstAttributeMetadataByName( omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline bool omni::core::Generated<omni::graph::core::IConstBundle2_abi>::isValid() noexcept { return isValid_abi(); } inline omni::graph::core::GraphContextObj omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getContext() noexcept { return getContext_abi(); } inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstHandle() noexcept { return getConstHandle_abi(); } inline carb::flatcache::PathC omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getPath() noexcept { return getPath_abi(); } inline omni::graph::core::NameToken omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getName() noexcept { return getName_abi(); } inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstParentBundle() noexcept { return getConstParentBundle_abi(); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeNamesAndTypes( omni::graph::core::NameToken* const names, omni::graph::core::Type* const types, size_t* const nameAndTypeCount) noexcept { return getAttributeNamesAndTypes_abi(names, types, nameAndTypeCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributes( 
omni::graph::core::ConstAttributeDataHandle* const attributes, size_t* const attributeCount) noexcept { return getConstAttributes_abi(attributes, attributeCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributesByName( const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept { return getConstAttributesByName_abi(names, nameCount, attributes); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundles( omni::graph::core::ConstBundleHandle* const bundles, size_t* const bundleCount) noexcept { return getConstChildBundles_abi(bundles, bundleCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundle( size_t bundleIndex, omni::graph::core::ConstBundleHandle* const bundle) noexcept { return getConstChildBundle_abi(bundleIndex, bundle); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundlesByName( const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::ConstBundleHandle* const foundBundles) noexcept { return getConstChildBundlesByName_abi(names, nameCount, foundBundles); } inline omni::graph::core::ConstBundleHandle omni::core::Generated< omni::graph::core::IConstBundle2_abi>::getConstMetadataStorage() noexcept { return getConstMetadataStorage_abi(); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getBundleMetadataNamesAndTypes( omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept { return getBundleMetadataNamesAndTypes_abi(fieldNames, fieldTypes, fieldCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstBundleMetadataByName( const omni::graph::core::NameToken* const fieldNames, 
size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept { return getConstBundleMetadataByName_abi(fieldNames, fieldCount, bundleMetadata); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeMetadataNamesAndTypes( omni::graph::core::NameToken attribute, omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept { return getAttributeMetadataNamesAndTypes_abi(attribute, fieldNames, fieldTypes, fieldCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributeMetadataByName( omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept { return getConstAttributeMetadataByName_abi(attribute, fieldNames, fieldCount, attributeMetadata); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
16,589
C
48.969879
136
0.687624
omniverse-code/kit/include/omni/graph/core/ISchedulingHints2.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/core/IObject.h> #include <omni/core/Omni.h> #include <omni/inspect/IInspector.h> #include <omni/graph/core/ISchedulingHints.h> namespace omni { namespace graph { namespace core { //! The purity of the node implementation. For some context, a "pure" node is //! one whose initialize, compute, and release methods are entirely deterministic, //! i.e. they will always produce the same output attribute values for a given set //! of input attribute values, and do not access, rely on, or otherwise mutate data //! external to the node's scope enum class ePurityStatus { //! Node is assumed to not be pure eImpure, //! Node can be considered pure if explicitly specified by the node author ePure }; //! Declare the ISchedulingHints2 interface definition OMNI_DECLARE_INTERFACE(ISchedulingHints2); //! Interface extension for ISchedulingHints that adds a new "pure" hint class ISchedulingHints2_abi : public omni::core::Inherits<ISchedulingHints, OMNI_TYPE_ID("omni.graph.core.ISchedulingHints2")> { protected: /** * Get the flag describing the node's purity state. * * @returns Value of the PurityStatus flag. */ virtual ePurityStatus getPurityStatus_abi() noexcept = 0; /** * Set the flag describing the node's purity status. * * @param[in] newPurityStatus New value of the PurityStatus flag. */ virtual void setPurityStatus_abi(ePurityStatus newPurityStatus) noexcept = 0; }; } // namespace core } // namespace graph } // namespace omni #include "ISchedulingHints2.gen.h" //! @cond Doxygen_Suppress //! //! 
API part of the scheduling hints 2 interface //! @copydoc omni::graph::core::ISchedulingHints2_abi OMNI_DEFINE_INTERFACE_API(omni::graph::core::ISchedulingHints2) //! @endcond { public: //! @copydoc omni::graph::core::ISchedulingHints2::getPurityStatus_abi inline omni::graph::core::ePurityStatus getPurityStatus() noexcept { return getPurityStatus_abi(); } //! @copydoc omni::graph::core::ISchedulingHints2::setPurityStatus_abi inline void setPurityStatus(omni::graph::core::ePurityStatus newPurityStatus) noexcept { setPurityStatus_abi(newPurityStatus); } };
2,661
C
30.690476
102
0.73168
omniverse-code/kit/include/omni/graph/core/PyISchedulingHints2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindePurityStatus(py::module& m) { py::enum_<omni::graph::core::ePurityStatus> e( m, "ePurityStatus", R"OMNI_BIND_RAW_(The purity of the node implementation. For some context, a "pure" node is one whose initialize, compute, and release methods are entirely deterministic, i.e. they will always produce the same output attribute values for a given set of input attribute values, and do not access, rely on, or otherwise mutate data external to the node's scope)OMNI_BIND_RAW_"); e.value("E_IMPURE", omni::graph::core::ePurityStatus::eImpure, R"OMNI_BIND_RAW_(Node is assumed to not be pure)OMNI_BIND_RAW_"); e.value("E_PURE", omni::graph::core::ePurityStatus::ePure, R"OMNI_BIND_RAW_(Node can be considered pure if explicitly specified by the node author)OMNI_BIND_RAW_"); return e; } auto bindISchedulingHints2(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>, omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>>, omni::core::Api<omni::graph::core::ISchedulingHints_abi>> clsParent(m, "_ISchedulingHints2"); 
py::class_<omni::graph::core::ISchedulingHints2, omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>, omni::python::detail::PyObjectPtr<omni::graph::core::ISchedulingHints2>, omni::core::Api<omni::graph::core::ISchedulingHints_abi>> cls(m, "ISchedulingHints2", R"OMNI_BIND_RAW_(Interface extension for ISchedulingHints that adds a new "pure" hint)OMNI_BIND_RAW_"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::ISchedulingHints2>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::ISchedulingHints2>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::ISchedulingHints2 instantiation"); } return tmp; })); cls.def_property("purity_status", &omni::graph::core::ISchedulingHints2::getPurityStatus, &omni::graph::core::ISchedulingHints2::setPurityStatus); return omni::python::PyBind<omni::graph::core::ISchedulingHints2>::bind(cls); }
3,455
C
45.079999
118
0.657308
omniverse-code/kit/include/omni/graph/core/IBundle.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Provide read write access to recursive bundles. //! template <> class omni::core::Generated<omni::graph::core::IBundle2_abi> : public omni::graph::core::IBundle2_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundle2") //! Return handle to this bundle. Invalid handle is returned if this bundle is invalid. omni::graph::core::BundleHandle getHandle() noexcept; //! Return parent of this bundle, or invalid handle if there is no parent. omni::graph::core::BundleHandle getParentBundle() noexcept; //! @brief Get read-write handles to all attributes in this bundle. //! //! @copydetails IConstBundle2_abi::getConstAttributes_abi omni::core::Result getAttributes(omni::graph::core::AttributeDataHandle* const attributes, size_t* const attributeCount) noexcept; //! @brief Searches for read-write handles of the attribute in this bundle by using attribute names. //! //! @copydetails IConstBundle2_abi::getConstAttributesByName_abi omni::core::Result getAttributesByName(const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::AttributeDataHandle* const attributes) noexcept; //! @brief Get read write handles to all child bundles in this bundle. //! //! 
@copydetails IConstBundle2_abi::getConstChildBundles_abi omni::core::Result getChildBundles(omni::graph::core::BundleHandle* const bundles, size_t* const bundleCount) noexcept; //! @brief Get read write handle to child bundle by index. //! //! @copydetails IConstBundle2_abi::getConstChildBundle_abi omni::core::Result getChildBundle(size_t bundleIndex, omni::graph::core::BundleHandle* const bundle) noexcept; //! @brief Lookup for read write handles to child bundles under specified names. //! //! @copydetails IConstBundle2_abi::getConstChildBundlesByName_abi omni::core::Result getChildBundlesByName(const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::BundleHandle* const foundBundles) noexcept; //! @brief Create new attributes by copying existing. //! //! Source attribute handles' data and metadata are copied. If a handle is invalid, //! then its source is ignored. //! Created attributes are owned by this bundle. //! //! @param newNames The names for the new attributes, if `nullptr` then names are taken from the source attributes. //! @param sourceAttributes Handles to attributes whose data type is to be copied. //! @param attributeCount Number of attributes to be copied. //! @param overwrite An option to overwrite existing attributes. //! @param copiedAttributes Output handles to the newly copied attributes. Can be `nullptr` if no output is //! required. //! @param copiedCount Number of successfully copied attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result copyAttributes(const omni::graph::core::NameToken* const newNames, const omni::graph::core::ConstAttributeDataHandle* const sourceAttributes, size_t attributeCount, bool overwrite, omni::graph::core::AttributeDataHandle* const copiedAttributes, size_t* const copiedCount) noexcept; //! @brief Create attributes based on provided names and types. //! //! 
Created attributes are owned by this bundle. //! //! @param names The names of the attributes. //! @param types The types of the attributes. //! @param elementCount Number of elements in the array, can be `nullptr` if attribute is not an array. //! @param attributeCount Number of attributes to be created. //! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is //! required. //! @param createdCount Number of successfully created attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result createAttributes(const omni::graph::core::NameToken* const names, const omni::graph::core::Type* const types, const size_t* const elementCount, size_t attributeCount, omni::graph::core::AttributeDataHandle* const createdAttributes, size_t* const createdCount) noexcept; //! @brief Use attribute handles as pattern to create new attributes. //! //! The name and type for new attributes are taken from pattern attributes, data and metadata is not copied. //! If pattern handle is invalid, then attribute creation is skipped. //! Created attributes are owned by this bundle. //! //! @param patternAttributes Attributes whose name and type is to be used to create new attributes. //! @param patternCount Number of attributes to be created. //! @param createdAttributes Output handles to the newly created attributes. Can be nullptr if no output is //! required. //! @param createdCount Number of successfully created attributes. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result createAttributesLike(const omni::graph::core::ConstAttributeDataHandle* const patternAttributes, size_t patternCount, omni::graph::core::AttributeDataHandle* const createdAttributes, size_t* const createdCount) noexcept; //! 
@brief Create immediate child bundles under specified names in this bundle. //! //! Only immediate children are created. This method does not work recursively. //! If name token is invalid, then child bundle creation is skipped. //! Created bundles are owned by this bundle. //! //! @param names New children names in this bundle. //! @param nameCount Number of bundles to be created. //! @param createdBundles Output handles to the newly created bundles. Can be nullptr if no output is required. //! @param createdCount Number of successfully created child bundles. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result createChildBundles(const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::BundleHandle* const createdBundles, size_t* const createdCount) noexcept; //! <b>Feature not implemented yet.</b> //! //! @brief Add a set of attributes to this bundle as links. //! //! Added attributes are links to other attributes that are part of another bundle. //! If target handle is invalid, then linking is skipped. //! The links are owned by this bundle, but targets of the links are not. //! Removing links from this bundle does not destroy the data links point to. //! //! @param linkNames The names for new links. //! @param targetAttributes Handles to attributes whose data is to be added. //! @param attributeCount Number of attributes to be added. //! @param linkedAttributes Output handles to linked attributes. Can be nullptr if no output is required. //! @param linkedCount Number of attributes successfully linked. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
omni::core::Result linkAttributes(const omni::graph::core::NameToken* const linkNames, const omni::graph::core::ConstAttributeDataHandle* const targetAttributes, size_t attributeCount, omni::graph::core::AttributeDataHandle* const linkedAttributes, size_t* const linkedCount) noexcept; //! @brief Copy bundle data and metadata from the source bundle to this bundle. //! //! If source handle is invalid, then operation is skipped. //! //! @param sourceBundle Handle to bundle whose data is to be copied. //! @param overwrite An option to overwrite existing content of the bundle. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result copyBundle(const omni::graph::core::ConstBundleHandle& sourceBundle, bool overwrite) noexcept; //! @brief @brief Create new child bundles by copying existing. //! //! Source bundle handles' data and metadata are copied. If a handle is invalid, //! then its source is ignored. //! Created bundles are owned by this bundle. //! //! @param newNames Names for new children, if `nullptr` then names are taken from the source bundles. //! @param sourceBundles Handles to bundles whose data is to be copied. //! @param bundleCount Number of bundles to be copied. //! @param overwrite An option to overwrite existing child bundles. //! @param copiedBundles Output handles to the newly copied bundles. Can be `nullptr` if no output is required. //! @param copiedCount Number of successfully copied child bundles. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result copyChildBundles(const omni::graph::core::NameToken* const newNames, const omni::graph::core::ConstBundleHandle* const sourceBundles, size_t bundleCount, bool overwrite, omni::graph::core::BundleHandle* const copiedBundles, size_t* const copiedCount) noexcept; //! <b>Feature not implemented yet.</b> //! //! 
@brief Link content from the source bundle to this bundle. //! //! If source handle is invalid, then operation is skipped. //! //! @param sourceBundle Handle to bundle whose data is to be linked. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result linkBundle(const omni::graph::core::ConstBundleHandle* const sourceBundle) noexcept; //! @brief Add a set of bundles as children to this bundle as links. //! //! Created bundles are links to other bundles that are part of another bundle. //! If target handle is invalid, then operation is skipped. //! The links are owned by this bundle, but targets of the links are not. //! Removing links from this bundle does not destroy the targets data. //! //! @param linkNames Names for new links. //! @param targetBundles Handles to bundles whose data is to be added. //! @param bundleCount Number of bundles to be added. //! @param linkedBundles Handles to linked bundles. Can be nullptr if no output is required. //! @param linkedCount Number of child bundles successfully linked. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result linkChildBundles(const omni::graph::core::NameToken* const linkNames, const omni::graph::core::ConstBundleHandle* const targetBundles, size_t bundleCount, omni::graph::core::BundleHandle* const linkedBundles, size_t* const linkedCount) noexcept; //! @brief Remove attributes based on provided handles. //! //! Lookup the attribute handles and if they are part of this bundle then remove attributes' data and //! metadata. Attribute handles that are not part of this bundle are ignored. //! //! @param attributes Handles to attributes whose data is to be removed //! @param attributeCount Number of attributes to be removed. //! @param removedCount Number of attributes successfully removed. //! 
@return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result removeAttributes(const omni::graph::core::ConstAttributeDataHandle* const attributes, size_t attributeCount, size_t* const removedCount) noexcept; //! @brief Remove attributes based on provided names. //! //! Lookup the attribute names and if they are part of this bundle then remove attributes' data and //! metadata. Attribute names that are not part of this bundle are ignored. //! //! @param names The names of the attributes whose data is to be removed. //! @param nameCount Number of attributes to be removed. //! @param removedCount Number of attributes successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result removeAttributesByName(const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept; //! @brief Remove child bundles based on provided handles. //! //! Lookup the bundle handles and if they are children of the bundle then remove them and their metadata. //! Bundle handles that are not children of this bundle are ignored. //! Only empty child bundles can be removed. //! //! @param childHandles Handles to bundles to be removed. //! @param childCount Number of child bundles to be removed. //! @param removedCount Number of child bundles successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result removeChildBundles(const omni::graph::core::ConstBundleHandle* const childHandles, size_t childCount, size_t* const removedCount) noexcept; //! @brief Remove child bundles based on provided names. //! //! Lookup the bundle names and if the are children of the bundle then remove them and their metadata. //! Bundle names that are not children of this bundle are ignored. //! 
Only empty child bundles can be removed. //! //! @param names The names of the child bundles to be removed. //! @param nameCount Number of child bundles to be removed. //! @param removedCount Number of child bundles successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result removeChildBundlesByName(const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept; //! Return Bundle Handle to Metadata Storage omni::graph::core::BundleHandle getMetadataStorage() noexcept; //! @brief Search for bundle metadata fields based on provided names. //! //! Invalid attribute handles are returned for not existing names. //! //! @param fieldNames Bundle metadata field names to be searched for. //! @param fieldCount Size of fieldNames and bundleMetadata arrays. //! @param bundleMetadata Handles to bundle metadata fields in this bundle. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result getBundleMetadataByName(const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::AttributeDataHandle* const bundleMetadata) noexcept; //! @brief Create bundle metadata fields in this bundle. //! //! @param fieldNames Names of new bundle metadata fields. //! @param fieldTypes Types of new bundle metadata fields. //! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array. //! @param fieldCount Size of fieldNames and fieldTypes arrays. //! @param bundleMetadata Handles to the newly created bundle metadata fields. Can be `nullptr` if no output is //! required. //! @param createdCount Number of child bundles successfully created. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
omni::core::Result createBundleMetadata(const omni::graph::core::NameToken* const fieldNames, const omni::graph::core::Type* const fieldTypes, const size_t* const elementCount, size_t fieldCount, omni::graph::core::AttributeDataHandle* const bundleMetadata, size_t* const createdCount) noexcept; //! @brief Remove bundle metadata based on provided field names. //! //! @param fieldNames Names of the bundle metadata fields whose data is to be removed. //! @param fieldCount Number of the bundle metadata fields to be removed. //! @param removedCount Number of bundle metadata fields successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result removeBundleMetadata(const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, size_t* const removedCount) noexcept; //! @brief Search for read write field handles in the attribute by using field names. //! //! @copydetails IConstBundle2_abi::getConstAttributeMetadataByName_abi omni::core::Result getAttributeMetadataByName(omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::AttributeDataHandle* const attributeMetadata) noexcept; //! @brief Create attribute metadata fields. //! //! @param attribute Name of the attribute. //! @param fieldNames Names of new attribute metadata fields. //! @param fieldTypes Types of new attribute metadata fields. //! @param elementCount Number of elements in the array, can be `nullptr` if field is not an array. //! @param fieldCount Size of fieldNames and fieldTypes arrays. //! @param attributeMetadata Handles to the newly created attribute metadata. Can be `nullptr` if no output is //! required. //! @param removedCount Number of attribute metadata fields successfully created. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
omni::core::Result createAttributeMetadata(omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, const omni::graph::core::Type* const fieldTypes, const size_t* const elementCount, size_t fieldCount, omni::graph::core::AttributeDataHandle* const attributeMetadata, size_t* const removedCount) noexcept; //! @brief Remove attribute metadata fields. //! //! @param attribute Name of the attribute. //! @param fieldNames Names of the attribute metadata fields to be removed. //! @param fieldCount Size of fieldNames array. //! @param removedCount Number of attribute metadata fields successfully removed. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. omni::core::Result removeAttributeMetadata(omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, size_t* const removedCount) noexcept; //! @brief Remove all attributes, child bundles and metadata from this bundle, but keep the bundle itself. //! //! @param bundleMetadata Clears bundle metadata in this bundle. //! @param attributes Clears attributes in this bundle. //! @param childBundles Clears child bundles in this bundle. //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are //! invalid. 
omni::core::Result clearContents(bool bundleMetadata, bool attributes, bool childBundles) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getHandle() noexcept { return getHandle_abi(); } inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getParentBundle() noexcept { return getParentBundle_abi(); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributes( omni::graph::core::AttributeDataHandle* const attributes, size_t* const attributeCount) noexcept { return getAttributes_abi(attributes, attributeCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributesByName( const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::AttributeDataHandle* const attributes) noexcept { return getAttributesByName_abi(names, nameCount, attributes); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundles( omni::graph::core::BundleHandle* const bundles, size_t* const bundleCount) noexcept { return getChildBundles_abi(bundles, bundleCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundle( size_t bundleIndex, omni::graph::core::BundleHandle* const bundle) noexcept { return getChildBundle_abi(bundleIndex, bundle); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getChildBundlesByName( const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::BundleHandle* const foundBundles) noexcept { return getChildBundlesByName_abi(names, nameCount, foundBundles); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyAttributes( const omni::graph::core::NameToken* const newNames, const omni::graph::core::ConstAttributeDataHandle* const sourceAttributes, 
size_t attributeCount, bool overwrite, omni::graph::core::AttributeDataHandle* const copiedAttributes, size_t* const copiedCount) noexcept { return copyAttributes_abi(newNames, sourceAttributes, attributeCount, overwrite, copiedAttributes, copiedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributes( const omni::graph::core::NameToken* const names, const omni::graph::core::Type* const types, const size_t* const elementCount, size_t attributeCount, omni::graph::core::AttributeDataHandle* const createdAttributes, size_t* const createdCount) noexcept { return createAttributes_abi(names, types, elementCount, attributeCount, createdAttributes, createdCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributesLike( const omni::graph::core::ConstAttributeDataHandle* const patternAttributes, size_t patternCount, omni::graph::core::AttributeDataHandle* const createdAttributes, size_t* const createdCount) noexcept { return createAttributesLike_abi(patternAttributes, patternCount, createdAttributes, createdCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createChildBundles( const omni::graph::core::NameToken* const names, size_t nameCount, omni::graph::core::BundleHandle* const createdBundles, size_t* const createdCount) noexcept { return createChildBundles_abi(names, nameCount, createdBundles, createdCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkAttributes( const omni::graph::core::NameToken* const linkNames, const omni::graph::core::ConstAttributeDataHandle* const targetAttributes, size_t attributeCount, omni::graph::core::AttributeDataHandle* const linkedAttributes, size_t* const linkedCount) noexcept { return linkAttributes_abi(linkNames, targetAttributes, attributeCount, linkedAttributes, linkedCount); } inline omni::core::Result 
omni::core::Generated<omni::graph::core::IBundle2_abi>::copyBundle( const omni::graph::core::ConstBundleHandle& sourceBundle, bool overwrite) noexcept { return copyBundle_abi(sourceBundle, overwrite); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::copyChildBundles( const omni::graph::core::NameToken* const newNames, const omni::graph::core::ConstBundleHandle* const sourceBundles, size_t bundleCount, bool overwrite, omni::graph::core::BundleHandle* const copiedBundles, size_t* const copiedCount) noexcept { return copyChildBundles_abi(newNames, sourceBundles, bundleCount, overwrite, copiedBundles, copiedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkBundle( const omni::graph::core::ConstBundleHandle* const sourceBundle) noexcept { return linkBundle_abi(sourceBundle); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::linkChildBundles( const omni::graph::core::NameToken* const linkNames, const omni::graph::core::ConstBundleHandle* const targetBundles, size_t bundleCount, omni::graph::core::BundleHandle* const linkedBundles, size_t* const linkedCount) noexcept { return linkChildBundles_abi(linkNames, targetBundles, bundleCount, linkedBundles, linkedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributes( const omni::graph::core::ConstAttributeDataHandle* const attributes, size_t attributeCount, size_t* const removedCount) noexcept { return removeAttributes_abi(attributes, attributeCount, removedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributesByName( const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept { return removeAttributesByName_abi(names, nameCount, removedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeChildBundles( const 
omni::graph::core::ConstBundleHandle* const childHandles, size_t childCount, size_t* const removedCount) noexcept { return removeChildBundles_abi(childHandles, childCount, removedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeChildBundlesByName( const omni::graph::core::NameToken* const names, size_t nameCount, size_t* const removedCount) noexcept { return removeChildBundlesByName_abi(names, nameCount, removedCount); } inline omni::graph::core::BundleHandle omni::core::Generated<omni::graph::core::IBundle2_abi>::getMetadataStorage() noexcept { return getMetadataStorage_abi(); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getBundleMetadataByName( const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::AttributeDataHandle* const bundleMetadata) noexcept { return getBundleMetadataByName_abi(fieldNames, fieldCount, bundleMetadata); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createBundleMetadata( const omni::graph::core::NameToken* const fieldNames, const omni::graph::core::Type* const fieldTypes, const size_t* const elementCount, size_t fieldCount, omni::graph::core::AttributeDataHandle* const bundleMetadata, size_t* const createdCount) noexcept { return createBundleMetadata_abi(fieldNames, fieldTypes, elementCount, fieldCount, bundleMetadata, createdCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeBundleMetadata( const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, size_t* const removedCount) noexcept { return removeBundleMetadata_abi(fieldNames, fieldCount, removedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::getAttributeMetadataByName( omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, omni::graph::core::AttributeDataHandle* 
const attributeMetadata) noexcept { return getAttributeMetadataByName_abi(attribute, fieldNames, fieldCount, attributeMetadata); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::createAttributeMetadata( omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, const omni::graph::core::Type* const fieldTypes, const size_t* const elementCount, size_t fieldCount, omni::graph::core::AttributeDataHandle* const attributeMetadata, size_t* const removedCount) noexcept { return createAttributeMetadata_abi( attribute, fieldNames, fieldTypes, elementCount, fieldCount, attributeMetadata, removedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::removeAttributeMetadata( omni::graph::core::NameToken attribute, const omni::graph::core::NameToken* const fieldNames, size_t fieldCount, size_t* const removedCount) noexcept { return removeAttributeMetadata_abi(attribute, fieldNames, fieldCount, removedCount); } inline omni::core::Result omni::core::Generated<omni::graph::core::IBundle2_abi>::clearContents(bool bundleMetadata, bool attributes, bool childBundles) noexcept { return clearContents_abi(bundleMetadata, attributes, childBundles); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
32,728
C
52.30456
124
0.670466
omniverse-code/kit/include/omni/graph/core/IDataModel.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "Handle.h" #include <carb/Interface.h> namespace omni { namespace graph { namespace core { /** * Interface to the underlying data access for OmniGraph */ struct IDataModel { //! @private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IDataModel", 1, 1); // This shouldn't exist...all data model changes should be implemented via data model interface // until this is done, we expose a way to protect edits to data model...like ones in PrimCommon /** * @return Allocate and return new scoped lock for read or write. */ void* (CARB_ABI* enterEditScope)(bool writer); /** * @param[in] scope Free scoped lock */ void (CARB_ABI* exitEditScope)(void* scope); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IDataModel, exitEditScope, 1) //! Scoping object to enter and exist editing mode for the DataModel class DataModelEditScope { public: //! Constructor to enter the edit scope, optionally with write mode enabled DataModelEditScope(bool write) { static const IDataModel& iDataModel = *carb::getCachedInterface<IDataModel>(); m_scope = iDataModel.enterEditScope(write); } //! Destructor that exits the DataModel edit scope ~DataModelEditScope() { static const IDataModel& iDataModel = *carb::getCachedInterface<IDataModel>(); iDataModel.exitEditScope(m_scope); } private: void* m_scope{nullptr}; }; } } }
2,002
C
28.028985
107
0.715285
omniverse-code/kit/include/omni/graph/core/TemplateUtils.h
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <utility> #include <type_traits> // ====================================================================== // Implementation of the C++20 feature to detect whether a type is a bounded array (e.g. int[2], float[3]...) template<class T> struct is_bounded_array: std::false_type {}; template<class T, std::size_t N> struct is_bounded_array<T[N]> : std::true_type {}; // ====================================================================== // When we move to C++17 we can replace and_ with std::conjunction // Recursively applies std::conditional to all of the template arguments. template <typename... Conds> struct and_ : std::true_type { }; template <typename Cond, typename... Conds> struct and_<Cond, Conds...> : std::conditional<Cond::value, and_<Conds...>, std::false_type>::type { }; // ====================================================================== // When we move to C++17 we can replace "fold" with C++ fold expression // Ex: args && ... // Recursively applies provided functor to all of the template arguments. // Ex: fold(std::logical_and<>(), args...) template <class F, class A0> auto fold(F&&, A0&& a0) { return std::forward<A0>(a0); } template <class F, class A0, class... As> auto fold(F&& f, A0&& a0, As&&... 
as) { return f(std::forward<A0>(a0), fold(f, std::forward<As>(as)...)); } // ====================================================================== // Removes const& qualifier on a type template <typename T> using remove_const_ref = std::remove_const<typename std::remove_reference<T>::type>; // ====================================================================== // Check to see if a list of types are all of the named type template <typename MembersAreThisType, typename... MemberType> using areTypeT = and_<std::is_same<MembersAreThisType, MemberType>...>; // ====================================================================== // Templatized version of void template <typename... Ts> using void_t = void; // ====================================================================== // This set of templates is used to define a metaprogram "is_detected" that derives from // std::true_type if the declared templated function exists and std::false_type if not // (for use in compile-time overload selection, described below). namespace detail { // Matches a call with any type, any templated type, and a variable length list of any types. // There has to be a typename as the first parameter because another template can't be one. // Using the void_t<> type defined above allows this parameter to be used for SFINAE selection. template <typename, template <typename...> class, typename...> struct is_detected : std::false_type { }; // This specialization of detail::is_detected triggers only when the Operation can be instantiated // with the Arguments. For method checks the "has_X" templates above will be legal types when the // class mentioned as the first member of "Arguments" implements the method "X". This in turn will // make void_t<Operation<Arguments...>>> a legal type. In those situations this specialization will // succeed and is_detected<> will be a std::true_type. template <template <class...> class Operation, typename... 
Arguments> struct is_detected<void_t<Operation<Arguments...>>, Operation, Arguments...> : std::true_type { }; } // This is used only to hide the implementation detail of using the void_t<> template argument to // guide the SFINAE substitution which detects method overrides. That way the templates below can // use this more natural pattern: // is_detected<has_X, NodeTypeClass>() // instead of this: // detail::is_detected<void_t<>, has_X, NodeTypeClass>() template <template <class...> class Operation, typename... Arguments> using is_detected = ::detail::is_detected<void_t<>, Operation, Arguments...>;
4,283
C
43.164948
109
0.636937
omniverse-code/kit/include/omni/graph/core/PyIConstBundle.gen.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindIConstBundle2(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::IConstBundle2_abi>, omni::core::ObjectPtr<omni::core::Generated<omni::graph::core::IConstBundle2_abi>>, omni::core::IObject> clsParent(m, "_IConstBundle2"); py::class_<omni::graph::core::IConstBundle2, omni::core::Generated<omni::graph::core::IConstBundle2_abi>, omni::core::ObjectPtr<omni::graph::core::IConstBundle2>, omni::core::IObject> cls(m, "IConstBundle2", R"OMNI_BIND_RAW_(Provide read only access to recursive bundles.)OMNI_BIND_RAW_"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::IConstBundle2>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::IConstBundle2>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::IConstBundle2 instantiation"); } return tmp; })); cls.def_property_readonly("valid", &omni::graph::core::IConstBundle2::isValid); return 
omni::python::PyBind<omni::graph::core::IConstBundle2>::bind(cls); }
2,323
C
41.254545
119
0.637538
omniverse-code/kit/include/omni/graph/core/ISchedulingHints.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/core/IObject.h> #include <omni/core/Omni.h> #include <omni/inspect/IInspector.h> namespace omni { namespace graph { namespace core { //! How does the node access the data described by the enum eAccessLocation enum class eAccessType { //! There is no access to data of the associated type eNone, //! There is only read access to data of the associated type eRead, //! There is only write access to data of the associated type eWrite, //! There is both read and write access to data of the associated type eReadWrite }; //! What type of non-attribute data does this node access enum class eAccessLocation { //! Accesses the USD stage data eUsd, //! Accesses data that is not part of the node or node type eGlobal, //! Accesses data that is shared by every instance of a particular node type eStatic, //! Accesses information on the topology of the graph to which the node belongs eTopology }; //! How thread safe is the node during evaluation enum class eThreadSafety { //! Nodes can be evaluated in multiple threads safely eSafe, //! Nodes cannot be evaluated in multiple threads safely eUnsafe, //! The thread safety status of the node type is unknown eUnknown }; //! How the node is allowed to be computed enum class eComputeRule { //! Nodes are computed according to the default evaluator rules eDefault, //! The evaluator may skip computing this node until explicitly requested with INode::requestCompute eOnRequest }; //! Declare the ISchedulingHints interface definition OMNI_DECLARE_INTERFACE(ISchedulingHints); //! 
Interface to the list of scheduling hints that can be applied to a node type class ISchedulingHints_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.ISchedulingHints")> { protected: /** * Get the threadSafety status (i.e. can be run in parallel with other nodes) * * @returns Is the node compute threadsafe? */ virtual eThreadSafety getThreadSafety_abi() noexcept = 0; /** * Set the flag indicating if a node is threadsafe or not. * * @param[in] newThreadSafety New value of the threadsafe flag */ virtual void setThreadSafety_abi(eThreadSafety newThreadSafety) noexcept = 0; /** * Get the type of access the node has for a given data type * * @param[in] dataType Type of data for which access type is being modified * @returns Value of the access type flag */ virtual eAccessType getDataAccess_abi(eAccessLocation dataType) noexcept = 0; /** * Set the flag describing how a node accesses particular data in its compute _abi (defaults to no access). * Setting any of these flags will, in most cases, automatically mark the node as "not threadsafe". * One current exception to this is allowing a node to be both threadsafe and a writer to USD, since * such behavior can be achieved if delayed writebacks (e.g. "registerForUSDWriteBack") are utilized * in the node's compute method. * * @param[in] dataType Type of data for which access type is being modified * @param[in] newAccessType New value of the access type flag */ virtual void setDataAccess_abi(eAccessLocation dataType, eAccessType newAccessType) noexcept = 0; /** * Get the flag describing the compute rule which may be followed by the evaluator. * * @returns Value of the ComputeRule flag */ virtual eComputeRule getComputeRule_abi() noexcept = 0; /** * Set the flag describing the compute rule which may be followed by the evaluator. 
* * @param[in] newComputeRule New value of the ComputeRule flag */ virtual void setComputeRule_abi(eComputeRule newComputeRule) noexcept = 0; /** * Runs the inspector on the scheduling hints. * * @param[in] inspector The inspector class * @return true if the inspection ran successfully, false if the inspection type is not supported */ virtual bool inspect_abi(omni::inspect::IInspector* inspector) noexcept = 0; }; } // namespace core } // namespace graph } // namespace omni #include "ISchedulingHints.gen.h"
4,687
C
33.218978
111
0.711756
omniverse-code/kit/include/omni/graph/core/CppWrappers.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "TemplateUtils.h" #include <carb/Defines.h> #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/CudaUtils.h> #include <string> #include <tuple> #include <type_traits> #include <vector> #include <gsl/span> namespace omni { namespace graph { namespace core { // The templates for extracting data do not like double pointers so use this type for assigning strings using CString = char*; // NOTE: This file is a work in progress, for assessing possible interfaces, not yet for use. // -Wall will warn about these inline functions not being used #if defined(__GNUC__) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wunused-function" #endif using Path = omni::fabric::Path; using Token = omni::fabric::Token; // ---------------------------------------------------------------------------- // Accessors for accessing attributes by name static ConstAttributeDataHandle getAttributeR(GraphContextObj const& contextObj, ConstBundleHandle bundleHandle, fabric::TokenC name) { ConstAttributeDataHandle out; contextObj.iBundle->getAttributesByNameR(&out, contextObj, bundleHandle, &name, 1); return out; } static ConstAttributeDataHandle getAttributeR(GraphContextObj const& contextObj, ConstBundleHandle bundleHandle, Token const& name) { NameToken nameToken = name; return getAttributeR(contextObj, bundleHandle, nameToken); } template <typename... Types, typename... 
NameTypes> std::tuple<Types...> getAttributesR(const GraphContextObj& contextObj, ConstBundleHandle& primHandle, std::tuple<NameTypes...> names) { // Check that size(out)==len(names) and that every element of names has type size_t const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value; const size_t outCount = std::tuple_size<std::tuple<Types...>>::value; static_assert(inCount == outCount, "Input and output tuples must be of same length"); static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value, "Attribute names must have type NameToken or Token"); static_assert(areTypeT<ConstAttributeDataHandle, Types...>::value, "Outputs must have type ConstAttributeDataHandle"); std::tuple<Types...> out; // Cast to C-ABI compatible types std::tuple<Types...>* outTuplePtr = &out; ConstAttributeDataHandle* outPtr = reinterpret_cast<ConstAttributeDataHandle*>(outTuplePtr); std::tuple<NameTypes...>* inTuplePtr = &names; NameToken* namesPtr = reinterpret_cast<NameToken*>(inTuplePtr); // Call C-ABI version of method contextObj.iBundle->getAttributesByNameR(outPtr, contextObj, primHandle, namesPtr, inCount); return out; } static AttributeDataHandle getAttributeW(const GraphContextObj& contextObj, BundleHandle& primHandle, const Token& name) { AttributeDataHandle out; NameToken nameToken = name; contextObj.iBundle->getAttributesByNameW(&out, contextObj, primHandle, &nameToken, 1); return out; } template <typename... Types, typename... 
NameTypes> std::tuple<Types...> getAttributesW(const GraphContextObj& contextObj, BundleHandle& primHandle, std::tuple<NameTypes...> names) { // Check that size(out)==len(names) and that every element of names has type size_t const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value; const size_t outCount = std::tuple_size<std::tuple<Types...>>::value; static_assert(inCount == outCount, "Input and output tuples must be of same length"); static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value, "Attribute names must have type NameToken or Token"); static_assert(areTypeT<AttributeDataHandle, Types...>::value, "Outputs must have type AttributeDataHandle"); std::tuple<Types...> out; // Cast to C-ABI compatible types std::tuple<Types...>* outTuplePtr = &out; AttributeDataHandle* outPtr = reinterpret_cast<AttributeDataHandle*>(outTuplePtr); std::tuple<NameTypes...>* inTuplePtr = &names; HandleInt* namesPtr = reinterpret_cast<HandleInt*>(inTuplePtr); // Call C-ABI version of method contextObj.iBundle->getAttributesByNameW(outPtr, contextObj, primHandle, namesPtr, inCount); return out; } // ---------------------------------------------------------------------------- // Accessors for accessing attributes by AttributeDataHandle // Specialization for the read-only data of a single attribute. Get a single attribute handle from getAttributeR // to pass in as the second parameter. 
// auto constHandle = getAttributeR(contextObj, node, Token("myAttribute")) // // Note that the template parameter for the special case is the data type, not a pointer type // const auto dataPtr = getDataR<float>(contextObj, constHandle); template <typename T> const T* getDataR(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle) { const T* out; const void** outPtr = reinterpret_cast<const void**>(&out); contextObj.iAttributeData->getDataR(outPtr, contextObj, &attrHandle, 1); return out; } // Get the read-only data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesR // to pass in as the second parameter. // auto constHandles = getAttributesR<ConstAttributeDataHandle, ConstAttributeDataHandle>(contextObj, node, // std::make_tuple(Token("a"), Token("b"))) // const float* a_value{nullptr}; // const float* b_value{nullptr}; // // Note that the template parameters for the general case are pointers to the data type // std::tie(a_value, b_value) = getDataR<float*, float*>(contextObj, constHandles); template <typename... Types, typename... 
HandleTypes> std::tuple<Types...> getDataR(const GraphContextObj& contextObj, std::tuple<HandleTypes...> handles) { // Check that size(out)==len(names) and that every element of names has type size_t const size_t inCount = std::tuple_size<std::tuple<HandleTypes...>>::value; const size_t outCount = std::tuple_size<std::tuple<Types...>>::value; static_assert(inCount == outCount, "Input and output tuples must be of same length"); static_assert(areTypeT<ConstAttributeDataHandle, HandleTypes...>::value, "Attribute handles must have type ConstAttributeDataHandle"); std::tuple<Types...> out; // Cast to C-ABI compatible types std::tuple<Types...>* outTuplePtr = &out; const void** outPtr = reinterpret_cast<const void**>(outTuplePtr); std::tuple<HandleTypes...>* inTuplePtr = &handles; ConstAttributeDataHandle* handlesPtr = reinterpret_cast<ConstAttributeDataHandle*>(inTuplePtr); // Call C-ABI version of method contextObj.iAttributeData->getDataR(outPtr, contextObj, handlesPtr, inCount); return out; } // Specialization for the read-only GPU data of a single attribute. Get a single attribute handle from getAttributeR // to pass in as the second parameter. 
// auto handle = getAttributeR(contextObj, node, Token("myAttribute")) // // Note that the template parameter for the special case is the data type, not a pointer type // const auto gpuData = getDataRGPU<float>(contextObj, handle); // If you wish to get CPU pointers to GPU data for arrays then use the alternative version: // const auto gpuData = getDataRGPUOnCPU<float>(contextObj, handle); template <typename T> const T* getDataRGpuAt(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle, omni::fabric::PtrToPtrKind where) { const T* out; const void** outPtr = reinterpret_cast<const void**>(&out); contextObj.iAttributeData->getDataRGpuAt(outPtr, contextObj, &attrHandle, 1, where); return out; } // Syntactic sugar template <typename T> const T* getDataRGPU(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle) { return getDataRGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr); } template <typename T> const T* getDataRGPUOnCPU(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle) { return getDataRGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr); } // Get the read-only GPU data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesR // to pass in as the second parameter. // auto handles = getAttributesR<ConstAttributeDataHandle, ConstAttributeDataHandle>(contextObj, node, // std::make_tuple(Token("a"), Token("b"))) // const float* a_gpu_value{nullptr}; // const float* b_gpu_value{nullptr}; // // Note that the template parameters for the general case are pointers to the data type // std::tie(a_gpu_value, b_gpu_value) = getDataRGPU<float*, float*>(contextObj, handles); // If you wish to get CPU pointers to GPU data for arrays then use the alternative version: // std::tie(a_gpu_value, b_gpu_value) = getDataRGPUOnCPU<float*, float*>(contextObj, handles); template <typename... Types, typename... 
NameTypes> std::tuple<Types...> getDataRGpuAt(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles, omni::fabric::PtrToPtrKind where) { // Check that size(out)==len(names) and that every element of names has type size_t const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value; const size_t outCount = std::tuple_size<std::tuple<Types...>>::value; static_assert(inCount == outCount, "Input and output tuples must be of same length"); static_assert(areTypeT<ConstAttributeDataHandle, NameTypes...>::value, "Attribute handles must have type ConstAttributeDataHandle"); std::tuple<Types...> out; // Cast to C-ABI compatible types std::tuple<Types...>* outTuplePtr = &out; const void** outPtr = reinterpret_cast<const void**>(outTuplePtr); std::tuple<NameTypes...>* inTuplePtr = &handles; ConstAttributeDataHandle* handlesPtr = reinterpret_cast<ConstAttributeDataHandle*>(inTuplePtr); // Call C-ABI version of method contextObj.iAttributeData->getDataRGpuAt(outPtr, contextObj, handlesPtr, inCount, where); return out; } // Syntactic sugar template <typename... Types, typename... NameTypes> std::tuple<Types...> getDataRGPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles) { return getDataRGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr); } template <typename... Types, typename... NameTypes> std::tuple<Types...> getDataRGPUOnCPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles) { return getDataRGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr); } // Specialization for the writable data of a single attribute. Get a single attribute handle from getAttributeW // to pass in as the second parameter. 
// auto handle = getAttributeW(contextObj, node, Token("myAttribute")) // // Note that the template parameter for the special case is the data type, not a pointer type // auto dataPtr = getDataW<float>(contextObj, handle); template <typename T> T* getDataW(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle) { T* out; void** outPtr = reinterpret_cast<void**>(&out); contextObj.iAttributeData->getDataW(outPtr, contextObj, &attrHandle, 1); return out; } // Get the writable data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesW // to pass in as the second parameter. // auto handles = getAttributesW<AttributeDataHandle, AttributeDataHandle>(contextObj, node, // std::make_tuple(Token("a"), Token("b"))) // float* a_value{nullptr}; // float* b_value{nullptr}; // // Note that the template parameters for the general case are pointers to the data type // std::tie(a_value, b_value) = getDataW<float*, float*>(contextObj, handles); template <typename... Types, typename... 
NameTypes> std::tuple<Types...> getDataW(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles) { // Check that size(out)==len(names) and that every element of names has type size_t const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value; const size_t outCount = std::tuple_size<std::tuple<Types...>>::value; static_assert(inCount == outCount, "Input and output tuples must be of same length"); static_assert( areTypeT<AttributeDataHandle, NameTypes...>::value, "Attribute handles must have type AttributeDataHandle"); std::tuple<Types...> out; // Cast to C-ABI compatible types std::tuple<Types...>* outTuplePtr = &out; void** outPtr = reinterpret_cast<void**>(outTuplePtr); std::tuple<NameTypes...>* inTuplePtr = &handles; AttributeDataHandle* handlesPtr = reinterpret_cast<AttributeDataHandle*>(inTuplePtr); // Call C-ABI version of method contextObj.iAttributeData->getDataW(outPtr, contextObj, handlesPtr, inCount); return out; } // Specialization for the writable GPU data of a single attribute. Get a single attribute handle from getAttributeW // to pass in as the second parameter. 
// auto handle = getAttributeW(contextObj, node, Token("myAttribute"))
// // Note that the template parameter for the special case is the data type, not a pointer type
// auto gpuData = getDataWGPU<float>(contextObj, handle);
// If you wish to get CPU pointers to GPU data for arrays then use the alternative version:
// auto gpuData = getDataWGPUOnCPU<float>(contextObj, handle);
//
// Single-attribute GPU accessor; whereGpuPtrs selects whether the returned pointer-to-pointer
// chain lives on the GPU or the CPU (see omni::fabric::PtrToPtrKind).
template <typename T>
T* getDataWGpuAt(const GraphContextObj& contextObj,
                 AttributeDataHandle const& attrHandle,
                 omni::fabric::PtrToPtrKind whereGpuPtrs)
{
    T* out;
    void** outPtr = reinterpret_cast<void**>(&out);
    contextObj.iAttributeData->getDataWGpuAt(outPtr, contextObj, &attrHandle, 1, whereGpuPtrs);
    return out;
}

// Syntactic sugar: GPU pointers resolved on the GPU.
template <typename T>
T* getDataWGPU(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
{
    return getDataWGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr);
}
// Syntactic sugar: CPU-resident pointers to GPU data.
template <typename T>
T* getDataWGPUOnCPU(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
{
    return getDataWGpuAt<T>(contextObj, attrHandle, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr);
}

// Get the writable GPU data for an arbitrary list of attributes. Get a tuple attribute handle from getAttributesW
// to pass in as the second parameter.
//     auto handles = getAttributesW<AttributeDataHandle, AttributeDataHandle>(contextObj, node,
//                                                                            std::make_tuple(Token("a"), Token("b")))
//     float* a_gpu_value{nullptr};
//     float* b_gpu_value{nullptr};
//     // Note that the template parameters for the general case are pointers to the data type
//     std::tie(a_gpu_value, b_gpu_value) = getDataWGPU<float*, float*>(contextObj, handles);
// If you wish to get CPU pointers to GPU data for arrays then use the alternative version:
//     std::tie(a_gpu_value, b_gpu_value) = getDataWGPUOnCPU<float*, float*>(contextObj, handles);
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataWGpuAt(const GraphContextObj& contextObj,
                                   std::tuple<NameTypes...> handles,
                                   omni::fabric::PtrToPtrKind whereGpuPtrs)
{
    // Check that size(out)==len(names) and that every element of names has type size_t
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(
        areTypeT<AttributeDataHandle, NameTypes...>::value, "Attribute handles must have type AttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on the same tuple-as-contiguous-array layout assumption as getDataW;
    // verify per supported compiler/STL.
    std::tuple<Types...>* outTuplePtr = &out;
    void** outPtr = reinterpret_cast<void**>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &handles;
    AttributeDataHandle* handlesPtr = reinterpret_cast<AttributeDataHandle*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iAttributeData->getDataWGpuAt(outPtr, contextObj, handlesPtr, inCount, whereGpuPtrs);
    return out;
}

// Syntactic sugar
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataWGPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles)
{
    return getDataWGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eGpuPtrToGpuPtr);
}
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getDataWGPUOnCPU(const GraphContextObj& contextObj, std::tuple<NameTypes...> handles)
{
    return getDataWGpuAt<Types...>(contextObj, handles, omni::fabric::PtrToPtrKind::eCpuPtrToGpuPtr);
}

// Element count of a single (writable) attribute; converts to a const handle for the const-only ABI call.
static size_t getElementCount(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle)
{
    size_t out;
    ConstAttributeDataHandle constAttrHandle = attrHandle;
    contextObj.iAttributeData->getElementCount(&out, contextObj, &constAttrHandle, 1);
    return out;
}
// Element count of a single read-only attribute.
static size_t getElementCount(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle)
{
    size_t out;
    contextObj.iAttributeData->getElementCount(&out, contextObj, &attrHandle, 1);
    return out;
}
// Element counts for an arbitrary list of attributes in one ABI call.
template <typename... Types, typename... HandleTypes>
std::tuple<Types...> getElementCount(const GraphContextObj& contextObj, std::tuple<HandleTypes...> handles)
{
    // Check that size(out)==len(names) and that every element of names has type size_t
    const size_t inCount = std::tuple_size<std::tuple<HandleTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<ConstAttributeDataHandle, HandleTypes...>::value ||
                      areTypeT<AttributeDataHandle, HandleTypes...>::value,
                  "Attribute handles must have type ConstAttributeDataHandle or AttributeDataHandle");
    static_assert(areTypeT<size_t, Types...>::value, "Outputs must have type size_t");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types (same tuple-layout assumption as above).
    std::tuple<Types...>* outTuplePtr = &out;
    size_t* outPtr = reinterpret_cast<size_t*>(outTuplePtr);
    std::tuple<HandleTypes...>* inTuplePtr = &handles;
    ConstAttributeDataHandle* handlesPtr = reinterpret_cast<ConstAttributeDataHandle*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iAttributeData->getElementCount(outPtr, contextObj, handlesPtr, inCount);
    return out;
}
//-----------------------------------------------------------------------------
static std::vector<ConstAttributeDataHandle> getAttributes(const GraphContextObj& contextObj, ConstBundleHandle bundle) { size_t count = contextObj.iBundle->getAttributesCount(contextObj, bundle); std::vector<ConstAttributeDataHandle> attrsOut(count); contextObj.iBundle->getAttributesR(attrsOut.data(), contextObj, bundle, count); return attrsOut; } static std::vector<AttributeDataHandle> getAttributes(const GraphContextObj& contextObj, BundleHandle bundle) { size_t count = contextObj.iBundle->getAttributesCount(contextObj, bundle); std::vector<AttributeDataHandle> attrsOut(count); contextObj.iBundle->getAttributesW(attrsOut.data(), contextObj, bundle, count); return attrsOut; } static void getAttributesByName(ConstAttributeDataHandle* attrsOut, const GraphContextObj& contextObj, ConstBundleHandle prim, const Token* attrNames, size_t count) { const NameToken* nameTokens = reinterpret_cast<const NameToken*>(attrNames); contextObj.iBundle->getAttributesByNameR(attrsOut, contextObj, prim, nameTokens, count); } static void getAttributesByName(AttributeDataHandle* attrsOut, const GraphContextObj& contextObj, BundleHandle prim, const Token* attrNames, size_t count) { const NameToken* nameTokens = reinterpret_cast<const NameToken*>(attrNames); contextObj.iBundle->getAttributesByNameW(attrsOut, contextObj, prim, nameTokens, count); } // ====================================================================== // Context wrappers // getAttributeR is to be used in conjunction with the single item getDataR specialization to extract read-only data // from a single attribute. If you have more than one attribute it is best to use getAttributesR, to minimize the calls // across the ABI boundary. See the getDataR description for more information. 
// Resolve a single attribute on a node by name into a read-only data handle.
static ConstAttributeDataHandle getAttributeR(const GraphContextObj& contextObj,
                                              NodeContextHandle node,
                                              const Token& name,
                                              InstanceIndex instanceIndex)
{
    ConstAttributeDataHandle out;
    NameToken nameToken = name;
    contextObj.iContext->getAttributesByNameR(&out, contextObj, node, &nameToken, 1, instanceIndex);
    return out;
}

// getAttributesR is to be used in conjunction with the general version of getDataR to extract read-only data from an
// arbitrary list of attributes. It uses variadic templates to gather all arguments into a single ABI call. See the
// getDataR description for more information. Sample usage:
//     std::tie(a, b, c) = getAttributesR(contextObj, node, std::make_tuple(Token("a"), Token("b"), Token("c")), instanceIndex)
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getAttributesR(const GraphContextObj& contextObj,
                                    NodeContextHandle node,
                                    std::tuple<NameTypes...> names,
                                    InstanceIndex instanceIndex)
{
    // Check that size(out)==len(names) and that every element of names has type size_t
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value,
                  "Attribute names must have type NameToken or Token");
    static_assert(areTypeT<ConstAttributeDataHandle, Types...>::value, "Outputs must have type ConstAttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types.
    // NOTE(review): relies on std::tuple laying out homogeneous elements contiguously in order;
    // not standard-guaranteed -- verify per supported compiler/STL.
    std::tuple<Types...>* outTuplePtr = &out;
    ConstAttributeDataHandle* outPtr = reinterpret_cast<ConstAttributeDataHandle*>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &names;
    NameToken* namesPtr = reinterpret_cast<NameToken*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iContext->getAttributesByNameR(outPtr, contextObj, node, namesPtr, inCount, instanceIndex);
    return out;
}

// getAttributeW is to be used in conjunction with the single item getDataW specialization to extract writable data
// from a single attribute. If you have more than one attribute it is best to use getAttributesW, to minimize the calls
// across the ABI boundary. See the getDataW description for more information.
static AttributeDataHandle getAttributeW(const GraphContextObj& contextObj,
                                         NodeContextHandle node,
                                         const Token& name,
                                         InstanceIndex instanceIndex)
{
    AttributeDataHandle out;
    NameToken nameToken = name;
    contextObj.iContext->getAttributesByNameW(&out, contextObj, node, &nameToken, 1, instanceIndex);
    return out;
}

// getAttributesW is to be used in conjunction with the general version of getDataW to extract read-only data from an
// arbitrary list of attributes. It uses variadic templates to gather all arguments into a single ABI call. See the
// getDataW description for more information. Sample usage:
//     std::tie(a, b, c) = getAttributesW(contextObj, node, std::make_tuple(Token("a"), Token("b"), Token("c")), instanceIndex)
template <typename... Types, typename... NameTypes>
std::tuple<Types...> getAttributesW(const GraphContextObj& contextObj,
                                    NodeContextHandle node,
                                    std::tuple<NameTypes...> names,
                                    InstanceIndex instanceIndex)
{
    // Check that size(out)==len(names) and that every element of names has type size_t
    const size_t inCount = std::tuple_size<std::tuple<NameTypes...>>::value;
    const size_t outCount = std::tuple_size<std::tuple<Types...>>::value;
    static_assert(inCount == outCount, "Input and output tuples must be of same length");
    static_assert(areTypeT<NameToken, NameTypes...>::value || areTypeT<Token, NameTypes...>::value,
                  "Attribute names must have type NameToken or Token");
    static_assert(areTypeT<AttributeDataHandle, Types...>::value, "Outputs must have type AttributeDataHandle");
    std::tuple<Types...> out;
    // Cast to C-ABI compatible types (same tuple-layout assumption as getAttributesR).
    std::tuple<Types...>* outTuplePtr = &out;
    AttributeDataHandle* outPtr = reinterpret_cast<AttributeDataHandle*>(outTuplePtr);
    std::tuple<NameTypes...>* inTuplePtr = &names;
    NameToken* namesPtr = reinterpret_cast<NameToken*>(inTuplePtr);
    // Call C-ABI version of method
    contextObj.iContext->getAttributesByNameW(outPtr, contextObj, node, namesPtr, inCount, instanceIndex);
    return out;
}

#if defined(__GNUC__)
#    pragma GCC diagnostic pop
#endif

}
}
}
26,594
C
46.746858
144
0.692976
omniverse-code/kit/include/omni/graph/core/IVariable2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL /** * @brief Interface extension for IVariable that adds the ability to set a variable type */ template <> class omni::core::Generated<omni::graph::core::IVariable2_abi> : public omni::graph::core::IVariable2_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::IVariable2") /** * Sets the type of the variable. * * @param[in] type New type for the variable * * @return True if the type is able to be set, false otherwise */ bool setType(omni::graph::core::Type type) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline bool omni::core::Generated<omni::graph::core::IVariable2_abi>::setType(omni::graph::core::Type type) noexcept { return setType_abi(type); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
1,610
C
26.305084
116
0.714907
omniverse-code/kit/include/omni/graph/core/BundlePrims.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

// ====================================================================================================
// "Do Not Use" -- This is a temporary interface that can change at any time.
// ====================================================================================================

#include "ConstBundlePrims.h"

namespace omni
{
namespace graph
{
namespace core
{

class BundlePrims;
class BundlePrimIterator;
class BundlePrimAttrIterator;

/**
 * Collection of read-write attributes in a primitive.
 */
class BundlePrim : public ConstBundlePrim
{
public:
    using AttrMapIteratorType = BundleAttributeMap::iterator;

    /**
     * @return Parent of this bundle prim.
     */
    BundlePrims* getBundlePrims() noexcept;

    /**
     * @return Bundle handle of this primitive.
     */
    BundleHandle handle() noexcept;

    /**
     * Sets path of the primitive.
     */
    void setPath(NameToken path) noexcept;

    /**
     * Sets type of the primitive.
     */
    void setType(NameToken type) noexcept;

    /**
     * @return Cached instance of BundleAttrib if attribute is found successfully, nullptr otherwise.
     */
    BundleAttrib* getAttr(NameToken attrName) noexcept;

    /**
     * @return BundleAttrib if attribute is added successfully, nullptr otherwise.
     */
    BundleAttrib* addAttr(NameToken attrName,
                          Type type,
                          size_t arrayElementCount = 0,
                          BundleAttrib::Source source = BundleAttrib::Source::Attribute) noexcept;

    /**
     * Convenience structure for adding attributes.
     */
    struct AddAttrInfo
    {
        NameToken attrName;
        Type type;
        size_t arrayElementCount;
        BundleAttrib::Source source;
    };

    /**
     * Adds a list of attributes to this bundle prim.
     *
     * @param[in] attrList Vector of all the new attributes to be added to this prim
     * @returns True if all (new) attributes were added successfully
     *
     * @todo Weakness of this interface is that it forces usage of std::vector.
     */
    bool addAttrs(std::vector<AddAttrInfo> const& attrList) noexcept;

    /**
     * Remove attribute with a given name from this primitive.
     */
    void removeAttr(NameToken attrName) noexcept;

    /**
     * Recursively remove all attributes from this primitive.
     */
    void clearContents() noexcept;

    /**
     * Copy contents from another bundle prim.
     */
    void copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource = true) noexcept;

    [[deprecated("Bumping DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
    void bumpDirtyID() noexcept {}

    [[deprecated("Setting DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
    void setDirtyID(DirtyIDType dirtyID) noexcept {}

    /**
     * @return Attribute iterator pointing to the first attribute in this bundle.
     */
    BundlePrimAttrIterator begin() noexcept;

    /**
     * @return Attribute iterator pointing to the last attribute in this bundle.
     */
    BundlePrimAttrIterator end() noexcept;

    /**
     * @return Attribute iterator pointing to the first attribute in this bundle.
     */
    ConstBundlePrimAttrIterator cbegin() noexcept;

    /**
     * @return Attribute iterator pointing to the last attribute in this bundle.
     */
    ConstBundlePrimAttrIterator cend() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     *       In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * Create an attribute that is a relationship type.
     */
    BundleAttrib* addRelationship(NameToken name, size_t targetCount) noexcept;

    /**
     * @deprecated Use getBundlePrims.
     */
    BundlePrims* bundlePrims() noexcept;

    /**
     * @deprecated Do not use!
     */
    void copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource = true) noexcept;

private:
    /**
     * Direct initialization with IBundle interface.
     *
     * ConstBundlePrim and BundlePrim take advantage of polymorphic relationship
     * between IConstBundle and IBundle interfaces.
     * In order to modify bundles, BundlePrim makes an attempt to cast IConstBundle
     * to IBundle interface. When this process is successful then, bundle can be modified.
     *
     * Only BundlePrims is allowed to create instances of BundlePrim.
     */
    BundlePrim(BundlePrims& bundlePrims, omni::core::ObjectPtr<IBundle2> bundle);

    /**
     * Clear contents of IBundle.
     */
    void recursiveClearContents(GraphContextObj const& context, IBundleFactory* factory, IBundle2* bundle) noexcept;

    /**
     * @return Make an attempt to cast IConstBundle interface to IBundle. Returns nullptr if operation failed.
     */
    IBundle2* getBundlePtr(IConstBundle2* constBundle) noexcept;

    /**
     * @return Make an attempt to cast IConstBundle interface to IBundle. Returns nullptr if operation failed.
     */
    IBundle2* getBundlePtr() noexcept;

    /**
     * @return True if primitive is an instance of common attributes.
     */
    bool isCommonAttrs() const noexcept
    {
        // Common-attribute prims carry the invalid prim index sentinel.
        BundlePrimIndex primIndex = static_cast<ConstBundlePrim*>(const_cast<BundlePrim*>(this))->primIndex();
        return primIndex == kInvalidBundlePrimIndex;
    }

    friend class BundlePrimIterator;
    friend class BundlePrims;
    friend class BundleAttrib;
};

/**
 * Collection of read-write primitives in a bundle.
 *
 * Bundle Primitives is not movable, not copyable. Its lifespan is managed by the user.
 */
class BundlePrims : public ConstBundlePrims
{
public:
    /**
     * Acquire access to a bundle primitives under given handle.
     */
    BundlePrims(GraphContextObj const& context, BundleHandle const& bundle);

    ~BundlePrims() noexcept;

    /**
     * @return Bundle handle of this primitive.
     */
    BundleHandle handle() noexcept;

    /**
     * @return BundlePrim under given index, or nullptr if prim is not found.
     */
    BundlePrim* getPrim(BundlePrimIndex primIndex) noexcept;

    /**
     * @return BundlePrim allowing access to attributes to this bundle primitives.
     */
    BundlePrim& getCommonAttrs() noexcept;

    /**
     * Add new primitives to this bundle.
     *
     * @return Number of successfully added primitives.
     */
    size_t addPrims(size_t primCountToAdd) noexcept;

    /**
     * Remove primitive under given index.
     */
    bool removePrim(BundlePrimIndex primIndex) noexcept;

    /**
     * Cleans up this primitive bundle. Remove all primitives and attributes.
     */
    void clearContents() noexcept;

    [[deprecated("Bumping DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]]
    DirtyIDType bumpBundleDirtyID() noexcept
    {
        return kInvalidDirtyID;
    }

    /**
     * @return Primitive iterator pointing to the first primitive in this bundle.
     */
    BundlePrimIterator begin() noexcept;

    /**
     * @return Primitive iterator pointing to the last primitive in this bundle.
     */
    BundlePrimIterator end() noexcept;

    /**
     * @return Primitive iterator pointing to the first primitive in this bundle.
     */
    ConstBundlePrimIterator cbegin() noexcept;

    /**
     * @return Primitive iterator pointing to the last primitive in this bundle.
     */
    ConstBundlePrimIterator cend() noexcept;

    /***********************************************************************************************
     *
     * TODO: Following methods might be deprecated in the future.
     *       In the next iteration when real interface starts to emerge, we can retire those methods.
     *
     ***********************************************************************************************/

    /**
     * @deprecated Don't use! Read attach() description.
     */
    BundlePrims();

    /**
     * @deprecated Use appropriate constructor and heap allocate BundlePrims.
     *
     * @todo: There is no benefit of using this method. Cache has to be rebuild from scratch
     *        whenever BundlePrims is attached/detached.
     *        It would be better to remove default constructor and enforce cache construction
     *        through constructor with arguments.
     */
    void attach(GraphContextObj const& context, BundleHandle const& bundle) noexcept;

    /**
     * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims.
     */
    void detach() noexcept;

    /**
     * @deprecated Do not use! Use removePrim with index. This override introduces ambiguity where int can
     *             be converted to a pointer.
     *
     * @todo: Weakness of removePrim design is that it introduces two overrides with following arguments:
     *        * pointer
     *        * integer
     *        This leads to ambiguity during override resolution. Override with a pointer should be avoided
     *        and removed in the future.
     */
    bool removePrim(ConstBundlePrim* prim) noexcept;

    /**
     * @deprecated Do not use! There is no need for this function to exist.
     *             Get the primitive and call clearContents().
     */
    BundlePrim* getClearedPrim(BundlePrimIndex primIndex) noexcept;

    /**
     * @deprecated Responsibility to cache primitive's attributes has been moved to BundlePrim.
     */
    void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept;

private:
    /**
     * @return Returns nullptr if bundle is read only, or IBundle2 instance otherwise.
     */
    IBundle2* getBundlePtr() noexcept;

    // cached attribute handles
    AttributeDataHandle m_bundlePrimIndexOffsetAttr{ AttributeDataHandle::invalidValue() };

    friend class BundlePrim;
    friend class BundleAttrib;
};

/**
 * Primitives in Bundle iterator.
 */
class BundlePrimIterator
{
public:
    BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept;

    BundlePrimIterator(BundlePrimIterator const& that) noexcept = default;
    BundlePrimIterator& operator=(BundlePrimIterator const& that) noexcept = default;

    bool operator==(BundlePrimIterator const& that) const noexcept;
    bool operator!=(BundlePrimIterator const& that) const noexcept;
    BundlePrim& operator*() noexcept;
    BundlePrim* operator->() noexcept;
    BundlePrimIterator& operator++() noexcept;

private:
    BundlePrims* m_bundlePrims;
    BundlePrimIndex m_primIndex;
};

/**
 * Attributes in Primitive iterator.
 */
class BundlePrimAttrIterator
{
public:
    BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept;

    BundlePrimAttrIterator(BundlePrimAttrIterator const& that) noexcept = default;
    BundlePrimAttrIterator& operator=(BundlePrimAttrIterator const& that) noexcept = default;

    bool operator==(BundlePrimAttrIterator const& that) const noexcept;
    bool operator!=(BundlePrimAttrIterator const& that) const noexcept;
    BundleAttrib& operator*() noexcept;
    BundleAttrib* operator->() noexcept;
    BundlePrimAttrIterator& operator++() noexcept;

    BundleAttrib const* getConst() noexcept;

private:
    BundlePrim* m_bundlePrim;
    BundlePrim::AttrMapIteratorType m_attrIter;
};

} // namespace core
} // namespace graph
} // namespace omni

#include "BundlePrimsImpl.h"
12,439
C
30.414141
116
0.631642
omniverse-code/kit/include/omni/graph/core/IVariable.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/core/IObject.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/Handle.h>

namespace omni
{
namespace graph
{
namespace core
{

/**
 * Scope in which the variable has been made available
 */
enum class eVariableScope
{
    /** Variable is accessible only to its graph */
    ePrivate = 0,
    /** Variable can be read by other graphs */
    eReadOnly = 1,
    /** Variable can be read/written by other graphs */
    ePublic = 2,
};

//! Declare the IVariable interface definition
OMNI_DECLARE_INTERFACE(IVariable);

//! Data type to use for a reference to an IVariable interface definition
using IVariablePtr = omni::core::ObjectPtr<IVariable>;

/**
 * Object that contains a value that is local to a graph, available from anywhere in the graph
 */
class IVariable_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.IVariable")>
{
protected:
    /**
     * Returns the name of the variable object. The name is derived by
     * removing any variable specific prefixes from the underlying attribute.
     *
     * @return The name of the variable.
     */
    virtual const char* getName_abi() noexcept = 0;

    /**
     * Returns the full path to the variables underlying attribute
     *
     * @return The full usd path of the variable
     */
    virtual const char* getSourcePath_abi() noexcept = 0;

    /**
     * Returns the type of the variable
     *
     * @return The type of the variable
     */
    virtual OMNI_ATTR("no_py") Type getType_abi() noexcept = 0;

    /**
     * Returns the category of the variable
     *
     * @return The category of the variable, or an empty string if it is not set.
     */
    virtual const char* getCategory_abi() noexcept = 0;

    /**
     * Sets the category of the variable
     *
     * @param[in] category A string representing the variable category
     */
    virtual void setCategory_abi(OMNI_ATTR("c_str, in, not_null") const char* category) noexcept = 0;

    /**
     * Gets the display name of the variable. By default the display name is the same
     * as the variable name.
     *
     * @return The display name of the variable, or an empty string if it is not set.
     */
    virtual const char* getDisplayName_abi() noexcept = 0;

    /**
     * Set the display name of the variable.
     *
     * @param[in] displayName A string to set the display name to
     */
    virtual void setDisplayName_abi(OMNI_ATTR("c_str, in, not_null") const char* displayName) noexcept = 0;

    /**
     * Get the tooltip used for the variable.
     *
     * @return The tooltip of the variable, or an empty string if none is set.
     */
    virtual const char* getTooltip_abi() noexcept = 0;

    /**
     * Set the tooltip used for the variable
     *
     * @param[in] toolTip A description used as a tooltip.
     */
    virtual void setTooltip_abi(OMNI_ATTR("c_str, in, not_null") const char* toolTip) noexcept = 0;

    /**
     * Get the scope of the variable. The scope determines which graphs can read and write the value.
     *
     * @return The scope of the variable.
     */
    virtual eVariableScope getScope_abi() noexcept = 0;

    /**
     * Sets the scope of the variable.
     *
     * @param[in] scope The scope to set on the variable.
     */
    virtual void setScope_abi(eVariableScope scope) noexcept = 0;

    /**
     * Returns whether this variable is valid
     *
     * @return True if the variable is valid, false otherwise
     */
    virtual bool isValid_abi() noexcept = 0;
};

} // namespace core
} // namespace graph
} // namespace omni

#include "IVariable.gen.h"

//! @cond Doxygen_Suppress
//!
//! API part of the variable interface
//! @copydoc omni::graph::core::IVariable_abi
OMNI_DEFINE_INTERFACE_API(omni::graph::core::IVariable)
//! @endcond
{
public:
    /**
     * Changes the type of the variable.
     *
     * @param[in] type The type to change the variable to
     * @returns True if the type was successfully changed, False otherwise. Setting the type
     *          can fail if the backing USD change is on a layer with a weaker opinion.
     */
    inline bool setType(omni::graph::core::Type type) noexcept;
};
4,637
C
27.807453
107
0.664007
omniverse-code/kit/include/omni/graph/core/PyISchedulingHints.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once

#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>

#include <sstream>

// Binds the eAccessType enum into the Python module.
auto bindeAccessType(py::module& m)
{
    py::enum_<omni::graph::core::eAccessType> e(
        m, "eAccessType",
        R"OMNI_BIND_RAW_(How does the node access the data described by the enum eAccessLocation)OMNI_BIND_RAW_");
    e.value("E_NONE", omni::graph::core::eAccessType::eNone,
            R"OMNI_BIND_RAW_(There is no access to data of the associated type)OMNI_BIND_RAW_");
    e.value("E_READ", omni::graph::core::eAccessType::eRead,
            R"OMNI_BIND_RAW_(There is only read access to data of the associated type)OMNI_BIND_RAW_");
    e.value("E_WRITE", omni::graph::core::eAccessType::eWrite,
            R"OMNI_BIND_RAW_(There is only write access to data of the associated type)OMNI_BIND_RAW_");
    e.value("E_READ_WRITE", omni::graph::core::eAccessType::eReadWrite,
            R"OMNI_BIND_RAW_(There is both read and write access to data of the associated type)OMNI_BIND_RAW_");
    return e;
}

// Binds the eAccessLocation enum into the Python module.
auto bindeAccessLocation(py::module& m)
{
    py::enum_<omni::graph::core::eAccessLocation> e(
        m, "eAccessLocation",
        R"OMNI_BIND_RAW_(What type of non-attribute data does this node access)OMNI_BIND_RAW_");
    e.value("E_USD", omni::graph::core::eAccessLocation::eUsd,
            R"OMNI_BIND_RAW_(Accesses the USD stage data)OMNI_BIND_RAW_");
    e.value("E_GLOBAL", omni::graph::core::eAccessLocation::eGlobal,
            R"OMNI_BIND_RAW_(Accesses data that is not part of the node or node type)OMNI_BIND_RAW_");
    e.value("E_STATIC", omni::graph::core::eAccessLocation::eStatic,
            R"OMNI_BIND_RAW_(Accesses data that is shared by every instance of a particular node type)OMNI_BIND_RAW_");
    e.value("E_TOPOLOGY", omni::graph::core::eAccessLocation::eTopology,
            R"OMNI_BIND_RAW_(Accesses information on the topology of the graph to which the node belongs)OMNI_BIND_RAW_");
    return e;
}

// Binds the eThreadSafety enum into the Python module.
auto bindeThreadSafety(py::module& m)
{
    py::enum_<omni::graph::core::eThreadSafety> e(
        m, "eThreadSafety", R"OMNI_BIND_RAW_(How thread safe is the node during evaluation)OMNI_BIND_RAW_");
    e.value("E_SAFE", omni::graph::core::eThreadSafety::eSafe,
            R"OMNI_BIND_RAW_(Nodes can be evaluated in multiple threads safely)OMNI_BIND_RAW_");
    e.value("E_UNSAFE", omni::graph::core::eThreadSafety::eUnsafe,
            R"OMNI_BIND_RAW_(Nodes cannot be evaluated in multiple threads safely)OMNI_BIND_RAW_");
    e.value("E_UNKNOWN", omni::graph::core::eThreadSafety::eUnknown,
            R"OMNI_BIND_RAW_(The thread safety status of the node type is unknown)OMNI_BIND_RAW_");
    return e;
}

// Binds the eComputeRule enum into the Python module.
auto bindeComputeRule(py::module& m)
{
    py::enum_<omni::graph::core::eComputeRule> e(
        m, "eComputeRule", R"OMNI_BIND_RAW_(How the node is allowed to be computed)OMNI_BIND_RAW_");
    e.value("E_DEFAULT", omni::graph::core::eComputeRule::eDefault,
            R"OMNI_BIND_RAW_(Nodes are computed according to the default evaluator rules)OMNI_BIND_RAW_");
    e.value(
        "E_ON_REQUEST", omni::graph::core::eComputeRule::eOnRequest,
        R"OMNI_BIND_RAW_(The evaluator may skip computing this node until explicitly requested with INode::requestCompute)OMNI_BIND_RAW_");
    return e;
}

// Binds the ISchedulingHints interface (parent wrapper class + concrete class, constructors,
// properties, and methods) into the Python module.
auto bindISchedulingHints(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    py::class_<omni::core::Generated<omni::graph::core::ISchedulingHints_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::ISchedulingHints_abi>>,
               omni::core::IObject>
        clsParent(m, "_ISchedulingHints");

    py::class_<omni::graph::core::ISchedulingHints, omni::core::Generated<omni::graph::core::ISchedulingHints_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::ISchedulingHints>, omni::core::IObject>
        cls(m, "ISchedulingHints",
            R"OMNI_BIND_RAW_(Interface to the list of scheduling hints that can be applied to a node type)OMNI_BIND_RAW_");

    // Converting constructor: cast an arbitrary IObject to ISchedulingHints (throws on failure).
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::ISchedulingHints>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));

    // Default constructor: instantiate a new ISchedulingHints via the type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::ISchedulingHints>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::ISchedulingHints instantiation");
            }
            return tmp;
        }));

    cls.def_property("thread_safety", &omni::graph::core::ISchedulingHints::getThreadSafety,
                     &omni::graph::core::ISchedulingHints::setThreadSafety);
    cls.def_property("compute_rule", &omni::graph::core::ISchedulingHints::getComputeRule,
                     &omni::graph::core::ISchedulingHints::setComputeRule);
    cls.def("get_data_access", &omni::graph::core::ISchedulingHints::getDataAccess,
            R"OMNI_BIND_RAW_(Get the type of access the node has for a given data type

@param[in] dataType Type of data for which access type is being modified
@returns Value of the access type flag)OMNI_BIND_RAW_",
            py::arg("data_type"));
    cls.def("set_data_access", &omni::graph::core::ISchedulingHints::setDataAccess,
            R"OMNI_BIND_RAW_(Set the flag describing how a node accesses particular data in its compute _abi (defaults to no access).
Setting any of these flags will, in most cases, automatically mark the node as "not threadsafe".
One current exception to this is allowing a node to be both threadsafe and a writer to USD,
since such behavior can be achieved if delayed writebacks (e.g. "registerForUSDWriteBack")
are utilized in the node's compute method.

@param[in] dataType Type of data for which access type is being modified
@param[in] newAccessType New value of the access type flag)OMNI_BIND_RAW_",
            py::arg("data_type"), py::arg("new_access_type"));
    cls.def("inspect",
            [](omni::graph::core::ISchedulingHints* self, omni::inspect::IInspector* inspector)
            {
                auto return_value = self->inspect(inspector);
                return return_value;
            },
            R"OMNI_BIND_RAW_(Runs the inspector on the scheduling hints.

@param[in] inspector The inspector class
@return true if the inspection ran successfully, false if the inspection type is not supported)OMNI_BIND_RAW_",
            py::arg("inspector"));
    return omni::python::PyBind<omni::graph::core::ISchedulingHints>::bind(cls);
}
7,462
C
49.425675
139
0.664969
omniverse-code/kit/include/omni/graph/core/ConstBundlePrimsImpl.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "ConstBundlePrims.h"

#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/ComputeGraph.h>

#include <algorithm>

namespace omni
{
namespace graph
{
namespace core
{

// ====================================================================================================
//
// Const Bundle Primitive
//
// ====================================================================================================

// Wraps a single read-only child bundle, caching its attributes and well-known metadata handles.
inline ConstBundlePrim::ConstBundlePrim(ConstBundlePrims& bundlePrims, omni::core::ObjectPtr<IConstBundle2> bundle)
    : m_bundlePrims{ &bundlePrims }, m_bundle{ std::move(bundle) }
{
    // Read and cache all non internal attributes.
    readAndCacheAttributes();

    // Cache the handle of the bundle-level "prim index" metadata so primIndex() can read it lazily.
    const detail::AttrDefinition& primIndexDef = detail::getPrimIndexDefinition();
    m_primIndexAttr = getConstBundlePtr()->getConstBundleMetadataByName(primIndexDef.token);
}

// Returns the read-only handle of the wrapped bundle.
inline ConstBundleHandle ConstBundlePrim::getConstHandle() noexcept
{
    return m_bundle->getConstHandle();
}

// Enumerates every valid attribute of the wrapped bundle and populates the name -> BundleAttrib cache.
inline void ConstBundlePrim::readAndCacheAttributes() noexcept
{
    IConstBundle2* bundle = getConstBundlePtr();
    GraphContextObj const& context = getConstBundlePrims()->context();

    std::vector<ConstAttributeDataHandle> attrHandles(bundle->getAttributeCount());
    bundle->getConstAttributes(attrHandles.data(), attrHandles.size());

    auto& attrs = getAttributes();
    for (ConstAttributeDataHandle& attrHandle : attrHandles)
    {
        // Skip handles that did not resolve to real attribute data.
        if (!attrHandle.isValid())
            continue;

        NameToken attrName = context.iAttributeData->getName(context, attrHandle);
        attrs.insert(std::make_pair(attrName, std::make_unique<BundleAttrib>(*this, attrName)));
    }
}

// Looks up an attribute by name, consulting the cache first and keeping the cache in sync
// with the bundle: stale cache entries are evicted, newly discovered attributes are cached.
// Returns nullptr when the attribute does not exist in the bundle.
inline BundleAttrib const* ConstBundlePrim::getConstAttr(core::NameToken attrName) noexcept
{
    // Try to find cached attributes
    auto& attrMap = getAttributes();
    auto it = attrMap.find(attrName);
    if (it != attrMap.end())
    {
        return it->second.get();
    }

    // Try to find attribute in this bundle.
    IConstBundle2* bundle = getConstBundlePtr();
    ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(attrName);
    if (!attributeHandle.isValid())
    {
        // attribute is not found, ensure entry is removed from the cache.
        auto it = attrMap.find(attrName);
        if (it != attrMap.end())
        {
            attrMap.erase(it);
        }
        return nullptr;
    }

    // Attribute exists in the bundle but was not in the cache - create a wrapper and cache it.
    auto newPrimAttribute = new BundleAttrib{ *this, attrName };
    std::unique_ptr<BundleAttrib> primAttributePtr{ newPrimAttribute };
    attrMap.emplace(attrName, std::move(primAttributePtr));
    return newPrimAttribute;
}

// Const overload; delegates to the non-const lookup (which may refresh the internal cache).
inline BundleAttrib const* ConstBundlePrim::getAttr(NameToken attrName) const noexcept
{
    return const_cast<ConstBundlePrim*>(this)->getConstAttr(attrName);
}

// Number of attributes currently cached for this prim.
inline size_t ConstBundlePrim::attrCount() noexcept
{
    return getAttributes().size();
}

// Reads this prim's index within its parent from bundle metadata,
// or kInvalidBundlePrimIndex when the metadata is absent.
inline BundlePrimIndex ConstBundlePrim::primIndex() noexcept
{
    if (m_primIndexAttr.isValid())
    {
        ConstBundlePrims* bundlePrims = getConstBundlePrims();
        return *getDataR<BundlePrimIndex>(bundlePrims->context(), m_primIndexAttr);
    }
    return kInvalidBundlePrimIndex;
}

// Returns the prim's source path token, lazily resolving and caching the path attribute handle.
// Returns omni::fabric::kUninitializedToken when the attribute is not present.
inline NameToken ConstBundlePrim::path() noexcept
{
    if (!m_pathAttr.isValid())
    {
        const detail::AttrDefinition& attrDef = detail::getPrimPathDefinition();
        m_pathAttr = getConstBundlePtr()->getConstAttributeByName(attrDef.token);
    }

    NameToken result = omni::fabric::kUninitializedToken;
    if (m_pathAttr.isValid())
    {
        ConstBundlePrims* bundlePrims = getConstBundlePrims();
        result = *getDataR<NameToken>(bundlePrims->context(), m_pathAttr);
    }
    return result;
}

// Const overload; delegates to the non-const version (which may cache the attribute handle).
inline NameToken ConstBundlePrim::path() const noexcept
{
    return const_cast<ConstBundlePrim*>(this)->path();
}

// Returns the prim's type token, lazily resolving and caching the type attribute handle.
// Returns omni::fabric::kUninitializedToken when the attribute is not present.
inline NameToken ConstBundlePrim::type() noexcept
{
    if (!m_typeAttr.isValid())
    {
        const detail::AttrDefinition& attrDef = detail::getPrimTypeDefinition();
        m_typeAttr = getConstBundlePtr()->getConstAttributeByName(attrDef.token);
    }

    NameToken result = omni::fabric::kUninitializedToken;
    if (m_typeAttr.isValid())
    {
        ConstBundlePrims* bundlePrims = getConstBundlePrims();
        result = *getDataR<NameToken>(bundlePrims->context(), m_typeAttr);
    }
    return result;
}

// Const overload; delegates to the non-const version (which may cache the attribute handle).
inline NameToken ConstBundlePrim::type() const noexcept
{
    return const_cast<ConstBundlePrim*>(this)->type();
}

// Queries the dirty-ID tracking interface for this bundle's current dirty ID.
inline DirtyIDType ConstBundlePrim::dirtyID() noexcept
{
    auto id = carb::getCachedInterface<ComputeGraph>()->getDirtyIDInterfacePtr(m_bundle->getContext());
    return id->getForBundle(m_bundle->getConstHandle());
}

// Const overload; the pragmas suppress deprecation warnings around the call to the
// (deprecated) non-const dirtyID() on MSVC (C4996) and GCC/Clang.
inline DirtyIDType ConstBundlePrim::dirtyID() const noexcept
{
    CARB_IGNOREWARNING_MSC_WITH_PUSH(4996)
    CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations")
    return const_cast<ConstBundlePrim*>(this)->dirtyID();
    CARB_IGNOREWARNING_GNUC_POP
    CARB_IGNOREWARNING_MSC_POP
}

// Parent container that owns this prim wrapper.
inline ConstBundlePrims* ConstBundlePrim::getConstBundlePrims() noexcept
{
    return m_bundlePrims;
}

// Attribute iteration (non-const and const): iterates the cached attribute map.
inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() noexcept
{
    return ConstBundlePrimAttrIterator(*this, getAttributes().begin());
}

inline ConstBundlePrimAttrIterator ConstBundlePrim::end() noexcept
{
    return ConstBundlePrimAttrIterator(*this, getAttributes().end());
}

inline ConstBundlePrimAttrIterator ConstBundlePrim::begin() const noexcept
{
    // const_cast is required because the iterator type stores a non-const prim reference.
    ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
    return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().begin());
}

inline ConstBundlePrimAttrIterator ConstBundlePrim::end() const noexcept
{
    ConstBundlePrim& thisPrim = const_cast<ConstBundlePrim&>(*this);
    return ConstBundlePrimAttrIterator(thisPrim, thisPrim.getAttributes().end());
}

// Raw access to the wrapped read-only bundle interface.
inline IConstBundle2* ConstBundlePrim::getConstBundlePtr() noexcept
{
    return m_bundle.get();
}

// Mutable access to the attribute-name -> BundleAttrib cache.
inline ConstBundlePrim::BundleAttributeMap& ConstBundlePrim::getAttributes() noexcept
{
    return m_attributes;
}

// ====================================================================================================
//
// Const Bundle Primitives
//
// ====================================================================================================

inline ConstBundlePrims::ConstBundlePrims()
{
}

// Convenience constructor: attaches to the given bundle immediately.
inline ConstBundlePrims::ConstBundlePrims(GraphContextObj const& context, ConstBundleHandle const& bundle)
    : ConstBundlePrims()
{
    attach(context, bundle);
}

// Drops all cached prims/attributes and releases the bundle and factory references.
inline void ConstBundlePrims::detach() noexcept
{
    m_primitives.clear();
    m_commonAttributes.reset();
    m_context = GraphContextObj{};
    m_bundle.release();
    m_factory.release();
}

// Direct access to the (possibly not-yet-populated) child prim cache.
inline ConstBundlePrims::BundlePrimArray& ConstBundlePrims::getPrimitives() noexcept
{
    return m_primitives;
}

inline ConstBundleHandle ConstBundlePrims::getConstHandle() noexcept
{
    return m_bundle->getConstHandle();
}

// Core lookup: returns the child prim at primIndex, (re)building the sorted prim cache via
// the supplied factory callable when the cache size does not match the bundle's child count.
template <typename FUNC>
ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex, FUNC createSortedBundlePrims) noexcept
{
    // Return invalid const bundle prim if out of bounds.
    size_t const bundlePrimCount = getPrimCount();
    if (primIndex >= bundlePrimCount)
    {
        return nullptr;
    }

    auto& prims = getPrimitives();

    // HDC_TODO: we need a clear signal to be sure when creation and resorting is required.
    if (prims.size() != bundlePrimCount)
    {
        prims = std::move(createSortedBundlePrims());
        // Every slot in the rebuilt array must hold a prim.
        CARB_ASSERT(bundlePrimCount ==
                    (size_t)std::count_if(prims.cbegin(), prims.cend(), [](const auto& p) { return p.get(); }));
    }

    return prims[primIndex].get();
}

inline ConstBundlePrim* ConstBundlePrims::getPrim(BundlePrimIndex primIndex) noexcept
{
    return getConstPrim(primIndex);
}

// Public lookup: builds the child prim array sorted by each prim's stored primIndex() metadata,
// then appends prims without a valid index into the remaining empty slots, preserving order.
inline ConstBundlePrim* ConstBundlePrims::getConstPrim(BundlePrimIndex primIndex) noexcept
{
    auto createSortedBundlePrims = [this, &bundlePrims = *this]() -> BundlePrimArray
    {
        const size_t childBundleCount = getConstBundlePtr()->getChildBundleCount();

        std::vector<ConstBundleHandle> handles(childBundleCount);
        getConstBundlePtr()->getConstChildBundles(handles.data(), handles.size());

        const GraphContextObj& graphContext = context();

        BundlePrimArray prims(childBundleCount);
        BundlePrimArray nonIndexedPrims; // prims whose metadata carries no usable index

        for (ConstBundleHandle& handle : handles)
        {
            auto childBundle = getBundleFactoryPtr()->getConstBundle(graphContext, handle);
            ConstBundlePrim* prim = new ConstBundlePrim(bundlePrims, childBundle);
            BundlePrimIndex index = prim->primIndex();
            CARB_ASSERT(index < childBundleCount || index == kInvalidBundlePrimIndex);
            if (index < childBundleCount)
            {
                prims[index].reset(prim);
            }
            else
            {
                nonIndexedPrims.emplace_back(prim);
            }
        }

        // Merge non-indexed prims into the sorted array.
        if (!nonIndexedPrims.empty())
        {
            BundlePrimIndex index = 0;
            for (ConstBundlePrimPtr& nonIndexedPrim : nonIndexedPrims)
            {
                // Find the next unoccupied slot and move the prim into it.
                while (index < childBundleCount)
                {
                    ConstBundlePrimPtr& prim = prims[index++];
                    if (!prim)
                    {
                        prim = std::move(nonIndexedPrim);
                        break;
                    }
                }
            }
        }

        return prims;
    };

    return getConstPrim(primIndex, createSortedBundlePrims);
}

// Dirty ID of the top-level bundle; pragmas silence deprecation warnings for dirtyID().
inline DirtyIDType ConstBundlePrims::getBundleDirtyID() noexcept
{
    CARB_IGNOREWARNING_MSC_WITH_PUSH(4996)
    CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations")
    return getCommonAttrs().dirtyID();
    CARB_IGNOREWARNING_GNUC_POP
    CARB_IGNOREWARNING_MSC_POP
}

// The pseudo-prim exposing the top-level bundle's own attributes.
inline ConstBundlePrim& ConstBundlePrims::getConstCommonAttrs() noexcept
{
    return *m_commonAttributes;
}

// Refreshes and returns the cached graph context. NOTE: this call has the side effect of
// re-reading the context from the attached bundle (or clearing it when detached).
inline GraphContextObj const& ConstBundlePrims::context() noexcept
{
    if (m_bundle)
    {
        m_context = m_bundle->getContext();
    }
    else
    {
        m_context = GraphContextObj{};
    }
    return m_context;
}

// Attaches by handle: resolves the bundle factory and read-only bundle, then delegates.
inline void ConstBundlePrims::attach(GraphContextObj const& context, ConstBundleHandle const& bundleHandle) noexcept
{
    ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>();
    omni::core::ObjectPtr<IBundleFactory> factory = computeGraph->getBundleFactoryInterfacePtr();
    omni::core::ObjectPtr<IConstBundle2> bundle = factory->getConstBundle(context, bundleHandle);
    attach(std::move(factory), std::move(bundle));
}

// Attaches to an already-resolved factory and bundle, taking ownership of both.
inline void ConstBundlePrims::attach(omni::core::ObjectPtr<IBundleFactory>&& factoryPtr,
                                     omni::core::ObjectPtr<IConstBundle2>&& bundlePtr) noexcept
{
    // Initialize members
    m_factory = std::move(factoryPtr);
    m_bundle = std::move(bundlePtr);

    // Initialize common attributes to provide access to ConstBundlePrims attributes.
    m_commonAttributes.reset(new ConstBundlePrim(*this, m_bundle));

    if (!m_bundle->isValid())
    {
        return;
    }

    // TODO: Following code is necessary for backward compatibility.
    // NOTE(review): `bundle` and `context` are otherwise-unused locals; this->context() does
    // refresh m_context as a side effect — confirm intent before removing them.
    IConstBundle2* bundle = getConstBundlePtr();
    GraphContextObj const& context = this->context();
}

inline IBundleFactory* ConstBundlePrims::getBundleFactoryPtr() noexcept
{
    return m_factory.get();
}

inline IConstBundle2* ConstBundlePrims::getConstBundlePtr() noexcept
{
    return m_bundle.get();
}

// Number of child bundles (prims); 0 when detached.
inline size_t ConstBundlePrims::getPrimCount() noexcept
{
    if (IConstBundle2* bundle = getConstBundlePtr())
    {
        return bundle->getChildBundleCount();
    }
    return 0;
}

// Child prim iteration.
inline ConstBundlePrimIterator ConstBundlePrims::begin() noexcept
{
    return ConstBundlePrimIterator(*this);
}

inline ConstBundlePrimIterator ConstBundlePrims::end() noexcept
{
    return ConstBundlePrimIterator(*this, getPrimCount());
}

/***********************************************************************************************
 *
 * TODO: Following methods might be deprecated in the future, but are kept for backward compatibility.
 *       In the next iteration when real interface starts to emerge, we can retire those methods.
 *
 ***********************************************************************************************/

inline ConstBundlePrim& ConstBundlePrims::getCommonAttrs() noexcept
{
    return getConstCommonAttrs();
}

inline ConstBundleHandle ConstBundlePrims::handle() noexcept
{
    return m_bundle->getConstHandle();
}

inline void ConstBundlePrims::separateAttrs() noexcept
{
    // There is nothing to separate. This function is deprecated.
}

inline void ConstBundlePrims::ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept
{
    // Responsibility of caching attributes was moved to Bundle Prim.
}

// ====================================================================================================
//
// Const Bundle Primitive Iterator
//
// ====================================================================================================

// Forward iterator over the child prims of a ConstBundlePrims container.
inline ConstBundlePrimIterator::ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept
    : m_bundlePrims(&bundlePrims), m_primIndex(primIndex)
{
}

inline bool ConstBundlePrimIterator::operator==(ConstBundlePrimIterator const& that) const noexcept
{
    return m_bundlePrims == that.m_bundlePrims && m_primIndex == that.m_primIndex;
}

inline bool ConstBundlePrimIterator::operator!=(ConstBundlePrimIterator const& that) const noexcept
{
    return !(*this == that);
}

inline ConstBundlePrim& ConstBundlePrimIterator::operator*() noexcept
{
    return *(m_bundlePrims->getConstPrim(m_primIndex));
}

inline ConstBundlePrim* ConstBundlePrimIterator::operator->() noexcept
{
    return m_bundlePrims->getConstPrim(m_primIndex);
}

inline ConstBundlePrimIterator& ConstBundlePrimIterator::operator++() noexcept
{
    ++m_primIndex;
    return *this;
}

// ====================================================================================================
//
// Const Bundle Primitive Attribute Iterator
//
// ====================================================================================================

// Forward iterator over the cached attributes of a single ConstBundlePrim.
inline ConstBundlePrimAttrIterator::ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim,
                                                                ConstBundlePrim::AttrMapIteratorType attrIter) noexcept
    : m_bundlePrim(&bundlePrim), m_attrIter(attrIter)
{
}

inline bool ConstBundlePrimAttrIterator::operator==(ConstBundlePrimAttrIterator const& that) const noexcept
{
    return m_bundlePrim == that.m_bundlePrim && m_attrIter == that.m_attrIter;
}

inline bool ConstBundlePrimAttrIterator::operator!=(ConstBundlePrimAttrIterator const& that) const noexcept
{
    return !(*this == that);
}

inline BundleAttrib const& ConstBundlePrimAttrIterator::operator*() const noexcept
{
    CARB_ASSERT(m_attrIter->second);
    return *(m_attrIter->second);
}

inline BundleAttrib const* ConstBundlePrimAttrIterator::operator->() const noexcept
{
    CARB_ASSERT(m_attrIter->second);
    return m_attrIter->second.get();
}

inline ConstBundlePrimAttrIterator& ConstBundlePrimAttrIterator::operator++() noexcept
{
    ++m_attrIter;
    return *this;
}

} // namespace core
} // namespace graph
} // namespace omni
16,127
C
29.545454
148
0.655671
omniverse-code/kit/include/omni/graph/core/IGatherPrototype.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/flatcache/IFlatcache.h>
#include <omni/graph/core/iComputeGraph.h>

#include <tuple>

namespace carb
{
namespace flatcache
{
// Index of an element within a flatcache bucket.
using ArrayIndex = size_t;
}
}

namespace omni
{
namespace graph
{
namespace core
{

// ====================================================================================================
// This is a temporary ABI interface used for prototyping the use of improved Gather. Some of these functions
// may move to other interfaces once the prototyping is complete. For now, none of them should be used outside
// of the prototyping efforts.
/*
  _____          _   _       _     _    _
 |  __ \        | \ | |     | |   | |  | |
 | |  | | ___   |  \| | ___ | |_  | |  | |___  ___
 | |  | |/ _ \  | . ` |/ _ \| __| | |  | / __|/ _ \
 | |__| | (_) | | |\  | (_) | |_  | |__| \__ \  __/
 |_____/ \___/  |_| \_|\___/ \__|  \____/|___/\___|
*/

// Location of one path's data: (path, owning bucket, index of the row within that bucket).
using PathBucketIndex = std::tuple<carb::flatcache::PathC, carb::flatcache::BucketId, carb::flatcache::ArrayIndex>;

// Opaque identifier for a set of gathered buckets; kInvalidGatherId marks failure/no gather.
using GatherId = uint64_t;
static constexpr GatherId kInvalidGatherId = 0;

/**
 * Gathered attributes can be automatically converted and copied to Hydra fast-path attributes
 */
enum class GatherAddTransformsMode
{
    eNone, ///< do not add any attributes
    eLocal, ///< _localTransform
    eWorld ///< _worldPosition, _worldOrientation, _worldScale
};

struct IGatherPrototype
{
    CARB_PLUGIN_INTERFACE("omni::graph::core::IGatherPrototype", 2, 0);

    /**
     * Adds the given paths to FC and updates the FC sync filter for this Graph. The prims may be tagged to ensure
     * the buckets do not contain unrelated prims. The returned GatherId identifies the FC buckets.
     *
     * After this call, the given paths will be present in FC, in a set of buckets which contain only these paths. This
     * is necessary to allow vectorized access to an attribute.
     *
     * @note The order of the paths supplied to this function may not be the order of the same paths in the Gather. Use
     *       getGatheredPaths() to get the gather-order of gathered prims.
     *
     * @note The returned GatherId can become invalidated if the underlying flatcache data changes, use isGatherValid()
     *       to check.
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] paths array of prim paths to add
     * @param[in] numPaths number of elements in paths
     * @param[in] allAttributes when true, all USD attributes will be gathered. When false, the "attributes" parameter
     *            is used.
     * @param[in] attributes array of attribute names to add to FC, when allAttributes is false
     * @param[in] numAttributes number of elements in attributes array
     * @param[in] addTransformsMode The transform attributes to create if any
     * @param[in] shouldWriteBack Flag to request to write back cached data to USD
     * @param[in] forceExportToHistory when true, all gathered paths will be tagged for being exported into the history
     *
     * @return The gather id corresponding to the gathered buckets, or kInvalidGatherId on failure.
     */
    GatherId(CARB_ABI* gatherPaths)(const GraphContextObj&,
                                    const carb::flatcache::PathC* paths,
                                    size_t numPaths,
                                    bool allAttributes,
                                    NameToken const* attributes,
                                    size_t numAttributes,
                                    GatherAddTransformsMode addTransformsMode,
                                    bool shouldWriteBack,
                                    bool forceExportToHistory);

    /**
     * Gets the array of paths in the given Gather, in gather order.
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[out] refToPaths reference to the array of prim paths, in gather-order. This pointer is volatile, do not
     *             save. It can become invalid the next time something changes in flatcache.
     * @param[out] refToSize number of elements in refToPaths
     *
     * @return true if the Gather was found, false if it was not
     */
    bool(CARB_ABI* getGatheredPaths)(const GraphContextObj&,
                                     GatherId gatherId,
                                     carb::flatcache::PathC const*& refToPaths,
                                     size_t& refToSize);

    /**
     * Gets the array of buckets in the given Gather, in gather order.
     * For example if the gathered paths are P1,P2,P3,P4 which spans bucket B1, B2, we know that the sum of the
     * sizes of B1 and B2 must be 4. If B1 has 2 entries we know where the paths are located:
     *
     * B1
     *   row 0: P1
     *   row 1: P2
     *
     * B2
     *   row 0: P3
     *   row 1: P4
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[out] refToBucketArray reference to the array of BucketIds, in gather-order. This pointer is volatile,
     *             do not save. It can become invalid the next time something changes in flatcache.
     * @param[out] refToSize number of elements in refToBucketArray
     *
     * @return true if the Gather was found, false if it was not
     */
    bool(CARB_ABI* getGatheredBuckets)(const GraphContextObj&,
                                       GatherId gatherId,
                                       BucketId const*& refToBucketArray,
                                       size_t& refToSize);

    /**
     * Gets the size information for the given gathered attribute name.
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[in] attributeName name of the gathered attribute
     * @param[out] type The type information
     * @param[out] baseSizeBytes The size of the base elements of the Type
     *             FIXME: Should be accessible from flatcache ABI?
     *
     * @return true if the attribute was found, false if it was not
     */
    bool(CARB_ABI* getGatheredType)(const GraphContextObj&,
                                    GatherId gatherId,
                                    NameToken attributeName,
                                    Type& type,
                                    size_t& baseSizeBytes);

    // ==============================================================================================================
    // The functions below here are temporary replacements for the same functions removed from IGraphContext. They
    // were being modified to use attributes, causing a breaking change, and since they were going to be changed anyway
    // they were moved to this prototype for easier update later.
    // ==============================================================================================================

    /**
     * Given an attribute connected to a gather node, retrieves the attribute value in system memory
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The attribute structure to which the operation applies
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the void pointer to the data
     */
    void*(CARB_ABI* getGatherArray)(const GraphContextObj& context, const AttributeObj& attrObj, DataAccessFlags flags);

    /**
     * Given an attribute connected to a gather node, retrieves the attribute value in GPU memory
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The attribute structure to which the operation applies
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the void pointer to the data
     */
    void*(CARB_ABI* getGatherArrayGPU)(const GraphContextObj& context, const AttributeObj& attrObj, DataAccessFlags flags);

    /**
     * Given an attribute connected to a gather node, retrieves the array of the paths it has gathered
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The source attribute structure. This can be any of the gather node's attributes.
     * @return the pointer to the path array, encoded as uint64_t. These integers can be cast to SdfPaths.
     */
    const uint64_t*(CARB_ABI* getGatherPathArray)(const GraphContextObj& context, const AttributeObj& attrObj);

    /**
     * Given an array attribute in a gather node, retrieves an array of the array sizes in CPU memory
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The source attribute structure.
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the pointer to the array
     */
    size_t*(CARB_ABI* getGatherArrayAttributeSizes)(const GraphContextObj& context,
                                                    const AttributeObj& attrObj,
                                                    DataAccessFlags flags);

    /**
     * Given an array attribute in a gather node, retrieves an array of the array sizes in GPU memory
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The source attribute structure.
     * @param[in] flags The data access flags indicating whether the data is to be R, W, or RW
     * @return the pointer to the array
     */
    size_t*(CARB_ABI* getGatherArrayAttributeSizesGPU)(const GraphContextObj& context,
                                                       const AttributeObj& attrObj,
                                                       DataAccessFlags flags);

    /**
     * Given a gather node attribute, retrieves the number of elements
     * Deprecated: to be replaced with the 2.0 API soon
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] attrObj The attribute structure to which the operation applies
     * @return the number of elements gathered, -1 if operation is unsuccessful
     */
    size_t(CARB_ABI* getElementCount)(const GraphContextObj& context, const AttributeObj& attrObj);

    /**
     * Gets the array of repeated paths and where to find its data in the given Gather.
     * Each element in the array will have the path of the prim, the bucket id of its data, and the index of its
     * position inside the bucket
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] gatherId The Gather id returned from gatherPaths()
     * @param[out] refToRepeatedPathsArray reference to the array of PathBucketIndex which outlines the location of the
     *             data for repeated paths in the gather. This pointer is volatile, do not save. It can become invalid
     *             the next time something changes in flatcache.
     * @param[out] refToSize number of elements in refToRepeatedPathsArray
     *
     * @return true if the Gather was found, false if it was not
     */
    bool(CARB_ABI* getGatheredRepeatedPaths)(const GraphContextObj&,
                                             GatherId gatherId,
                                             PathBucketIndex const*& refToRepeatedPathsArray,
                                             size_t& refToSize);
};

// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IGatherPrototype, getGatheredRepeatedPaths, 10)

} // namespace core
} // namespace graph
} // namespace omni
12,354
C
50.053719
140
0.635017
omniverse-code/kit/include/omni/graph/core/CudaUtils.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // This file contains support for CUDA or CUDA/C++ common code // TODO: find out how to assert in CUDA #ifndef __CUDACC__ # define CUDA_SAFE_ASSERT(cond, ...) CARB_ASSERT(cond, ##__VA_ARGS__) # define CUDA_CALLABLE #else # define CUDA_SAFE_ASSERT(cond, ...) # define CUDA_CALLABLE __device__ __host__ #endif
770
C
35.714284
77
0.735065
omniverse-code/kit/include/omni/graph/core/IBundleFactory.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "bundle/IBundleFactory2.h"
489
C
39.83333
77
0.795501
omniverse-code/kit/include/omni/graph/core/IConstBundle.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "bundle/IConstBundle2.h"
487
C
39.666663
77
0.794661
omniverse-code/kit/include/omni/graph/core/NodeTypeRegistrar.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "iComputeGraph.h" #include <carb/Framework.h> #include <iostream> #include <type_traits> //==================================================================================================== // // Node type registration is handled through this set of classes. The implementation is geared towards simplifying // the registration process as much as possible. All of this is support code for the macro "REGISTER_OGN_NODE()", // which you place at the bottom of your .ogn file. // //==================================================================================================== namespace omni { namespace graph { namespace core { // If a node type version is not specified then it gets this value. // Keep in sync with DEFAULT_NODE_TYPE_VERSION_DEFAULT in omni.graph.core/python/tests/omnigraph_test_utils.py static constexpr int kDefaultNodeTypeVersion = 1; // See this file for how to handle new methods added to the iNodeType interface. #define NODE_TYPE_REGISTRY // For inclusion protection #include "NodeTypeRegistryTemplates.h" // Scoped node registration helper class. Used by the NodeTypeRegistrar to pass in node methods for the // INodeType ABI, keeping the node type registered for the lifetime of the instantiated object. Only one // registration object can exist per node type so copy methods are deleted in favour of their move equivalents. 
class NodeTypeRegistration { public: // Direct access to the interface, for the manual registration modes const INodeType& nodeTypeInterface() const { return m_interface; } // Direct access to the version, for the manual registration modes const int nodeTypeVersion() const { return m_versionNumber; } // Move constructor allows passing of registration information through return values without double // register/unregister NodeTypeRegistration(NodeTypeRegistration&& rhs) noexcept { m_interface = rhs.m_interface; m_versionNumber = rhs.m_versionNumber; rhs.m_interface = INodeType{}; rhs.m_versionNumber = kDefaultNodeTypeVersion; } // Move operator allows passing of registration information through return values without double register/unregister NodeTypeRegistration& operator=(NodeTypeRegistration&& rhs) noexcept { m_interface = rhs.m_interface; m_versionNumber = rhs.m_versionNumber; rhs.m_interface = INodeType{}; rhs.m_versionNumber = kDefaultNodeTypeVersion; return *this; } // Only available constructor takes in the four methods required by the INodeType API and registers // the node type using that interface. Only the name function is remembered for later unregistration. 
NodeTypeRegistration(GetNodeTypeFunction nameFn, ComputeFunction computeFn, InitializeFunction initializeFn, ReleaseFunction releaseFn, InitializeTypeFunction initializeTypeFn, UpdateNodeVersionFunction updateNodeVersionFn, AddInputFunction addInputFn, AddExtendedInputFunction addExtendedInputFn, AddOutputFunction addOutputFn, AddExtendedOutputFunction addExtendedOutputFn, AddStateFunction addStateFn, AddExtendedStateFunction addExtendedStateFn, HasStateFunction hasStateFn, RegisterTasksFunction registerTasksFn, GetAllMetadataFunction getAllMetadataFn, GetMetadataFunction getMetadataFn, GetMetadataCountFunction getMetadataCountFn, SetMetadataFunction setMetadataFn, GetScheduleNodeCountFunction getScheduleNodeCountFn, GetScheduleNodesFunction getScheduleNodesFn, OnConnectionTypeResolveFunction onConnectionTypeResolveFn, InspectFunction inspectFn, ComputeVectorizedFunction computeVectorized, ReleaseInstanceFunction releaseInstance, int versionNumber) { m_versionNumber = versionNumber; m_interface.addInput = addInputFn; m_interface.addExtendedInput = addExtendedInputFn; m_interface.addOutput = addOutputFn; m_interface.addExtendedOutput = addExtendedOutputFn; m_interface.addState = addStateFn; m_interface.addExtendedState = addExtendedStateFn; m_interface.compute = computeFn; m_interface.getNodeType = nameFn; m_interface.getScheduleNodes = getScheduleNodesFn; m_interface.getScheduleNodeCount = getScheduleNodeCountFn; m_interface.hasState = hasStateFn; m_interface.initialize = initializeFn; m_interface.initializeType = initializeTypeFn; m_interface.registerTasks = registerTasksFn; m_interface.getAllMetadata = getAllMetadataFn; m_interface.getMetadata = getMetadataFn; m_interface.getMetadataCount = getMetadataCountFn; m_interface.setMetadata = setMetadataFn; m_interface.release = releaseFn; m_interface.updateNodeVersion = updateNodeVersionFn; m_interface.onConnectionTypeResolve = onConnectionTypeResolveFn; m_interface.inspect = inspectFn; m_interface.computeVectorized = 
computeVectorized; m_interface.releaseInstance = releaseInstance; m_interface.getCarbABIVersion = []() { return INodeType::getInterfaceDesc().version; }; } // Node registrations must be unique NodeTypeRegistration() = delete; NodeTypeRegistration(const NodeTypeRegistration&) = delete; NodeTypeRegistration& operator=(const NodeTypeRegistration&) = delete; private: INodeType m_interface = {}; // Interface created for this node type int m_versionNumber{ kDefaultNodeTypeVersion }; // Node type's registered version number }; // Template class from which nodes can derive to automate their registration. // template <typename T> class NodeTypeRegistrar { static const char* s_nodeTypeName; // Name used if the node does not have a getNodeTypeName method public: // Register the node type, returning a scoped registration object. Your node type will be registered and // available for the lifetime of the returned object. static NodeTypeRegistration registerNode(const char* name, int versionNumber, InitializeTypeFunction initializeTypeOverride) { s_nodeTypeName = name; auto nodeTypeNameGetter = getNodeTypeFunction<T>(); if (!nodeTypeNameGetter) { nodeTypeNameGetter = &getNodeTypeName; } return std::move(NodeTypeRegistration( nodeTypeNameGetter, computeFunction<T>(), initializeFunction<T>(), releaseFunction<T>(), initializeTypeOverride ? 
initializeTypeOverride : initializeTypeFunction<T>(), updateNodeVersionFunction<T>(), addInputFunction<T>(), addExtendedInputFunction<T>(), addOutputFunction<T>(), addExtendedOutputFunction<T>(), addStateFunction<T>(), addExtendedStateFunction<T>(), hasStateFunction<T>(), registerTasksFunction<T>(), getAllMetadataFunction<T>(), getMetadataFunction<T>(), getMetadataCountFunction<T>(), setMetadataFunction<T>(), getScheduleNodeCountFunction<T>(), getScheduleNodesFunction<T>(), onConnectionTypeResolveFunction<T>(), inspectFunction<T>(), computeVectorizedFunction<T>(), releaseInstanceFunction<T>(), versionNumber)); } static const char* getNodeTypeName() { return s_nodeTypeName; } }; // When the template instantiates the class it will also instantiate this static member template <typename NodeType> const char* NodeTypeRegistrar<NodeType>::s_nodeTypeName{ nullptr }; // Macro that simplifies the syntax of node registration, creating a scoped object for automatic register/unregister // that can be accessed through the consistently named method "nodeTypeRegistrationMYNODENAME" to call its // registerNodeType and unregisterNodeType methods at the appropriate time. #define REGISTER_NODE_TYPE(NODE_CLASS, NODE_TYPE_NAME, NODE_TYPE_VERSION) \ const omni::graph::core::NodeTypeRegistration& nodeTypeRegistration##NODE_CLASS() \ { \ static omni::graph::core::NodeTypeRegistration s_nodeRegistration{ \ omni::graph::core::NodeTypeRegistrar<NODE_CLASS>::registerNode(NODE_TYPE_NAME, NODE_TYPE_VERSION, nullptr) \ }; \ return s_nodeRegistration; \ } } } } #define REGISTER_NAMESPACED_NODE_TYPE(NODE_CLASS, NAMESPACE, NODE_TYPE_NAME, NODE_TYPE_VERSION) \ const omni::graph::core::NodeTypeRegistration& nodeTypeRegistration##NODE_CLASS() \ { \ static omni::graph::core::NodeTypeRegistration s_nodeRegistration{ \ omni::graph::core::NodeTypeRegistrar<NAMESPACE ::NODE_CLASS>::registerNode( \ NODE_TYPE_NAME, NODE_TYPE_VERSION, nullptr) \ }; \ return s_nodeRegistration; \ }
10,672
C
50.810679
134
0.619097
omniverse-code/kit/include/omni/graph/core/IBundleFactory.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.

#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>

#include <functional>
#include <type_traits>
#include <utility>

#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL

//! Generated wrapper that forwards the public IBundleFactory API to the raw _abi virtuals.
template <>
class omni::core::Generated<omni::graph::core::IBundleFactory_abi> : public omni::graph::core::IBundleFactory_abi
{
public:
    OMNI_PLUGIN_INTERFACE("omni::graph::core::IBundleFactory")

    /**
     * Create bundles at given paths and acquire instances of IBundle2 interface.
     *
     * @param[in] contextObj The context where bundles are created.
     * @param[in] paths Locations for new bundles.
     * @param[in] pathCount Length of paths array.
     * @param[out] createdBundles Output instances of IBundle2 interface.
     * @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
     * invalid.
     */
    omni::core::Result createBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                     const carb::flatcache::PathC* const paths,
                                     size_t pathCount,
                                     omni::graph::core::IBundle2** const createdBundles) noexcept;

    /**
     * Acquire instances of IConstBundle2 interface from const bundle handles.
     *
     * @param[in] contextObj The context where bundles belong to.
     * @param[in] bundleHandles The bundle handles.
     * @param[in] bundleCount Length of bundleHandles array.
     * @param[out] bundles Output instances of IConstBundle2 interface.
     * @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
     * invalid.
     */
    omni::core::Result getConstBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                       const omni::graph::core::ConstBundleHandle* const bundleHandles,
                                       size_t bundleCount,
                                       omni::graph::core::IConstBundle2** const bundles) noexcept;

    /**
     * Acquire instances of IBundle2 interface from bundle handles.
     *
     * @param[in] contextObj The context where bundles belong to.
     * @param[in] bundleHandles The bundle handles.
     * @param[in] bundleCount Length of bundleHandles array.
     * @param[out] bundles Output instances of IBundle2 interface.
     * @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
     * invalid.
     */
    // NOTE(review): the doc above previously said "IConstBundle2" for the out-param — the signature
    // clearly takes IBundle2**; fixed to match. The generator's source .h likely has the same typo.
    omni::core::Result getBundles(const omni::graph::core::GraphContextObj* const contextObj,
                                  const omni::graph::core::BundleHandle* const bundleHandles,
                                  size_t bundleCount,
                                  omni::graph::core::IBundle2** const bundles) noexcept;
};

#endif

#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL

// Inline trampolines: each simply forwards to the corresponding *_abi virtual.
inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::createBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const carb::flatcache::PathC* const paths,
    size_t pathCount,
    omni::graph::core::IBundle2** const createdBundles) noexcept
{
    return createBundles_abi(contextObj, paths, pathCount, createdBundles);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::getConstBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::graph::core::ConstBundleHandle* const bundleHandles,
    size_t bundleCount,
    omni::graph::core::IConstBundle2** const bundles) noexcept
{
    return getConstBundles_abi(contextObj, bundleHandles, bundleCount, bundles);
}

inline omni::core::Result omni::core::Generated<omni::graph::core::IBundleFactory_abi>::getBundles(
    const omni::graph::core::GraphContextObj* const contextObj,
    const omni::graph::core::BundleHandle* const bundleHandles,
    size_t bundleCount,
    omni::graph::core::IBundle2** const bundles) noexcept
{
    return getBundles_abi(contextObj, bundleHandles, bundleCount, bundles);
}

#endif

#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
4,743
C
40.982301
114
0.684166
omniverse-code/kit/include/omni/graph/core/INodeCategories.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/core/IObject.h>

namespace omni
{
namespace graph
{
namespace core
{

//! Declare the INodeCategories interface definition
OMNI_DECLARE_INTERFACE(INodeCategories);

/** Interface to the list of categories that a node type can belong to.
 *  Pure-virtual ABI; the concrete API wrapper is produced by omni.bind (see the generated include below).
 */
class INodeCategories_abi
    : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.core.INodeCategories")>
{
protected:
    /**
     * Get the number of categories available
     *
     * @returns Count of fixed category types
     */
    virtual size_t getCategoryCount_abi() noexcept = 0;

    /**
     * Get the list of available categories and their descriptions.
     *
     * The caller is responsible for allocating and destroying buffers large enough to hold "bufferSize" results.
     * If bufferSize > getCategoryCount() then the entries at the ends of the buffers will be filled with nullptr.
     *
     * @param[in] categoryNameBuffer List of category names
     * @param[in] categoryDescriptionBuffer List of category descriptions corresponding to the names
     * @param[in] bufferSize Number of entries to fill in the buffers
     *
     * @return true if the category buffer was successfully filled and the bufferSize matched the category count
     */
    virtual OMNI_ATTR("no_py") bool getCategories_abi(
        OMNI_ATTR("*c_str, out, not_null, count=bufferSize") char const** categoryNameBuffer,
        OMNI_ATTR("*c_str, out, not_null, count=bufferSize") char const** categoryDescriptionBuffer,
        size_t bufferSize
    ) noexcept = 0;

    /**
     * Define a new category
     *
     * @param[in] categoryName Name of the new category
     * @param[in] categoryDescription Description of the category
     *
     * @return false if there was already a category with the given name
     */
    virtual bool defineCategory_abi(
        OMNI_ATTR("c_str, in, not_null") char const* categoryName,
        OMNI_ATTR("c_str, in, not_null") char const* categoryDescription
    ) noexcept = 0;

    /**
     * Remove an existing category, mainly to manage the ones created by a node type for itself
     *
     * @param[in] categoryName Name of the category to remove
     *
     * @return false if there was no category with the given name
     */
    virtual bool removeCategory_abi(
        OMNI_ATTR("c_str, in, not_null") char const* categoryName
    ) noexcept = 0;
};

} // namespace core
} // namespace graph
} // namespace omni

#include "INodeCategories.gen.h" // generated file
2,946
C
35.382716
114
0.70095
omniverse-code/kit/include/omni/graph/core/PyIBundleFactory.gen.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindIBundleFactory(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::IBundleFactory_abi>, omni::core::ObjectPtr<omni::core::Generated<omni::graph::core::IBundleFactory_abi>>, omni::core::IObject> clsParent(m, "_IBundleFactory"); py::class_<omni::graph::core::IBundleFactory, omni::core::Generated<omni::graph::core::IBundleFactory_abi>, omni::core::ObjectPtr<omni::graph::core::IBundleFactory>, omni::core::IObject> cls(m, "IBundleFactory"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::IBundleFactory>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::IBundleFactory>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::IBundleFactory instantiation"); } return tmp; })); return omni::python::PyBind<omni::graph::core::IBundleFactory>::bind(cls); }
2,170
C
39.203703
120
0.631797
omniverse-code/kit/include/omni/graph/core/iComputeGraph.h
// Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/events/IEvents.h> #include <carb/Defines.h> #include <carb/Interface.h> #include <carb/Types.h> #include <omni/fabric/Enums.h> #include <omni/fabric/IPath.h> #include <omni/fabric/IToken.h> #include <omni/graph/core/Type.h> #include <omni/graph/core/Handle.h> #include <omni/graph/core/ISchedulingHints2.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/IVariable2.h> #include <omni/graph/core/iAttributeData.h> #include <omni/graph/exec/unstable/Stamp.h> #include <omni/graph/core/bundle/IBundle1.h> #include <omni/inspect/IInspector.h> #include <cstddef> // Interfaces pulled out of this file but which are still referenced through it #include "IGraphRegistry.h" #ifdef __CUDA_ARCH__ #error iComputeGraph.h cannot be included from a .cu file due to a compiler problem. You probably want Handle.h. #endif namespace omni { namespace graph { namespace core { //! 
If 1 then extra logging is enabled (which affects performance) #define COMPUTE_GRAPH_VERBOSE_LOGGING 0 // ============================================================================================================== /** Encapsulates the information required to define a file format version number */ struct FileFormatVersion { int majorVersion; //!< Major version, for which changes mean incompatible formats int minorVersion; //!< Minor version, for which changes mean compatible formats, possibly with auto-upgrades /** * @brief Equality operator for the file format version object * * @param rhs Version number to compare against * @return true The version number is not equal to @p rhs * @return false The version number is equal to @p rhs */ bool operator==(const FileFormatVersion& rhs) const { return rhs.majorVersion == majorVersion && rhs.minorVersion == minorVersion; } /** * @brief Inequality operator for the file format version object * * @param rhs Version number to compare against * @return true The version number is not equal to @p rhs * @return false The version number is equal to @p rhs */ bool operator!=(const FileFormatVersion& rhs) const { return !(*this==rhs); } /** * @brief Less-than operator for the file format version object * * @param rhs Version number to compare against * @return true The version number is less than @p rhs * @return false The version number is greater than or equal to @p rhs */ bool operator<(const FileFormatVersion& rhs) const { return majorVersion < rhs.majorVersion || ((majorVersion == rhs.majorVersion) && (minorVersion < rhs.minorVersion)); } /** * @brief Greater-than operator for the file format version object * * @param rhs Version number to compare against * @return true The version number is greater than @p rhs * @return false The version number is less than or equal to @p rhs */ bool operator>(const FileFormatVersion& rhs) const { return majorVersion > rhs.majorVersion || ((majorVersion == rhs.majorVersion) && (minorVersion 
> rhs.minorVersion)); } }; struct GraphObj; // ============================================================================================================== /** Callback object to instantiate for use as a callback when an older version of an OmniGraph file is read */ struct FileFormatUpgrade { /** * Callback function definition. Parameters are * * - oldVersion Version of the file being read * - newVersion Current version of the file format * - userData User data to pass to the callback */ void(*fileFormatUpgradeCallback)(const FileFormatVersion& oldVersion, const FileFormatVersion& newVersion, GraphObj& graphObj, void* userData); /** User data to pass to the callback function */ void* userData; /** * @brief Equality operator for the file format upgrade callback object * * @param rhs Callback object to compare against * @return true The callback object is equal to @p rhs * @return false The callback object is not equal to @p rhs */ bool operator==(const FileFormatUpgrade& rhs) const { return rhs.fileFormatUpgradeCallback == fileFormatUpgradeCallback && rhs.userData == userData; } /** * @brief Inequality operator for the file format upgrade callback object * * @param rhs Callback object to compare against * @return true The callback object is not equal to @p rhs * @return false The callback object is equal to @p rhs */ bool operator!=(const FileFormatUpgrade& rhs) const { return !(*this == rhs); } }; // ============================================================================================================== /** * @brief Encapsulation of a callback that happens when a node's error status changes * */ struct ErrorStatusChangeCallback { /** * Callback function definition. 
Parameters are * * - nodeList List of nodes whose error status changed since the last compute * - graphObj Graph to which the nodes belong * - userData User data to pass to the callback */ void (*errorStatusChangeCallback)(const std::vector<NodeObj>& nodeList, GraphObj& graphObj, void* userData); /** User data to pass to the callback function */ void* userData; /** * @brief Equality operator for the error status change callback object * * @param rhs Callback object to compare against * @return true The callback object is equal to @p rhs * @return false The callback object is not equal to @p rhs */ bool operator==(const ErrorStatusChangeCallback& rhs) const { return rhs.errorStatusChangeCallback == errorStatusChangeCallback && rhs.userData == userData; } /** * @brief Inequality operator for the error status change callback object * * @param rhs Callback object to compare against * @return true The callback object is not equal to @p rhs * @return false The callback object is equal to @p rhs */ bool operator!=(const ErrorStatusChangeCallback& rhs) const { return !(*this == rhs); } }; // ============================================================================================================== /** Type of connection between two attributes */ enum ConnectionType { kConnectionType_Regular = 0, //!< Normal attribute to attribute evaluation connection kConnectionType_DataOnly = 1, //!< Data only connection, not implying evaluation kConnectionType_Execution = 2, //!< Execution type connection, for execution type attributes kConnectionType_Bundle = 3, //!< Bundle to bundle connections kConnectionType_PureRelationship = 4 //!< Only establish a relationship, no execution or data passed }; // ============================================================================================================== /** Extended type of an attribute */ enum ExtendedAttributeType { kExtendedAttributeType_Regular = 0, //!< No extended type, just a normal strongly typed attribute 
kExtendedAttributeType_Union = 1, //!< An attribute that could be any one of a specific list of types kExtendedAttributeType_Any = 2 //!< An attribute that can be any legal type }; // ============================================================================================================== /** Port type of an attribute */ enum AttributePortType { kAttributePortType_Input = 0, //!< The attribute is an input kAttributePortType_Output = 1, //!< The attribute is an output kAttributePortType_State = 2, //!< The attribute holds state information kAttributePortType_Unknown = 3 //!< The port type is currently unknown }; // ============================================================================================================== /** * The kind of backing for the graph - can be one of FC shared, with history * or without history. The shared FC means the orchestration graph (the graph * containing all other graphs as nodes) has a stage with history, and all * other global level graphs (which are nodes in this orchestration graph) share * this stage with history FC. The shared FC also applies to subgraphs that share * the same FC as their parent graph. The stage with history setting is self explanatory * but note there can only be 1 stage with history currently. 
The stage without * history uses a separate FC to house the data for the graph * The "None" backing type are for orchestration graphs (graphs that hold other graphs * as nodes) that don't necessarily need a cache to hold "real" data` */ enum GraphBackingType { kGraphBackingType_FabricShared = 0, //!< Graph backing is a shared copy of Fabric kGraphBackingType_FabricWithHistory = 1, //!< Use the Fabric instantiation that has history kGraphBackingType_FabricWithoutHistory = 2, //!< Use the Fabric instantiation that has no retained history kGraphBackingType_Unknown = 3, //!< Backing type is currently unknown kGraphBackingType_None = 4, //!< There is no backing for the OmniGraph data kGraphBackingType_FlatCacheShared = 0, //!< @private Deprecated, use kGraphBackingType_FabricShared kGraphBackingType_FlatCacheWithHistory = 1, //!< @private Deprecated, use kGraphBackingType_FabricWithHistory kGraphBackingType_FlatCacheWithoutHistory = 2, //!< @private Deprecated, use kGraphBackingType_FabricWithoutHistory }; // ============================================================================================================== /** * The pipeline stage defines where this graph is going to be used - as part of * simulation (before rendering), pre-rendering (after sim but before render), or * post-rendering. Each pipeline stage will have a set of graphs that will * be run there. We use larger numbers for the enums with spaces in between * so that other stages can be inserted in the future. The custom pipeline stage * allows for graphs that can be run at any unknown time. */ enum GraphPipelineStage { kGraphPipelineStage_Simulation = 10, //!< The simulation phase, a.k.a. 
normal evaluation kGraphPipelineStage_PreRender = 20, //!< The preRender phase, run just before Hydra takes over kGraphPipelineStage_PostRender = 30, //!< The postRender phase, run after Hydra finishes kGraphPipelineStage_Unknown = 100, //!< The phase is currently unknown kGraphPipelineStage_OnDemand = 200, //!< The graph evaluates only on demand, not as part of the pipeline kGraphPipelineStage_Count = 4 //!< The number of existing pipelines }; // ============================================================================================================== /** * The graph evaluation mode specifies whether a graph is intended to self-evaluate, * or if represents an asset to by evaluated on behalf of a different Prim. */ enum class GraphEvaluationMode { //! In Automatic mode, the graph is evaluated as Standalone, unless an OmniGraphAPI interface has a relationship to it, //! in which case it is evaluated as Instanced mode. Automatic = 0, //! In Standalone mode, the graph is evaluated once, with itself as the graph target Standalone = 1, //! In Instanced mode, the graph is evaluated once for each OmniGraphAPI interface with a relationship to the graph Prim //! Graphs that are used as assets should use this mode to prevent standalone execution. Instanced = 2 }; // ============================================================================================================== /** Information passed to define the opposite end of a connection */ struct ConnectionInfo { AttributeObj attrObj; //!< Attribute at the opposite end ConnectionType connectionType; //!< Type of connection being made }; // ============================================================================================================== /** Callback object used when a connection is made or broken between two attributes */ struct ConnectionCallback { /** * Callback function definition. 
Parameters are * * - srcAttr Source end of the connection that changed * - dstAttr Destination end of the connection that changed * - userData User data to pass to the callback */ void(*connectionCallback)(const AttributeObj& srcAttr, const AttributeObj& dstAttr, void* userData); /** User data to pass to the callback function */ void* userData; /** * @brief Equality operator for the connection/disconnection callback object * * @param rhs Callback object to compare against * @return true The callback object is equal to @p rhs * @return false The callback object is not equal to @p rhs */ bool operator==(const ConnectionCallback& rhs) const { return rhs.connectionCallback == connectionCallback && rhs.userData == userData; } /** * @brief Inequality operator for the connection/disconnection callback object * * @param rhs Callback object to compare against * @return true The callback object is not equal to @p rhs * @return false The callback object is equal to @p rhs */ bool operator!=(const ConnectionCallback& rhs) const { return !(*this == rhs); } }; // ============================================================================================================== /** Callback object used when a path has changed, requiring a path attribute update */ struct PathChangedCallback { /** * Callback function definition. 
Parameters are * * - paths Array of paths that have changed * - numPaths Number of paths in the array * - userData User data to pass to the callback */ void(*pathChangedCallback)(const omni::fabric::PathC* paths, const size_t numPaths, void* userData); /** User data to pass to the callback function */ void* userData; /** * @brief Equality operator for the path change callback object * * @param rhs Callback object to compare against * @return true The callback object is equal to @p rhs * @return false The callback object is not equal to @p rhs */ bool operator==(const PathChangedCallback& rhs) const { return rhs.pathChangedCallback == pathChangedCallback && rhs.userData == userData; } /** * @brief Inequality operator for the path change callback object * * @param rhs Callback object to compare against * @return true The callback object is not equal to @p rhs * @return false The callback object is equal to @p rhs */ bool operator!=(const PathChangedCallback& rhs) const { return !(*this == rhs); } }; namespace ogn { class OmniGraphDatabase; } /** * A callback allocate, initialize, and returns an OGN database for a given node */ using CreateDbFunc = ogn::OmniGraphDatabase* (*)(GraphContextObj const*, NodeObj const*, size_t); /** * Parameters for IGraph::CreateGraphAsNode */ struct CreateGraphAsNodeOptions { static const uint8_t kCurrentVersion = 1; //!< Version number of this structure //! The version of this structure. const uint8_t version{ CreateGraphAsNodeOptions::kCurrentVersion }; //! The name of the node that wraps the graph const char* nodeName{ "" }; //! The path to where the graph that the node will wrap will be added const char* graphPath{ "" }; //! The evaluator to use for the new graph const char* evaluatorName{ "" }; //! Whether this graph is a top level global graph bool isGlobalGraph{ true }; //! Whether to back this graph by USD bool backByUSD{ true }; //! 
What kind of FC backs this graph GraphBackingType backingType{ kGraphBackingType_FabricShared }; //! What pipeline stage this graph occupies GraphPipelineStage pipelineStage{ kGraphPipelineStage_Simulation }; //! The evaluation mode for the graph GraphEvaluationMode evaluationMode{ GraphEvaluationMode::Automatic }; }; /** * @brief Possible values to be set for Attributes of type "execution" * @private Deprecated. See omni::graph::action::IActionGraph. */ enum ExecutionAttributeState : uint32_t { kExecutionAttributeStateDisabled, //!< Output attribute connection is disabled kExecutionAttributeStateEnabled, //!< Output attribute connection is enabled //! Output attribute connection is enabled and the node is pushed to the evaluation stack kExecutionAttributeStateEnabledAndPush, //! Push this node as a latent event for the current entry point kExecutionAttributeStateLatentPush, // Output attribute connection is enabled and the latent state is finished for this node kExecutionAttributeStateLatentFinish }; /** * The attribute name prefix used for attributes which hold the concrete value of resolved extended attributes. */ #define RESOLVED_ATTRIBUTE_PREFIX "__resolved_" /** * The path used to identify the targeted prim, when graph instancing is used. At runtime this token will be * replaced with the absolute path to the targeted prim. 
*/ static constexpr char kInstancingGraphTargetPath[] = "_OMNI_GRAPH_TARGET"; /** * The value of an uninitialized attribute for a type (eg: tuple_count, array_depth) */ constexpr uint8_t kUninitializedTypeCount = UINT8_MAX; /** * Value representing an instance */ struct InstanceIndex { /** Returns true iff this InstanceIndex is equal to the @p other */ bool const operator==(InstanceIndex const& other) const { return index == other.index; } /** Returns true iff this InstanceIndex is not equal to the @p other */ bool const operator!=(InstanceIndex const& other) const { return index != other.index; } /** Returns true iff this InstanceIndex is less than the @p other */ bool const operator<(InstanceIndex const& other) const { return index < other.index; } /** Returns true iff this InstanceIndex is less than or equal to the @p other */ bool const operator<=(InstanceIndex const& other) const { return index <= other.index; } /** Returns true iff this InstanceIndex is greater than the @p other */ bool const operator>(InstanceIndex const& other) const { return index > other.index; } /** Returns true iff this InstanceIndex is greater than or equal to the @p other */ bool const operator>=(InstanceIndex const& other) const { return index >= other.index; } /** Returns the sum of this instance index and that of the @p other */ InstanceIndex operator+(InstanceIndex other) const { return { index + other.index }; } /** Returns the sum of this instance index and @p idx */ InstanceIndex operator+(size_t idx) const { return { index + idx }; } /** Increments the instance index by the index amount contained in @p other */ InstanceIndex& operator+=(InstanceIndex other) { index += other.index; return *this; } /** Increments the index value */ InstanceIndex& operator++() { index++; return *this; } size_t index{ 0 }; //!< Index value for the instance }; /** * Some default instance value */ static constexpr InstanceIndex kAuthoringGraphIndex{ size_t(-1) }; //!< Special index for the authoring 
graph original static constexpr InstanceIndex kAccordingToContextIndex{ size_t(-2) }; //!< Special index for a context static constexpr InstanceIndex kInvalidInstanceIndex{ size_t(-3) }; //!< Special index indicating an invalid instance // ====================================================================== /** Interface to provide functionality to access and modify properties of an OmniGraph attribute. */ struct IAttribute { //! @private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IAttribute", 1, 12); /** * Returns the name of the attribute * * @param[in] attr Reference to the AttributeObj struct representing the attribute object * @return The name of the attribute */ const char*(CARB_ABI* getName)(const AttributeObj& attr); /** * Returns the type name of the attribute * * @param[in] attr Reference to the AttributeObj struct representing the attribute object * @return The type name of the attribute */ const char*(CARB_ABI* getTypeName)(const AttributeObj& attr); /** * Returns the extended type, if any, of the attribute. Extended types are things like "union" and * "any" types that aren't in the explicit list of types in USD. kExtendedAttributeType_Regular * means that the attribute is not one of these extended types. * * @param[in] attr Reference to the AttributeObj struct representing the attribute object * @return The extended type of the attribute */ ExtendedAttributeType(CARB_ABI* getExtendedType)(const AttributeObj& attr); /** * Returns the resolved type an extended type like union actually turns out to be, by * inferring it from the connection. If the type is still not resolved, the BaseDataType of * the returned type will have eNone as its value. * * If the attribute type is just a Regular one then this method will return its permanent type. 
* @param[in] attr Reference to the AttributeObj struct representing the attribute object
     * @return The resolved type of the attribute, based on the connection
     */
    Type(CARB_ABI* getResolvedType)(const AttributeObj& attr);

    /**
     * Returns whether the attribute is an array
     *
     * @param[in] attr Reference to the AttributeObj struct representing the attribute object
     * @return Whether or not the attribute is an array
     */
    bool(CARB_ABI* isArray)(const AttributeObj& attr);

    /**
     * Connects an attribute using a relationship to some other prim. This could be for a bundle connection
     * or a pure relationship to a prim. In the case of a pure relationship to a prim, some meta-data will
     * be added to mark the relationship as not being used for a bundle
     *
     * @param[in] attrObj The attr that represents the relationship
     * @param[in] pathToPrim The path to the prim to connect to
     * @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
     * @param[in] isBundleConnection Whether the connection is to be used for bundles or just a pure relationship to a prim
     * @return true if connection is successful, false otherwise
     */
    bool(CARB_ABI* connectPrim)(const AttributeObj& attrObj, const char* pathToPrim, bool modifyInUsd, bool isBundleConnection);

    /**
     * Disconnects an attribute using a relationship to some other prim. This could be for a bundle connection
     * or a pure relationship to a prim.
     *
     * @param[in] attrObj The attr that represents the relationship
     * @param[in] pathToPrim The path to the prim to disconnect
     * @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
     * @param[in] isBundleConnection Whether the connection is to be used for bundles or just a pure relationship to a prim
     * @return true if disconnection is successful, false otherwise
     */
    bool(CARB_ABI* disconnectPrim)(const AttributeObj& attrObj, const char* pathToPrim, bool modifyInUsd, bool isBundleConnection);

    /**
     * Connects two attributes together to add an edge to the graph. This is a legacy version of the
     * connection API. Calling this is equivalent to setting kConnectionType_Regular type connections.
     * Please use connectAttrsEx to have fuller control over the kind of connections created.
     *
     * @param[in] srcAttr The attr that is the source of the directed connection
     * @param[in] destAttr The attr that is the destination of the directed connection
     * @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
     * @return true if connection is successful, false otherwise
     */
    bool(CARB_ABI* connectAttrs)(const AttributeObj& srcAttr, const AttributeObj& destAttr, bool modifyInUsd);

    /**
     * Connects two attributes together to add an edge to the graph. This is an extended version with
     * more information about the connection, such as the type of connection.
     *
     * @param[in] srcAttr The attr that is the source of the directed connection
     * @param[in] destAttr A ConnectionInfo struct describing the destination of the connection
     * @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
     * @return true if connection is successful, false otherwise
     */
    bool(CARB_ABI* connectAttrsEx)(const AttributeObj& srcAttr, const ConnectionInfo& destAttr, bool modifyInUsd);

    /**
     * Disconnects two attributes that are connected
     *
     * @param[in] srcAttr The attribute that is the source of the directed connection
     * @param[in] destAttr The attribute that is the destination of the directed connection
     * @param[in] modifyInUsd Whether the connection is also modified in the underlying USD representation
     * @return true if connection is successfully broken, false otherwise (if no connections existed)
     */
    bool(CARB_ABI* disconnectAttrs)(const AttributeObj& srcAttr, const AttributeObj& destAttr, bool modifyInUsd);

    /**
     * Queries whether two attributes are connected
     *
     * @param[in] srcAttr The attribute that is the source of the directed connection
     * @param[in] destAttr The attribute that is the destination of the directed connection
     * @return true if the two attributes are connected, false otherwise
     */
    bool(CARB_ABI* areAttrsConnected)(const AttributeObj& srcAttr, const AttributeObj& destAttr);

    /**
     * Queries whether two attributes are connection compatible
     *
     * @param[in] srcAttr The attribute that would be the source of the directed connection
     * @param[in] destAttr The attribute that would be the destination of the directed connection
     * @return true if the two attributes are compatible, false otherwise
     */
    bool(CARB_ABI* areAttrsCompatible)(const AttributeObj& srcAttr, const AttributeObj& destAttr);

    /**
     * Retrieves the number of upstream connections to the attribute of a node
     *
     * @param[in] attrObj The attribute object for which to retrieve the connection count
     * @return The number of upstream connections to that attribute
     */
size_t(CARB_ABI* getUpstreamConnectionCount)(const AttributeObj& attrObj);

    /**
     * Retrieves the upstream connections of the attribute of a node
     *
     * @param[in] attrObj The attribute object for which to retrieve the connections
     * @param[out] attrsBuf Buffer to hold the return AttributeObj
     * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getUpstreamConnections)(const AttributeObj& attrObj, AttributeObj* attrsBuf, size_t bufferSize);

    /**
     * Retrieves the detailed upstream connection info of an attribute. Includes information like type of
     * connections.
     *
     * @param[in] attrObj The attribute object for which to retrieve the connections
     * @param[out] connectionInfoBuf Buffer to hold the return ConnectionInfo
     * @param[in] bufferSize the number of ConnectionInfo structures the buffer is able to hold
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getUpstreamConnectionsInfo)(const AttributeObj& attrObj, ConnectionInfo* connectionInfoBuf, size_t bufferSize);

    /**
     * Retrieves the number of downstream connections to the attribute of a node
     *
     * @param[in] attrObj The attribute object for which to retrieve the connection count
     * @return The number of downstream connections to that attribute
     */
    size_t(CARB_ABI* getDownstreamConnectionCount)(const AttributeObj& attrObj);

    /**
     * Retrieves the downstream connections of the attribute of a node
     *
     * @param[in] attrObj The attribute object for which to retrieve the connections
     * @param[out] attrsBuf Buffer to hold the return AttributeObj
     * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getDownstreamConnections)(const AttributeObj& attrObj, AttributeObj* attrsBuf, size_t bufferSize);

    /**
     * Retrieves the detailed downstream connection info of an attribute. Includes information like type of
     * connections.
     *
     * @param[in] attrObj The attribute object for which to retrieve the connections
     * @param[out] connectionInfoBuf Buffer to hold the return ConnectionInfo
     * @param[in] bufferSize the number of ConnectionInfo structures the buffer is able to hold
     * @return true on success, false on failure
     */
    bool(CARB_ABI* getDownstreamConnectionsInfo)(const AttributeObj& attrObj, ConnectionInfo* connectionInfoBuf, size_t bufferSize);

    /**
     * Retrieves the node associated with this attribute
     *
     * @param[in] attrObj The attribute object for which to retrieve the node
     * @return The NodeObj representing the node. In case of failure, the node handle
     * will be kInvalidNodeHandle
     */
    NodeObj(CARB_ABI* getNode)(const AttributeObj& attrObj);

    /**
     * Ensures the attribute's value is updated, before reading it. For push graphs this does nothing, as the
     * push graph is always evaluating and considered up to date.
     * For pull graphs, this generates the true "pull" on the attribute, that will cause whatever is upstream
     * and is dirty to evaluate.
     *
     * @param[in] attrObj The attribute object for which to update the value for
     * @param[in] updateImmediately Whether to cause the graph to update immediately (synchronously)
     * @return Whether the update was successful
     */
    bool(CARB_ABI* updateAttributeValue)(const AttributeObj& attrObj, bool updateImmediately);

    /** @private Deprecated - do not use */
    AttributeDataHandle(CARB_ABI* deprecated_0)(const AttributeObj&);

    /** @private Deprecated - do not use */
    ConstAttributeDataHandle(CARB_ABI* deprecated_1)(const AttributeObj&);

    /**
     * Registers a callback to be invoked when the value of the current attribute changes
     *
     * An attribute only permits a single callback and when called, the previously set callback
     * is replaced. Passing nullptr as the callback will remove any existing callback.
     *
     * @param[in] attrObj Reference to the AttributeObj struct representing the current attribute object
     * @param[in] onValueChanged The callback to trigger. Parameters are the attribute involved, and the new value
     * @param[in] triggerOnConnected Whether to trigger the callback on connected attributes.
     */
    void(CARB_ABI* registerValueChangedCallback)(const AttributeObj& attrObj,
                                                 void (*onValueChanged)(const AttributeObj& attr, const void* value),
                                                 bool triggerOnConnected);

    /**
     * Returns the set of all metadata on this attribute.
     *
     * The keyBuf and valueBuf arrays are preallocated by the caller, and contain at least "getMetadataCount()"
     * entries in them.
     * All returned strings are owned by the node type and not to be destroyed.
     * The returned keyBuf and valueBuf must have exactly the same size with corresponding index values; that is
     * keyBuf[i] is the metadata name for the string in valueBuf[i].
     *
     * @param[in] thisAttribute Reference to the AttributeObj struct representing the current attribute object
     * @param[out] keyBuf Buffer in which to put the list of metadata keys
     * @param[out] valueBuf Buffer in which to put the list of metadata values
     * @param[in] bufferSize the number of strings each of the two buffers is able to hold
     * @return Number of metadata items successfully populated
     */
    size_t(CARB_ABI* getAllMetadata)(const AttributeObj& thisAttribute, const char** keyBuf, const char** valueBuf, size_t bufferSize);

    /**
     * Retrieves a metadata value from this attribute
     *
     * @param[in] thisAttribute Reference to the AttributeObj struct representing the current attribute object
     * @param[in] key The name of the metadata to be retrieved
     * @return The value of the metadata, or nullptr if the named metadata was not set on this attribute
     */
    const char*(CARB_ABI* getMetadata)(const AttributeObj& thisAttribute, const char* key);

    /**
     * Returns the number of metadata entries on this attribute
     *
     * @param[in] thisAttribute Reference to the AttributeObj struct representing the current attribute object
     * @return the number of metadata key/value pairs on this attribute
     */
    size_t(CARB_ABI* getMetadataCount)(const AttributeObj& thisAttribute);
/**
     * Sets a metadata value on this attribute.
     *
     * Certain metadata keywords have special meaning internally:
     * uiName: The name of the attribute in a longer, human-readable format
     *
     * Note: The main way for metadata to be set is through the .ogn format files. If you call this directly the
     * metadata will not persist across sessions. If you wish to define metadata outside of the .ogn file
     * the best method is to override the initializeType() method in your attribute definition and set it there.
     *
     * @param[in] attrObj Reference to the AttributeObj struct representing the current attribute object
     * @param[in] key The keyword, used as the name of the metadata
     * @param[in] value The value of the metadata. Metadata can be parsed later if non-string values are desired.
     */
    void(CARB_ABI* setMetadata)(const AttributeObj& attrObj, const char* key, const char* value);

    /**
     * Where we have dynamic scheduling, downstream nodes can have their execution disabled by turning on the flag
     * in the upstream attribute. Note you also have to call setDynamicDownstreamControl on the node to enable
     * this feature. See setDynamicDownstreamControl on INode for further information.
     *
     * @param[in] attrObj Reference to the AttributeObj struct representing the current attribute object
     * @return Whether downstream nodes connected to this attribute should be disabled from further work
     */
    bool (CARB_ABI* getDisableDynamicDownstreamWork)(const AttributeObj& attrObj);

    /**
     * Where we have dynamic scheduling, downstream nodes can have their execution disabled by turning on the flag
     * in the upstream attribute. Note you also have to call setDynamicDownstreamControl on the node to enable
     * this feature. This function allows you to set the flag on the attribute that will disable the downstream
     * node. See setDynamicDownstreamControl on INode for further information.
     *
     * @param[in] attrObj Reference to the AttributeObj struct representing the current attribute object
     * @param[in] value Whether to disable the downstream connected nodes or not.
     */
    void (CARB_ABI* setDisableDynamicDownstreamWork)(const AttributeObj& attrObj, bool value);

    /**
     * Sets the resolved type of an extended type. This should be called by a node from the
     * *onConnectionTypeResolve()* callback when it determines that an extended-type attribute
     * can be resolved to a specific type. For example a generic 2-input "Add" node could resolve input B and its
     * output attribute type to float when input A is connected to a float. Passing @ref omni::fabric::Type() will
     * reset the attribute type to "unresolved".
     *
     * @note This operation is asynchronous because it is considered as part of a whole-graph type resolution
     * algorithm. It also may not succeed because there could be constraints in the graph that
     * prevent the type from being resolved as requested.
     *
     * @param[in] attr Reference to the AttributeObj struct representing the attribute object
     * @param[in] type The new type of the attribute
     */
    void(CARB_ABI* setResolvedType)(const AttributeObj& attr, const Type& type);

    /**
     * Retrieves the port type (such as input, output, state) associated with this attribute
     *
     * @param[in] attrObj The attribute object for which to retrieve the connections
     * @return the AttributePortType of this attribute
     */
    AttributePortType(CARB_ABI* getPortType)(const AttributeObj& attrObj);

    /**
     * Returns whether the attribute is a dynamic attribute (not in the node definition) or not
     *
     * @param[in] attrObj The attribute object for which to query
     * @return Whether the attribute is a dynamic one
     */
    bool(CARB_ABI* isDynamic)(const AttributeObj& attrObj);

    /**
     * Returns the full path to the attribute, including the node path
     *
     * @param[in] attrObj Reference to the AttributeObj struct representing the attribute object
     * @return The full path to the attribute
     */
    const char*(CARB_ABI* getPath)(const AttributeObj& attrObj);

    /**
     * @param[in] attribute Reference to the AttributeObj struct representing the attribute object
     * @return The string representing the extended union types, nullptr if the attribute is not a union type
     */
    const char*(CARB_ABI* getUnionTypes)(const AttributeObj& attribute);

    /**
     * Returns whether the attribute is still valid or not
     *
     * @param[in] attrObj The attribute object for which to query
     * @return Whether the attribute is still valid
     */
    bool(CARB_ABI* isValid)(const AttributeObj& attrObj);

    /**
     * Return the attribute name with the port type prepended if it isn't already present.
     *
     * @param[in] name The attribute name, with or without the port prefix
     * @param[in] portType The port type of the attribute
     * @param[in] isBundle true if the attribute name is to be used in a bundle. Note that colon is an illegal character
     * in bundled attributes so an underscore is used instead.
     * @return The name with the proper prefix for the given port type
     */
    NameToken(CARB_ABI* ensurePortTypeInName)(NameToken name, AttributePortType portType, bool isBundle);

    /**
     * Parse the port type from the given attribute name if present. The port type is indicated by a prefix separated by
     * a colon or underscore in the case of bundled attributes.
     *
     * @param[in] name The attribute name
     * @return The port type indicated by the attribute prefix if present. AttributePortType::kAttributePortType_Unknown
     * if there is no recognized prefix.
     */
    AttributePortType(CARB_ABI* getPortTypeFromName)(NameToken name);

    /**
     * Return the attribute name with the port type removed if it is present. For example "inputs:attr" becomes "attr"
     *
     * @param[in] name The attribute name, with or without the port prefix
     * @param[in] isBundle true if the attribute name is to be used in a bundle. Note that colon is an illegal character
     * in bundled attributes so an underscore is used instead.
     * @return The name with the port type prefix removed
     */
    NameToken(CARB_ABI* removePortTypeFromName)(NameToken name, bool isBundle);

    /**
     * Get the optional compute flag from the attribute. When true this flag indicates that the attribute does not
     * need to be valid in order for the compute() function to be called.
     * Note that "valid" does not necessarily mean the attribute data is up to date, it only means that everything
     * required to locate the attribute data is available and valid (otherwise outputs would always be invalid).
     *
     * @param[in] attrObj The attribute object being queried
     * @return True if the attribute is optional for compute
     */
    bool(CARB_ABI* getIsOptionalForCompute)(const AttributeObj& attrObj);

    /**
     * Set whether the attribute is optional for compute or not; mostly used by generated code.
     * This flag would be set on attributes that the compute() method may not look at. It would then be up to the
     * compute() method to check validity if it ends up requiring the attribute's value. You might use this when an
     * attribute value is not used in all compute paths, like a "choice" node that selects exactly one of its inputs
     * to send to the output - only the selected input would need to be valid for compute to succeed.
     *
     * @param[in] attrObj The attribute object being modified
     * @param[in] isOptional New value for the optional flag on the attribute
     */
    void(CARB_ABI* setIsOptionalForCompute)(const AttributeObj& attrObj, bool isOptional);

    /**
     * Returns an AttributeDataHandle to access the default data on this input attribute.
     *
     * @param[in] attrObj The input attribute object for which to update the default value for
     * @return The AttributeDataHandle associated with the default value of this attribute, to mutate data in the FC
     */
    AttributeDataHandle(CARB_ABI* getDefaultValueAttributeDataHandle)(const AttributeObj& attrObj);

    /**
     * Get the deprecated flag from the attribute. When true this flag indicates that the attribute has been
     * deprecated and will be removed in a future version of the node.
     *
     * @param[in] attrObj The attribute object being queried
     * @return True if the attribute is deprecated
     */
    bool(CARB_ABI* isDeprecated)(const AttributeObj& attrObj);

    /**
     * Return the deprecation message for an attribute.
     *
     * @param[in] attributeObj Attribute to which this function applies
     * @return String containing the attribute deprecation message (nullptr if the attribute is not deprecated)
     */
    char const*(CARB_ABI* deprecationMessage)(const AttributeObj& attributeObj);

    /**
     * Returns an AttributeDataHandle to access the data on this attribute.
     *
     * @param[in] attrObj The attribute object for which to retrieve the data accessor
     * @param[in] instanceIdx The instance index relative to the current active instance for which you want to retrieve the data.
     * @return The AttributeDataHandle associated with this attribute, to mutate data in the FC
     */
    AttributeDataHandle(CARB_ABI* getAttributeDataHandle)(const AttributeObj& attrObj, InstanceIndex instanceIdx);

    /**
     * Returns a ConstAttributeDataHandle to access the data on this attribute.
     *
     * @param[in] attrObj The attribute object for which to retrieve the data accessor
     * @param[in] instanceIdx The instance index relative to the current active instance for which you want to retrieve the data.
     * @return The ConstAttributeDataHandle associated with this attribute, to read data in the FC
     */
    ConstAttributeDataHandle(CARB_ABI* getConstAttributeDataHandle)(const AttributeObj& attrObj, InstanceIndex instanceIdx);

    /**
     * Returns whether or not this attribute is a runtime constant.
     * A runtime constant will keep the same value every frame, for every instance.
     * This property can be taken advantage of in vectorized compute
     *
     * @param[in] attrObj The attribute object to query
     *
     * @return true if the attribute is a runtime constant, false otherwise
     */
    bool(CARB_ABI* isRuntimeConstant)(const AttributeObj& attrObj);

    /**
     * Warn the framework that writing to the provided attributes is done, so it can trigger callbacks attached to them
     *
     * @param[in] attrObjs A pointer to an array of attribute objects for which to call change callbacks
     * @param[in] attrObjCount The number of object(s) in that array
     */
    void(CARB_ABI* writeComplete)(const AttributeObj* attrObjs, size_t attrObjCount);

    /**
     * Returns the name of the attribute as a token
     *
     * @param[in] attr Reference to the AttributeObj struct representing the attribute object
     * @return The name of the attribute as a token
     */
    NameToken(CARB_ABI* getNameToken)(const AttributeObj& attr);
};

// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IAttribute, getNameToken, 47)

// ======================================================================
/** Each node type in a plugin must implement this interface */
struct INodeType
{
    //! @private to avoid doxygen problems
    CARB_PLUGIN_INTERFACE("omni::graph::core::INodeType", 1, 12);

    /**
     * Return node type name - this is used by 'node:type' schema in Node prim.
     * For retrieving the node type name of a known NodeTypeObj use getTypeName().
* * @return The node type name */ const char*(CARB_ABI* getNodeType)(); /** * Implementation of compute, see above GraphContext for functions to pull/push data with other nodes * * @param[in] context structure containing both the interface and underlying object * @param[in] node Reference to the NodeObj struct representing the node object * @return true if compute is successful, false otherwise */ bool(CARB_ABI* compute)(const GraphContextObj& context, const NodeObj& node); // functions below are optional /** * This allows each node to define custom data for each node instance * * @param[in] context structure containing both the interface and underlying object * @param[in] node Reference to the NodeObj struct representing the node object pointer */ void(CARB_ABI* initialize)(const GraphContextObj& context, const NodeObj& node); /** * Release memory created by initialize function above * * @param[in] node Reference to the NodeObj struct representing the node object * pointer */ void(CARB_ABI* release)(const NodeObj& node); /** * This allows each node to be upgraded/downgraded for each node instance * * @param[in] context structure containing both the interface and underlying object * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] oldVersion int specifying the old version (of the node) * @param[in] newVersion int specifying the new version (of the node type) * @return true if the node was modified, false otherwise */ bool(CARB_ABI* updateNodeVersion)(const GraphContextObj& context, const NodeObj& node, int oldVersion, int newVersion); /** * This allows each node type to specify its inputs and outputs in order to build up a description * of the node type. This is done by calling the provided implementations of addInput and addOuput to * add the requisite inputs and outputs respectively. 
* * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object */ void(CARB_ABI* initializeType)(const NodeTypeObj& nodeType); /** * Adds an input for the node type. This is intended to be called from initializeType when specifying * the inputs for the node type. * * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @param[in] name Name of the input * @param[in] typeName Typename of the input * @param[in] required Whether or not the input is required * @param[in] defaultValuePtr Pointer to the location containing the default value of the input * @param[in] defaultElemCountPtr Number of elements in the default value -- nullptr if the input is a scalar */ void(CARB_ABI* addInput)(const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr); /** * Adds an output for the node type. This is intended to be called from initializeType when specifying * the outputs for the node type. * * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @param[in] name Name of the output * @param[in] typeName Typename of the output * @param[in] required Whether or not the output is required * @param[in] defaultValuePtr Pointer to the location containing the default value of the output * @param[in] defaultElemCountPtr Number of elements in the default value -- nullptr if the output is a scalar */ void(CARB_ABI* addOutput)(const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr); /** * Adds a state attribute for the node type. This is intended to be called from initializeType when specifying * the state information for the node type. State attributes differ from inputs and outputs in that they will * never leave the node (i.e. they cannot be connected). 
The node is responsible for ensuring that their contents * are consistent with the current evaluation. * * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @param[in] name Name of the input * @param[in] typeName Typename of the input * @param[in] required Whether or not the input is required * @param[in] defaultValuePtr Pointer to the location containing the default value of the input * @param[in] defaultElemCountPtr Number of elements in the default value -- nullptr if the input is a scalar */ void(CARB_ABI* addState)(const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr); /** * Adds an extended input (not one of the usual data types) for the node type, for example, a union type. * This is intended to be called from initializeType when specifying the inputs for the node type. * * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @param[in] name Name of the input * @param[in] typeInfo A buffer holding extra information about the type * @param[in] required Whether or not the input is required * @param[in] extendedAttrType Whether the attribute is an extended type, like a union or any */ void(CARB_ABI* addExtendedInput)(const NodeTypeObj& nodeType, const char* name, const char* typeInfo, bool required, ExtendedAttributeType extendedAttrType); /** * Adds an extended output (not one of the usual data types) for the node type, for example, a union type. * This is intended to be called from initializeType when specifying the inputs for the node type. 
* * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @param[in] name Name of the input * @param[in] typeInfo A buffer holding extra information about the type * @param[in] required Whether or not the input is required * @param[in] extendedAttrType Whether the attribute is an extended type, like a union or any */ void(CARB_ABI* addExtendedOutput)(const NodeTypeObj& nodeType, const char* name, const char* typeInfo, bool required, ExtendedAttributeType extendedAttrType); /** * Adds an extended state (not one of the usual data types) for the node type, for example, a union type. * This is intended to be called from initializeType when specifying the inputs for the node type. * * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @param[in] name Name of the input * @param[in] typeInfo A buffer holding extra information about the type * @param[in] required Whether or not the input is required * @param[in] extendedAttrType Whether the attribute is an extended type, like a union or any */ void(CARB_ABI* addExtendedState)(const NodeTypeObj& nodeType, const char* name, const char* typeInfo, bool required, ExtendedAttributeType extendedAttrType); /** * Return whether state information exists on the node. This is mainly used for determining how nodes can be * safely scheduled for execution. * * Internal state information is data maintained on the node that is unique to the node's evaluation instance. * As this is somewhat equivalent to member data it cannot be accessed in parallel on the same node. For example, * a node with internal state data cannot be scheduled as part of a parallel graph loop. * * @return True if this node type manages its own internal state information */ bool(CARB_ABI* hasState)(const NodeTypeObj& nodeType); /** * Sets a flag that indications state information exists on the node. This is mainly used for determining how * nodes can be safely scheduled for execution. 
* * @param[in] nodeHasState New value for the flag indicating if the node has state information */ void(CARB_ABI* setHasState)(const NodeTypeObj& nodeType, bool nodeHasState); /** * Returns the path to this node type object, so that nodes could be created under this path in the case * of compound nodes * * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @return path to the nodeTypeObj */ const char*(CARB_ABI* getPath)(const NodeTypeObj& nodeType); /** * Registers task functions that the node intends to schedule via Realm. This is called once for each node type * during the lifetime of the process after the Realm runtime has been initialized. * */ void(CARB_ABI* registerTasks)(); /** * Returns the set of all metadata on this node. * * The keyBuf and valueBuf arrays preallocated by the caller, and contain at least "getMetadataCount()" * entries in them. * All returned strings are owned by the node type and not to be destroyed. * The returned keyBuf and valueBuf must have exactly the same size with corresponding index values; that is * keyBuf[i] is the metadata name for the string in valueBuf[i]. 
* * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object * @param[out] keyBuf Buffer in which to put the list of metadata keys * @param[out] valueBuf Buffer in which to put the list of metadata values * @param[in] bufferSize the number of strings each of the two buffers is able to hold * @return Number of metadata items successfully populated */ size_t(CARB_ABI* getAllMetadata)(const NodeTypeObj& nodeType, const char** keyBuf, const char** valueBuf, size_t bufferSize); /** * Retrieves a metadata value from this node type * * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object * @param[in] key The name of the metadata to be retrieved * @return The value of the metadata, or nullptr if the named metadata was not set on this node type */ const char*(CARB_ABI* getMetadata)(const NodeTypeObj& nodeType, const char* key); /** * Returns the number of metadata entries on this node * * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object * @return the number of metadata key/value pairs on this node */ size_t(CARB_ABI* getMetadataCount)(const NodeTypeObj& nodeType); /** * Sets a metadata value on this node type. * * Certain metadata keywords have special meaning internally: * _extension_: The name of the extension from which the node type was loaded * uiName: The name of the node type in a longer, human-readable format * * Note: The main way for metadata to be set is through the .ogn format files. If you call this directly the * metadata will not persist across sessions. If you wish to define metadata outside of the .ogn file * the best method is to override the initializeType() method in your node definition and set it there. * * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object * @param[in] key The keyword, used as the name of the metadata * @param[in] value The value of the metadata. 
Metadata can be parsed later if non-string values are desired.
     */
    void(CARB_ABI* setMetadata)(const NodeTypeObj& nodeType, const char* key, const char* value);

    /**
     * Adds a sub-nodetype to an existing node type. This is used for Python and Compounds, where there
     * is a global type that houses all the type information for all the nodes registered in the system.
     * Each of those specific node types is a sub-nodetype to the bigger container node type
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] subNodeTypeName Name of the sub-nodeType
     * @param[in] subNodeType Reference to the NodeTypeObj struct representing the sub-nodetype object
     */
    void(CARB_ABI* addSubNodeType)(const NodeTypeObj& nodeType,
                                   const char* subNodeTypeName,
                                   const NodeTypeObj& subNodeType);

    /**
     * Retrieves a sub-nodetype to an existing node type. This is mainly used for Python nodes, where there is a
     * global PythonNode type that houses all the type information for all the python nodes registered in the
     * system. Each of those specific python node types is a sub-nodetype to the bigger python node type
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] subNodeTypeName Name of the sub-nodeType
     * @return The NodeTypeObj of sub-nodetype
     */
    NodeTypeObj(CARB_ABI* getSubNodeType)(const NodeTypeObj& nodeType, const char* subNodeTypeName);

    /**
     * Creates a new nodeType, but without the interface portion. It only contains the "handle" portion
     * of the NodeTypeObj - that is, a handle to an underlying object that can store the inputs/outputs of
     * a node type. This is currently mainly used for Python types, where the functions are stored not in
     * INodeType struct as other types, but elsewhere.
     *
     * @param[in] nodeTypeName The name of the new node type to be created.
     * @param[in] version The version of the new node type to be created.
* @return The NodeTypeObj created without the INodeType portion. */ NodeTypeObj(CARB_ABI* createNodeType)(const char* nodeTypeName, int version); /** * Gets the number of scheduled instances for the node type object so that an internal buffer of the appropriate * size can be created. The number of scheduled instances should match the number created in getScheduleNode. * * This function is called as the graph is evaluated and a list of upstream nodes is provided such that the * number of scheduled instances for the node can be dependent on the results of upstream evaluations. * * Currently, we only support either zero or one scheduled instances. * * Deprecated: Use action graph for conditional scheduling * * @param[in] context structure containing both the interface and underlying object * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] upstreamScheduleNodesBuf pointer to an array of upstream schedule nodes * @param[in] upstreamBufferSize size of the upstream schedule node buffer * @return The number of scheduled instances expected for the node */ size_t(CARB_ABI* getScheduleNodeCount)(const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize); /** * Gets/Creates the scheduled instances for the node type object. The number of scheduled instances should match the * return value of getScheduleNodeCount. * * This function is called as the graph is evaluated and a list of upstream nodes is provided such that the * scheduled instances for the node can be dependent on the results of upstream evaluations. * * Currently, we only support either zero or one scheduled instances. 
     *
     * Deprecated: Use action graph for conditional scheduling
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] upstreamScheduleNodesBuf pointer to an array of upstream schedule nodes
     * @param[in] upstreamBufferSize size of the upstream schedule node buffer
     * @param[out] scheduleNodesBuf pointer to an output array of schedule nodes for this node
     * @param[in] bufferSize size of scheduleNodesBuf array
     */
    void(CARB_ABI* getScheduleNodes)(const GraphContextObj& context,
                                     const NodeObj& node,
                                     const ScheduleNodeObj* upstreamScheduleNodesBuf,
                                     size_t upstreamBufferSize,
                                     ScheduleNodeObj* scheduleNodesBuf,
                                     size_t bufferSize);

    /**
     * This function is called when an extended type attribute has been resolved due to a connection change. The node
     * can then choose to call IAttribute::setResolvedType() on extended type attributes according to its
     * internal logic.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     */
    void(CARB_ABI* onConnectionTypeResolve)(const NodeObj& node);

    /**
     * Return whether this node type is a singleton (only 1 instance allowed per graph instance, and its subgraphs)
     *
     * A node type can be made to be singleton, in which case only 1 instance of that type of Node will be allowed
     * per graph.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return True if this node type is a singleton
     */
    bool(CARB_ABI* isSingleton)(const NodeTypeObj& nodeType);

    /**
     * Runs the inspector on the data in the given node type.
     *
     * @param[in] nodeTypeObj The node type on which the inspector runs
     * @param[in] inspector The inspector class
     * @return true if the inspection ran successfully, false if the inspection type is not supported
     */
    bool(CARB_ABI* inspect)(const NodeTypeObj& nodeTypeObj, inspect::IInspector* inspector);

    /**
     * Returns the number of subnode types on this node type.
* * @param[in] nodeType Reference to node type object for which subnode types are to be found * @return the number of subnode types owned by this node type */ size_t(CARB_ABI* getSubNodeTypeCount)(const NodeTypeObj& nodeType) ; /** * Returns the set of all subnode types of this node type. * * The subNodeTypeBuf array must be preallocated by the caller, and contain at least "bufferSize" entries. * The contents of both buffers are owned by the interface and should not be freed by the caller. * * @param[in] nodeType Reference to node type object for which subnode types are to be found * @param[out] subNodeTypeNameBuf Buffer in which to put the list of subnode type names * @param[out] subNodeTypeBuf Buffer in which to put the list of subnode type definitions * @param[in] bufferSize the number of strings each of the two buffers is able to hold * @return Number of subnode types successfully populated */ size_t(CARB_ABI* getAllSubNodeTypes)(const NodeTypeObj& nodeType, const char** subNodeTypeNameBuf, NodeTypeObj* subNodeTypeBuf, size_t bufferSize); /** * Removes a sub-nodetype from an existing node type. This is mainly used for Python nodes, where there is a * global PythonNode type that houses all the type information for all the python nodes registered in the * system. Each of those specific python node types is a sub-nodetype to the bigger python node type. When a * Python module is unloaded it should be removing any sub-nodetypes it has added. * * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object * @param[in] subNodeTypeName Name of the sub-nodeType to be removed * @return True if the removal succeeded, false if not (most likely because the sub-nodetype did not exist) */ bool(CARB_ABI* removeSubNodeType)(const NodeTypeObj& nodeType, const char* subNodeTypeName); /** * Get the currently defined scheduling hints for this node type. * Call the inline function unless you want to manage your own reference counts. 
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return Scheduling hints pertaining to scheduling for this node type.
     */
    ISchedulingHints*(CARB_ABI* getSchedulingHints)(const NodeTypeObj& nodeType);

    /**
     * Get a pointer to the currently defined scheduling hints for this node type.
     * This version lets you manage your own reference counts.
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return Scheduling hints pertaining to scheduling for this node type.
     */
    inline omni::core::ObjectPtr<ISchedulingHints> getSchedulingHintsPtr(const NodeTypeObj& nodeType) const
    {
        // steal() adopts the reference returned by the raw ABI call so the ObjectPtr releases it automatically.
        return omni::core::steal(getSchedulingHints(nodeType));
    }

    /**
     * Set the scheduling hints for this node type.
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @param[in] newSchedulingHints Scheduling hints pertaining to scheduling for this node type.
     */
    void(CARB_ABI* setSchedulingHints)(const NodeTypeObj& nodeType, ISchedulingHints* newSchedulingHints);

    /**
     * Returns the name of the node type
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the node type object
     * @return The name of the node type
     */
    const char*(CARB_ABI* getTypeName)(const NodeTypeObj& nodeType);

    /**
     * Destroys a node type object, including any sub-nodetype objects attached to it.
     *
     * @param[in] nodeType The node type to destroy
     * @return true if the node type is destroyed, false otherwise
     */
    bool(CARB_ABI* destroyNodeType)(const NodeTypeObj& nodeType);

    /**
     * Retrieves a sub-nodetype to an existing node type by its SdfPath. This is mainly used for compound
     * nodes whose stored subnode types have a backing Prim on the stage.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @param[in] pathToSubNodeType Stage path to the subnode type
     * @return The NodeTypeObj of sub-nodetype
     */
    NodeTypeObj(CARB_ABI* getSubNodeTypeByPath)(const NodeTypeObj& nodeType, const char* pathToSubNodeType);

    /**
     * Determines if the node type is a compound node type. A compound node type is a node type that references
     * an OmniGraph to define the computation.
     *
     * @param[in] nodeType Reference to the NodeTypeObj struct representing the current node type object
     * @return True if the node type is a compound node, false otherwise
     */
    bool(CARB_ABI* isCompoundNodeType)(const NodeTypeObj& nodeType);

    /**
     * Implementation of computeVectorized, see above GraphContext for functions to pull/push data with other nodes
     *
     * @param[in] context structure containing both the interface and underlying object
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] count The number of vectorized instances available for compute
     * @return Number of instances computed. NOTE(review): the ABI returns a size_t count, not the bool the
     *         previous doc claimed -- confirm the exact success semantics against the implementation.
     */
    size_t(CARB_ABI* computeVectorized)(const GraphContextObj& context, const NodeObj& node, size_t count);

    /**
     * Called whenever a graph instance is being removed from the stage.
* This is an opportunity to release any memory allocated specifically for this instance * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] instanceID The instanceID as would be returned by INode::getGraphInstanceID */ void (CARB_ABI* releaseInstance)(const NodeObj& node, NameToken instanceID); /** * Called by the framework when an OGN database previously created by the provided callback in INode::getOgnDatabase * needs to be destroyed * * @param[in] node Reference to the NodeObj struct representing the node object previously used to create the DB * @param[in] db An OGN database previously created by provided callback in INode::getOgnDatabase */ void(CARB_ABI* destroyDB)(const NodeObj& node, ogn::OmniGraphDatabase* db); /** * Called by the framework in order to notify an OGN database previously created by the provided callback in * INode::getOgnDatabase that a type resolution event has happened on an attribute * * @param[in] attrib Reference to the AttributeObj struct representing the attribute object that just (un)resolved its type * @param[in] db An OGN database previously created by provided callback in INode::getOgnDatabase */ void(CARB_ABI* notifyTypeResolution)(AttributeObj const& attrib, ogn::OmniGraphDatabase* db); /** * Called by the framework in order to notify an OGN database that some dynamic attributes been added or removed. 
* * @param[in] db An OGN database previously created by provided callback in INode::getOgnDatabase * @param[in] attrib The attribute that is created or removed * @param[in] isAttributeCreated If true, the attribute is newly created, otherwise it is going to be removed */ void(CARB_ABI* notifyDynamicAttributeChanged)(ogn::OmniGraphDatabase* db, AttributeObj const& attrib, bool isAttributeCreated); /** * Returns the ABI version against which the extension has been built */ carb::Version(CARB_ABI* getCarbABIVersion)(); ////////////////////////////////////////////////////////////////////////////////////// // REMINDER REMINDER REMINDER REMINDER REMINDER REMINDER ////////////////////////////////////////////////////////////////////////////////////// // Any change made in this ABI should come with a change in either (or both of): // - OmniGraphNode_ABI::populateNodeTypeInterface() // - NodeTypeRegistration::NodeTypeRegistration() }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(INodeType, getCarbABIVersion, 42)//Check the reminder above // ====================================================================== /** Interface to a single node in a graph */ struct INode { //! 
@private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::INode", 4, 7); /** * Returns the number of attributes on this node * * @param[in] node Reference to the NodeObj struct representing the node object * @return the number of attributes on this node */ size_t(CARB_ABI* getAttributeCount)(const NodeObj& node); /** * Returns the attributes on this node * * @param[in] node Reference to the NodeObj struct representing the node object * @param[out] attrsBuf Buffer to hold the return attribute objects * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold * @return true if successful, false otherwise */ bool(CARB_ABI* getAttributes)(const NodeObj& node, AttributeObj* attrsBuf, size_t bufferSize); /** * Retrieves whether the attribute in question exists or not * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] attrName Name of the attribute on the node * @return true if the attribute exists on the node, false if not */ bool(CARB_ABI* getAttributeExists)(const NodeObj& node, const char* attrName); /** @private Retired - do not use */ void (CARB_ABI* retired_1)(NodeObj&); /** * Retrieves an attribute that points to the attribute on the current node. * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] attrName Name of the attribute on the node * @return the attribute object requested */ AttributeObj(CARB_ABI* getAttribute)(const NodeObj& node, const char* attrName); /** * Retrieves an attribute that points to the attribute on the current node. 
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] attrName Name of the attribute on the node
     * @return the attribute object requested
     */
    AttributeObj(CARB_ABI* getAttributeByToken)(const NodeObj& node, NameToken attrName);

    /**
     * Retrieves the prim path to the node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return the prim path to the node
     */
    const char*(CARB_ABI* getPrimPath)(const NodeObj& node);

    /**
     * Retrieves the user data set on the node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return The user data stored on the node (see setUserData)
     */
    void*(CARB_ABI* getUserData)(const NodeObj& node);

    /**
     * Sets the user data on the node
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] userData Opaque pointer to store on the node
     */
    void(CARB_ABI* setUserData)(const NodeObj& node, void* userData);

    /** @private Retired - do not use */
    void (CARB_ABI* retired_2)(NodeObj&);

    /** @private Retired - do not use */
    bool (CARB_ABI* retired_3)(NodeObj&);

    /** @private Retired - do not use */
    void (CARB_ABI* retired_4)(NodeObj&);

    /**
     * Returns the graph that this node belongs to
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return GraphObj structure containing the graph this node belongs to
     */
    GraphObj(CARB_ABI* getGraph)(const NodeObj& node);

    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Deprecated - use getNodeTypeObj")
    INodeType(CARB_ABI* getNodeType)(const NodeObj&);

    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Deprecated - use getNodeTypeObj().getTypeName()")
    const char*(CARB_ABI* getPythonNodeType)(const NodeObj&);

    /**
     * Returns whether the node is disabled
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if disabled, false otherwise
     */
    bool(CARB_ABI* isDisabled)(const NodeObj& node);

    /**
     * Sets the disabled state on the node
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @param[in] disable
Whether to disable the node */ void(CARB_ABI* setDisabled)(const NodeObj& node, bool disable); /** * Lets the evaluation system know that the compute for this node is complete for this frame but not completed * overall. For example, once an animation is triggered, we want the animation to play until completion - on * a particular frame, the animation for that frame may be complete, but we're not done until the overall * animation is completed. This method is only meaningful for evaluators that implement standard flow graph * semantics. * * @param[in] node Reference to the NodeObj struct representing the node object * */ void(CARB_ABI* setComputeIncomplete)(const NodeObj& node); /** * Returns whether the node has an USD representation on the stage. * @param[in] node Reference to the NodeObj struct representing the node object * @return true if the node is backed by USD, false otherwise */ bool(CARB_ABI* isBackedByUsd)(const NodeObj& node); /** * Creates a dynamic attribute on the node. * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] name Name of the attribute * @param[in] attributeType The Type of the attribute. Extended attributes are Token types. 
* @param[in] value Pointer to the location containing the initial value of the attribute * @param[in] elemCount Number of elements in the attribute -- nullptr if the attribute is a scalar * @param[in] portType Whether this attribute is an input, output, or state * @param[in] extendedAttrType The type of extended attribute to create, if any (see definition of * ExtendedAttributeType) * @param[in] unionTypes In case the extendedAttrType is union, unionTypes is a comma separated * string that lists the allowable concrete types in the union * @return true if the attribute was created, false otherwise */ bool(CARB_ABI* createAttribute)(const NodeObj& node, const char* name, Type attributeType, const void* value, const size_t* elemCount, AttributePortType portType, ExtendedAttributeType extendedAttrType, const char* unionTypes); /** * Removes a dynamic attribute from the node * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] name Name of the attribute * @return true if the attribute was removed, false if the attribute was not found */ bool(CARB_ABI* removeAttribute)(const NodeObj& node, const char* name); /** @private Deprecated - do not use */ CARB_DEPRECATED("Use EF Framework to customize task generation by nodes") ScheduleNodeObj(CARB_ABI* createScheduleNode)(const NodeObj& node); /** * Registers a callback to be invoked when any attribute of the current node is connected * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data. 
* The parameters are the attributes of this and the other node being connected, and a void* of user data * @return true for success, false for failure */ bool(CARB_ABI* registerConnectedCallback)(const NodeObj& node, ConnectionCallback connectionCallback); /** * Registers a callback to be invoked when any attribute of the current node is disconnected * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data. * The parameters are the attributes of this and the other node being disconnected, and a void* of user data * @return true for success, false for failure */ bool(CARB_ABI* registerDisconnectedCallback)(const NodeObj& node, ConnectionCallback connectionCallback); /** * Deregisters the callback to be invoked when any attribute of the current node is connected * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data. */ void(CARB_ABI* deregisterConnectedCallback)(const NodeObj& node, ConnectionCallback connectionCallback); /** * Deregisters the callback to be invoked when any attribute of the current node is disconnected * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] connectionCallback The struct containing the callback to trigger, and a piece of user data. */ void(CARB_ABI* deregisterDisconnectedCallback)(const NodeObj& node, ConnectionCallback connectionCallback); /** * When we are using dynamic scheduling (where the exact amount of work is not known upfront), a node may * try to suppress downstream nodes from executing based on runtime execution information. To do this, * it needs to turn on dynamic downstream control. This function returns whether the node is participating * in this scheme. 
A good use case for this feature is where we have a lot of prim nodes ticking * unnecessarily, consuming cycles. We can turn them off using this feature with information available * at compute time. * * @param[in] node Reference to the NodeObj struct representing the node object * @return Whether this node is setup to participate in dynamic scheduling by dynamically controlling the * scheduling of downstream nodes. */ bool(CARB_ABI* getDynamicDownstreamControl)(const NodeObj& node); /** * When we are using dynamic scheduling (where the exact amount of work is not known upfront), nodes may * try to suppress downstream nodes from executing based on runtime execution information. To do this, * it needs to turn on dynamic downstream control. This function sets whether the node is participating * in this scheme. You need to call this function on the upstream node that is suppressing downstream * activity. * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] value Whether to turn the flag on or off on the node * @return Whether this node is setup to participate in dynamic scheduling by dynamically controlling the * scheduling of downstream nodes. */ void(CARB_ABI* setDynamicDownstreamControl)(const NodeObj& node, bool value); /** * Returns the NodeTypeObj structure associated with this node. * * @param[in] node Reference to the NodeObj struct representing the node object * @return NodeTypeObj encapsulating the node type from which this node was created */ NodeTypeObj(CARB_ABI* getNodeTypeObj)(const NodeObj& node); /** * Resolves attribute types given a set of attributes which are fully type coupled. * For example if node 'Increment' has one input attribute 'a' and one output attribute 'b' * and the types of 'a' and 'b' should always match. If the input is resolved then this function will * resolve the output to the same type. * It will also take into consideration available conversions on the input side. 
* The type of the first (resolved) provided attribute will be used to resolve others or select appropriate conversions * * Note that input attribute types are never inferred from output attribute types. * * This function should only be called from the INodeType function `onConnectionTypeResolve` * * @param[in] node Reference to the NodeObj struct representing the node object * @param[out] attrsBuf Buffer that holds the attributes to be resolved as a coupled group * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold * @return true if successful, false otherwise, usually due to mismatched or missing resolved types */ bool(CARB_ABI* resolveCoupledAttributes)(const NodeObj& node, AttributeObj* attrsBuf, size_t bufferSize); /** * Resolves attribute types given a set of attributes, that can have differing tuple counts and/or array depth, * and differing but convertible base data type. * The three input buffers are tied together, holding the attribute, the tuple * count, and the array depth of the types to be coupled. * This function will solve base type conversion by targeting the first provided type in the list, * for all other ones that require it. * * For example if node 'makeTuple2' has two input attributes 'a' and 'b' and one output 'c' and we want to resolve * any float connection to the types 'a':float, 'b':float, 'c':float[2] (convertible base types and different tuple counts) * then the input buffers would contain: * attrsBuf = [a, b, c] * tuplesBuf = [1, 1, 2] * arrayDepthsBuf = [0, 0, 0] * rolesBuf = [AttributeRole::eNone, AttributeRole::eNone, AttributeRole::eNone] * * This is worth noting that 'b' could be of any type convertible to float. But since the first provided * attribute is 'a', the type of 'a' will be used to propagate the type resolution. * * Note that input attribute types are never inferred from output attribute types. 
* * This function should only be called from the INodeType function `onConnectionTypeResolve` * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] attrsBuf Buffer that holds the attributes to be resolved as a coupled group * @param[in] tuplesBuf Buffer that holds the tuple count desired for each corresponding attribute. Any value * of kUninitializedTypeCount indicates the found tuple count is to be used when resolving. * @param[in] arrayDepthsBuf Buffer that holds the array depth desired for each corresponding attribute. Any value * of kUninitializedTypeCount indicates the found array depth is to be used when resolving. * @param[in] rolesBuf Buffer that holds the role desired for each corresponding attribute. Any value of * AttributeRole::eUnknown indicates the found role is to be used when resolving. * @param[in] bufferSize the number of AttributeObj structures the buffer is able to hold * @return true if successful, false otherwise, usually due to mismatched or missing resolved types */ bool(CARB_ABI* resolvePartiallyCoupledAttributes)(const NodeObj& node, const AttributeObj* attrsBuf, const uint8_t* tuplesBuf, const uint8_t* arrayDepthsBuf, const AttributeRole* rolesBuf, size_t bufferSize); /** @private Deprecated - do not use */ CARB_DEPRECATED("Use USD notice handling to monitor changes") bool(CARB_ABI* registerPathChangedCallback)(const NodeObj&, PathChangedCallback); /** @private Deprecated - do not use */ CARB_DEPRECATED("Use USD notice handling to monitor changes") void(CARB_ABI* deregisterPathChangedCallback)(const NodeObj& node, PathChangedCallback pathChangedCallback); /** * Returns the graph wrapped by this node, if any (as opposed to the graph this node belongs to) * @param[in] node Reference to the NodeObj struct representing the node object * @return GraphObj structure containing the graph wrapped by this node */ GraphObj(CARB_ABI* getWrappedGraph)(const NodeObj& node); /** * Returns the interface for the 
event stream generated by changes to this node.
     * @return The IEventStreamPtr that pumps events when node changes happen
     */
    carb::events::IEventStreamPtr(CARB_ABI* getEventStream)(const NodeObj& node);

    /**
     * Returns whether the NodeObj's handle is still valid
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if valid, false otherwise
     */
    bool (CARB_ABI* isValid)(const NodeObj& node);

    /**
     * Requests that the given node be computed at the next graph evaluation. This is for use with nodes that are
     * marked as being RequestDrivenCompute.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object
     * @return true if the request was successful, false if there was an error
     */
    bool(CARB_ABI* requestCompute)(const NodeObj& node);

    /**
     * Returns a NodeObj for the given NodeHandle if the referenced node is valid.
     *
     * @param[in] nodeHandle The NodeHandle for the compute node
     * @return The NodeObj representing the node. In case of failure, the node handle
     *         will be kInvalidNodeHandle
     */
    NodeObj(CARB_ABI* getNodeFromHandle)(const NodeHandle nodeHandle);

    /**
     * Returns the number of times compute() has been called on this node since the
     * counter last rolled over to 0.
     *
     * @param[in] node Reference to the NodeObj struct representing the node object.
     * @return The count.
     */
    size_t (CARB_ABI* getComputeCount)(const NodeObj& node);

    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Use increaseComputeCount instead")
    size_t (CARB_ABI* incrementComputeCount)(const NodeObj& node);

    /** @private Deprecated - do not use */
    CARB_DEPRECATED("Use logComputeMessageOnInstance instead")
    bool (CARB_ABI* logComputeMessage)(const NodeObj& node, ogn::Severity severity, const char* message);

    /**
     * Returns the number of compute messages of the given severity logged for the node.
     *
     * Compute messages are cleared at the start of each evaluation, so the count will be
     * only for the most recent evaluation.
* * @param[in] node Reference to the NodeObj struct representing the node object. * @param[in] severity Severity level of the messages. * @return The number of compute messages of the specified severity. */ size_t (CARB_ABI* getComputeMessageCount)(const NodeObj& node, ogn::Severity severity); /** * Returns a specified compute message of the given severity logged for the node. * * Use getComputeMessageCount() to determine the number of messages currently available. * * @param[in] node Reference to the NodeObj struct representing the node object. * @param[in] severity Severity level of the message to return. * @param[in] index Index of the message to return, starting at 0. * @return The requested message or nullptr if 'index' was out of range. */ const char* (CARB_ABI* getComputeMessage)(const NodeObj& node, ogn::Severity severity, size_t index); /** * Clears all compute messages logged for the node prior to its most recent evaluation. * Messages from its most recent evaluation are left untouched. * * @param[in] node Reference to the NodeObj struct representing the node object. * @return The number of messages that were deleted. */ size_t (CARB_ABI* clearOldComputeMessages)(const NodeObj& node); /** * Retrieve the OGN database for the current active instance of this node. The DB is maintained up to date internally by the framework. * If it does not exist, the provided ognCreate callback will be invoked to instantiate it * * @param[in] node Reference to the NodeObj struct representing the node object. * @param[in] ognCreate A callback that allocates and returns the DB associated with the current active instance of this node. Ownership is transferred to the node */ ogn::OmniGraphDatabase*(CARB_ABI* getOgnDatabase)(const NodeObj& node, CreateDbFunc ognCreate); /** * Returns whether this node is a compound node. A compound node is a node whose node type is defined by a subgraph instead * of a built-in type.
* * @param[in] node Reference to the NodeObj struct representing the node object. * @returns True if the provided node is a compound node, false otherwise */ bool(CARB_ABI* isCompoundNode)(const NodeObj& node); /** * Retrieve a persistent ID for the current active graph associated with this node, optionally offset * * @param[in] node Handle representing the node object. * @param[in] instanceOffset In vectorized context, the instance index relative to the currently targeted graph * @return A unique and process-persistent ID that represents the current active instance of this node */ NameToken (CARB_ABI* getGraphInstanceID)(NodeHandle node, InstanceIndex instanceOffset); /** * Returns a Stamp that is incremented any time an input or state attribute is changed outside of graph evaluation. * For example, interactively or through a script. * * @param[in] node Reference to the NodeObj struct representing the node object. * @returns The stamp value */ exec::unstable::Stamp(CARB_ABI* getAttributeChangeStamp)(const NodeObj& node); /** * Returns a handle to the associated sub-graph, if the given node is a compound node. * * @param[in] node Handle representing the node object. * @return The GraphObj representing the graph. In case of failure, the graph handle * will be kInvalidGraphHandle. */ GraphObj (CARB_ABI* getCompoundGraphInstance)(const NodeObj& node); /** * Query all the node and context handles that the provided node emulates when used in an auto instancing scenario. * * @param[in] node Handle representing the "master" node object, the one that handles the execution.
* @param[out] graphContexts A pointer reference that will be set to an array of all the emulated auto-instanced graph contexts * @param[out] nodeObjects A pointer reference that will be set to an array of all the emulated auto-instanced nodes * @return The number of elements in the returned arrays, 1 if there is no auto instancing associated with the provided node (itself) */ size_t(CARB_ABI* getAutoInstances)(const NodeObj& node, GraphContextObj const*& graphContexts, NodeObj const*& nodeObjects); /** @private Retired - do not use */ bool (CARB_ABI* retired_5)(const NodeObj& nodeObj); /** * Logs a compute message of a given severity for the node. * * This method is intended to be used from within the compute() method of a * node to alert the user to any problems or issues with the node's most recent * evaluation. They are accumulated until the start of the next compute, * at which point they are cleared. * * If duplicate messages are logged, with the same severity level, only one is * stored. * * @param[in] node Reference to the NodeObj struct representing the node object. * @param[in] inst In vectorized context, the instance index relative to the currently targeted graph * @param[in] severity Severity level of the message. * @param[in] message The message. * @return Returns true if the message has already been logged, false otherwise. */ bool(CARB_ABI* logComputeMessageOnInstance)(const NodeObj& node, InstanceIndex inst, ogn::Severity severity, const char* message); /** * Increase the node's compute counter by the provided amount. * * This method is provided primarily for debugging and experimental uses and * should not normally be used by end-users. * * @param[in] node Reference to the NodeObj struct representing the node object * @param[in] count the number to be added to the compute count. * @return The new count.
*/ size_t(CARB_ABI* increaseComputeCount)(const NodeObj& node, size_t count); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(INode, increaseComputeCount, 52) /** * Defines the node event types. */ enum class INodeEvent { eCreateAttribute, //!< Dynamic attribute added to a node eRemoveAttribute, //!< Dynamic attribute removed from a node eAttributeTypeResolve //!< Extended-type attribute resolution has changed }; /** * Defines the graph event types */ enum class IGraphEvent { eCreateVariable, ///< Variable has been added to the graph eRemoveVariable, ///< Variable has been removed from the graph eClosing, ///< Stage is closing eComputeRequested, ///< INode::requestCompute was called on a contained node eNodeAttributeChange,///< An input or state attribute changed outside of graph evaluation eVariableTypeChange ///< A variable in the graph had its type changed }; // ====================================================================== /** Interface to an OmniGraph, several of which may be present in a scene */ struct IGraph { //! 
@private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IGraph", 3, 13); /** * Returns the number of nodes in the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The number of nodes in the graph */ size_t(CARB_ABI* getNodeCount)(const GraphObj& graphObj); /** * Get the nodes in the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[out] nodesBuf Buffer to hold the return NodeObjs * @param[in] bufferSize the number of NodeObj structures the buffer is able to hold * @return true on success, false on failure */ bool(CARB_ABI* getNodes)(const GraphObj& graphObj, NodeObj* nodesBuf, size_t bufferSize); /** * Returns the number of subgraphs in the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The number of subgraphs in the graph */ size_t(CARB_ABI* getSubgraphCount)(const GraphObj& graphObj); /** * Get the subgraphs in the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[out] graphsBuf Buffer to hold the return GraphObjs * @param[in] bufferSize the number of GraphObjs structures the buffer is able to hold * @return true on success, false on failure */ bool(CARB_ABI* getSubgraphs)(const GraphObj& graphObj, GraphObj* graphsBuf, size_t bufferSize); /** * Get a particular subgraph in the graph given its path * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] subgraphPath the path to the subgraph in question * @return The GraphObj representing the graph. 
In case of failure, the graph handle * will be kInvalidGraphHandle */ GraphObj(CARB_ABI* getSubgraph)(const GraphObj& graphObj, const char* subgraphPath); /** * Returns the path to the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The path to the graph (may be empty) */ const char*(CARB_ABI* getPathToGraph)(const GraphObj& graphObj); /** * Returns whether the current graph is disabled * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return true if the current graph is disabled false otherwise */ bool(CARB_ABI* isDisabled)(const GraphObj& graphObj); /** * Sets the disabled state of the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] disable Whether or not to disable the current graph */ void(CARB_ABI* setDisabled)(const GraphObj& graphObj, bool disable); /** * Get the default graph context associated with the graph. Each graph has a default * context - it's usually one that gives you evaluation of the graph on the current * time, for example. * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The GraphContextObj representing the default graph context associated with * the node. */ GraphContextObj(CARB_ABI* getDefaultGraphContext)(const GraphObj& graphObj); /** * Get a particular node in the graph given its path * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] nodePath the path to the node in question * @return The NodeObj representing the node. 
In case of failure, the node handle * will be kInvalidNodeHandle */ NodeObj(CARB_ABI* getNode)(const GraphObj& graphObj, const char* nodePath); /** * Create a node in the graph at a given path with a given type * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] nodePath the path to where the node will be added * @param[in] nodeType the type name of the node to add * @param[in] createUsd Whether to create USD backing for the node being created * @return The NodeObj representing the node. In case of failure, the node handle * will be kInvalidNodeHandle */ NodeObj(CARB_ABI* createNode)(GraphObj& graphObj, const char* nodePath, const char* nodeType, bool createUsd); /** * Destroy the node in the graph at a given path * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] nodePath the path of the node to destroy * @param[in] destroyUsd Whether to destroy USD backing for the node being destroyed * @return True, if the node was successfully destroyed. False otherwise. */ bool(CARB_ABI* destroyNode)(GraphObj& graphObj, const char* nodePath, bool destroyUsd); /** * Rename the node in the graph at a given path * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] oldPath the path of the node to rename * @param[in] newPath the new path of the node * @return True, if the node was successfully renamed. False otherwise.
*/ bool(CARB_ABI* renameNode)(GraphObj& graphObj, const char* oldPath, const char* newPath); /** * Create a subgraph in the graph at a given path with a given type * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] subgraphPath the path to where the subgraph will be added * @param[in] evaluator the evaluator type to use for the subgraph being created * @param[in] createUsd whether to create USD backing for the subgraph being created * @return The GraphObj representing the subgraph. In case of failure, the graph handle * will be kInvalidGraphHandle */ GraphObj(CARB_ABI* createSubgraph)(GraphObj& graphObj, const char* subgraphPath, const char* evaluator, bool createUsd); /** * reload the graph settings based on ComputeGraphSettings * * @param[in] graphObj Reference to the GraphObj struct representing the graph object */ void(CARB_ABI* reloadGraphSettings)(GraphObj& graphObj); /** * Rename the subgraph in the graph at a given path * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] oldPath the path of the subgraph to rename * @param[in] newPath the new path of the subgraph * @return True, if the subgraph was successfully renamed. False otherwise. */ bool(CARB_ABI* renameSubgraph)(GraphObj& graphObj, const char* oldPath, const char* newPath); /** * Notifies the graph that some attribute has changed on a node. This will trigger * updates from things like lazy evaluation graphs, for example.
* * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] attrObj Reference to the AttributeObj struct representing the attribute object */ void(CARB_ABI* onAttributeChanged)(const GraphObj& graphObj, const AttributeObj& attrObj); /** @private Deprecated - do not use */ CARB_DEPRECATED("Will be removed in next major version, please use IGraph::inspect instead") void(CARB_ABI* printDiagnostic)(const GraphObj&); /** * Register a callback to be invoked when a legacy file with an older file format version * is detected. This callback is invoked before stage attach happens. * * @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to * be passed back to the callback when invoked. */ void (CARB_ABI* registerPreLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade); /** * Register a callback to be invoked when a legacy file with an older file format version * is detected. This callback is invoked after stage attach happens. * * @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to * be passed back to the callback when invoked. */ void (CARB_ABI* registerPostLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade); /** * Deregisters the pre-load callback to be invoked when a legacy file with an older * file format version is detected. * * @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to * be passed back to the callback when invoked. * */ void (CARB_ABI* deregisterPreLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade); /** * Deregisters the post-load callback to be invoked when a legacy file with an older * file format version is detected. * * @param[in] fileFormatUpgrade - structure containing the callback and a piece of user data to * be passed back to the callback when invoked.
*/ void (CARB_ABI* deregisterPostLoadFileFormatUpgradeCallback)(FileFormatUpgrade fileFormatUpgrade); /** * Returns whether USD notice handling is enabled for the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return true if the current graph has USD notice handling enabled */ bool(CARB_ABI* usdNoticeHandlingEnabled)(const GraphObj& graphObj); /** * Sets whether the USD notice handling is enabled for this graph. This is an * advanced operation - do not use this method unless you know what you're doing. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] enable Whether or not to enable USD notice handling for this graph */ void(CARB_ABI* setUSDNoticeHandlingEnabled)(const GraphObj& graphObj, bool enable); /** * Runs the inspector on the data in the given graph. * * @param[in] graphObj The graph on which the inspector runs * @param[in] inspector The inspector class * @return true if the inspection ran successfully, false if the inspection type is not supported */ bool(CARB_ABI* inspect)(const GraphObj& graphObj, inspect::IInspector* inspector); /** * Create a new graph, wrapped as a node, at the given location. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] nodeName the name of the node that wraps the graph * @param[in] graphPath the path to where the graph that the node will wrap will be added * @param[in] evaluatorName the evaluator to use for the new graph * @param[in] isGlobalGraph Whether this graph is a top level global graph * @param[in] backByUSD Whether to back this graph by USD * @param[in] backingType What kind of FC backs this graph * @param[in] graphPipelineStage What pipeline stage this graph occupies * @return The NodeObj representing the node that wraps the graph. The newly created graph can be * retrieved from the node. 
In case of failure, the NodeObj will contain kInvalidNodeHandle */ NodeObj(CARB_ABI* createGraphAsNode)(GraphObj& graphObj, const char* nodeName, const char* graphPath, const char* evaluatorName, bool isGlobalGraph, bool backByUSD, GraphBackingType backingType, GraphPipelineStage graphPipelineStage); /** * Reloads the graph from the stage by deleting the current graph and creating a new one to attach * to the stage. Note: this is a complete reset - any stateful nodes will lose their state. * * @param[in] graphObj The graph to reload */ void(CARB_ABI* reloadFromStage)(const GraphObj& graphObj); /** * Returns the Fabric backing type for this graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return Fabric backing type. See GraphBackingType definition for details. */ GraphBackingType(CARB_ABI* getGraphBackingType)(const GraphObj& graphObj); /** * Returns the graph pipeline stage (eg. simulation, pre-render, post-render) for this graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return Graph pipeline stage. See GraphPipelineStage definition for details. */ GraphPipelineStage(CARB_ABI* getPipelineStage)(const GraphObj& graphObj); /** * Returns whether the GraphObj's handle is still valid * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return true if valid, false otherwise */ bool(CARB_ABI* isValid)(const GraphObj& graphObj); /** * Returns the FabricId for this Graph. This id can be used with the Fabric API, but should only * be required for advanced use cases. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[out] fabricId The output user id * @return true on success, false on failure */ bool(CARB_ABI* getFabricId)(const GraphObj& graphObj, omni::fabric::FabricId& fabricId); /** * Warning: this is an advanced function - do not call unless you know exactly what is involved here. 
* * This allows a graph to be "ticked" independently of the normal graph evaluation process, where * graphs are ordered into different pipeline stages (simulation, pre-render, post-render), and all * graphs of each stage are evaluated according to the order described in the orchestration graph in * each stage. * * Instead, this function allows graphs in the custom pipeline stage to be evaluated on their own. * If this function is being called from a different thread, it is the caller's responsibility to ensure * that the Fabric backing the graph is independent (stage without history), otherwise data races * will ensue. If this function is being called from the simulation / main thread, then the Fabric * backing the graph may be a shared one. * * It is illegal to call this function for any graph other than those set up with the custom pipeline * stage. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object */ void (CARB_ABI* evaluate)(const GraphObj& graphObj); /** * Returns the parent of this graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The parent graph (may be invalid) */ GraphObj(CARB_ABI* getParentGraph)(const GraphObj& graphObj); /** * Returns whether the path points to a top level graph prim. * * @param[in] path Path to the prim in question * @return True if the path points to a prim that is a top level graph */ bool(CARB_ABI* isGlobalGraphPrim)(const char* path); /** * Registers a callback to be invoked at the end of graph evaluation for all of the nodes * whose error status changed during that evaluation. * * This is provided primarily for UI purposes. E.g. highlighting nodes with compute errors * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] errorStatusChangeCallback - structure containing the callback and a piece of user data to * be passed back to the callback when invoked.
*/ void (CARB_ABI* registerErrorStatusChangeCallback)(const GraphObj& graphObj, ErrorStatusChangeCallback errorStatusChangeCallback); /** * Deregisters a callback to be invoked at the end of graph evaluation for all of the nodes * whose error status changed during that evaluation. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] errorStatusChangeCallback - structure containing the callback and a piece of user data to * be passed back to the callback when invoked. * */ void (CARB_ABI* deregisterErrorStatusChangeCallback)(const GraphObj& graphObj, ErrorStatusChangeCallback errorStatusChangeCallback); /** @private deprecated - do not use */ CARB_DEPRECATED("Will be retired in next major version: this call is not necessary anymore and can be safely removed") void(CARB_ABI* nodeErrorStatusChanged)(const GraphObj& graphObj, const NodeObj& nodeObj); /** * Returns the number of variables in the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The number of variables in the graph. */ size_t(CARB_ABI* getVariableCount)(const GraphObj& graphObj); /** * Get the variables defined in the graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[out] variableBuf Buffer to hold the returned IVariable objects * @param[in] bufferSize The number of IVariable objects the buffer is able to hold * @return true on success, false on failure */ bool(CARB_ABI* getVariables)(const GraphObj& graphObj, IVariablePtr* variableBuf, size_t bufferSize); /** * Create a new variable on the graph with the given name. The name must be unique * among variables on the graph, even if the type is different. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] name The name to give the variable. * @param[in] variableType the data type used to create the variable. 
* @return The newly created variable, or null if the variable could not be created. */ IVariablePtr(CARB_ABI* createVariable)(const GraphObj& graphObj, const char* name, Type variableType); /** * Removes the given variable from the graph. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] variable The variable to remove from the graph. * @return True if the variable was successfully removed, false otherwise. */ bool(CARB_ABI* removeVariable)(const GraphObj& graphObj, const IVariablePtr& variable); /** * Retrieves a variable with the given name. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] name The name of the variable to search for. * @return The variable with the given name on the graph, or null if the variable does * not exist. */ IVariablePtr(CARB_ABI* findVariable)(const GraphObj& graphObj, const char* name); /** * Change the pipeline stage (eg. simulation, pre-render, post-render) that this graph is in * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] newPipelineStage The new pipeline stage that this graph will be moved into */ void (CARB_ABI* changePipelineStage)(const GraphObj& graphObj, GraphPipelineStage newPipelineStage); /** * Returns the interface for the event stream generated by changes to this graph. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The IEventStreamPtr that pumps events when graph changes happen */ carb::events::IEventStreamPtr(CARB_ABI* getEventStream)(const GraphObj& graphObj); /** * Returns the evaluation mode of the graph. The evaluation mode determines how the graph * will be evaluated standalone or when referenced from an OmniGraphAPI component. 
* * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The evaluation mode of the graph */ GraphEvaluationMode(CARB_ABI* getEvaluationMode)(const GraphObj& graphObj); /** * Sets the evaluation mode of the graph. The evaluation mode determines if the graph * will be evaluated standalone or when referenced from an OmniGraphAPI component. * * @param[in] graphObj Reference to the graph object * @param[in] evaluationMode the evaluation mode of the graph to set */ void(CARB_ABI* setEvaluationMode)(const GraphObj& graphObj, GraphEvaluationMode evaluationMode); /** * Create a new graph, wrapped as a node, at the given location. * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @param[in] options Options relating to creating a graph as a node. * @return The NodeObj representing the node that wraps the graph. The newly created graph can be * retrieved from the node. In case of failure, the NodeObj will contain kInvalidNodeHandle */ NodeObj(CARB_ABI* createGraphAsNodeV2)(GraphObj& graphObj, const CreateGraphAsNodeOptions& options); /** * Returns the name of the evaluator for the specified graph * * @param[in] graphObj Reference to the GraphObj struct representing the graph object * @return The name of the evaluator, or the empty string if it isn't set. */ const char*(CARB_ABI* getEvaluatorName)(const GraphObj& graphObj); /** * Returns whether this graph is a compound graph instance. A compound graph is a subgraph that * is parented to a compound node * * @return True if this graph is a compound graph, false otherwise. */ bool(CARB_ABI* isCompoundGraph)(const GraphObj& graphObj); /** * Returns the number of instance currently allocated for this graph * * @return The number of instances registered in the graph, 0 if the graph is standalone */ size_t(CARB_ABI* getInstanceCount)(const GraphObj& graphObj); /** * Returns whether this graph is an auto instance. 
An auto instance is a graph that got merged as an instance * with all other similar graphs in the stage. * * @return True if this graph is an auto instance, false otherwise. */ bool(CARB_ABI* isAutoInstanced)(const GraphObj& graphObj); /** * Set whether or not this graph can be candidate for auto-instance merging (true by default) * * @return The old value of the allowed flag */ bool(CARB_ABI* setAutoInstancingAllowed)(const GraphObj& graphObj, bool allowed); /** * Returns the compound node for which this graph is the compound subgraph of. * * @return If this graph is a compound graph, the owning compound node. Otherwise, an invalid node is returned. */ NodeObj(CARB_ABI* getOwningCompoundNode)(const GraphObj& graphObj); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IGraph, getOwningCompoundNode, 52) using DataAccessFlags = uint32_t; //!< Data type for specifying read/write access abilities static constexpr DataAccessFlags kReadAndWrite = 0; //!< Data is accessible for both read and write static constexpr DataAccessFlags kReadOnly = 1; //!< Data is only accessible for reading static constexpr DataAccessFlags kWriteOnly = 2; //!< Data is only accessible for writing // ====================================================================== /** Use this interface to pull data for compute node, and also push data to compute graph/cache */ struct IGraphContext { //! 
@private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IGraphContext", 3, 7); /** * Returns the stage id the context is currently attached to * @param[in] context structure containing both the interface and underlying object * @return the USD stage id */ long int(CARB_ABI* getStageId)(const GraphContextObj& contextObj); //--------------------------------------------------------------------------------------------- // wrappers for fabric /** @private Retired - do not use */ void (CARB_ABI* retired_1)(); /** @private Retired - do not use */ void (CARB_ABI* retired_2)(); /** @private Deprecated - do not use */ size_t*(CARB_ABI* deprecated_12)(const GraphContextObj&, const AttributeObj&, DataAccessFlags); /** @private Deprecated - do not use */ size_t*(CARB_ABI* deprecated_13)(const GraphContextObj&, const AttributeObj&, DataAccessFlags); /** @private Retired - do not use */ void (CARB_ABI* retired_3)(); /** @private Retired - do not use */ void (CARB_ABI* retired_4)(); /** @private Retired - do not use */ void (CARB_ABI* retired_5)(); /** @private Retired - do not use */ void (CARB_ABI* retired_6)(); /** @private Retired - do not use */ void (CARB_ABI* retired_7)(); /** @private Retired - do not use */ void (CARB_ABI* retired_8)(); /** * Returns the graph associated with this context * @param[in] context structure containing both the interface and underlying object * @return GraphObj structure containing the graph */ GraphObj(CARB_ABI* getGraph)(const GraphContextObj& context); /** * Returns the time between last evaluation of the graph and "now" * @param[in] context structure containing both the interface and underlying object * @return the elapsed time */ float(CARB_ABI* getElapsedTime)(const GraphContextObj& contextObj); /** * Returns the global playback time * @param[in] context structure containing both the interface and underlying object * @return the global playback time in seconds */ float(CARB_ABI* getTime)(const GraphContextObj& 
contextObj); /** * Returns the time between last evaluation of the graph and "now", in NS. * Note this will only return valid values if the update loop is using the * void updateSimStep(int64_t timeNS, carb::tasking::Counter* counter, bool) interface * As of this writing, this is limited to the DS project * * @param[in] context structure containing both the interface and underlying object * @return the elapsed time in nano seconds */ int64_t(CARB_ABI* getElapsedTimeNS)(const GraphContextObj& contextObj); /** * Returns the global time in NS. * Note this will only return valid values if the update loop is using the * void updateSimStep(int64_t timeNS, carb::tasking::Counter* counter, bool) interface * As of this writing, this is limited to the DS project * * @param[in] context structure containing both the interface and underlying object * @return the global time in nano seconds */ int64_t(CARB_ABI* getTimeNS)(const GraphContextObj& contextObj); /** * Given an attribute, retrieves the default attribute value in system memory * * @param[in] context structure containing both the interface and underlying object * @param[in] attrObj The attribute object for which to retrieve the default value * @return the const void pointer to the data */ const void*(CARB_ABI* getDefault)(const GraphContextObj& context, const AttributeObj& attrObj); /** @private Deprecated - do not use */ ConstBundleHandle(CARB_ABI* deprecated_4)(const GraphContextObj&, NodeContextHandle, NameToken); /** @private Deprecated - do not use */ size_t (CARB_ABI* deprecated_5)(const GraphContextObj&, NodeContextHandle, NameToken); /** @private Deprecated - do not use */ void (CARB_ABI* deprecated_6)(const GraphContextObj&,NodeContextHandle,NameToken,ConstBundleHandle*); /** @private Deprecated - do not use */ BundleHandle (CARB_ABI* deprecated_3)(const GraphContextObj& , NodeContextHandle, NameToken); /** @private Deprecated - do not use */ void(CARB_ABI* deprecated_7)( ConstAttributeDataHandle*, const 
GraphContextObj&, NodeContextHandle, const NameToken*, size_t); /** @private Deprecated - do not use */ void(CARB_ABI* deprecated_8)(AttributeDataHandle*, const GraphContextObj&, NodeContextHandle, const NameToken*, size_t); /** * Retrieve the number of attributes that a given node has * * @param[in] contextObj: Structure containing both the interface and underlying object * @param[in] node The node to query the attribute count from * @return the number of attributes the queried node has */ size_t (CARB_ABI* getAttributesCount)(const GraphContextObj& contextObj, NodeContextHandle node); /** @private Deprecated - do not use */ void(CARB_ABI* deprecated_9)(ConstAttributeDataHandle*, const GraphContextObj&, NodeContextHandle, size_t); /** @private Deprecated - do not use */ void(CARB_ABI* deprecated_10)(AttributeDataHandle*, const GraphContextObj&, NodeContextHandle, size_t); /** @private Deprecated - do not use */ BundleHandle(CARB_ABI* deprecated_11)(const GraphContextObj&, NodeContextHandle, NameToken, ConstBundleHandle); /** @private Deprecated - do not use */ [[deprecated("Use copyBundleContentsInto")]] void (CARB_ABI* copyPrimContentsInto)(const GraphContextObj& contextObj, BundleHandle destBundleHandle, ConstBundleHandle sourceBundleHandle); /** @private Retired - do not use */ void(CARB_ABI* retired_10)(); /** @private Deprecated - do not use */ [[deprecated("Use clearBundleContents")]] void(CARB_ABI* clearPrimContents)(const GraphContextObj& contextObj, BundleHandle bundleHandle); /** @private Retired - do not use */ void(CARB_ABI* retired_9)(); /** * Returns the global playback time in frames * @param[in] context structure containing both the interface and underlying object * @return the global playback time in frames */ float(CARB_ABI* getFrame)(const GraphContextObj& contextObj); /** * Returns the state of global playback * @param[in] context structure containing both the interface and underlying object * @return true if playback has started, false is 
playback is stopped */ bool(CARB_ABI* getIsPlaying)(const GraphContextObj& contextObj); /** * Runs the inspector on the data in the given graph context. * * @param[in] contextObj The graph context on which the inspector runs * @param[in] inspector The inspector class * @return true if the inspection ran successfully, false if the inspection type is not supported */ bool(CARB_ABI* inspect)(const GraphContextObj& contextObj, inspect::IInspector* inspector); /** * Returns the time since the App started * @param[in] context structure containing both the interface and underlying object * @return the global time since the app started in seconds */ double(CARB_ABI* getTimeSinceStart)(const GraphContextObj& contextObj); /** * Returns whether the graph context object is still valid or not * * @param[in] contextObj The context object for which to query * @return Whether the context is still valid */ bool(CARB_ABI* isValid)(const GraphContextObj& contextObj); /** @private Deprecated - do not use */ AttributeDataHandle (CARB_ABI* deprecated_0)(const GraphContextObj&, const IVariablePtr&); /** @private Deprecated - do not use */ ConstAttributeDataHandle(CARB_ABI* deprecated_1)(const GraphContextObj&, const IVariablePtr&); /** * Returns the accumulated total of elapsed times between rendered frames * @param[in] contextObj structure containing both the interface and underlying object * @return the accumulated total of elapsed times between rendered frames */ double(CARB_ABI* getAbsoluteSimTime)(const GraphContextObj& contextObj); /** @private Deprecated - do not use */ NameToken(CARB_ABI* deprecated_2)(const GraphContextObj&); /** Deprecated - do not use - removal scheduled for 106 **/ [[deprecated("Use registerForUSDWriteBacks")]] void(CARB_ABI* registerForUSDWriteBack)(const GraphContextObj& contextObj, BundleHandle bundle, NameToken attrib); /** * Given a variable and an instance path, returns a handle to access its data. 
* * @param[in] contextObj The context object used to find the variable data * @param[in] variable The variable to retrieve the data from * @param[in] Path to the prim holding an instance of this graph * * @returns An attribute data handle that can be used to access the variable data. * If the given prim does not contain an instance of the graph, the data handle * returned will be invalid. */ AttributeDataHandle(CARB_ABI* getVariableInstanceDataHandle)(const GraphContextObj& contextObj, const IVariablePtr& variable, const char* instancePrimPath); /** * Given a variable and an instance path, returns a constant handle to access its data as readonly. * * @param[in] contextObj The context object used to find the variable data * @param[in] variable The variable to retrieve the data from * @param[in] Path to the prim holding an instance of this graph * * @returns An constant attribute data handle that can be used to access the variable data. * If the given prim does not contain an instance of the graph, the data handle * returned will be invalid. */ ConstAttributeDataHandle(CARB_ABI* getVariableInstanceConstDataHandle)(const GraphContextObj& contextObj, const IVariablePtr& variable, const char* instancePrimPath); /** * Get the Prim path of the graph target. * * The graph target is defined as the parent Prim of the compute graph, except during * instancing - where OmniGraph executes a graph once for each Prim. In the case * of instancing, the graph target will change at each execution to be the path of the instance. * If this is called outside of graph execution, the path of the graph Prim is returned, or an empty * token if the graph does not have a Prim associated with it. * * @param[in] contextObj The context object used to find the data. * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph * * @returns a token representing the path of the graph target primitive. 
*/ NameToken const&(CARB_ABI* getGraphTarget)(const GraphContextObj& contextObj, InstanceIndex instanceIndex); /** * Given a variable, returns a handle to access its data. * * @param[in] contextObj The context object used to find the variable data * @param[in] variable The variable to retrieve the data from * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph * * @returns An attribute data handle that can be used to access the variable data. */ AttributeDataHandle(CARB_ABI* getVariableDataHandle)(const GraphContextObj& contextObj, const IVariablePtr& variable, InstanceIndex instanceIndex); /** * Given a variable, returns a constant handle to access its data as readonly. * * @param[in] contextObj The context object used to find the variable data * @param[in] variable The variable to retrieve the data from * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph * * @returns A constant attribute data handle that can be used to access the variable data. 
*/ ConstAttributeDataHandle(CARB_ABI* getVariableConstDataHandle)(const GraphContextObj& contextObj, const IVariablePtr& variable, InstanceIndex instanceIndex); /** @private Deprecated - do not use */ [[deprecated("Use getOutputBundle!")]] BundleHandle(CARB_ABI* getOutputPrim)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken bundleName, InstanceIndex instanceIndex); /** @private Deprecated - do not use */ [[deprecated("Use getInputTarget!")]] ConstBundleHandle(CARB_ABI* getInputPrim)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken bundleName, InstanceIndex instanceIndex); /** @private Deprecated - do not use */ [[deprecated("Use getInputTargetCount!")]] size_t(CARB_ABI* getInputPrimCount)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken relName, InstanceIndex instanceIndex); /** @private Deprecated - do not use */ [[deprecated("Use getInputTargets!")]] void(CARB_ABI* getInputPrims)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken relName, ConstBundleHandle* bundleHandles, InstanceIndex instanceIndex); /** * Requests some input attributes of the specified compute node in the specified context. * * If no input attribute with the given name exists on the node, the returned handle * will return false from its isValid() function. 
* * * @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] attrNames An array of names of attributes on the given node to retrieve a data handle for * @param[in] count The size of the provided arrays (attrName and attrsOut) * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph */ void(CARB_ABI* getAttributesByNameR)(ConstAttributeDataHandle* attrsOut, const GraphContextObj& contextObj, NodeContextHandle node, const NameToken* attrNames, size_t count, InstanceIndex instanceIndex); /** * Requests some output attributes of the specified compute node in the specified context. * * If no input attribute with the given name exists on the node, the returned handle * will return false from its isValid() function. * * * @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] attrNames An array of names of attributes on the given node to retrieve a data handle for * @param[in] count The size of the provided arrays (attrName and attrsOut) * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph */ void(CARB_ABI* getAttributesByNameW)(AttributeDataHandle* attrsOut, const GraphContextObj& contextObj, NodeContextHandle node, const NameToken* attrNames, size_t count, InstanceIndex instanceIndex); /** * Requests all input attributes of the specified compute node in the specified context. 
* * * @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] count The size of the attrsOut array * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph */ void(CARB_ABI* getAttributesR)(ConstAttributeDataHandle* attrsOut, const GraphContextObj& contextObj, NodeContextHandle node, size_t count, InstanceIndex instanceIndex); /** * Requests all output attributes of the specified compute node in the specified context. * * * @param[in/out] attrsOut A pre-allocated array that will be filled with the requested handles * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] count The size of the attrsOut array * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph */ void(CARB_ABI* getAttributesW)(AttributeDataHandle* attrsOut, const GraphContextObj& contextObj, NodeContextHandle node, size_t count, InstanceIndex instanceIndex); /** @private Deprecated - do not use */ [[deprecated("use copyBundleContentsIntoOutput")]] BundleHandle(CARB_ABI* copyPrimContentsIntoOutput)( const GraphContextObj& contextObj, NodeContextHandle node, NameToken outBundleName, ConstBundleHandle sourceBundleHandle, InstanceIndex instanceIndex); /** * Given an attribute of array type, return a pointer to the number of elements in the array * If flags is kWriteOnly or kReadAndWrite then writing to the dereferenced pointer resizes the * array the next time it is accessed on CPU or GPU * * @param[in] context structure containing both the interface and underlying object * @param[in] attrObj The attribute object for which to retrieve the array size * @param[in] flags The data access flags indicating whether the array size is to be R, W, or RW 
* @return the pointer to the array size */ size_t*(CARB_ABI* getArrayAttributeSize)(const GraphContextObj& context, const AttributeObj& attrObj, DataAccessFlags flags, InstanceIndex instanceIndex); /** * Given an attribute of array type, return a GPU pointer to the number of elements in the array * Flags must be kReadOnly, because currently we don't allow GPU code to resize GPU arrays * This restriction may be relaxed in the future * If you want to resize a GPU array you can do it on the CPU using getArrayAttributeSize * * @param[in] context structure containing both the interface and underlying object * @param[in] attrObj The attribute object for which to retrieve the array size * @param[in] flags The data access flags indicating whether the array size is to be R, W, or RW * @return the pointer to the array size */ size_t*(CARB_ABI* getArrayAttributeSizeGPU)(const GraphContextObj& context, const AttributeObj& attrObj, DataAccessFlags flags, InstanceIndex instanceIndex); /** * Requests an output bundle of the specified compute node in the specified context. * * If no output bundle with the given name exists on the node, the returned handle * will return false from its isValid() function. * * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] outputName The name of the attribute on the given node that represent the output bundle * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph * * @returns A handle to the requested prim */ BundleHandle(CARB_ABI* getOutputBundle)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken outputName, InstanceIndex instanceIndex); /** * Requests an input target path of the specified compute node in the specified context. * * If no input target path with the given name exists on the node, the returned path * will return uninitialized path. 
* * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] inputName The name of the attribute on the given node that represent the input bundle * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph * * @returns A path to the requested target */ omni::fabric::PathC(CARB_ABI* getInputTarget)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken inputName, InstanceIndex instanceIndex); /** * Requests the number of input targets in the relationship with the given name on the * specified compute node in the specified context. * * This returns 0 if no relationship with the given name exists on the node * or the relationship is empty. * * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] inputName The name of the relationship attribute on the given node that represent the input targets * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph * * @returns The number of input targets under the provided relationship */ size_t(CARB_ABI* getInputTargetCount)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken inputName, InstanceIndex instanceIndex); /** * Fills in the provided targets array with paths to all of the input targets(bundles or primitives) * in the relationship with the given name on the specified compute node in the specified context. * * The caller *must* first call getInputTargetCount to ensure that the targets array will be * sufficiently large to receive all of the paths. 
* * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] inputName The name of the relationship attribute on the given node that represent the input targets * @param[in,out] targets A pre-sized array that will be filled with the requested paths * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph */ void(CARB_ABI* getInputTargets)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken inputName, omni::fabric::PathC* targets, InstanceIndex instanceIndex); /** * Creates copies of all attributes from sourceBundleHandle in the output bundle * with the specified name on the specified node. * * This function is equivalent to: * BundleHandle retBundle = iContext.getOutputBundle(context, node, outBundleName); * iContext.copyBundleContentsInto(context, retBundle, sourceBundleHandle); * return retBundle; * but with a single function pointer call, instead of two. * * @param[in] contextObj The context object used to find the data * @param[in] node The node object to retrieve the data from * @param[in] outBundleName The name of the attribute on the given node that represent the output bundle to write to * @param[in] sourceBundleHandle A handle to a bundle to copy content from * @param[in] instanceIndex In vectorized context, the instance index relative to the currently targeted graph * * @returns An handle to the targeted output bundle */ BundleHandle(CARB_ABI* copyBundleContentsIntoOutput)(const GraphContextObj& contextObj, NodeContextHandle node, NameToken outBundleName, ConstBundleHandle sourceBundleHandle, InstanceIndex instanceIndex); /** * Creates copies of all attributes from sourceBundleHandle in the bundle corresponding with destBundleHandle. 
* * @param contextObj The context object used to find the data * @param destBundleHandle A handle to a bundle to copy content to * @param sourceBundleHandle A handle to a bundle to copy content from */ void(CARB_ABI* copyBundleContentsInto)(const GraphContextObj& contextObj, BundleHandle destBundleHandle, ConstBundleHandle sourceBundleHandle); /** * Removes all attributes from the prim corresponding with bundleHandle, * but keeps the bundle itself. * * @param contextObj The context object used to find the data * @param bundleHandle A handle to a bundle to clear content from */ void(CARB_ABI* clearBundleContents)(const GraphContextObj& contextObj, BundleHandle bundleHandle); /** * Register provided attributes for USD write back at the end of the current frame * * * @param[in] contextObj The context object used to find the data. * @param[in] handles An array of handles of the attributes that should be written back to usd * @param[in] count The size of the provided array */ void(CARB_ABI* registerForUSDWriteBacks)(const GraphContextObj& contextObj, AttributeDataHandle const* handles, size_t count); /** * Register provided attributes for USD write back at the end of the current frame to a specific layer * * * @param[in] contextObj The context object used to find the data. 
* @param[in] handles An array of handles of the attributes that should be written back to usd * @param[in] count The size of the provided array * @param[in] layerIdentifier The unique name for the layer to be written */ void(CARB_ABI* registerForUSDWriteBacksToLayer)(const GraphContextObj& contextObj, AttributeDataHandle const* handles, size_t count, NameToken layerIdentifier); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IGraphContext, registerForUSDWriteBacksToLayer, 65) // ============================================================================================================== /** The underlying schedule node represents the scheduled task(s) corresponding to the representational node in the * graph. As OmniGraph evolves, the schedule node will be extended to store the results of the instance(s)/task(s) * corresponding to the representational node. */ struct IScheduleNode { //! @private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IScheduleNode", 1, 0); //! @private Deprecated: Task generation is the responsibility of the execution framework NodeObj(CARB_ABI* getNode)(const ScheduleNodeObj& scheduleNodeObj); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IScheduleNode, getNode, 0) // ============================================================================================================== //! @private Retired prototype struct IDataStealingPrototype { //! 
@private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IDataStealingPrototype", 1, 1); /** @private Retired - do not use */ bool(CARB_ABI* __retired_0)(const GraphContextObj&,ConstAttributeDataHandle,ConstAttributeDataHandle); /** @private Retired - do not use */ AttributeDataHandle(CARB_ABI* __retired_1)(const GraphContextObj&,ConstAttributeDataHandle ); /** @private Retired - do not use */ bool(CARB_ABI* __retired_2)(const GraphContextObj&,ConstBundleHandle,ConstBundleHandle); /** @private Retired - do not use */ BundleHandle(CARB_ABI* __retired_3)(const GraphContextObj&, ConstBundleHandle); bool(CARB_ABI* enabled)(const GraphContextObj&);//always false /** @private Retired - do not use */ void(CARB_ABI* __retired_4)(const GraphContextObj&, bool); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IDataStealingPrototype, __retired_4, 5) } // namespace core } // namespace graph } // namespace omni
153,550
C
47.438801
164
0.671136
omniverse-code/kit/include/omni/graph/core/PostUsdInclude.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // This file should be included in conjunction with PreUsdInclude.h when including any of the individual USD // definition files. This file restores the warnings that were disabled for the inclusion of USD files. // // It includes a special ifdef detection to prevent inclusion of this file without PreUsdInclude.h as that // would have unpredictable effects on the compiler. The usual "#pragma once" is omitted, so that the mechanism works // correctly even with multiple uses. It is not legal to include anything other than USD headers between these two. // // Here is an example of how you use this mechanism to include the definition of the USD type pxr::GfHalf: // // #include <omni/graph/core/PreUsdInclude.h> // #include <pxr/base/gf/half.h> // #include <omni/graph/core/PostUsdInclude.h> // #ifdef _MSC_VER # pragma warning(pop) # undef NOMINMAX #elif defined(__GNUC__) # pragma GCC diagnostic pop # ifdef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS # define __DEPRECATED # undef OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS # endif #endif #ifdef __USD_INCLUDE_PROTECTION__ # undef __USD_INCLUDE_PROTECTION__ #else # error "You must include PreUsdInclude.h before including PostUsdInclude.h" #endif
1,669
C
41.820512
117
0.75734
omniverse-code/kit/include/omni/graph/core/IGraphRegistry.h
// Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/events/IEvents.h> #include <carb/Interface.h> #include <omni/graph/core/Handle.h> #include <omni/inspect/IInspector.h> namespace omni { namespace graph { namespace core { /** * An event that occurs on the graph registry */ enum class IGraphRegistryEvent { //! Node type has been added to the registry. Event payloads are: //! "node_type" (std::string): Name of new node type eNodeTypeAdded, //! Node type has been removed from the registry. Event payloads are: //! "node_type" (std::string): Name of removed node type eNodeTypeRemoved, //! Node type has had its namespace changed. Event payloads are: //! "node_type" (std::string): New namespace //! "prev_type" (std::string): Previous namespace eNodeTypeNamespaceChanged, //! Node type has had its category changed. Event payloads are: //! "node_type" (std::string): Node type whose category changed //! "prev_value" (std::string): Previous category value eNodeTypeCategoryChanged }; // ====================================================================== //! Interface that manages the registration and deregistration of node types struct IGraphRegistry { //! @private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IGraphRegistry", 1, 5); /** * Returns the number of registered types in the graph. This includes C++ types only. * * @return The number of subgraphs in the graph */ size_t(CARB_ABI* getRegisteredTypesCount)(); /** * Gets the list of the registered types in the graph. This includes C++ types only. 
* * @param[out] typesBuf Buffer to hold the return array of node type objects * @param[in] bufferSize the number of NodeTypeObj structures the buffer is able to hold * @return true on success, false on failure */ bool(CARB_ABI* getRegisteredTypes)(NodeTypeObj* typesBuf, size_t bufferSize); /** * Gets the version of the registered node type. This includes both C++ and Python types. * * @param[in] nodeType the name of the node type in question. * @return the version number of the currently registered type. If the type is not found * returns the default version number, which is 0 */ int(CARB_ABI* getNodeTypeVersion)(const char* nodeType); /** * Registers a node type as defined above with the system * Deprecated. Use registerNodeTypeInterface instead. * * @param[in] desc Reference to the node type interface (the underlying object is not yet available here) * @param[in] version Version of the node interface to be registered */ CARB_DEPRECATED("Will be removed in next major version, use IGraphRegistry::registerNodeTypeInterface instead") void(CARB_ABI* registerNodeType)(const INodeType& desc, int version); /** * Unregisters a node type interface as defined above with the system * * @param[in] nodeType Name of the node type to be unregistered */ void(CARB_ABI* unregisterNodeType)(const char* nodeType); /** * Registers an alias by which a node type can be referred to. Useful for backward compatibility in files. * * @param[in] desc Reference to the node type interface * @param[in] alias Alternate name that can be used to refer to the node type when creating */ CARB_DEPRECATED("Will be removed in next major version, use INodeTypeForwarding instead") void(CARB_ABI* registerNodeTypeAlias)(const INodeType& desc, const char* alias); /** * Runs the inspector on the contents of the graph registry. 
* * @param[in] inspector The inspector class * @return true if the inspection ran successfully, false if the inspection type is not supported */ bool(CARB_ABI* inspect)(inspect::IInspector* inspector); /** * Gets the node type information corresponding to the node type name. This includes aliases. * * @param[in] bufferSize the number of NodeTypeObj structures the buffer is able to hold * @return true on success, false on failure */ NodeTypeObj(CARB_ABI* getRegisteredType)(const char* nodeTypeName); /** * Returns the interface for the event stream for the changes on the graph registry * * The events that are raised are specified by IGraphRegistryEvent * * @return the event stream interface that pumps events */ carb::events::IEventStreamPtr(CARB_ABI* getEventStream)(); /** * Registers a node type as defined above with the system * * @param[in] desc Reference to the node type interface (the underlying object is not yet available here) * @param[in] version Version of the node interface to be registered * @param[in] The size of the INodeType struct being passed. Use sizeof(INodeType). * */ void(CARB_ABI* registerNodeTypeInterface)(const INodeType& desc, int version, size_t nodeTypeStructSize); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IGraphRegistry, registerNodeTypeInterface, 9) } // namespace core } // namespace graph } // namespace omni
5,671
C
39.514285
115
0.693176
omniverse-code/kit/include/omni/graph/core/ISchedulingHints.gen.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Interface to the list of scheduling hints that can be applied to a node type template <> class omni::core::Generated<omni::graph::core::ISchedulingHints_abi> : public omni::graph::core::ISchedulingHints_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::ISchedulingHints") /** * Get the threadSafety status (i.e. can be run in parallel with other nodes) * * @returns Is the node compute threadsafe? */ omni::graph::core::eThreadSafety getThreadSafety() noexcept; /** * Set the flag indicating if a node is threadsafe or not. * * @param[in] newThreadSafety New value of the threadsafe flag */ void setThreadSafety(omni::graph::core::eThreadSafety newThreadSafety) noexcept; /** * Get the type of access the node has for a given data type * * @param[in] dataType Type of data for which access type is being modified * @returns Value of the access type flag */ omni::graph::core::eAccessType getDataAccess(omni::graph::core::eAccessLocation dataType) noexcept; /** * Set the flag describing how a node accesses particular data in its compute _abi (defaults to no access). * Setting any of these flags will, in most cases, automatically mark the node as "not threadsafe". 
* One current exception to this is allowing a node to be both threadsafe and a writer to USD, since * such behavior can be achieved if delayed writebacks (e.g. "registerForUSDWriteBack") are utilized * in the node's compute method. * * @param[in] dataType Type of data for which access type is being modified * @param[in] newAccessType New value of the access type flag */ void setDataAccess(omni::graph::core::eAccessLocation dataType, omni::graph::core::eAccessType newAccessType) noexcept; /** * Get the flag describing the compute rule which may be followed by the evaluator. * * @returns Value of the ComputeRule flag */ omni::graph::core::eComputeRule getComputeRule() noexcept; /** * Set the flag describing the compute rule which may be followed by the evaluator. * * @param[in] newComputeRule New value of the ComputeRule flag */ void setComputeRule(omni::graph::core::eComputeRule newComputeRule) noexcept; /** * Runs the inspector on the scheduling hints. * * @param[in] inspector The inspector class * @return true if the inspection ran successfully, false if the inspection type is not supported */ bool inspect(omni::core::ObjectParam<omni::inspect::IInspector> inspector) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::graph::core::eThreadSafety omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::getThreadSafety() noexcept { return getThreadSafety_abi(); } inline void omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::setThreadSafety( omni::graph::core::eThreadSafety newThreadSafety) noexcept { setThreadSafety_abi(newThreadSafety); } inline omni::graph::core::eAccessType omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::getDataAccess( omni::graph::core::eAccessLocation dataType) noexcept { return getDataAccess_abi(dataType); } inline void omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::setDataAccess( omni::graph::core::eAccessLocation dataType, omni::graph::core::eAccessType newAccessType) 
noexcept { setDataAccess_abi(dataType, newAccessType); } inline omni::graph::core::eComputeRule omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::getComputeRule() noexcept { return getComputeRule_abi(); } inline void omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::setComputeRule( omni::graph::core::eComputeRule newComputeRule) noexcept { setComputeRule_abi(newComputeRule); } inline bool omni::core::Generated<omni::graph::core::ISchedulingHints_abi>::inspect( omni::core::ObjectParam<omni::inspect::IInspector> inspector) noexcept { return inspect_abi(inspector.get()); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
4,965
C
34.726618
130
0.720846
omniverse-code/kit/include/omni/graph/core/OgnHelpers.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // This file contains helper functions used by the generated .ogn file code. // You shouldn't normally have to look in here. #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/ogn/State.h> #include <omni/graph/core/ogn/Database.h> #include <omni/graph/core/ogn/AttributeInitializer.h> #include <omni/graph/core/ogn/Registration.h> // The fabric namespace usage is somewhat more targetted though so it can be explicit using omni::fabric::IToken; using omni::fabric::IPath;
941
C
41.81818
85
0.781084
omniverse-code/kit/include/omni/graph/core/IAttributeType.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/iComputeGraph.h> #include <carb/Defines.h> #include <carb/Interface.h> #include <carb/Types.h> #include <omni/graph/core/Handle.h> #include <omni/graph/core/Type.h> #include <omni/inspect/IInspector.h> namespace omni { namespace graph { namespace core { // ====================================================================== /** * @brief Interface class managing various features of attribute types * */ struct IAttributeType { //! @private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IAttributeType", 1, 4); /** * @brief Returns an attribute type object corresponding to the OGN-style type name * * The type name is assumed to contain no whitespace for efficiency, so prune it before calling if necessary. * * @param[in] ognTypeName Attribute type name in the format used by the .ogn files * @return The attribute type description corresponding to the type name */ Type (CARB_ABI* typeFromOgnTypeName)(const char* ognTypeName); /** * @brief Returns an attribute type object corresponding to the Sdf-style type name. * * The type name is assumed to contain no whitespace for efficiency, so prune it before calling if necessary. * Note that some types cannot be expressed in this form (e.g. the extended types such as "union" and "any", and * OGn-only types such as "bundle") so where possible use the typeFromOgnTypeName() method. 
* * @param[in] sdfTypeName Attribute type name in the format used by pxr::SdfValueTypeNames * @return The attribute type description corresponding to the type name */ Type (CARB_ABI* typeFromSdfTypeName)(const char* sdfTypeName); /** * @brief Returns the size of the base data (without tuples or array counts) for the given attribute type * * @param[in] type Attribute type whose size is to be returned * @return Size of the base data stored by the attribute type, 0 if none is stored */ size_t (CARB_ABI* baseDataSize)(Type const& type); /** * @brief Runs the inspector on the attribute data with the given type. * * @param[in] type The attribute type of the raw data * @param[in] data Pointer to the raw data of the given type. * @param[in] elementCount Number of array elements in the data (1 if not an array) * @param[in] inspector The inspector class * @return true if the inspection ran successfully, false if the inspection type is not supported */ bool(CARB_ABI* inspect)(Type const& type, void const* data, size_t arrayElementCount, inspect::IInspector* inspector); /** * @brief Returns the SdfValueTypeName corresponding to the given type. * * @note Not all OGN Types are fully represented in the Sdf schema since they have additional semantics in OGN * which do not exist in USD. In that case the SdfValueTypeName of the base type will be returned, which is * what is used to serialize the attribute. * * For example Type(BaseDataType::UInt64, 1, 0, AttributeRole::eObjectId) is an OGN "objectId" which will * return just "uint64" from this function. * * @param[in] type The Type in question * @return The token of the corresponding SdfTypeName */ NameToken(CARB_ABI* sdfTypeNameFromType)(Type const& type); /** * @brief Checks to see if the Type passed in corresponds to a legal OGN type. 
* * @param[in] type Type to be checked * @return true if the Type can be fully represented by OGN * @return false if the Type does not correspond exactly to an OGN type * */ bool(CARB_ABI* isLegalOgnType)(Type const& type); /** * @brief Retreives the number of attribute unions. * * @return The number of attribute union types. */ size_t(CARB_ABI* getUnionTypeCount)(); /** * @brief Retrieves the name of the available union types * * @param[in] buffer The array of pointers to fill in with names of the union types. * The values returned are only valid while the list of unions is not changing. * @param[in] bufferSize The number of entries to retreive. Use getUnionTypeCount to retrieve the number * available. * @return The number of entries written to buffer. * */ size_t(CARB_ABI* getUnionTypes)(const char** buffer, size_t bufferSize); /** * @brief Retrieves the number of entries for the attribute union of the given name * * @param[in] unionType The name of the attribute union to retrieve. * @return The number of entries associated with unionType. If unionType is not valid, 0 is returned. * */ size_t(CARB_ABI* getUnionTypeEntryCount)(const char* unionType); /** * @brief Gets the list of ogn type names associated with an attribute union. * * The list of returned types is fully expanded. This means if an attribute union is defined in terms * of other attributes unions, the entries returned will have recursively expanded each entry and the * final list will only contain ogn type names and not other attribute unions names. * * @param[in] unionType The name of the attribute union to retrieve. * @param[in] buffer The array of pointers to fill in with names of the ogn types names. * The values returned are only valid while the list of unions is not changing. * @param[in] bufferSize The number of entries to retrieve. Use getUnionTypeEntryCount to find the maximum available. * @return The number of items written to buffer. If unionType is not valid, 0 is returned. 
* */ size_t(CARB_ABI* getUnionTypeEntries)(const char* unionType, const char** buffer, size_t bufferSize); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IAttributeType, getUnionTypeEntries, 9) } // namespace core } // namespace graph } // namespace omni
6,566
C
42.490066
122
0.683673
omniverse-code/kit/include/omni/graph/core/GpuArray.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <stddef.h> #include <stdint.h> namespace omni { namespace graph { namespace core { template <typename T> struct GpuArray { // GPU pointer to data T* const* gpuData; // GPU pointer to elem count const size_t* elemCount; #ifdef __CUDACC__ __device__ T* data() { return *gpuData; } __device__ size_t size() const { return *elemCount; } #endif }; template <typename T> struct ArrayOfGpuArray { // GPU array of GPU data pointers T* const* gpuData; // GPU array of elem counts const size_t* elemCount; #ifdef __CUDACC__ __device__ T* data(size_t i) const { return gpuData[i]; } __device__ size_t size(size_t i) const { return elemCount[i]; } #endif }; template <typename T> struct ConstGpuArray { // GPU pointer to data const T* const* gpuData; // GPU pointer to elem count const size_t* elemCount; #ifdef __CUDACC__ __device__ const T* data() const { return *gpuData; } __device__ size_t size() const { return *elemCount; } #endif }; template <typename T> struct ArrayOfConstGpuArray { // GPU array of GPU data pointers const T* const* gpuData; // GPU array of elem counts const size_t* elemCount; #ifdef __CUDACC__ __device__ const T* data(size_t i) const { return gpuData[i]; } __device__ size_t size(size_t i) const { return elemCount[i]; } #endif }; } } }
1,955
C
16.464286
77
0.627621
omniverse-code/kit/include/omni/graph/core/ComputeGraph.h
// Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "iComputeGraph.h" #include "INodeCategories.h" #include "unstable/INodeTypeForwarding.h" #include "IBundleFactory.h" #include "IDirtyID.h" #include "IBundleChanges.h" #include <carb/Interface.h> #include <carb/tasking/TaskingTypes.h> #include <omni/core/IObject.h> namespace rtx { namespace resourcemanager { class RpResource; typedef uint32_t SyncScopeId; } } namespace carb { namespace graphics { struct Semaphore; } } namespace gpu { namespace rendergraph { //! Declare the IRenderGraph interface definition OMNI_DECLARE_INTERFACE(IRenderGraph); using RenderGraph = omni::core::ObjectPtr<gpu::rendergraph::IRenderGraph>; } enum class GfResult: int32_t; } namespace omni { namespace usd { class IUsdMutex; using PathH = uint64_t; namespace hydra { struct ViewportHydraRenderResults; } } namespace kit { struct StageUpdateSettings; } namespace graph { namespace core { /** used only by kit - do not use this interface in plugins */ struct ComputeGraph { CARB_PLUGIN_INTERFACE("omni::graph::core::ComputeGraph", 2, 8) /** * Returns the number of global orchestration graphs for all stages of the graph pipelines * * Note: from version 2.3 on, the semantics of this call has changed, even if the interface has not. * There is now a orchestration graph which has nodes that each wrap either a graph or an extension to be run * This function will now return the number of such orchestration graphs. 
The non-orchestration global graphs * can be retrieved by iterating over the nodes of the orchestration graph and calling getWrappedGraph * * @return the number of graphs in the system, regardless of pipeline stage */ size_t(CARB_ABI* getGraphCount)(); /** * Fills the buffer with global orchestration graph objects that occupy all pipeline stages * * Note: From version 2.3 on, see note above about the global orchestration graph with nodes that * wrap previous global graphs * * @param[out] graphObjs The buffer of graphs to fill * @param[in] bufSize The size of the buffer in terms of the number of GraphObj it has room for * @return true on success, false on failure */ bool(CARB_ABI* getGraphs)(GraphObj* contextBuf, size_t bufSize); /** * Returns the number of graph contexts for all pipeline stages * * Note: From version 2.3 on, see note above about the global orchestration graph with nodes that * wrap previous global graphs * * @return the number of graph contexts in the whole system, regardless of pipeline stage */ size_t(CARB_ABI* getGraphContextCount)(); /** * Fills the buffer with graph context objects that occupy all pipeline stages * * Note: From version 2.3 on, see note above about the global orchestration graph with nodes * that wrap previous global graphs * * @param[out] contextBuf The buffer of graph contexts to fill * @param[in] bufSize The size of the buffer in terms of the number of GraphContextObj it has room for * @return true on success, false on failure */ bool(CARB_ABI* getGraphContexts)(GraphContextObj* contextBuf, size_t bufSize); /** * @brief Shut down all of the compute graph infrastructure * * The functionality in this method was originally introduced because we didn't have carbOnPluginShutdown working * properly (it's not being called for some reason), but later we decided to keep it because DS needs more control * over OG's startup and shutdown. 
*/ void(CARB_ABI* shutdownComputeGraph)(); /** * @brief Start up all of the compute graph infrastructure * * Needed by DS to have more control over OG startup / shutdown mechanism */ void(CARB_ABI* startupComputeGraph)(); /** * @brief Attach OmniGraph to the given stage * * @param[in] stageId ID of the stage to which OmniGraph should attach * @param[in] metersPerUnit Length units of the stage * @param[in] userData Extra raw data to use for and identify the attachment * */ void(CARB_ABI* attach)(long int stageId, double metersPerUnit, void* userData); /** * @brief Detach OmniGraph using the information in the @p userData * * @param[in] userData Extra raw data to use for and identify the detachment * */ void(CARB_ABI* detach)(void* userData); /** * @deprecated This version is deprecated and will be removed in a future version. * Use updateV2() instead. */ void(CARB_ABI* update)(float currentTime, float elapsedSecs, const omni::kit::StageUpdateSettings* updateSettings, void* userData); /** * @deprecated This version is deprecated and will be removed in a future version. * Use updateSimStepWithUsd() instead. */ void(CARB_ABI* updateSimStep)(int64_t timeNS, carb::tasking::Counter*); /** * @brief Enable of disable the attachment of OmniGraph to the Kit update looop * * Currently there is a dependency in SimStageWithHistory on OG, so we cannot shutdown the graph * when it needs to be ticked from external process, such as DS2's ISimStep. * Using this method we can block (or unblock) OG from reacting to Kit's update loop. * * @param[in] state Whether the Kit update loop should be respected or not */ void(CARB_ABI* considerKitUpdateLoop)(bool state); /** * @brief postRenderBegin is called after IHydraEngine::render() after the postRender starts * in order to tick graphs in the postRender pipeline stage. 
* * @param[in] syncScope ID for the resource manager's sync scope * @param[in] renderGraph Graph that will be processed in the postRender */ void(CARB_ABI* postRenderBegin)(rtx::resourcemanager::SyncScopeId syncScope, gpu::rendergraph::RenderGraph renderGraph); /** * @brief postRenderBegin is called after IHydraEngine::render() during the postRender update * in order to tick graphs in the postRender pipeline stage. * * @param[in] syncScope ID for the resource manager's sync scope * @param[in] renderResults Return value from IHydraEngine::render() * @param[in] renderProductPrimPath Prim path of the RenderProduct for this view * @param[in] simTime Kit's simulation time that's passed to ComputeGraphImpl::updateV2() * @param[in] hydraTime The current time value of the USD Stage */ void(CARB_ABI* postRenderUpdate)(rtx::resourcemanager::SyncScopeId syncScope, omni::usd::hydra::ViewportHydraRenderResults* renderResults, omni::usd::PathH renderProductPrimPath, double simTime, double hydraTime); /** * @brief postRenderBegin is called after IHydraEngine::render() after the postRender ends * in order to tick graphs in the postRender pipeline stage. * * @param[in] syncScope ID for the resource manager's sync scope * @return The status of the postRender */ gpu::GfResult(CARB_ABI* postRenderEnd)(rtx::resourcemanager::SyncScopeId syncScope); /** * Returns the number of global orchestration graphs given a particular graph pipeline stage. 
* * @param[in] pipelineStage The stage of the pipeline (simulation, pre-render, post-render) * @return the number of graphs in that pipeline stage */ size_t(CARB_ABI* getGraphCountInPipelineStage)(GraphPipelineStage pipelineStage); /** * Fills the buffer with global orchestration graph objects that occupy a particular pipeline stage * * @param[out] graphObjs The buffer of graphs to fill * @param[in] bufSize The size of the buffer in terms of the number of GraphObj it has room for * @param[in] pipelineStage The stage of the pipeline (simulation, pre-render, post-render) * @return true on success, false on failure */ bool(CARB_ABI* getGraphsInPipelineStage)(GraphObj* graphObjs, size_t bufSize, GraphPipelineStage pipelineStage); /** * @brief This update function is used by DriveSim's ISimStep interface to tick the graph * * @param[in] timeNS Simulation time, in nanoseconds * @param[in] counter Tasking counter * @param[in] disableUsdUpdates Turn off USD updates while the simulation step is happening */ void(CARB_ABI* updateSimStepUsd)(int64_t timeNS, carb::tasking::Counter* counter, bool disableUsdUpdates); /** * @brief Gets the interface object handling the node categories * * @return Raw object that implements the node category interface */ INodeCategories*(CARB_ABI* getNodeCategoriesInterface)(); /** * @brief Gets an ONI object for the interface handling the node categories * * @return Shared object that implements the node category interface */ inline omni::core::ObjectPtr<INodeCategories> getNodeCategoriesInterfacePtr() const { return omni::core::steal(getNodeCategoriesInterface()); } /** * @brief Sets the test failure state. This is for test failures that cannot be caught by conventional means. * * @param[in] hasFailed If true then increment the test failure count, otherwise reset it to 0. */ void (CARB_ABI* setTestFailure)(bool hasFailed); /** * @return Returns the current test failure count since it was last cleared. 
*/ size_t (CARB_ABI* testFailureCount)(); /** * @brief Gets an ONI object for the interface handling the bundle factory * * @return Shared object that implements the bundle factory interface */ IBundleFactory*(CARB_ABI* getBundleFactoryInterface)(); /** * @brief Gets an ONI object for the interface handling the bundle factory * * @return Shared object that implements the bundle factory interface */ inline omni::core::ObjectPtr<IBundleFactory> getBundleFactoryInterfacePtr() const { return omni::core::steal(getBundleFactoryInterface()); } /** * @brief Updates the graphs * * @param[in] currentTime Time at which the graphs are being updated * @param[in] elapsedSecs Amount of time elapsed in overall graph updates * @param[in] absoluteSimTime Time at which the simulation graph updates * @param[in] updateSettings Any settings required by the update * @param[in] userData Raw user data to pass to the update */ void(CARB_ABI* updateV2)(double currentTime, float elapsedSecs, double absoluteSimTime, const omni::kit::StageUpdateSettings* updateSettings, void* userData); /** * @brief preRenderBegin is called before IHydraEngine::render() before the preRender starts * in order to tick graphs in the prerender pipeline stage. * * @param[in] renderGraph Render graph that is about to be ticked */ void(CARB_ABI* preRenderBegin)(gpu::rendergraph::RenderGraph renderGraph); /** * @brief preRenderUpdate is called before IHydraEngine::render() when the preRender updates * in order to tick graphs in the prerender pipeline stage. 
* * @param[in] simTime Time at which the render graph is being updated according to the simulation graph * @param[in] hydraTime Time at which the render graph is being updated according to Hydra * @param[in] mutex Mutex for locking USD while updating */ void(CARB_ABI* preRenderUpdate)(double simTime, double hydraTime, omni::usd::IUsdMutex& mutex); /** * @brief preRenderEnd is called before IHydraEngine::render() after the preRender is done * in order to tick graphs in the prerender pipeline stage. */ void(CARB_ABI* preRenderEnd)(); /** * Flushes any pending USD changes from the fabric scene delegate. */ void(CARB_ABI* flushUsd)(); /** * @brief Gets the interface object handling the node type forwarding * * @return Shared object that implements the node type forwarding interface */ unstable::INodeTypeForwarding*(CARB_ABI* getNodeTypeForwardingInterface)(); inline omni::core::ObjectPtr<unstable::INodeTypeForwarding> getNodeTypeForwardingInterfacePtr() const { return omni::core::steal(getNodeTypeForwardingInterface()); } unstable::IDirtyID2*(CARB_ABI* getDirtyIDInterface)(GraphContextObj const& context); omni::core::ObjectPtr<unstable::IDirtyID2> getDirtyIDInterfacePtr(GraphContextObj const& context) const { return omni::core::steal(getDirtyIDInterface(context)); } IBundleChanges*(CARB_ABI* getBundleChangesInterface)(GraphContextObj const& context); omni::core::ObjectPtr<IBundleChanges> getBundleChangesInterfacePtr(GraphContextObj const& context) const { return omni::core::steal(getBundleChangesInterface(context)); } }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(ComputeGraph, getBundleChangesInterface, 28) } } }
13,885
C
36.52973
118
0.684048
omniverse-code/kit/include/omni/graph/core/PreUsdInclude.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // This file should be included in conjunction with PostUsdInclude.h when including any of the individual USD // definition files. They have a number of warnings that are triggered by our default build configuration that these // files silence as being third party includes we can't fix them ourselves. // // It includes a special ifdef detection to prevent inclusion of this file without PostUsdInclude.h as that // would have unpredictable effects on the compiler. The usual "#pragma once" is omitted, so that the mechanism works // correctly even with multiple uses. It is not legal to include anything other than USD headers between these two. 
// // Here is an example of how you use this mechanism to include the definition of the USD type pxr::GfHalf: // // #include <omni/graph/core/PreUsdInclude.h> // #include <pxr/base/gf/half.h> // #include <omni/graph/core/PostUsdInclude.h> // // NOTE: At some point when USD is upgraded these might no longer be required; at that time they can be deprecated // #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4244) // = Conversion from double to float / int to float # pragma warning(disable : 4267) // conversion from size_t to int # pragma warning(disable : 4305) // argument truncation from double to float # pragma warning(disable : 4800) // int to bool # pragma warning(disable : 4996) // call to std::copy with parameters that may be unsafe # pragma warning(disable : 4003) // not enough arguments for function-like macro invocation # define NOMINMAX // Make sure nobody #defines min or max #elif defined(__GNUC__) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wdeprecated-declarations" # pragma GCC diagnostic ignored "-Wunused-local-typedefs" # pragma GCC diagnostic ignored "-Wunused-function" # pragma GCC diagnostic ignored "-Wunused-variable" // This suppresses deprecated header warnings, which is impossible with pragmas. // Alternative is to specify -Wno-deprecated build option, but that disables other useful warnings too. # ifdef __DEPRECATED # define OMNI_USD_SUPPRESS_DEPRECATION_WARNINGS # undef __DEPRECATED # endif #endif #ifdef __USD_INCLUDE_PROTECTION__ # error "You must include PostUsdInclude.h after including PreUsdInclude.h" #else # define __USD_INCLUDE_PROTECTION__ #endif
2,789
C
50.666666
117
0.752599
omniverse-code/kit/include/omni/graph/core/PyINodeCategories.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//

#pragma once

#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>

#include <sstream>

//! Registers the Python bindings for omni::graph::core::INodeCategories on module @p m.
//! Returns the bound pybind11 class object produced by omni::python::PyBind.
auto bindINodeCategories(py::module& m)
{
    // hack around pybind11 issues with C++17
    // - https://github.com/pybind/pybind11/issues/2234
    // - https://github.com/pybind/pybind11/issues/2666
    // - https://github.com/pybind/pybind11/issues/2856
    // The generated ABI wrapper is bound first as a hidden parent class ("_INodeCategories").
    py::class_<omni::core::Generated<omni::graph::core::INodeCategories_abi>,
               omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::INodeCategories_abi>>,
               omni::core::IObject>
        clsParent(m, "_INodeCategories");

    // The public class exposed to Python as "INodeCategories".
    py::class_<omni::graph::core::INodeCategories, omni::core::Generated<omni::graph::core::INodeCategories_abi>,
               omni::python::detail::PyObjectPtr<omni::graph::core::INodeCategories>, omni::core::IObject>
        cls(m, "INodeCategories", R"OMNI_BIND_RAW_(Interface to the list of categories that a node type can belong to )OMNI_BIND_RAW_");

    // Constructor from an existing IObject: downcasts, raising on a type mismatch.
    cls.def(py::init(
        [](const omni::core::ObjectPtr<omni::core::IObject>& obj)
        {
            auto tmp = omni::core::cast<omni::graph::core::INodeCategories>(obj.get());
            if (!tmp)
            {
                throw std::runtime_error("invalid type conversion");
            }
            return tmp;
        }));

    // Default constructor: creates a fresh implementation via the type factory.
    cls.def(py::init(
        []()
        {
            auto tmp = omni::core::createType<omni::graph::core::INodeCategories>();
            if (!tmp)
            {
                throw std::runtime_error("unable to create omni::graph::core::INodeCategories instantiation");
            }
            return tmp;
        }));

    // Read-only property exposing getCategoryCount().
    cls.def_property_readonly("category_count", &omni::graph::core::INodeCategories::getCategoryCount);

    cls.def("define_category",
            [](omni::graph::core::INodeCategories* self, const char* categoryName, const char* categoryDescription)
            {
                auto return_value = self->defineCategory(categoryName, categoryDescription);
                return return_value;
            },
            R"OMNI_BIND_RAW_(Define a new category @param[in] categoryName Name of the new category @param[in] categoryDescription Description of the category @return false if there was already a category with the given name)OMNI_BIND_RAW_",
            py::arg("category_name"), py::arg("category_description"));

    cls.def("remove_category",
            [](omni::graph::core::INodeCategories* self, const char* categoryName)
            {
                auto return_value = self->removeCategory(categoryName);
                return return_value;
            },
            R"OMNI_BIND_RAW_(Remove an existing category, mainly to manage the ones created by a node type for itself @param[in] categoryName Name of the category to remove @return false if there was no category with the given name)OMNI_BIND_RAW_",
            py::arg("category_name"));

    return omni::python::PyBind<omni::graph::core::INodeCategories>::bind(cls);
}
3,605
C
41.928571
117
0.638835
omniverse-code/kit/include/omni/graph/core/IBundle.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "bundle/IBundle2.h"
482
C
39.249997
77
0.792531
omniverse-code/kit/include/omni/graph/core/SlangScript.h
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <carb/Framework.h> #include <carb/graphics/Graphics.h> #include <omni/graph/core/PreUsdInclude.h> #include <pxr/base/tf/token.h> #include <pxr/base/tf/type.h> #include <pxr/usd/sdf/path.h> #include <omni/graph/core/PostUsdInclude.h> #include <slang/slang.h> #include <slang/slang-com-ptr.h> #define SLANG_PRELUDE_NAMESPACE CPPPrelude #include <string> #include <vector> #include <slang/prelude/slang-cpp-types.h> namespace omni { namespace graph { namespace core { struct SlangScript { // A ResizeSpec allows the user to set (using USD) the size of an output // array to the size of an input array struct ResizeSpec { std::string outputArray; std::string inputArray; }; std::vector<ResizeSpec> resizeSpec; // Whether to run on CPU or GPU gpucompute::Target target; // Compiler output gpucompute::Shader* shader = nullptr; gpucompute::ComputeCompiler* compiler = nullptr; SlangScript(const char* codeString, gpucompute::Target target, const std::vector<ResizeSpec>& resizeSpec, carb::graphics::Device* device); ~SlangScript() { if (shader) compiler->destroyShader(*shader); } }; } } }
1,703
C
23.342857
77
0.696418
omniverse-code/kit/include/omni/graph/core/Handle.h
// Copyright (c) 2021-2023 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/fabric/IPath.h>
#include <omni/fabric/IToken.h>
#include <omni/fabric/IFabric.h>
#include <omni/graph/core/TemplateUtils.h>

// Support for generic handles for interface objects.
// Ideally each of the handle types would be put into interface files for their corresponding types. In the
// current scheme they are too intertwined to separate them cleanly. Having this file provides a way for code
// to have access to handles for passing around without pulling in all of the unrelated interfaces.

namespace omni
{
namespace graph
{
namespace core
{

/**
 * Macro to validate the structure of the interface definitions. New functions must be added at the end,
 * and the struct itself must be a standard layout POD, as per Carbonite requirements.
 * Expands to compile-time static_asserts only; it generates no runtime code.
 * @param[in] StructName         Name of the structure being validated
 * @param[in] LastFunction       Name of the last function declared in the structure
 * @param[in] NumberOfFunctions  Total number of functions declared in the structure
 */
#define STRUCT_INTEGRITY_CHECK(StructName, LastFunction, NumberOfFunctions)                                       \
    static_assert(offsetof(StructName, LastFunction) == NumberOfFunctions * sizeof(void(*)()),                    \
                  "New " # StructName " ABI methods must be added at the end");                                   \
    static_assert(std::is_trivial<StructName>::value, # StructName " must be a POD");                             \
    static_assert(std::is_standard_layout<StructName>::value, # StructName " must have std layout");              \
    static_assert(sizeof(StructName) == (NumberOfFunctions+1) * sizeof(void (*)()),                               \
                  "Please update the integrity check macro to point to the last method of " #StructName);

// ==============================================================================================================
/**
 * @brief Template class for defining handles to various OmniGraph data types
 *
 * Thin value wrapper around T; comparisons and validity are delegated to SUBCLASS::invalidValue() (CRTP).
 *
 * @tparam T The underlying type of the handle being defined
 * @tparam SUBCLASS The handle subclass being defined
 */
template <typename T, typename SUBCLASS>
class HandleBase
{
public:
    //! Default constructor, gives an invalid handle
    HandleBase() = default;
    //! Default copy constructor, bitwise copy of handle data
    HandleBase(const HandleBase&) = default;
    //! Construct a handle from the underlying data it represents
    explicit HandleBase(const T& h) : handle(h)
    {
    }
    //! Default assignment operator, bitwise copy of the handle data
    HandleBase& operator=(const HandleBase&) = default;
    //! Cast to the underlying data type of the handle
    explicit operator T() const
    {
        return handle;
    }
    //! Returns true if the handle is currently valid
    bool isValid() const
    {
        return (handle != SUBCLASS::invalidValue());
    }
    //! Equality operator - only identical handles are equal
    bool operator==(HandleBase rhs) const
    {
        return handle == rhs.handle;
    }
    //! Inequality operator - only identical handles are equal
    bool operator!=(HandleBase rhs) const
    {
        return !(handle == rhs.handle);
    }
    //! Constant representing a unique invalid handle for this instantiation
    static constexpr SUBCLASS invalidHandle()
    {
        return static_cast<SUBCLASS>(SUBCLASS::invalidValue());
    }

protected:
    T handle; //!< Instantiation of the underlying type of the handle
};

//! Underlying data type for a handle being represented as an integer
using HandleInt = uint64_t;

//! Representation of a string that is referenced via a unique token. Note that although this token is
//! reference counted this is the raw value which is not. Use ogn::Token at the high level to get that
//! reference counting if you intend to hang on to copies of the token.
using NameToken = omni::fabric::TokenC;

//! Representation of a path. Note that although this path is reference counted this is the raw value
//! which is not. Use ogn::Path at the high level to get that reference counting if you intend to hang
//! on to copies of the path.
using TargetPath = omni::fabric::PathC;

//! Location of rows of data in Fabric
using BucketId = omni::fabric::BucketId;

//! Representation of an internal type used by AutoNode
using ObjectId = HandleInt;

// NOTE: Due to Linux debug linking pre-C++17 not liking constexpr static variables,
// other than built-in integer types, being passed by const reference,
// invalid values for handles are now constructed via inline functions, instead
// of using constexpr static variables. Just to play it safe, we're using a macro
// here, in case it also has an issue with built-in integer types being referenced
// from an inline function at compile-time. Note that the link errors don't show
// up until *runtime*, since they appear to be handled via dynamic linking.

//! Representation of an invalid handle as an integer (all bits set)
#define OG_INVALID_HANDLE_INT_VALUE (~HandleInt(0))
//! Representation of an invalid handle as an integer
constexpr static HandleInt kInvalidHandleIntValue = OG_INVALID_HANDLE_INT_VALUE;

// ==============================================================================================================
//! Handle type representing attributes, which require two parts to be valid (owner path + attribute name token)
using AttrKey = std::pair<HandleInt, HandleInt>;

//! This is here so we can use AttrKey and ConstAttributeDataHandle as a key to std::unordered_map.
//! There is another below in ConstAttributeDataHandleHash, intentionally the same as they represent the same data.
//!
//! See https://www.techiedelight.com/use-std-pair-key-std-unordered_map-cpp/ for a full discussion.
struct AttrKeyHash
{
    //! Return a hash value for the underlying handle data
    std::size_t operator()(const AttrKey& attrKey) const
    {
        // XOR combination is symmetric (swapped pairs collide); this only affects collision rate, not correctness.
        return std::hash<HandleInt>()(attrKey.first) ^ std::hash<HandleInt>()(attrKey.second);
    }
};

// ==============================================================================================================
//! Object representing a handle to an AttributeData type (read-only access)
class ConstAttributeDataHandle : public HandleBase<AttrKey, ConstAttributeDataHandle>
{
public:
    using HandleBase<AttrKey, ConstAttributeDataHandle>::HandleBase;

    //! Path to the prim or bundle of the attribute, e.g. in "/world/cube.size", this returns "/world/cube"
    omni::fabric::PathC path() const noexcept
    {
        return handle.first;
    }

    //! Name of the attribute, e.g. in "/world/cube.size", this would be "size"
    omni::fabric::TokenC name() const noexcept
    {
        return handle.second;
    }

    //! Returns an invalid AttributeData handle value (uninitialized path/token pair)
    static constexpr AttrKey invalidValue()
    {
        return std::make_pair(HandleInt(omni::fabric::kUninitializedPath.path),
                              HandleInt(omni::fabric::kUninitializedToken.token));
    }
};

// --------------------------------------------------------------------------------------------------------------
//! Hash definition so that AttributeDataHandle can be used in a map
struct ConstAttributeDataHandleHash
{
    //! Returns a hash value unique for AttributeData handles
    std::size_t operator()(const ConstAttributeDataHandle& attrDataHandle) const
    {
        AttrKey attrKey(attrDataHandle);
        return AttrKeyHash()(attrKey);
    }
};

// --------------------------------------------------------------------------------------------------------------
//! Object representing a handle to a variable AttributeData type (read/write access)
class AttributeDataHandle : public HandleBase<AttrKey, AttributeDataHandle>
{
public:
    using HandleBase<AttrKey, AttributeDataHandle>::HandleBase;

    //! Returns an invalid AttributeData handle value (uninitialized path/token pair)
    static constexpr AttrKey invalidValue()
    {
        return std::make_pair(HandleInt(omni::fabric::kUninitializedPath.path),
                              HandleInt(omni::fabric::kUninitializedToken.token));
    }

    //! Path to the prim or bundle of the attribute, e.g. in "/world/cube.size", this returns "/world/cube"
    omni::fabric::PathC path() const noexcept
    {
        return handle.first;
    }

    //! Name of the attribute, e.g. in "/world/cube.size", this would be "size"
    omni::fabric::TokenC name() const noexcept
    {
        return handle.second;
    }

    //! Returns a constant AttributeDataHandle pointing to the same AttributeData as this variable one
    operator ConstAttributeDataHandle() const
    {
        return ConstAttributeDataHandle(AttrKey(*this));
    }
};

// ==============================================================================================================
//! Object representing a handle to a constant OmniGraph Bundle
class ConstBundleHandle : public HandleBase<HandleInt, ConstBundleHandle>
{
public:
    using HandleBase<HandleInt, ConstBundleHandle>::HandleBase;

    //! Returns an invalid Bundle handle value (the uninitialized Fabric path)
    static constexpr HandleInt invalidValue()
    {
        return omni::fabric::kUninitializedPath.path;
    }
};
// --------------------------------------------------------------------------------------------------------------
//! Hash definition so that BundleHandle can be used in a map
struct ConstBundleHandleHash
{
    //! Returns a hash value unique for Bundle handles
    std::size_t operator()(const ConstBundleHandle& handle) const
    {
        return std::hash<HandleInt>()(HandleInt(handle));
    }
};

// --------------------------------------------------------------------------------------------------------------
//! Object representing a handle to an OmniGraph Bundle (read/write access)
class BundleHandle : public HandleBase<HandleInt, BundleHandle>
{
public:
    using HandleBase<HandleInt, BundleHandle>::HandleBase;

    //! Returns an invalid Bundle handle value (the uninitialized Fabric path)
    static constexpr HandleInt invalidValue()
    {
        return omni::fabric::kUninitializedPath.path;
    }

    //! Returns a constant BundleHandle pointing to the same Bundle as this variable one
    operator ConstBundleHandle() const
    {
        return ConstBundleHandle(HandleInt(*this));
    }
};

//! Deprecated - for backward compatibility only
using ConstPrimHandle [[deprecated("Use ConstBundleHandle!")]] = ConstBundleHandle;
//! Deprecated - for backward compatibility only
using ConstPrimHandleHash [[deprecated("Use ConstBundleHandleHash!")]] = ConstBundleHandleHash;
//! Deprecated - for backward compatibility only
using PrimHandle [[deprecated("Use BundleHandle!")]] = BundleHandle;

// ==============================================================================================================
//! Object representing a handle to an OmniGraph NodeContext
class NodeContextHandle : public HandleBase<HandleInt, NodeContextHandle>
{
public:
    using HandleBase<HandleInt, NodeContextHandle>::HandleBase;

    //! Returns an invalid NodeContext handle value
    static constexpr HandleInt invalidValue()
    {
        return kInvalidHandleIntValue;
    }
};

// ======================================================================
// Support for attributes
using AttributeHandle = uint64_t; //!< Handle to an OmniGraph Attribute
using AttributeHash = uint64_t; //!< Hash value type for OmniGraph Attributes
static constexpr AttributeHandle kInvalidAttributeHandle = 0; //!< Constant representing an invalid attribute handle
struct IAttribute;

//! Object representing an OmniGraph Attribute: the ABI interface pointer plus the opaque handle it operates on
struct AttributeObj
{
    const IAttribute* iAttribute; //!< Interface to functionality on the attribute
    AttributeHandle attributeHandle; //!< Opaque handle to actual underlying attribute
    //! Returns true if this object refers to a valid attribute
    bool isValid() const
    {
        return (attributeHandle != kInvalidAttributeHandle);
    }
};

// ======================================================================
// Support for node types
using NodeTypeHandle = uint64_t; //!< Handle to an OmniGraph NodeType
static constexpr NodeTypeHandle kInvalidNodeTypeHandle = 0; //!< Constant representing an invalid node type handle
struct INodeType;

//! Object representing an OmniGraph NodeType
struct NodeTypeObj
{
    const INodeType* iNodeType; //!< Interface to functionality on the node type
    NodeTypeHandle nodeTypeHandle; //!< Opaque handle to actual underlying node type - managed by OmniGraph
    //! Returns true if this object refers to a valid node type
    bool isValid() const
    {
        return (nodeTypeHandle != kInvalidNodeTypeHandle);
    }
};

// ======================================================================
// Support for evaluation contexts
struct IGraphContext;
struct IBundle;
struct IAttributeData;
using GraphContextHandle = uint64_t; //!< Handle to an OmniGraph GraphContext
static constexpr GraphContextHandle kInvalidGraphContextHandle = 0; //!< Constant representing an invalid graph context handle

/**
 * @brief Object representing an OmniGraph GraphContext
 */
struct GraphContextObj
{
    const IGraphContext* iContext; //!< Interfaces to functionality on the context
    // Convenience location for commonly used interfaces
    const IBundle* iBundle; //!< Cached ABI interface pointer
    const IAttributeData* iAttributeData; //!< Cached ABI interface pointer
    const omni::fabric::IToken* iToken; //!< Cached ABI interface pointer
    const omni::fabric::IPath* iPath; //!< Cached ABI interface pointer
    GraphContextHandle contextHandle; //!< Opaque handle to actual underlying graph context
    //! Returns true if this object refers to a valid graph context
    bool isValid() const
    {
        return (contextHandle != kInvalidGraphContextHandle);
    }
};

// ======================================================================
// Support for nodes
using NodeHandle = uint64_t; //!< Handle to an OmniGraph Node
static constexpr NodeHandle kInvalidNodeHandle = 0; //!< Constant representing an invalid node handle
struct INode;

//! Object representing an OmniGraph Node
struct NodeObj
{
    //! Interface to functionality on the node
    const INode* iNode{ nullptr };
    //! Opaque handle to actual underlying node - managed by compute graph system
    NodeHandle nodeHandle{ kInvalidNodeHandle };
    //! handle used to retrieve data on the node - every node has a NodeContextHandle, but not the other way around
    NodeContextHandle nodeContextHandle{ NodeContextHandle::invalidValue() };
    //! Returns true if this object refers to a valid node
    bool isValid() const
    {
        return (nodeHandle != kInvalidNodeHandle);
    }
};

// ======================================================================
// Support for graphs
using GraphHandle = uint64_t; //!< Handle to an OmniGraph Graph
static constexpr GraphHandle kInvalidGraphHandle = 0; //!< Constant representing an invalid graph handle
struct IGraph;

//! Object representing an OmniGraph Graph
struct GraphObj
{
    IGraph* iGraph; //!< Interface to functionality on the graph
    GraphHandle graphHandle; //!< Opaque handle to actual underlying graph
    //! Returns true if this object refers to a valid graph
    bool isValid() const
    {
        return (graphHandle != kInvalidGraphHandle);
    }
};

// ======================================================================
// Support for schedule nodes
using ScheduleNodeHandle = uint64_t; //!< Handle to an OmniGraph ScheduleNode
static constexpr ScheduleNodeHandle kInvalidScheduleNodeHandle = 0; //!< Constant representing an invalid schedule node handle
struct IScheduleNode;

//! Object representing an OmniGraph ScheduleNode
struct ScheduleNodeObj
{
    const IScheduleNode* iScheduleNode; //!< Interface to functionality on the schedule node
    ScheduleNodeHandle scheduleNodeHandle; //!< Opaque handle to actual underlying schedule node
    //! Returns true if this object refers to a valid schedule node
    bool isValid() const
    {
        return (scheduleNodeHandle != kInvalidScheduleNodeHandle);
    }
};

// ======================================================================
/**
 * SFINAE detector for a member "void setContext(const GraphContextObj&)".
 * This allows setting a context in the wrapper functions to percolate down to the member implementations
 * when appropriate.
 *
 * Usage:
 *     OptionalMethod::setContext<ClassType>(classMember, context);
 */
template <class NodeTypeClass>
using has_setContext = typename std::is_same<void, decltype(std::declval<NodeTypeClass&>().setContext(
    std::declval<const GraphContextObj&>()))>::value_type;

/**
 * SFINAE detector for a member "void setHandle(HandleType)".
 * This allows setting a handle in the wrapper functions to percolate down to the member implementations
 * when appropriate.
 *
 * Usage:
 *     OptionalMethod::setHandle<ClassType, HandleType>(classMember, handle);
 */
template <class NodeTypeClass, typename HandleType>
using has_setHandle = typename std::is_same<void, decltype(std::declval<NodeTypeClass&>().setHandle(
    std::declval<HandleType>()))>::value_type;

//! Helper struct to make it easy to reference methods on a class that may or may not be defined.
//! Dispatches at compile time via is_detected + tag overloads: present -> call it, absent -> silent no-op.
struct OptionalMethod
{
private:
    //! Tag-dispatch overload chosen when the member setHandle exists: forwards the call
    template <typename ClassToSet, typename HandleType>
    static void call_setHandle(ClassToSet& member, HandleType handle, std::true_type)
    {
        member.setHandle(handle);
    }
    //! Tag-dispatch overload chosen when the member setHandle does not exist: does nothing
    template <typename ClassToSet, typename HandleType>
    static void call_setHandle(ClassToSet& member, HandleType handle, std::false_type)
    {
    }
    //! Tag-dispatch overload chosen when the member setContext exists: forwards the call
    template <typename ClassToSet>
    static void call_setContext(ClassToSet& member, const GraphContextObj& context, std::true_type)
    {
        member.setContext(context);
    }
    //! Tag-dispatch overload chosen when the member setContext does not exist: does nothing
    template <typename ClassToSet>
    static void call_setContext(ClassToSet& member, const GraphContextObj& context, std::false_type)
    {
    }

public:
    /**
     * @brief Set the Handle object, if an implementation exists
     *
     * @tparam ClassToSet Object class on which to set the handle
     * @tparam HandleType Handle class that is the object class member containing the handle
     * @param member Object on which the handle is to be set
     * @param handle Handle to be set on the object
     */
    template <typename ClassToSet, typename HandleType>
    static void setHandle(ClassToSet& member, HandleType handle)
    {
        call_setHandle(member, handle, is_detected<has_setHandle, ClassToSet, HandleType>());
    }

    /**
     * @brief Set the Context object, if an implementation exists
     *
     * @tparam ClassToSet Object class on which to set the context
     * @param member Object on which the context is to be set
     * @param context Context to be set on the object
     */
    template <typename ClassToSet>
    static void setContext(ClassToSet& member, const GraphContextObj& context)
    {
        call_setContext(member, context, is_detected<has_setContext, ClassToSet>());
    }
};

/* _____                                 _           _
  |  __ \                               | |         | |
  | |  | | ___ _ __  _ __ ___  ___ __ _| |_ ___  __| |
  | |  | |/ _ \ '_ \| '__/ _ \/ __/ _` | __/ _ \/ _` |
  | |__| |  __/ |_) | | |  __/ (_| (_| | ||  __/ (_| |
  |_____/ \___| .__/|_|  \___|\___\__,_|\__\___|\__,_|
              | |
              |_|
   May go away at any time - what you should use are in the comments
*/
//! Deprecated - use kInvalidTokenValue
constexpr static HandleInt INVALID_TOKEN_VALUE = ~HandleInt(0);

} // namespace core
} // namespace graph
} // namespace omni
19,883
C
40.167702
126
0.653724
omniverse-code/kit/include/omni/graph/core/Accessors.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/CudaUtils.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/tuple.h>
#include <omni/graph/core/Handle.h>

namespace omni
{
namespace graph
{
namespace core
{

/**
 * Retrieves the Nth upstream connected attribute, invalid object if it isn't there. This is a utility function
 * that lets callers avoid the hassle of going through the allocation/deallocation required for ABI access.
 *
 * @tparam Count Index of the upstream attribute
 * @param[in] attrObj The attribute object for which to retrieve the connection
 * @return Nth upstream attribute, or invalid if there is none
 */
template <int Count>
AttributeObj getNthUpstreamAttribute(const AttributeObj& attrObj)
{
    size_t connectionCount = attrObj.iAttribute->getUpstreamConnectionCount(attrObj);
    if (connectionCount > Count)
    {
        // Stack-allocate the temporary connection array required by the ABI call (freed automatically on return)
        AttributeObj* attrObjs = reinterpret_cast<AttributeObj*>(alloca(sizeof(AttributeObj) * connectionCount));
        attrObj.iAttribute->getUpstreamConnections(attrObj, attrObjs, connectionCount);
        AttributeObj toReturn = attrObjs[Count];
        return toReturn;
    }
    return AttributeObj{nullptr, kInvalidAttributeHandle};
}

/**
 * Retrieves the Nth upstream attribute if it exists, or the passed-in attribute if not.
 *
 * @tparam Count Index of the upstream attribute
 * @param[in] attrObj The attribute object for which to retrieve the connection
 * @return Nth upstream attribute, or the passed-in attribute if there is none
 */
template <int Count>
AttributeObj getNthUpstreamAttributeOrSelf(const AttributeObj& attrObj)
{
    AttributeObj nth = getNthUpstreamAttribute<Count>(attrObj);
    return nth.iAttribute ? nth : attrObj;
}

//! Maps a C++ scalar type (int, int64_t, float, double; cv-qualifiers stripped) to its BaseDataType enumerator.
//! Returns BaseDataType::eUnknown for any other type.
template <typename T>
constexpr BaseDataType baseDataTypeForType()
{
    using Type = typename std::remove_cv<T>::type;
    if (std::is_same<Type, int>::value)
    {
        return BaseDataType::eInt;
    }
    if (std::is_same<Type, int64_t>::value)
    {
        return BaseDataType::eInt64;
    }
    if (std::is_same<Type, float>::value)
    {
        return BaseDataType::eFloat;
    }
    if (std::is_same<Type, double>::value)
    {
        return BaseDataType::eDouble;
    }
    // NOTE(review): a non-null string literal is always truthy — if CUDA_SAFE_ASSERT tests its argument this
    // can never fire; presumably `CUDA_SAFE_ASSERT(0 && "Type not implemented")` was intended. Confirm the macro.
    CUDA_SAFE_ASSERT("Type not implemented");
    return BaseDataType::eUnknown;
}

//! Returns true when the base type is one of the four scalar numeric types NumericAccessor can convert between.
constexpr bool isNumericBaseType(BaseDataType type)
{
    switch (type)
    {
    case BaseDataType::eInt:
    case BaseDataType::eInt64:
    case BaseDataType::eFloat:
    case BaseDataType::eDouble:
        return true;
    default:
        return false;
    }
}

// Scalar numeric attribute accessor.
// Reads (and for non-const T, writes) scalar/array numeric attribute data, converting on the fly between the
// attribute's stored base type and T. A const T produces a read-only accessor. Non-owning view: `data` must
// outlive the accessor.
template <typename T>
class NumericAccessor
{
public:
    //! BaseDataType enumerator corresponding to T; a stored type equal to this needs no conversion
    static constexpr BaseDataType expectedType = baseDataTypeForType<T>();
    //! True when T is const, making this a read-only accessor
    static constexpr bool readOnly = std::is_const<T>::value;
    //! T with cv-qualifiers stripped; the value type used by get()/set()
    using RawType = typename std::remove_cv<T>::type;
    //! Attribute handle flavor matching the accessor's constness
    using HandleType = typename std::conditional<readOnly, ConstAttributeDataHandle, AttributeDataHandle>::type;

    //! Default constructor: an invalid accessor (isValid() == false)
    NumericAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), componentCount(0), elementCount(0)
    {
    }
    NumericAccessor(const NumericAccessor&) = default;
    NumericAccessor& operator=(const NumericAccessor&) = default;

#if 0
    // The enable_if makes this valid only if T is const, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly &&
                                                             (std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value ||
                                                              std::is_same<HANDLE_TYPE, AttributeDataHandle>::value),
                                                         int>::type>
    NumericAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle, IGNORED = 0) : NumericAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (isNumericBaseType(baseType))
        {
            size_t depth = type.arrayDepth;
            if (depth < 2)
            {
                attributeType = baseType;
                const void* pData;
                context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1);
                // depth 0 is inline scalar data; depth 1 stores a pointer to the array data
                data = (depth == 0) ? pData : (*(const void* const*)pData);
                componentCount = type.componentCount;
                elementCount = omni::graph::core::getElementCount(context, attributeHandle);
            }
        }
    }

    // The enable_if makes this valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    NumericAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle, IGNORED = 0) : NumericAccessor()
    {
        if (!attributeHandle.isValid())
            return;
        Type type = context.iAttributeData->getType(context, attributeHandle);
        BaseDataType baseType = type.baseType;
        if (isNumericBaseType(baseType))
        {
            size_t depth = type.arrayDepth;
            if (depth < 2)
            {
                attributeType = baseType;
                void* pData;
                context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1);
                // depth 0 is inline scalar data; depth 1 stores a pointer to the array data
                data = (depth == 0) ? pData : (*(void* const*)pData);
                componentCount = type.componentCount;
                elementCount = omni::graph::core::getElementCount(context, attributeHandle);
            }
        }
    }

    // Construct a NumericAccessor by prim and attribute name.
    //
    // The enable_if makes this valid only if T is const, since a non-const accessor can't
    // be initialized with a const attribute handle, but a const accessor can be initialized
    // from a const or non-const attribute handle.
    template <typename HANDLE_TYPE,
              typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value ||
                                                                      std::is_same<HANDLE_TYPE, BundleHandle>::value),
                                                         int>::type>
    NumericAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, Token attributeName, IGNORED = 0)
        : NumericAccessor()
    {
        if (!primHandle.isValid())
            return;
        ConstAttributeDataHandle attributeHandle;
        NameToken nameToken(attributeName);
        context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &nameToken, 1);
        *this = NumericAccessor(context, attributeHandle);
    }

    // Construct a NumericAccessor by non-const prim and attribute name.
    //
    // The enable_if makes this valid only if T is non-const, since a non-const accessor can
    // only be initialized with a non-const attribute handle.
    template <typename IGNORED = typename std::enable_if<!readOnly, int>::type>
    NumericAccessor(const GraphContextObj& context, BundleHandle primHandle, NameToken attributeName, IGNORED = 0)
        : NumericAccessor()
    {
        if (!primHandle.isValid())
            return;
        AttributeDataHandle attributeHandle;
        context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1);
        *this = NumericAccessor(context, attributeHandle);
    }
#endif

    //! Returns true if the accessor is bound to attribute data
    bool isValid() const
    {
        return (data != nullptr);
    }
    //! Number of components per element (e.g. 3 for a float3)
    size_t getComponentCount() const
    {
        return componentCount;
    }
    //! Number of elements (1 for a scalar attribute)
    size_t getElementCount() const
    {
        return elementCount;
    }

    // Returns a pointer to the data if it's a perfect type match, else nullptr.
    T* getPerfectMatch() const
    {
        return reinterpret_cast<T*>((attributeType == expectedType) ? data : nullptr);
    }

    //! Read component i (flat index over elements * components), converting from the stored base type to RawType
    CUDA_CALLABLE RawType get(size_t i = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        CUDA_SAFE_ASSERT(i < componentCount * elementCount);
        if (attributeType == expectedType)
        {
            return reinterpret_cast<const RawType*>(data)[i];
        }
        switch (attributeType)
        {
        case BaseDataType::eInt:
            return RawType(reinterpret_cast<const int*>(data)[i]);
        case BaseDataType::eInt64:
            return RawType(reinterpret_cast<const int64_t*>(data)[i]);
        case BaseDataType::eFloat:
            return RawType(reinterpret_cast<const float*>(data)[i]);
        case BaseDataType::eDouble:
            return RawType(reinterpret_cast<const double*>(data)[i]);
        }
        // Unreachable for any type that passed isNumericBaseType() in construction
        CUDA_SAFE_ASSERT(0);
        return RawType(0);
    }

    //! Write component i (flat index), converting from RawType to the stored base type.
    // The enable_if makes this valid only if T is non-const.
    template <bool IsEnabled = true, typename std::enable_if<(IsEnabled && !readOnly), int>::type = 0>
    CUDA_CALLABLE void set(RawType value, size_t i = 0) const
    {
        CUDA_SAFE_ASSERT(data != nullptr);
        CUDA_SAFE_ASSERT(i < componentCount * elementCount);
        if (attributeType == expectedType)
        {
            reinterpret_cast<RawType*>(data)[i] = value;
        }
        else
        {
            switch (attributeType)
            {
            case BaseDataType::eInt:
                reinterpret_cast<int*>(data)[i] = int(value);
                break;
            case BaseDataType::eInt64:
                reinterpret_cast<int64_t*>(data)[i] = int64_t(value);
                break;
            case BaseDataType::eFloat:
                reinterpret_cast<float*>(data)[i] = float(value);
                break;
            case BaseDataType::eDouble:
                reinterpret_cast<double*>(data)[i] = double(value);
                break;
            default:
                CUDA_SAFE_ASSERT(0);
                break;
            }
        }
    }

    // TODO: make getNumericAccessor functions be friends of this class and make data private again
    // private:
    BaseDataType attributeType; //!< Base type actually stored in the attribute data
    //! const void for read-only accessors, void otherwise
    using VoidType = typename std::conditional<readOnly, const void, void>::type;
    VoidType* data; //!< Non-owning pointer to the attribute's component data
    size_t componentCount; //!< Components per element
    size_t elementCount; //!< Number of elements
};

// CPU accessors
// When the dest is const, source can either be ConstBundleHandle or BundleHandle
template <typename T,
          typename HANDLE_TYPE,
          typename IGNORED = typename std::enable_if<std::is_const<T>::value &&
                                                         (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value ||
std::is_same<HANDLE_TYPE, BundleHandle>::value), int>::type> NumericAccessor<T> getNumericAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, Token attributeName, IGNORED = 0) { NumericAccessor<T> accessor; if (!primHandle.isValid()) return accessor; ConstAttributeDataHandle attributeHandle; NameToken nameToken(attributeName); context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &nameToken, 1); if (!attributeHandle.isValid()) return accessor; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { accessor.attributeType = baseType; const void* pData; context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1); accessor.data = (depth == 0) ? pData : (*(const void* const*)pData); accessor.componentCount = type.componentCount; accessor.elementCount = omni::graph::core::getElementCount(context, attributeHandle); } } return accessor; } // When the dest is non-const, source can only be BundleHandle template <typename T, typename IGNORED = typename std::enable_if<!std::is_const<T>::value, int>::type> NumericAccessor<T> getNumericAccessor(const GraphContextObj& context, BundleHandle primHandle, Token attributeName, IGNORED = 0) { NumericAccessor<T> accessor; if (!primHandle.isValid()) return accessor; AttributeDataHandle attributeHandle; NameToken nameToken(attributeName); context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &nameToken, 1); if (!attributeHandle.isValid()) return accessor; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { accessor.attributeType = baseType; void* pData; context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1); accessor.data = (depth == 0) ? 
pData : (*(void* const*)pData); accessor.componentCount = type.componentCount; accessor.elementCount = omni::graph::core::getElementCount(context, attributeHandle); } } return accessor; } // GPU accessors // When the dest is const, source can either be ConstBundleHandle or BundleHandle template <typename T, typename HANDLE_TYPE, typename IGNORED = typename std::enable_if<std::is_const<T>::value && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value || std::is_same<HANDLE_TYPE, BundleHandle>::value), int>::type> NumericAccessor<T> getNumericAccessorGPU(const GraphContextObj& context, HANDLE_TYPE primHandle, Token attributeName) { NumericAccessor<T> accessor; if (!primHandle.isValid()) return accessor; ConstAttributeDataHandle attributeHandle; NameToken nameToken(attributeName); context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &nameToken, 1); if (!attributeHandle.isValid()) return accessor; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { accessor.attributeType = baseType; const void* pData; context.iAttributeData->getDataRGPU(&pData, context, &attributeHandle, 1); accessor.data = (depth == 0) ? 
pData : (*(const void* const*)pData); accessor.componentCount = type.componentCount; accessor.elementCount = omni::graph::core::getElementCount(context, attributeHandle); } } return accessor; } // When the dest is non-const, source can only be BundleHandle template <typename T, typename IGNORED = typename std::enable_if<!std::is_const<T>::value, int>::type> NumericAccessor<T> getNumericAccessorGPU(const GraphContextObj& context, BundleHandle primHandle, Token attributeName, IGNORED = 0) { NumericAccessor<T> accessor; if (!primHandle.isValid()) return accessor; AttributeDataHandle attributeHandle; NameToken nameToken(attributeName); context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &nameToken, 1); if (!attributeHandle.isValid()) return accessor; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { accessor.attributeType = baseType; void* pData; context.iAttributeData->getDataWGPU(&pData, context, &attributeHandle, 1); accessor.data = (depth == 0) ? 
pData : (*(void* const*)pData); accessor.componentCount = type.componentCount; accessor.elementCount = omni::graph::core::getElementCount(context, attributeHandle); } } return accessor; } template <typename T, size_t N> class VectorAccessor { public: static constexpr BaseDataType expectedType = baseDataTypeForType<T>(); static constexpr size_t componentCount = N; static constexpr bool readOnly = std::is_const<T>::value; using RawBaseType = typename std::remove_cv<T>::type; using RawVectorType = tuple<RawBaseType, N>; using VectorType = typename std::conditional<readOnly, const tuple<RawBaseType, N>, tuple<RawBaseType, N>>::type; using VoidType = typename std::conditional<readOnly, const void, void>::type; using HandleType = typename std::conditional<readOnly, ConstAttributeDataHandle, AttributeDataHandle>::type; VectorAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), elementCount(0) { } VectorAccessor(const VectorAccessor&) = default; VectorAccessor& operator=(const VectorAccessor&) = default; // The enable_if makes this valid only if T is const, since a non-const accessor can't // be initialized with a const attribute handle, but a const accessor can be initialized // from a const or non-const attribute handle. 
template <typename HANDLE_TYPE, typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value || std::is_same<HANDLE_TYPE, AttributeDataHandle>::value), int>::type> VectorAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle, IGNORED = 0) : VectorAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); size_t componentCount = type.componentCount; if (componentCount == N) { BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { attributeType = baseType; const void* pData; context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1); data = (depth == 0) ? pData : (*(const void* const*)pData); elementCount = omni::graph::core::getElementCount(context, attributeHandle); } } } } // The enable_if makes this valid only if T is non-const, since a non-const accessor can // only be initialized with a non-const attribute handle. template <typename IGNORED = typename std::enable_if<!readOnly, int>::type> VectorAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle, IGNORED = 0) : VectorAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); size_t componentCount = type.componentCount; if (componentCount == N) { BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { attributeType = baseType; void* pData; context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1); data = (depth == 0) ? pData : (*(void* const*)pData); elementCount = omni::graph::core::getElementCount(context, attributeHandle); } } } } // Construct a VectorAccessor by prim and attribute name. 
// // The enable_if makes this valid only if T is const, since a non-const accessor can't // be initialized with a const attribute handle, but a const accessor can be initialized // from a const or non-const attribute handle. template <typename HANDLE_TYPE, typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value || std::is_same<HANDLE_TYPE, BundleHandle>::value), int>::type> VectorAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, NameToken attributeName, IGNORED = 0) : VectorAccessor() { if (!primHandle.isValid()) return; ConstAttributeDataHandle attributeHandle; context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeName, 1); *this = VectorAccessor(context, attributeHandle); } // Construct a VectorAccessor by non-const prim and attribute name. // // The enable_if makes this valid only if T is non-const, since a non-const accessor can // only be initialized with a non-const attribute handle. template <typename IGNORED = typename std::enable_if<!readOnly, int>::type> VectorAccessor(const GraphContextObj& context, BundleHandle primHandle, NameToken attributeName, IGNORED = 0) : VectorAccessor() { if (!primHandle.isValid()) return; AttributeDataHandle attributeHandle; context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1); *this = VectorAccessor(context, attributeHandle); } bool isValid() const { return (data != nullptr); } size_t getElementCount() const { return elementCount; } // Returns a pointer to the data if it's a perfect type match, else nullptr. VectorType* getPerfectMatch() const { return reinterpret_cast<VectorType*>((attributeType == expectedType) ? 
data : nullptr); } RawVectorType get(size_t i = 0) const { CUDA_SAFE_ASSERT(data != nullptr); CUDA_SAFE_ASSERT(i < elementCount); if (attributeType == expectedType) { return reinterpret_cast<const RawVectorType*>(data)[i]; } switch (attributeType) { case BaseDataType::eInt: return RawVectorType(reinterpret_cast<const tuple<int, N>*>(data)[i]); case BaseDataType::eInt64: return RawVectorType(reinterpret_cast<const tuple<int64_t, N>*>(data)[i]); case BaseDataType::eFloat: return RawVectorType(reinterpret_cast<const tuple<float, N>*>(data)[i]); case BaseDataType::eDouble: return RawVectorType(reinterpret_cast<const tuple<double, N>*>(data)[i]); } CUDA_SAFE_ASSERT(0); return RawVectorType(T(0)); } // The enable_if makes this valid only if T is non-const. template <typename IGNORED = typename std::enable_if<!readOnly, int>::type> void set(const RawVectorType& value, size_t i = 0, IGNORED = 0) const { CUDA_SAFE_ASSERT(data != nullptr); CUDA_SAFE_ASSERT(i < componentCount * elementCount); if (attributeType == expectedType) { reinterpret_cast<RawVectorType*>(data)[i] = value; } else { switch (attributeType) { case BaseDataType::eInt: reinterpret_cast<tuple<int, N>*>(data)[i] = tuple<int, N>(value); break; case BaseDataType::eInt64: reinterpret_cast<tuple<int64_t, N>*>(data)[i] = tuple<int64_t, N>(value); break; case BaseDataType::eFloat: reinterpret_cast<tuple<float, N>*>(data)[i] = tuple<float, N>(value); break; case BaseDataType::eDouble: reinterpret_cast<tuple<double, N>*>(data)[i] = tuple<double, N>(value); break; default: CUDA_SAFE_ASSERT(0); break; } } } private: BaseDataType attributeType; VoidType* data; size_t elementCount; }; // Bulk scalar numeric array attribute accessor (non-const implementation). 
template <typename T> class BulkNumericAccessor { public: static constexpr BaseDataType expectedType = baseDataTypeForType<T>(); static constexpr bool readOnly = false; using RawType = typename std::remove_cv<T>::type; using HandleType = AttributeDataHandle; BulkNumericAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), componentCount(0), elementCount(0) { } BulkNumericAccessor(BulkNumericAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; componentCount = that.componentCount; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.componentCount = 0; that.elementCount = 0; } BulkNumericAccessor& operator=(BulkNumericAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; componentCount = that.componentCount; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.componentCount = 0; that.elementCount = 0; return *this; } BulkNumericAccessor(const BulkNumericAccessor& that) = delete; BulkNumericAccessor& operator=(const BulkNumericAccessor& that) = delete; // This is valid only if T is non-const, since a non-const accessor can // only be initialized with a non-const attribute handle. BulkNumericAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle) : BulkNumericAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { attributeType = baseType; void* pData; context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1); data = (depth == 0) ? 
pData : (*(void* const*)pData); componentCount = type.componentCount; elementCount = omni::graph::core::getElementCount(context, attributeHandle); if (attributeType != expectedType) { size_t fullCount = componentCount * elementCount; RawType* converted = new RawType[fullCount]; convertToMatching(converted, fullCount); matchingData = converted; } else { matchingData = reinterpret_cast<T*>(data); } } } } // Construct a BulkNumericAccessor by non-const prim and attribute name. // // This is valid only if T is non-const, since a non-const accessor can // only be initialized with a non-const attribute handle. BulkNumericAccessor(const GraphContextObj& context, BundleHandle primHandle, NameToken attributeName) : BulkNumericAccessor() { if (!primHandle.isValid()) return; AttributeDataHandle attributeHandle; context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1); *this = BulkNumericAccessor(context, attributeHandle); } ~BulkNumericAccessor() { if (attributeType != expectedType && matchingData != nullptr) { flushInternal(matchingData); delete[] matchingData; } } bool isValid() const { return (data != nullptr); } size_t getComponentCount() const { return componentCount; } size_t getElementCount() const { return elementCount; } // Returns a pointer to the data if it's convertible, else nullptr. T* getData() const { return matchingData; } // Returns a pointer to the data if it's convertible, else nullptr. // Convertible types are always treated as a perfect match for bulk conversion. // This function is just provided for compatibility with the NumericAccessor // and VectorAccessor classes above. 
T* getPerfectMatch() const { return matchingData; } void flush() { flushInternal(matchingData); } private: void flushInternal(RawType* matchData) { CUDA_SAFE_ASSERT(data != nullptr); if (attributeType == expectedType) { return; } const size_t fullCount = componentCount * elementCount; switch (attributeType) { case BaseDataType::eInt: { int* source = reinterpret_cast<int*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = int(matchData[i]); } break; } case BaseDataType::eInt64: { int64_t* source = reinterpret_cast<int64_t*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = int64_t(matchData[i]); } break; } case BaseDataType::eFloat: { float* source = reinterpret_cast<float*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = float(matchData[i]); } break; } case BaseDataType::eDouble: { double* source = reinterpret_cast<double*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = double(matchData[i]); } break; } default: CUDA_SAFE_ASSERT(0); break; } } void convertToMatching(RawType* converted, const size_t fullCount) { switch (attributeType) { case BaseDataType::eInt: { const int* source = reinterpret_cast<const int*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } case BaseDataType::eInt64: { const int64_t* source = reinterpret_cast<const int64_t*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } case BaseDataType::eFloat: { const float* source = reinterpret_cast<const float*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } case BaseDataType::eDouble: { const double* source = reinterpret_cast<const double*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } default: CUDA_SAFE_ASSERT(0); break; } } BaseDataType attributeType; using VoidType = void; VoidType* data; T* matchingData; size_t componentCount; size_t elementCount; }; // Bulk scalar numeric 
array attribute accessor (const implementation). template <typename T> class BulkNumericAccessor<const T> { public: static constexpr BaseDataType expectedType = baseDataTypeForType<T>(); static constexpr bool readOnly = true; using RawType = typename std::remove_cv<T>::type; using HandleType = ConstAttributeDataHandle; BulkNumericAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), componentCount(0), elementCount(0) { } BulkNumericAccessor(BulkNumericAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; componentCount = that.componentCount; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.componentCount = 0; that.elementCount = 0; } BulkNumericAccessor& operator=(BulkNumericAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; componentCount = that.componentCount; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.componentCount = 0; that.elementCount = 0; return *this; } BulkNumericAccessor(const BulkNumericAccessor& that) = delete; BulkNumericAccessor& operator=(const BulkNumericAccessor& that) = delete; // HANDLE_TYPE can be ConstAttributeDataHandle or AttributeDataHandle. template <typename HANDLE_TYPE> BulkNumericAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle) : BulkNumericAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { attributeType = baseType; const void* pData; context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1); data = (depth == 0) ? 
pData : (*(const void* const*)pData); componentCount = type.componentCount; elementCount = omni::graph::core::getElementCount(context, attributeHandle); if (attributeType != expectedType) { size_t fullCount = componentCount * elementCount; RawType* converted = new RawType[fullCount]; convertToMatching(converted, fullCount); matchingData = converted; } else { matchingData = reinterpret_cast<const T*>(data); } } } } // Construct a BulkNumericAccessor by prim and attribute name. // // HANDLE_TYPE can be ConstAttributeDataHandle or AttributeDataHandle. template <typename HANDLE_TYPE> BulkNumericAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, NameToken attributeName) : BulkNumericAccessor() { if (!primHandle.isValid()) return; ConstAttributeDataHandle attributeHandle; context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeName, 1); *this = BulkNumericAccessor(context, attributeHandle); } ~BulkNumericAccessor() { } bool isValid() const { return (data != nullptr); } size_t getComponentCount() const { return componentCount; } size_t getElementCount() const { return elementCount; } // Returns a pointer to the data if it's convertible, else nullptr. const T* getData() const { return matchingData; } // Returns a pointer to the data if it's convertible, else nullptr. // Convertible types are always treated as a perfect match for bulk conversion. // This function is just provided for compatibility with the NumericAccessor // and VectorAccessor classes above. 
const T* getPerfectMatch() const { return matchingData; } private: void convertToMatching(RawType* converted, const size_t fullCount) { switch (attributeType) { case BaseDataType::eInt: { const int* source = reinterpret_cast<const int*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } case BaseDataType::eInt64: { const int64_t* source = reinterpret_cast<const int64_t*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } case BaseDataType::eFloat: { const float* source = reinterpret_cast<const float*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } case BaseDataType::eDouble: { const double* source = reinterpret_cast<const double*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawType(source[i]); } break; } default: CUDA_SAFE_ASSERT(0); break; } } BaseDataType attributeType; using VoidType = const void; VoidType* data; const T* matchingData; size_t componentCount; size_t elementCount; }; // Bulk vector numeric array attribute accessor (non-const implementation). 
template <typename T, size_t N> class BulkVectorAccessor { public: static constexpr BaseDataType expectedType = baseDataTypeForType<T>(); static constexpr size_t componentCount = N; static constexpr bool readOnly = false; using RawBaseType = typename std::remove_cv<T>::type; using RawVectorType = tuple<RawBaseType, N>; using VectorType = tuple<RawBaseType, N>; using HandleType = AttributeDataHandle; BulkVectorAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), elementCount(0) { } BulkVectorAccessor(BulkVectorAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.elementCount = 0; } BulkVectorAccessor& operator=(BulkVectorAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.elementCount = 0; return *this; } BulkVectorAccessor(const BulkVectorAccessor& that) = delete; BulkVectorAccessor& operator=(const BulkVectorAccessor& that) = delete; // A non-const accessor can only be initialized with a non-const attribute handle. BulkVectorAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle) : BulkVectorAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); size_t componentCount = type.componentCount; if (componentCount == N) { BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { void* pData; context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1); data = (depth == 0) ? 
pData : (*(void* const*)pData); attributeType = baseType; elementCount = omni::graph::core::getElementCount(context, attributeHandle); if (attributeType != expectedType) { size_t fullCount = componentCount * elementCount; RawVectorType* converted = new RawVectorType[fullCount]; convertToMatching(converted, fullCount); matchingData = converted; } else { matchingData = reinterpret_cast<VectorType*>(data); } } } } } // A non-const accessor can only be initialized with a non-const attribute handle. BulkVectorAccessor(const GraphContextObj& context, BundleHandle bundleHandle, Token attributeName) : BulkVectorAccessor() { if (!bundleHandle.isValid()) return; AttributeDataHandle attributeHandle; NameToken attributeNameToken(attributeName); context.iBundle->getAttributesByNameW(&attributeHandle, context, bundleHandle, &attributeNameToken, 1); *this = BulkVectorAccessor(context, attributeHandle); } ~BulkVectorAccessor() { if (attributeType != expectedType && matchingData != nullptr) { flushInternal(matchingData); delete[] matchingData; } } bool isValid() const { return (data != nullptr); } size_t getElementCount() const { return elementCount; } // Returns a pointer to the data if it's convertible, else nullptr. VectorType* getData() const { return matchingData; } // Returns a pointer to the data if it's convertible, else nullptr. // Convertible types are always treated as a perfect match for bulk conversion. // This function is just provided for compatibility with the NumericAccessor // and VectorAccessor classes above. 
VectorType* getPerfectMatch() const { return matchingData; } void flush() { flushInternal(matchingData); } private: void flushInternal(RawVectorType* matchData) { CUDA_SAFE_ASSERT(data != nullptr); if (attributeType == expectedType) { return; } const size_t fullCount = componentCount * elementCount; switch (attributeType) { case BaseDataType::eInt: { tuple<int, N>* source = reinterpret_cast<tuple<int, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = tuple<int, N>(matchData[i]); } break; } case BaseDataType::eInt64: { tuple<int64_t, N>* source = reinterpret_cast<tuple<int64_t, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = tuple<int64_t, N>(matchData[i]); } break; } case BaseDataType::eFloat: { tuple<float, N>* source = reinterpret_cast<tuple<float, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = tuple<float, N>(matchData[i]); } break; } case BaseDataType::eDouble: { tuple<double, N>* source = reinterpret_cast<tuple<double, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { source[i] = tuple<double, N>(matchData[i]); } break; } default: CUDA_SAFE_ASSERT(0); break; } } void flushInternal(const RawVectorType* matchData) { // Does nothing. This signature is just for compiling purposes. 
} void convertToMatching(RawVectorType* converted, const size_t fullCount) { switch (attributeType) { case BaseDataType::eInt: { const tuple<int, N>* source = reinterpret_cast<const tuple<int, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } case BaseDataType::eInt64: { const tuple<int64_t, N>* source = reinterpret_cast<const tuple<int64_t, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } case BaseDataType::eFloat: { const tuple<float, N>* source = reinterpret_cast<const tuple<float, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } case BaseDataType::eDouble: { const tuple<double, N>* source = reinterpret_cast<const tuple<double, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } default: CUDA_SAFE_ASSERT(0); break; } } BaseDataType attributeType; using VoidType = void; VoidType* data; VectorType* matchingData; size_t elementCount; }; // Bulk vector numeric array attribute accessor (const implementation). 
template <typename T, size_t N> class BulkVectorAccessor<const T, N> { public: static constexpr BaseDataType expectedType = baseDataTypeForType<T>(); static constexpr size_t componentCount = N; static constexpr bool readOnly = true; using RawBaseType = typename std::remove_cv<T>::type; using RawVectorType = tuple<RawBaseType, N>; using VectorType = const tuple<RawBaseType, N>; using HandleType = ConstAttributeDataHandle; BulkVectorAccessor() : attributeType(BaseDataType::eUnknown), data(nullptr), matchingData(nullptr), elementCount(0) { } BulkVectorAccessor(BulkVectorAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.elementCount = 0; } BulkVectorAccessor& operator=(BulkVectorAccessor&& that) { attributeType = that.attributeType; data = that.data; matchingData = that.matchingData; elementCount = that.elementCount; that.attributeType = BaseDataType::eUnknown; that.data = nullptr; that.matchingData = nullptr; that.elementCount = 0; return *this; } BulkVectorAccessor(const BulkVectorAccessor& that) = delete; BulkVectorAccessor& operator=(const BulkVectorAccessor& that) = delete; // A const accessor can be initialized from a const or non-const attribute handle. template <typename HANDLE_TYPE> BulkVectorAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle) : BulkVectorAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); size_t componentCount = type.componentCount; if (componentCount == N) { BaseDataType baseType = type.baseType; if (isNumericBaseType(baseType)) { size_t depth = type.arrayDepth; if (depth < 2) { attributeType = baseType; const void* pData; context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1); data = (depth == 0) ? 
pData : (*(const void* const*)pData); elementCount = omni::graph::core::getElementCount(context, attributeHandle); if (attributeType != expectedType) { size_t fullCount = componentCount * elementCount; RawVectorType* converted = new RawVectorType[fullCount]; convertToMatching(converted, fullCount); matchingData = converted; } else { matchingData = reinterpret_cast<VectorType*>(data); } } } } } // A const accessor can be initialized from a const or non-const attribute handle. template <typename HANDLE_TYPE> BulkVectorAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, Token attributeName) : BulkVectorAccessor() { if (!primHandle.isValid()) return; ConstAttributeDataHandle attributeHandle; NameToken attributeNameToken(attributeName); context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeNameToken, 1); *this = BulkVectorAccessor(context, attributeHandle); } ~BulkVectorAccessor() { if (attributeType != expectedType && matchingData != nullptr) { delete[] matchingData; } } bool isValid() const { return (data != nullptr); } size_t getElementCount() const { return elementCount; } // Returns a pointer to the data if it's convertible, else nullptr. VectorType* getData() const { return matchingData; } // Returns a pointer to the data if it's convertible, else nullptr. // Convertible types are always treated as a perfect match for bulk conversion. // This function is just provided for compatibility with the NumericAccessor // and VectorAccessor classes above. 
VectorType* getPerfectMatch() const { return matchingData; } private: void convertToMatching(RawVectorType* converted, const size_t fullCount) { switch (attributeType) { case BaseDataType::eInt: { const tuple<int, N>* source = reinterpret_cast<const tuple<int, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } case BaseDataType::eInt64: { const tuple<int64_t, N>* source = reinterpret_cast<const tuple<int64_t, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } case BaseDataType::eFloat: { const tuple<float, N>* source = reinterpret_cast<const tuple<float, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } case BaseDataType::eDouble: { const tuple<double, N>* source = reinterpret_cast<const tuple<double, N>*>(data); for (size_t i = 0; i < fullCount; ++i) { converted[i] = RawVectorType(source[i]); } break; } default: CUDA_SAFE_ASSERT(0); break; } } BaseDataType attributeType; using VoidType = const void; VoidType* data; VectorType* matchingData; size_t elementCount; }; // Relationship attribute accessor. // NOTE: This isn't yet supported. 
#if 0 template <bool isReadOnly> class RelationshipAccessor { public: static constexpr BaseDataType expectedType = BaseDataType::eRelationship; static constexpr bool readOnly = isReadOnly; using BundleHandleType = typename std::conditional<readOnly, ConstBundleHandle, BundleHandle>::type; using PrimHandleType [[deprecated("Use BundleHandleType!")]] = BundleHandleType; using HandleType = typename std::conditional<readOnly, ConstAttributeDataHandle, AttributeDataHandle>::type; RelationshipAccessor() : data(nullptr), elementCount(0), m_context(nullptr), m_attributeHandle(AttributeDataHandle::invalidValue()), m_iAttributeData(nullptr) { } RelationshipAccessor(const RelationshipAccessor&) = default; RelationshipAccessor& operator=(const RelationshipAccessor&) = default; RelationshipAccessor(RelationshipAccessor&& that) { data = that.data; elementCount = that.componentCount; m_context = that.m_context; m_attributeHandle = that.m_attributeHandle; m_iAttributeData = that.m_iAttributeData; that.data = nullptr; that.elementCount = 0; that.m_context = nullptr; that.m_attributeHandle = AttributeDataHandle(AttributeDataHandle::invalidValue()); that.m_iAttributeData = nullptr; } RelationshipAccessor& operator=(RelationshipAccessor&& that) { data = that.data; elementCount = that.componentCount; m_context = that.m_context; m_attributeHandle = that.m_attributeHandle; m_iAttributeData = that.m_iAttributeData; that.data = nullptr; that.elementCount = 0; that.m_context = nullptr; that.m_attributeHandle = AttributeDataHandle(AttributeDataHandle::invalidValue()); that.m_iAttributeData = nullptr; } // The enable_if makes this valid only if readOnly is true, since a non-const accessor can't // be initialized with a const attribute handle, but a const accessor can be initialized // from a const or non-const attribute handle. 
template <typename HANDLE_TYPE, typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value || std::is_same<HANDLE_TYPE, AttributeDataHandle>::value), int>::type> RelationshipAccessor(const GraphContextObj& context, HANDLE_TYPE attributeHandle, IGNORED = 0) : RelationshipAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (baseType == BaseDataType::eRelationship) { size_t componentCount = type.componentCount; if (componentCount == 1) { size_t depth = type.arrayDepth; if (depth < 2) { const void* pData; context.iAttributeData->getDataR(&pData, context, &attributeHandle, 1); data = (depth == 0) ? pData : (*(const void* const*)pData); elementCount = omni::graph::core::getElementCount(context, attributeHandle); } } } } // The enable_if makes this valid only if readOnly is false, since a non-const accessor can // only be initialized with a non-const attribute handle. template <typename IGNORED = typename std::enable_if<!readOnly, int>::type> RelationshipAccessor(const GraphContextObj& context, AttributeDataHandle attributeHandle, IGNORED = 0) : RelationshipAccessor() { if (!attributeHandle.isValid()) return; Type type = context.iAttributeData->getType(context, attributeHandle); BaseDataType baseType = type.baseType; if (baseType == BaseDataType::eRelationship) { size_t componentCount = type.componentCount; if (componentCount == 1) { size_t depth = type.arrayDepth; if (depth < 2) { void* pData; context.iAttributeData->getDataW(&pData, context, &attributeHandle, 1); data = (depth == 0) ? pData : (*(void* const*)pData); elementCount = omni::graph::core::getElementCount(context, attributeHandle); m_context = context.context; m_attributeHandle = attributeHandle; m_iAttributeData = context.iAttributeData; } } } } // Construct a RelationshipAccessor by prim and attribute name. 
// // The enable_if makes this valid only if T is const, since a non-const accessor can't // be initialized with a const attribute handle, but a const accessor can be initialized // from a const or non-const attribute handle. template <typename HANDLE_TYPE, typename IGNORED = typename std::enable_if<readOnly && (std::is_same<HANDLE_TYPE, ConstBundleHandle>::value || std::is_same<HANDLE_TYPE, BundleHandle>::value), int>::type> RelationshipAccessor(const GraphContextObj& context, HANDLE_TYPE primHandle, NameToken attributeName, IGNORED = 0) : RelationshipAccessor() { if (!primHandle.isValid()) return; ConstAttributeDataHandle attributeHandle; context.iBundle->getAttributesByNameR(&attributeHandle, context, primHandle, &attributeName, 1); *this = RelationshipAccessor(context, attributeHandle); } // Construct a RelationshipAccessor by non-const prim and attribute name. // // The enable_if makes this valid only if T is non-const, since a non-const accessor can // only be initialized with a non-const attribute handle. template <typename IGNORED = typename std::enable_if<!readOnly, int>::type> RelationshipAccessor(const GraphContextObj& context, BundleHandle primHandle, NameToken attributeName, IGNORED = 0) : RelationshipAccessor() { if (!primHandle.isValid()) return; AttributeDataHandle attributeHandle; context.iBundle->getAttributesByNameW(&attributeHandle, context, primHandle, &attributeName, 1); *this = RelationshipAccessor(context, attributeHandle); } ~RelationshipAccessor() { if (m_context != nullptr && m_iAttributeData != nullptr && m_attributeHandle.isValid()) { CUDA_SAFE_ASSERT(!readOnly, "Only non-const RelationshipAccessor's should update the reference counts!"); m_iAttributeData->updateRelationshipRefCounts(*m_context, m_attributeHandle); } } bool isValid() const { return (data != nullptr); } size_t getElementCount() const { return elementCount; } // Returns a pointer to the data if it's a perfect type match, else nullptr. 
BundleHandleType* getData() const { return data; } BundleHandleType get(size_t i = 0) const { CUDA_SAFE_ASSERT(data != nullptr); CUDA_SAFE_ASSERT(i < elementCount); return data[i]; } // The enable_if makes this valid only if T is non-const. template <typename IGNORED = typename std::enable_if<!readOnly, int>::type> void set(BundleHandleType value, size_t i = 0, IGNORED = 0) const { CUDA_SAFE_ASSERT(data != nullptr); CUDA_SAFE_ASSERT(i < elementCount); // FIXME: Does reference counting of the prims need to be updated now, or will it be updated later? data[i] = value; } private: BundleHandleType* data; size_t elementCount; GraphContext* m_context; AttributeDataHandle m_attributeHandle; IAttributeData* m_iAttributeData; }; #endif } } }
60,494
C
33.948007
131
0.596274
omniverse-code/kit/include/omni/graph/core/tuple.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/CudaUtils.h>

#include <carb/Defines.h>

#include <cmath>
#include <stdint.h>
#include <type_traits>

// Helper to simplify the syntax when selecting different features for PoD types.
// check() returns true iff T satisfies std::is_trivial; the two SFINAE
// overloads are mutually exclusive so exactly one is ever viable.
// NOTE(review): this helper lives in the global namespace and shares its name
// with std::is_trivial -- confirm no consumer combines it with
// `using namespace std;`.
template <class T>
struct is_trivial
{
    template <class Q = T>
    typename std::enable_if<std::is_trivial<Q>::value, bool>::type check()
    {
        return true;
    }
    template <class Q = T>
    typename std::enable_if<!std::is_trivial<Q>::value, bool>::type check()
    {
        return false;
    }
};

// This is a templated, fixed-component-count (N), vector class, intended to
// have data layout identical to a raw array of type T, so that data interchange
// wrappers can cast a T* to a tuple<T,N>* as appropriate, as long as they
// have ensured that there are at least N elements pointed-to.
//
// It provides a constructor for conversion between different T, but
// intentionally does not allow for conversion between different N.
// Conversion between different T is also explicit, to avoid accidental
// conversions. Some functionality similar to std::array<T,N> is also
// provided, and the data layout should be equivalent.
//
// For example, tuple<float,3> would be a vector of 3 floats.
//
// It also provides some convenience functions that allow for simple operations on the values
// as a single unit such as unary and binary operators.
//
namespace omni
{
namespace graph
{
namespace core
{

template <typename T, size_t N>
class tuple
{
public:
    // Some standard types, similar to std::array and std::vector.
    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using reference = value_type&;
    using const_reference = const value_type&;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using iterator = pointer;
    using const_iterator = const_pointer;

    // static constexpr variable to be able to refer to the tuple size from the type,
    // from outside this class definition.
    static constexpr size_t tuple_size = N;

    // Defaulted default constructor is needed for POD type,
    // but it cannot be constexpr, since it leaves v uninitialized.
    tuple() noexcept = default;
    constexpr tuple(const tuple<T, N>& that) noexcept = default;
    constexpr tuple& operator=(const tuple<T, N>& that) noexcept = default;
    // constexpr tuple(const tuple<T, N>&& that) noexcept = default;
    // constexpr tuple& operator=(const tuple<T, N>&& that) noexcept = default;

    // Construct a tuple with all components equal to value.
    explicit tuple(const T& value) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] = value;
        }
    }

    // Type conversion constructor is explicit, to avoid unintentional conversions.
    // Conversions are done on individual components.
    template <typename OTHER_T>
    explicit tuple(const tuple<OTHER_T, N>& that) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] = T(that[i]);
        }
    }

    // Component-wise variadic constructor, e.g. tuple<float,3>(1, 2, 3).
    // initHelper's static_assert enforces that exactly N arguments are given.
    template <typename OTHER_T, typename... Args>
    constexpr tuple(OTHER_T a, Args... args) noexcept
    {
        initHelper<0>(a, args...);
    }

    // This is a compile-time constant value
    static constexpr size_type size() noexcept
    {
        return N;
    }

    // Access a single component of this tuple.
    constexpr T& operator[](size_t i) noexcept
    {
        // Ensure that this type is a POD type if T is a POD type.
        // This check is unrelated to this operator, but the static_assert
        // must be inside some function that is likely to be called.
        static_assert(std::is_trivial<tuple<T, N>>::value == std::is_trivial<T>::value,
                      "tuple<T,N> should be a POD type iff T is a POD type.");
        CUDA_SAFE_ASSERT(i < N);
        return v[i];
    }
    constexpr const T& operator[](size_t i) const noexcept
    {
        CUDA_SAFE_ASSERT(i < N);
        return v[i];
    }

    // Get a pointer to the data of this tuple.
    constexpr T* data() noexcept
    {
        return v;
    }
    constexpr const T* data() const noexcept
    {
        return v;
    }

    // Iterator functions for compatibility with templated code that expects an iterable container.
    constexpr iterator begin() noexcept
    {
        return v;
    }
    constexpr const_iterator begin() const noexcept
    {
        return v;
    }
    constexpr const_iterator cbegin() const noexcept
    {
        return v;
    }
    constexpr iterator end() noexcept
    {
        return v + N;
    }
    constexpr const_iterator end() const noexcept
    {
        return v + N;
    }
    constexpr const_iterator cend() const noexcept
    {
        return v + N;
    }

    // Since this is a fixed-component-count vector class,
    // it should never be empty, so this should always return false.
    // The compiler probably won't allow N to be 0, but if it ever
    // does, it would return true in that unlikely case.
    static constexpr bool empty() noexcept
    {
        return N == 0;
    }

    // Add two of the vectors together
    constexpr tuple& operator+=(const tuple<T, N>& that) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] += that[i];
        }
        return *this;
    }
    // passing lhs by value helps optimize chained a+b+c
    friend tuple<T, N> operator+(tuple<T, N> lhs, const tuple<T, N>& rhs)
    {
        lhs += rhs;
        return lhs;
    }

    // Subtract another vector from this one
    constexpr tuple& operator-=(const tuple<T, N>& that) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] -= that[i];
        }
        return *this;
    }
    // passing lhs by value helps optimize chained a-b-c
    friend tuple<T, N> operator-(tuple<T, N> lhs, const tuple<T, N>& rhs)
    {
        lhs -= rhs;
        return lhs;
    }

    // Multiply all elements of the vector by a constant
    constexpr tuple& operator*=(const T& multiplier) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] *= multiplier;
        }
        return *this;
    }
    // passing lhs by value helps optimize chained (a*b)*c
    friend tuple<T, N> operator*(tuple<T, N> lhs, const T& multiplier)
    {
        lhs *= multiplier;
        return lhs;
    }

    // Divide all elements of the vector by a constant
    constexpr tuple& operator/=(const T& divisor) noexcept
    {
        for (size_t i = 0; i < N; ++i)
        {
            v[i] /= divisor;
        }
        return *this;
    }
    // passing lhs by value helps optimize chained (a/b)/c
    friend tuple<T, N> operator/(tuple<T, N> lhs, const T& divisor)
    {
        lhs /= divisor;
        return lhs;
    }

    // Helper function for PoD types that computes length
    // NOTE(review): the squared length is accumulated in T, so for integer T
    // the sum can overflow, and for narrow float T precision is lost before
    // the final sqrt -- confirm callers only use this with wide-enough types.
    constexpr double length() const
    {
        T myLength{ (T)0 };
        for (size_t i = 0; i < N; ++i)
        {
            myLength += operator[](i) * operator[](i);
        }
        return std::sqrt(myLength);
    }

    // Helper function for PoD types that returns a normalized version of the vector
    constexpr tuple<T, N> normalized() const
    {
        tuple<T, N> normalizedVector{ *this };
        double vectorLength = length();
        // If the length is zero then the vector is zero so it can be returned directly.
        if (vectorLength != 0.0)
        {
            for (size_t i = 0; i < N; ++i)
            {
                // C-style cast is needed in order to silently handle acceptable precision losses
                normalizedVector[i] = T(normalizedVector[i] / vectorLength);
            }
        }
        return normalizedVector;
    }

    // Uses the elementwise less-than in order from first to last for non-PoD types.
    // Should only be called if the elements have that operator.
    constexpr bool lessThanByElement(const tuple<T, N>& rhs) const
    {
        for (size_t i = 0; i < N; ++i)
        {
            if (operator[](i) < rhs[i])
            {
                return true;
            }
            else if (rhs[i] < operator[](i))
            {
                return false;
            }
        }
        return false;
    }

    // Algorithm for "<" that uses the vector length for comparison. Does not use the length()
    // method because for comparison puposes the expensize sqrt() is not needed.
    constexpr bool lessThanByLength(const tuple<T, N>& rhs) const
    {
        T myLength{ (T)0 };
        T rhsLength{ (T)0 };
        for (size_t i = 0; i < N; ++i)
        {
            myLength += operator[](i) * operator[](i);
            rhsLength += rhs[i] * rhs[i];
        }
        return myLength < rhsLength;
    }

    // Ordering is by vector length for PoDs and by ordering of elements in order if not.
    // NOTE(review): for PoD types this is only a partial order -- two distinct
    // vectors of equal length compare neither less nor greater.
    constexpr bool operator<(const tuple<T, N>& rhs) const
    {
        return is_trivial<T>().check() ? lessThanByLength(rhs) : lessThanByElement(rhs);
    }

    // Uses the elementwise less-than in order from first to last for non-PoD types.
    // Should only be called if the elements have that operator.
    constexpr bool greaterThanByElement(const tuple<T, N>& rhs) const
    {
        for (size_t i = 0; i < N; ++i)
        {
            if (operator[](i) > rhs[i])
            {
                return true;
            }
            else if (rhs[i] > operator[](i))
            {
                return false;
            }
        }
        return false;
    }

    // Algorithm for "<" that uses the vector length for comparison
    constexpr bool greaterThanByLength(const tuple<T, N>& rhs) const
    {
        T myLength{ (T)0 };
        T rhsLength{ (T)0 };
        for (size_t i = 0; i < N; ++i)
        {
            myLength += operator[](i) * operator[](i);
            rhsLength += rhs[i] * rhs[i];
        }
        // No need to take sqrt as only the relative difference is important
        return myLength > rhsLength;
    }

    // Ordering is by vector length for PoDs and by ordering of elements in order if not
    constexpr bool operator>(const tuple<T, N>& rhs) const
    {
        return is_trivial<T>().check() ? greaterThanByLength(rhs) : greaterThanByElement(rhs);
    }

    // Equality is element-wise
    constexpr bool operator==(const tuple<T, N>& rhs) const
    {
        for (size_t i = 0; i < N; ++i)
        {
            if (operator[](i) != rhs[i])
            {
                return false;
            }
        }
        return true;
    }

    // Derivative operators, where just as efficient as separate implementation
    constexpr bool operator!=(const tuple<T, N>& rhs) const
    {
        return !operator==(rhs);
    }
    constexpr bool operator>=(const tuple<T, N>& rhs) const
    {
        return !operator<(rhs);
    }
    constexpr bool operator<=(const tuple<T, N>& rhs) const
    {
        return !operator>(rhs);
    }

private:
    T v[N]; // the components; layout-compatible with a raw T[N] array

    // Recursive terminal case: the last (N-1'th) component.
    template <size_t i, typename OTHER_T>
    constexpr void initHelper(OTHER_T a)
    {
        static_assert(i == N - 1, "Variadic constructor of tuple<T, N> requires N arguments");
        v[i] = T(a);
    }
    // Assign component i, then recurse for the remaining arguments.
    template <size_t i, typename OTHER_T, typename... Args>
    constexpr void initHelper(OTHER_T a, Args... args)
    {
        v[i] = T(a);
        initHelper<i + 1>(args...);
    }
};

} // namespace core
} // namespace graph
} // namespace omni
11,816
C
29.145408
99
0.587847
omniverse-code/kit/include/omni/graph/core/Type.h
// Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/fabric/IdTypes.h>
#include <omni/fabric/Type.h>

#include <sstream>
#include <string>

// Alias fabric types into omnigraph for backwards compatibility.
// Use omni/fabric/Type.h outside of the omnigraph project
namespace omni
{
namespace graph
{
namespace core
{

//! OmniGraph Type is just an alias for the Fabric Type
using Type = omni::fabric::Type;
//! OmniGraph BaseDataType is just an alias for the Fabric BaseDataType
using BaseDataType = omni::fabric::BaseDataType;
//! OmniGraph AttributeRole is just an alias for the Fabric AttributeRole
using AttributeRole = omni::fabric::AttributeRole;
//! OmniGraph BucketId is just an alias for the Fabric BucketId
using BucketId = omni::fabric::BucketId;

// ======================================================================
/**
 * @brief Get a string that describes the type role in OGN format
 *
 * The role name is slightly different here than @ref omni::fabric::getAttributeRoleName for historical reasons
 *
 * @param r Role whose name is to be returned
 * @return std::string OGN-style name of the given role
 */
inline std::string getOgnRoleName(AttributeRole r)
{
    // Indexed by the numeric value of AttributeRole, so the order of entries
    // here must exactly match the enum declaration order in omni/fabric/Type.h.
    static const std::string ognRoleNames[] = { "none",
                                                "vector",
                                                "normal",
                                                "point",
                                                "color",
                                                "texcoord",
                                                "quat",
                                                "transform",
                                                "frame",
                                                "timecode",
                                                "text",
                                                "appliedSchema",
                                                "primTypeName",
                                                "execution",
                                                "matrix",
                                                "objectId",
                                                "bundle",
                                                "path",
                                                "instancedAttribute",
                                                "ancestorPrimTypeName",
                                                "target",
                                                "unknown" };
    // Bounds check: eUnknown is the last named role; anything past it yields "".
    if (r <= AttributeRole::eUnknown)
    {
        return ognRoleNames[uint8_t(r)];
    }
    return std::string();
}

// ======================================================================
/**
 * @brief Get a string that describes the type in OGN format
 *
 * OGN formats the type names slightly differently than @ref omni::fabric::getTypeName
 * - the tuples are indexed "float[3]" instead of "float3"
 * - the roles replace the actual name "colord[3]" instead of "double3 (color)"
 *
 * @param t Type definition whose name is to be returned
 * @return std::string OGN-style name of the type
 */
inline std::string getOgnTypeName(Type t)
{
    std::ostringstream typeName;
    // Text-like roles have fixed names with no base-type or tuple decoration.
    if (t.role == AttributeRole::eText)
    {
        typeName << "string";
        return typeName.str();
    }
    if (t.role == AttributeRole::ePath)
    {
        typeName << "path";
        return typeName.str();
    }
    if (t.role != AttributeRole::eNone)
    {
        typeName << getOgnRoleName(t.role);
        // For roles with explicit types, add that to the role name
        if ((t.role != AttributeRole::eTimeCode) && (t.role != AttributeRole::eTransform) &&
            (t.role != AttributeRole::eFrame) && (t.role != AttributeRole::eObjectId) &&
            (t.role != AttributeRole::eBundle) && (t.role != AttributeRole::eExecution) &&
            (t.role != AttributeRole::eTarget))
        {
            // Floating-point base types get a one-letter suffix (e.g. "colord");
            // anything else falls back to streaming the base type name itself.
            switch (t.baseType)
            {
            case BaseDataType::eHalf:
                typeName << "h";
                break;
            case BaseDataType::eFloat:
                typeName << "f";
                break;
            case BaseDataType::eDouble:
                typeName << "d";
                break;
            default:
                typeName << t.baseType;
                break;
            }
        }
    }
    else
    {
        typeName << t.baseType;
    }
    // Tuple count in OGN bracket form, e.g. "float[3]" rather than "float3".
    if (t.componentCount > 1)
    {
        typeName << "[" << uint32_t(t.dimension()) << "]";
    }
    // Array depth: one "[]" per level (depth 0 = scalar, 1 = array, 2 = array-of-arrays).
    if (t.arrayDepth == 1)
        typeName << "[]";
    else if (t.arrayDepth == 2)
        typeName << "[][]";
    return typeName.str();
}

} // namespace core
} // namespace graph
} // namespace omni
5,053
C
33.855172
111
0.486246
omniverse-code/kit/include/omni/graph/core/IVariable2.h
// Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/core/IObject.h> #include <omni/graph/core/Type.h> #include <omni/graph/core/Handle.h> #include <omni/graph/core/IVariable.h> namespace omni { namespace graph { namespace core { OMNI_DECLARE_INTERFACE(IVariable2); /** * @brief Interface extension for IVariable that adds the ability to set a variable type */ class IVariable2_abi : public omni::core::Inherits<omni::graph::core::IVariable, OMNI_TYPE_ID("omni.graph.core.IVariable2")> { protected: /** * Sets the type of the variable. * * @param[in] type New type for the variable * * @return True if the type is able to be set, false otherwise */ virtual bool setType_abi(Type type) noexcept = 0; }; } // namespace core } // namespace graph } // namespace omni #include "IVariable2.gen.h" /** * Implementation of IVariable setType method */ #ifndef DOXYGEN_BUILD inline bool omni::graph::core::IVariable::setType(omni::graph::core::Type type) noexcept { auto v2 = omni::core::cast<IVariable2>(this); if (v2) { return v2->setType(type); } else { return false; } } #endif
1,585
C
23.030303
105
0.700315
omniverse-code/kit/include/omni/graph/core/IInternal.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <carb/Defines.h>
#include <carb/Interface.h>
#include <carb/Types.h>

#include <omni/graph/core/unstable/IPrivateNodeGraphDef.h>
#include <omni/graph/core/unstable/IPrivateNodeDef.h>
#include <omni/graph/core/Handle.h>

#include <cstddef>

namespace omni
{
namespace graph
{
namespace core
{

/**
 * This class contains functions and member variables that are intended only for internal
 * use by the omni.graph.core extension but which, for technical reasons, we are forced to
 * expose externally.
 *
 * This class is explicitly exempt from the usual rules about deprecation and backwards compatibility.
 * Members may be added, removed, or changed at any time without notice.
 */
struct IInternal
{
    CARB_PLUGIN_INTERFACE("omni::graph::core::IInternal", 1, 2);

    // NOTE: In the Python implementation all methods and members should begin with a single underscore (_)
    // to further drive home that they are meant for internal use only.

    /**
     * Mark an attribute as deprecated, meaning that it should no longer be used and will be removed in a future version.
     *
     * @param[in] attrObj The attribute being deprecated.
     * @param[in] message Message explaining what users should do to deal with the deprecation.
     */
    void(CARB_ABI* deprecateAttribute)(const AttributeObj& attrObj, const char *message);

    /**
     * Factory method used to create internal generic graph definition wrapping private class
     *
     * @param[in] graphObj Authoring graph this definition belongs to
     * @param[in] isInstanced Is this graph a graph instance
     */
    unstable::IPrivateNodeGraphDef* (CARB_ABI* createPrivateGraphDef)(
        const GraphObj& graphObj, bool isInstanced);

    /**
     * Factory method used to create internal generic node definition wrapping private class
     *
     * @param[in] nodeObj Authoring node this definition belongs to
     */
    unstable::IPrivateNodeDef*(CARB_ABI* createPrivateNodeDef)(const NodeObj& nodeObj);
};

// Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle
STRUCT_INTEGRITY_CHECK(IInternal, createPrivateNodeDef, 2)

} // namespace core
} // namespace graph
} // namespace omni
2,858
C
36.618421
121
0.738279
omniverse-code/kit/include/omni/graph/core/ArrayWrapper.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "GpuArray.h" #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/Accessors.h> #include <tuple> namespace omni { namespace graph { namespace core { template <typename T> struct GpuArray; template <typename T> struct ConstGpuArray; // Convenience function to retrieve an attribute data handle given the attribute. The // attribute data handle is needed to retrieve the data of the attribute. This is the read only version. inline ConstAttributeDataHandle getAttributeHandleR(const AttributeObj& attrObj, InstanceIndex instIndex) { return attrObj.iAttribute->getConstAttributeDataHandle(attrObj, instIndex); } // Convenience function to retrieve an attribute data handle given the attribute. The // attribute data handle is needed to retrieve the data of the attribute. This is the writable version. inline AttributeDataHandle getAttributeHandleW(const AttributeObj& attrObj, InstanceIndex instIndex) { return attrObj.iAttribute->getAttributeDataHandle(attrObj, instIndex); } template <typename T> class ArrayWrapper { const GraphContextObj m_context = {}; const AttributeObj m_attrObj = {}; const InstanceIndex m_instIdx{ kAccordingToContextIndex }; public: ArrayWrapper(const GraphContextObj& context, const AttributeObj& attrObj, InstanceIndex instanceIdx = kAccordingToContextIndex) : m_context(context), m_attrObj(attrObj), m_instIdx(instanceIdx) { } ArrayWrapper() { } bool isValid(DataAccessFlags access) const { if (!m_context.iContext) return false; // for outputs it's okay for the size to be 0. 
So we only check for inputs here. return access == kReadOnly ? getArrayRd() != nullptr : true; } // CPU buffer accessors T* getArray() { return getArrayWr(); } const T* getArrayRd() const { ConstAttributeDataHandle attrDataHandle = getAttributeHandleR(m_attrObj, m_instIdx); // Technically this is a void*** at this point. Fabric stores arrays as a pointer to the actual // buffer. The getDataR interface requires a void** , so we are forcing it to be that here. // At the end of the call, out contains the address to the buffer, which we then dereference to // get at the array buffer const void** out = nullptr; void** outPtr = reinterpret_cast<void**>(&out); m_context.iAttributeData->getDataR((const void**)outPtr, m_context, &attrDataHandle, 1); if (out == nullptr) return nullptr; return reinterpret_cast<T*>(const_cast<void*>(*out)); } T* getArrayWr() { AttributeDataHandle attrDataHandle = getAttributeHandleW(m_attrObj, m_instIdx); void** out; // see note above about void*** forced to void** void** outPtr = reinterpret_cast<void**>(&out); m_context.iAttributeData->getDataW(outPtr, m_context, &attrDataHandle, 1); if (out == nullptr) return nullptr; return reinterpret_cast<T*>(*out); } void resize(size_t newCount) { AttributeDataHandle attrDataHandle = getAttributeHandleW(m_attrObj, m_instIdx); m_context.iAttributeData->setElementCount(m_context, attrDataHandle, newCount); } size_t size() const { ConstAttributeDataHandle attrDataHandle = getAttributeHandleR(m_attrObj, m_instIdx); size_t elemCount = 0; m_context.iAttributeData->getElementCount(&elemCount, m_context, &attrDataHandle, 1); return elemCount; } bool empty() const { return size() == 0; } // GPU buffer accessors GpuArray<T> getArrayGPU() { return getArrayWrGPU(); } ConstGpuArray<T> getArrayRdGPU() const { ConstAttributeDataHandle attrDataHandle = getAttributeHandleR(m_attrObj, m_instIdx); const void** out = nullptr; void** outPtr = reinterpret_cast<void**>(&out); m_context.iAttributeData->getDataRGPU((const 
void**)outPtr, m_context, &attrDataHandle, 1); auto array = (T* const*)(out); return ConstGpuArray<T>{ array, sizeGPU() }; } GpuArray<T> getArrayWrGPU() { AttributeDataHandle attrDataHandle = getAttributeHandleW(m_attrObj, m_instIdx); void** out; void** outPtr = reinterpret_cast<void**>(&out); m_context.iAttributeData->getDataWGPU(outPtr, m_context, &attrDataHandle, 1); auto array = (T* const*)(out); return GpuArray<T>{ array, sizeGPU() }; } // GPU size accessor const size_t* sizeGPU() const { // We don't support GPU resizing array return nullptr; } }; } } }
5,177
C
31.566038
105
0.672204
omniverse-code/kit/include/omni/graph/core/BundleAttribImpl.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include "BundleAttrib.h"
#include "ConstBundlePrims.h"

#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/CppWrappers.h>
#include <omni/math/linalg/vec.h>
#include <omni/math/linalg/matrix.h>
#include <omni/math/linalg/quat.h>
#include <omni/math/linalg/half.h>

namespace omni
{
namespace math
{
namespace linalg
{

// Compile-time map from a C++ scalar or linalg type to its OmniGraph BaseDataType.
// The unspecialized template yields eUnknown; vector/matrix/quaternion specializations
// forward to the base type of their component.
template <typename T>
struct TypeToBaseType
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUnknown;
};
template <>
struct TypeToBaseType<half>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eHalf;
};
template <>
struct TypeToBaseType<float>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eFloat;
};
template <>
struct TypeToBaseType<double>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eDouble;
};
template <>
struct TypeToBaseType<bool>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eBool;
};
template <>
struct TypeToBaseType<unsigned char>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUChar;
};
template <>
struct TypeToBaseType<int>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt;
};
template <>
struct TypeToBaseType<int64_t>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eInt64;
};
template <>
struct TypeToBaseType<unsigned int>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt;
};
template <>
struct TypeToBaseType<uint64_t>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eUInt64;
};
template <>
struct TypeToBaseType<omni::fabric::Token>
{
    constexpr static omni::graph::core::BaseDataType baseType = omni::graph::core::BaseDataType::eToken;
};
// Compound linalg types map to the base type of their component type T.
template <typename T, size_t N>
struct TypeToBaseType<base_vec<T, N>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec2<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec3<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<vec4<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<quat<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T, size_t N>
struct TypeToBaseType<base_matrix<T, N>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix2<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix3<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};
template <typename T>
struct TypeToBaseType<matrix4<T>>
{
    constexpr static omni::graph::core::BaseDataType baseType = TypeToBaseType<T>::baseType;
};

// Compile-time map from a C++ type to its component count (1 for scalars,
// N for vectors, N*N for square matrices, 4 for quaternions).
template <typename T>
struct TypeToComponentCount
{
    constexpr static size_t count = 1;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_vec<T,N>>
{
    constexpr static size_t count = N;
};
template <typename T>
struct TypeToComponentCount<vec2<T>>
{
    constexpr static size_t count = 2;
};
template <typename T>
struct TypeToComponentCount<vec3<T>>
{
    constexpr static size_t count = 3;
};
template <typename T>
struct TypeToComponentCount<vec4<T>>
{
    constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<quat<T>>
{
    constexpr static size_t count = 4;
};
template <typename T, size_t N>
struct TypeToComponentCount<base_matrix<T,N>>
{
    constexpr static size_t count = N*N;
};
template <typename T>
struct TypeToComponentCount<matrix2<T>>
{
    constexpr static size_t count = 4;
};
template <typename T>
struct TypeToComponentCount<matrix3<T>>
{
    constexpr static size_t count = 9;
};
template <typename T>
struct TypeToComponentCount<matrix4<T>>
{
    constexpr static size_t count = 16;
};

} // namespace linalg
} // namespace math
} // namespace omni

namespace omni
{
namespace graph
{
namespace core
{
namespace detail
{

//
// Non-owning string buffer with compile time size evaluation
//
// Holds a pointer and a length; never copies or frees the character data,
// so the pointed-to string must outlive the buffer.
class StringBuffer
{
public:
    using value_type = char const*;
    using size_type = std::size_t;
    using const_iterator = char const*;

    constexpr StringBuffer(value_type data, size_type size) noexcept : m_data{ data }, m_size{ size }
    {
    }

    // Computes the length at compile time via the constexpr len() helper.
    constexpr explicit StringBuffer(value_type data) noexcept : StringBuffer{ data, len(data) }
    {
    }

    constexpr StringBuffer(StringBuffer const&) = default;
    constexpr StringBuffer(StringBuffer&&) = default;

    constexpr value_type data() const noexcept
    {
        return m_data;
    }
    constexpr size_type size() const noexcept
    {
        return m_size;
    }
    constexpr const_iterator begin() const noexcept
    {
        return m_data;
    }
    constexpr const_iterator end() const noexcept
    {
        return m_data + m_size;
    }

private:
    // constexpr strlen: counts characters up to (not including) the NUL terminator.
    constexpr size_type len(value_type start) const noexcept
    {
        value_type end = start;
        for (; *end != '\0'; ++end)
            ;
        return end - start;
    }

    value_type m_data;
    size_type m_size;
};

// Helper class to keep name and type together.
struct AttrDefinition
{
    AttrDefinition(StringBuffer _name, omni::graph::core::Type _type, omni::graph::core::NameToken _token) noexcept
        : name{ _name }
        , type{ _type }
        , token{ _token }
    {
    }

    // Convenience overload: tokenizes the name text through the Fabric IToken interface.
    AttrDefinition(omni::fabric::IToken const* iToken, char const* _text, omni::graph::core::Type _type) noexcept
        : AttrDefinition{ StringBuffer{_text}, _type, iToken->getHandle(_text) }
    {
    }

    // Definitions are singletons returned by reference; copying/moving is disallowed.
    AttrDefinition(AttrDefinition const&) = delete;
    AttrDefinition(AttrDefinition&&) = delete;
    AttrDefinition& operator=(AttrDefinition const&) = delete;
    AttrDefinition& operator=(AttrDefinition&&) = delete;

    StringBuffer name; // Name and size of the attribute
    omni::graph::core::Type type; // Type of the attribute
    omni::graph::core::NameToken token; // Token representation of the name
};

// Attribute Level Definitions:

// Metadata definition for an attribute's "interpolation" token.
inline AttrDefinition const& getAttrInterpolationDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "interpolation", Type{ BaseDataType::eToken, 1, 0 } };
    return d;
}

// Metadata definition for an attribute's "source" byte (see BundleAttrib::Source).
inline AttrDefinition const& getAttrSourceDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "source", Type{ BaseDataType::eUChar, 1, 0 } };
    return d;
}

// Primitive Level Definitions:

inline AttrDefinition const& getPrimIndexDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "primIndex", Type{ BaseDataType::eUInt64, 1, 0 } };
    return d;
}

inline AttrDefinition const& getPrimPathDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "sourcePrimPath", Type{ BaseDataType::eToken, 1, 0 } };
    return d;
}

inline AttrDefinition const& getPrimTypeDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "sourcePrimType", Type{ BaseDataType::eToken, 1, 0 } };
    return d;
}

// Bundle Level Definitions:

inline AttrDefinition const& getBundlePrimIndexOffsetDefinition() noexcept
{
    using namespace omni::fabric;
    static AttrDefinition d{ carb::getCachedInterface<IToken>(), "bundlePrimIndexOffset", Type{ BaseDataType::eUInt64, 1, 0 } };
    return d;
}

// Constant types.
// Type used for relationship data: a token array (arrayDepth == 1).
constexpr omni::graph::core::Type s_relationshipType{ omni::graph::core::BaseDataType::eToken, 1, 1 };

} // namespace detail

// True when this attribute stores relationship data (source marked Relationship AND token-array typed).
inline bool BundleAttrib::isRelationshipData() const noexcept
{
    return m_source == Source::Relationship && type() == detail::s_relationshipType;
}

// Sets (creating if necessary) the "interpolation" metadata on the attribute.
// Passing the uninitialized token clears the interpolation instead.
// Returns false only when the owning bundle is not writable.
inline bool BundleAttrib::setInterpolation(omni::graph::core::NameToken interpolation) noexcept
{
    using namespace omni::graph::core;
    if (m_interpolation == interpolation)
        return true;
    if (interpolation == omni::fabric::kUninitializedToken)
    {
        clearInterpolation();
        return true;
    }
    if (IBundle2* bundle = getBundlePtr())
    {
        auto& interpDef = detail::getAttrInterpolationDefinition();
        AttributeDataHandle interpolationAttr = bundle->getAttributeMetadataByName(m_name, interpDef.token);
        if (!interpolationAttr.isValid())
        {
            // Metadata attribute doesn't exist yet -- create it before writing.
            interpolationAttr = bundle->createAttributeMetadata(m_name, interpDef.token, interpDef.type);
        }
        m_interpolation = interpolation;
        auto context = bundle->getContext();
        *getDataW<NameToken>(context, interpolationAttr) = interpolation;
        return true;
    }
    return false;
}

// Sets (creating if necessary) the "source" metadata byte on the attribute.
// Returns false only when the owning bundle is not writable.
inline bool BundleAttrib::setSource(Source source) noexcept
{
    using namespace omni::graph::core;
    if(m_source == source)
        return true;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto& sourceDef = detail::getAttrSourceDefinition();
        AttributeDataHandle sourceAttr = bundle->getAttributeMetadataByName(m_name, sourceDef.token);
        if(!sourceAttr.isValid())
        {
            sourceAttr = bundle->createAttributeMetadata(m_name, sourceDef.token, sourceDef.type);
        }
        m_source = source;
        auto context = bundle->getContext();
        *omni::graph::core::getDataW<SourceType>(context, sourceAttr) = static_cast<SourceType>(source);
        return true;
    }
    return false;
}

// Copies data, cached type, interpolation, and source from sourceAttr into this attribute.
// No-op when this attribute's bundle is not writable.
inline void BundleAttrib::copyContentsFrom(BundleAttrib const& sourceAttr) noexcept
{
    using namespace omni::graph::core;
    IBundle2* dstBundle = getBundlePtr();
    IConstBundle2* srcBundle = sourceAttr.getConstBundlePtr();
    if (!dstBundle)
    {
        return;
    }
    auto context = dstBundle->getContext();
    // Copy Attribute
    AttributeDataHandle dstAttrHandle = dstBundle->getAttributeByName(m_name);
    ConstAttributeDataHandle srcAttrHandle = srcBundle->getConstAttributeByName(sourceAttr.m_name);
    // Sanity-check that the destination's actual type still matches our cached m_type
    // before the copy. NOTE(review): the original comment said this verifies copyData
    // "updated the type", but the assert runs before copyData -- confirm intent.
    CARB_ASSERT(context.iAttributeData->getType(context, dstAttrHandle) == Type(m_type));
    context.iAttributeData->copyData(dstAttrHandle, context, srcAttrHandle);
    // Copy the cached type
    m_type = sourceAttr.m_type;
    // Copy the interpolation (does nothing if the same; clears interpolation if none on sourceAttr)
    setInterpolation(sourceAttr.interpolation());
    // Copy source
    setSource(sourceAttr.m_source);
}

// Removes the "interpolation" metadata and resets the cached value.
inline void BundleAttrib::clearInterpolation() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext(); // NOTE(review): unused local -- candidate for removal
        auto& interpDef = detail::getAttrInterpolationDefinition();
        bundle->removeAttributeMetadata(m_name, interpDef.token);
        m_interpolation = omni::fabric::kUninitializedToken;
    }
}

// Owning primitive accessor (may be nullptr).
inline ConstBundlePrim* BundleAttrib::getBundlePrim() const noexcept
{
    return m_bundlePrim;
}

// Removes the "source" metadata and resets the cached value to Attribute.
inline void BundleAttrib::clearSource() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext(); // NOTE(review): unused local -- candidate for removal
        auto& sourceDef = detail::getAttrSourceDefinition();
        bundle->removeAttributeMetadata(m_name, sourceDef.token);
        m_source = BundleAttribSource::Attribute;
    }
}

// Attribute name token.
inline omni::graph::core::NameToken BundleAttrib::name() const noexcept
{
    return m_name;
}

// Cached interpolation token (uninitialized token when unset).
inline omni::graph::core::NameToken BundleAttrib::interpolation() const noexcept
{
    return m_interpolation;
}

// Cached attribute type.
inline omni::graph::core::Type BundleAttrib::type() const noexcept
{
    return omni::graph::core::Type(m_type);
}

// True when the attribute is an array (arrayDepth must be 0 or 1).
inline bool BundleAttrib::isArray() const noexcept
{
    omni::graph::core::Type type{ m_type };
    CARB_ASSERT(type.arrayDepth < 2);
    return (type.arrayDepth != 0);
}

// Cached source classification (Attribute vs Relationship).
inline BundleAttrib::Source BundleAttrib::source() const noexcept
{
    return m_source;
}

inline bool BundleAttrib::isAttributeData() const noexcept
{
    return m_source == Source::Attribute;
}

// NOTE(review): currently identical to name(); presumably kept for API compatibility -- confirm.
inline omni::graph::core::NameToken BundleAttrib::prefixedName() const noexcept
{
    return m_name;
}

// Element count: 1 for non-array attributes, otherwise the Fabric array element count.
inline size_t BundleAttrib::size() const noexcept
{
    using namespace omni::graph::core;
    if (!isArray())
    {
        return 1;
    }
    IConstBundle2* bundle = getConstBundlePtr();
    auto context = bundle->getContext();
    ConstAttributeDataHandle attr = bundle->getConstAttributeByName(m_name);
    size_t count; // NOTE(review): left uninitialized; relies on getElementCount always writing it
    context.iAttributeData->getElementCount(&count, context, &attr, 1);
    return count;
}

// Resizes an array attribute to the requested element count (array attributes only).
inline void BundleAttrib::resize(size_t arrayElementCount) noexcept
{
    using namespace omni::graph::core;
    CARB_ASSERT(isArray());
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext();
        AttributeDataHandle attr = bundle->getAttributeByName(m_name);
        context.iAttributeData->setElementCount(context, attr, arrayElementCount);
    }
}

// Raw writable data pointer; for arrays, dereferences the stored pointer-to-buffer.
inline void* BundleAttrib::getDataInternal() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        auto context = bundle->getContext();
        AttributeDataHandle attr = bundle->getAttributeByName(m_name);
        if (Type(m_type).arrayDepth == 0)
        {
            return getDataW<void>(context, attr);
        }
        return *getDataW<void*>(context, attr);
    }
    return nullptr;
}

// Raw read-only data pointer; for arrays, dereferences the stored pointer-to-buffer.
inline void const* BundleAttrib::getDataInternal() const noexcept
{
    using namespace omni::graph::core;
    IConstBundle2* constBundle = getConstBundlePtr();
    GraphContextObj context = constBundle->getContext();
    ConstAttributeDataHandle attr = constBundle->getConstAttributeByName(m_name);
    if (Type(m_type).arrayDepth == 0)
    {
        return getDataR<void const>(context, attr);
    }
    return *getDataR<void const*>(context, attr);
}

// Writable data handle built from the owning bundle's handle and the name token.
inline omni::graph::core::AttributeDataHandle BundleAttrib::handle() noexcept
{
    using namespace omni::graph::core;
    if (IBundle2* bundle = getBundlePtr())
    {
        return AttributeDataHandle(AttrKey(bundle->getHandle(), m_name.token));
    }
    return AttributeDataHandle{ AttributeDataHandle::invalidValue() };
}

// Read-only data handle built from the owning bundle's handle and the name token.
inline omni::graph::core::ConstAttributeDataHandle BundleAttrib::handle() const noexcept
{
    using namespace omni::graph::core;
    if(IConstBundle2* bundle = getConstBundlePtr())
    {
        return ConstAttributeDataHandle{ AttrKey(bundle->getConstHandle(), m_name.token) };
    }
    return ConstAttributeDataHandle{ ConstAttributeDataHandle::invalidValue() };
}

template <typename T>
T* BundleAttrib::getData() noexcept
{
    // It must be valid to request a pointer to type T.
    // requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    bool const isSameBaseType = TypeToBaseType<T>::baseType == Type(m_type).baseType;
    bool const isSameCount = TypeToComponentCount<T>::count == Type(m_type).componentCount;
    bool const isValidCast = isSameBaseType && (TypeToComponentCount<T>::count == 1 || isSameCount);
    return isValidCast ? reinterpret_cast<T*>(getDataInternal()) : nullptr;
}

template <typename T>
T const* BundleAttrib::getData() const noexcept
{
    // It must be valid to request a pointer to type T.
    // requesting a float* or vec3f* for a vec3f type is valid, but double* or vec2f* is not.
    using namespace omni::math::linalg;
    using Type = omni::graph::core::Type;
    bool const isValidCast = TypeToBaseType<T>::baseType == Type(m_type).baseType &&
        (TypeToComponentCount<T>::count == 1 || TypeToComponentCount<T>::count == Type(m_type).componentCount);
    return isValidCast ? reinterpret_cast<T const*>(getDataInternal()) : nullptr;
}

template <typename T>
T const* BundleAttrib::getConstData() const noexcept
{
    return getData<T>();
}

// Returns the value by copy; only valid for non-array attributes whose component
// count exactly matches T (asserted below).
template <typename T>
T BundleAttrib::get() const noexcept
{
    using namespace omni::math::linalg;
    // TODO: Figure out how to support array attributes here.
    CARB_ASSERT(omni::graph::core::Type(m_type).arrayDepth == 0);
    // This has stronger requirements than getData, since get<float>() isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == omni::graph::core::Type(m_type).componentCount);
    return *getConstData<T>();
}

// Assigns a single (non-array) value; T's component count must match exactly.
template <typename T>
void BundleAttrib::set(T const& value) noexcept
{
    using namespace omni::math::linalg;
    CARB_ASSERT(omni::graph::core::Type(m_type).arrayDepth == 0);
    // This has stronger requirements than getData, since set(1.0f) isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == omni::graph::core::Type(m_type).componentCount);
    *getData<T>() = value;
}

// Resizes the array attribute to elementCount and copies the values in element-by-element.
template <typename T>
void BundleAttrib::set(T const* values, size_t elementCount) noexcept
{
    using namespace omni::math::linalg;
    CARB_ASSERT(omni::graph::core::Type(m_type).arrayDepth == 1);
    // This has stronger requirements than getData, since set(float const*,size_t) isn't valid
    // for a vec3f attribute, but getData<float>() is valid for a vec3f attribute.
    CARB_ASSERT(TypeToComponentCount<T>::count == omni::graph::core::Type(m_type).componentCount);
    resize(elementCount);
    if (elementCount > 0)
    {
        T* p = getData<T>();
        for (size_t i = 0; i < elementCount; ++i)
        {
            p[i] = values[i];
        }
    }
}

inline void BundleAttrib::clearContents() noexcept
{
    using namespace omni::graph::core;
    /**
     * Remove attribute. Its metadata will be removed automatically together with it.
     */
    IBundle2* bundle = getBundlePtr();
    bundle->removeAttributeByName(m_name);
    /**
     * Invalidate data.
     */
    m_source = BundleAttribSource::Attribute;
    m_interpolation = omni::fabric::kUninitializedToken;
    m_type = omni::fabric::kUnknownType;
    m_name = omni::fabric::kUninitializedToken;
    m_bundlePrim = nullptr;
}

} // namespace core
} // namespace graph
} // namespace omni
19,455
C
27.486091
128
0.695965
omniverse-code/kit/include/omni/graph/core/NodeTypeRegistryTemplates.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #ifndef NODE_TYPE_REGISTRY # error This file can only be included indirectly from NodeTypeRegistrar.h #endif #include "iComputeGraph.h" #include <type_traits> //====================================================================== // Template metaprogramming that allows the node registration to figure out at compile time // which functions a node has that can be registered. Functions come from the INodeType // interface in iComputeGraph.h. template <typename> struct sfinae_true : std::true_type { }; template <typename> struct sfinae_false : std::false_type { }; // Template collection to provide a pointer to the static method getNodeType() if it exists // Usage: auto getNodeTypeFn = getNodeTypeFunction<NodeClass>(); using GetNodeTypeFunction = std::add_pointer<const char*()>::type; template <typename NodeType> constexpr auto _checkGetNodeTypeFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkGetNodeTypeFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().getNodeType())>; template <typename NodeType> struct _hasGetNodeTypeFunction : decltype(_checkGetNodeTypeFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasGetNodeTypeFunction<NodeType>::value, bool>::type = 0> static inline GetNodeTypeFunction getNodeTypeFunction() { return &NodeType::getNodeType; }; template <typename NodeType, typename std::enable_if<!_hasGetNodeTypeFunction<NodeType>::value, bool>::type = 0> static inline GetNodeTypeFunction getNodeTypeFunction() { return 
nullptr; }; // Template collection to provide a pointer to the static method compute() if it exists // Usage: auto computeFn = computeFunction<NodeClass>(); using ComputeFunction = std::add_pointer<bool(const GraphContextObj&, const NodeObj&)>::type; template <typename NodeType> constexpr auto _checkComputeFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkComputeFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().compute(std::declval<const GraphContextObj&>(), std::declval<const NodeObj&>()))>; template <typename NodeType> struct _hasComputeFunction : decltype(_checkComputeFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasComputeFunction<NodeType>::value, bool>::type = 0> static inline ComputeFunction computeFunction() { return &NodeType::compute; }; template <typename NodeType, typename std::enable_if<!_hasComputeFunction<NodeType>::value, bool>::type = 0> static inline ComputeFunction computeFunction() { return nullptr; }; // Template collection to provide a pointer to the static method initialize() if it exists // Usage: auto initializeFn = initializeFunction<NodeClass>(); using InitializeFunction = std::add_pointer<void(const GraphContextObj&, const NodeObj&)>::type; template <typename NodeType> constexpr auto _checkInitializeFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkInitializeFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().initialize(std::declval<const GraphContextObj&>(), std::declval<const NodeObj&>()))>; template <typename NodeType> struct _hasInitializeFunction : decltype(_checkInitializeFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasInitializeFunction<NodeType>::value, bool>::type = 0> static inline InitializeFunction initializeFunction() { return &NodeType::initialize; }; template <typename NodeType, typename 
std::enable_if<!_hasInitializeFunction<NodeType>::value, bool>::type = 0> static inline InitializeFunction initializeFunction() { return nullptr; }; // Template collection to provide a pointer to the static method release() if it exists // Usage: auto releaseFn = releaseFunction<NodeClass>(); using ReleaseFunction = std::add_pointer<void(const NodeObj&)>::type; template <typename NodeType> constexpr auto _checkReleaseFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkReleaseFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().release(std::declval<const NodeObj&>()))>; template <typename NodeType> struct _hasReleaseFunction : decltype(_checkReleaseFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasReleaseFunction<NodeType>::value, bool>::type = 0> static inline ReleaseFunction releaseFunction() { return &NodeType::release; }; template <typename NodeType, typename std::enable_if<!_hasReleaseFunction<NodeType>::value, bool>::type = 0> static inline ReleaseFunction releaseFunction() { return nullptr; }; // Template collection to provide a pointer to the static method initializeType() if it exists // Usage: auto initializeTypeFn = initializeTypeFunction<NodeClass>(); using InitializeTypeFunction = std::add_pointer<void(const NodeTypeObj&)>::type; template <typename NodeType> constexpr auto _checkInitializeTypeFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkInitializeTypeFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().initializeType(std::declval<const NodeTypeObj&>()))>; template <typename NodeType> struct _hasInitializeTypeFunction : decltype(_checkInitializeTypeFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasInitializeTypeFunction<NodeType>::value, bool>::type = 0> static inline InitializeTypeFunction initializeTypeFunction() { return &NodeType::initializeType; }; template 
<typename NodeType, typename std::enable_if<!_hasInitializeTypeFunction<NodeType>::value, bool>::type = 0> static inline InitializeTypeFunction initializeTypeFunction() { return nullptr; }; // Template collection to provide a pointer to the static method updateNodeVersion() if it exists // Usage: auto updateNodeVersionFn = updateNodeVersionFunction<NodeClass>(); using UpdateNodeVersionFunction = std::add_pointer<bool(const GraphContextObj&, const NodeObj&, int, int)>::type; template <typename NodeType> constexpr auto _checkUpdateNodeVersionFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkUpdateNodeVersionFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().updateNodeVersion( std::declval<const GraphContextObj&>(), std::declval<const NodeObj&>(), std::declval<int>(), std::declval<int>()))>; template <typename NodeType> struct _hasUpdateNodeVersionFunction : decltype(_checkUpdateNodeVersionFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasUpdateNodeVersionFunction<NodeType>::value, bool>::type = 0> static inline UpdateNodeVersionFunction updateNodeVersionFunction() { return &NodeType::updateNodeVersion; }; template <typename NodeType, typename std::enable_if<!_hasUpdateNodeVersionFunction<NodeType>::value, bool>::type = 0> static inline UpdateNodeVersionFunction updateNodeVersionFunction() { return nullptr; }; // Template collection to provide a pointer to the static method addInput() if it exists // Usage: auto addInputFn = addInputFunction<NodeClass>(); using AddInputFunction = std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)>::type; template <typename NodeType> constexpr auto _checkAddInputFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkAddInputFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().addInput(std::declval<const NodeTypeObj&>(), std::declval<const 
char*>(), std::declval<const char*>(), std::declval<bool>(), std::declval<const void*>(), std::declval<const size_t*>()))>; template <typename NodeType> struct _hasAddInputFunction : decltype(_checkAddInputFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasAddInputFunction<NodeType>::value, bool>::type = 0> static inline AddInputFunction addInputFunction() { return &NodeType::addInput; }; template <typename NodeType, typename std::enable_if<!_hasAddInputFunction<NodeType>::value, bool>::type = 0> static inline AddInputFunction addInputFunction() { return nullptr; }; // Template collection to provide a pointer to the static method addExtendedInput() if it exists // Usage: auto addExtendedInputFn = addExtendedInputFunction<NodeClass>(); using AddExtendedInputFunction = std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)>::type; template <typename NodeType> constexpr auto _checkAddExtendedInputFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkAddExtendedInputFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().addExtendedInput(std::declval<const NodeTypeObj&>(), std::declval<const char*>(), std::declval<const char*>(), std::declval<bool>(), std::declval<ExtendedAttributeType>()))>; template <typename NodeType> struct _hasAddExtendedInputFunction : decltype(_checkAddExtendedInputFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasAddExtendedInputFunction<NodeType>::value, bool>::type = 0> static inline AddExtendedInputFunction addExtendedInputFunction() { return &NodeType::addExtendedInput; }; template <typename NodeType, typename std::enable_if<!_hasAddExtendedInputFunction<NodeType>::value, bool>::type = 0> static inline AddExtendedInputFunction addExtendedInputFunction() { return nullptr; }; // Template collection to provide a pointer to the static method addOutput() if it exists // Usage: auto 
addOutputFn = addOutputFunction<NodeClass>(); using AddOutputFunction = std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)>::type; template <typename NodeType> constexpr auto _checkAddOutputFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkAddOutputFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().addOutput(std::declval<const NodeTypeObj&>(), std::declval<const char*>(), std::declval<const char*>(), std::declval<bool>(), std::declval<const void*>(), std::declval<const size_t*>()))>; template <typename NodeType> struct _hasAddOutputFunction : decltype(_checkAddOutputFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasAddOutputFunction<NodeType>::value, bool>::type = 0> static inline AddOutputFunction addOutputFunction() { return &NodeType::addOutput; }; template <typename NodeType, typename std::enable_if<!_hasAddOutputFunction<NodeType>::value, bool>::type = 0> static inline AddOutputFunction addOutputFunction() { return nullptr; }; // Template collection to provide a pointer to the static method addExtendedOutput() if it exists // Usage: auto addExtendedOutputFn = addExtendedOutputFunction<NodeClass>(); using AddExtendedOutputFunction = std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)>::type; template <typename NodeType> constexpr auto _checkAddExtendedOutputFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkAddExtendedOutputFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().addExtendedOutput(std::declval<const NodeTypeObj&>(), std::declval<const char*>(), std::declval<const char*>(), std::declval<bool>(), std::declval<ExtendedAttributeType>()))>; template <typename NodeType> struct _hasAddExtendedOutputFunction : decltype(_checkAddExtendedOutputFunction<NodeType>(0)) { }; template <typename NodeType, typename 
std::enable_if<_hasAddExtendedOutputFunction<NodeType>::value, bool>::type = 0> static inline AddExtendedOutputFunction addExtendedOutputFunction() { return &NodeType::addExtendedOutput; }; template <typename NodeType, typename std::enable_if<!_hasAddExtendedOutputFunction<NodeType>::value, bool>::type = 0> static inline AddExtendedOutputFunction addExtendedOutputFunction() { return nullptr; }; // Template collection to provide a pointer to the static method addState() if it exists // Usage: auto addStateFn = addStateFunction<NodeClass>(); using AddStateFunction = std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)>::type; template <typename NodeType> constexpr auto _checkAddStateFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkAddStateFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().addState(std::declval<const NodeTypeObj&>(), std::declval<const char*>(), std::declval<const char*>(), std::declval<bool>(), std::declval<const void*>(), std::declval<const size_t*>()))>; template <typename NodeType> struct _hasAddStateFunction : decltype(_checkAddStateFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasAddStateFunction<NodeType>::value, bool>::type = 0> static inline AddStateFunction addStateFunction() { return &NodeType::addState; }; template <typename NodeType, typename std::enable_if<!_hasAddStateFunction<NodeType>::value, bool>::type = 0> static inline AddStateFunction addStateFunction() { return nullptr; }; // Template collection to provide a pointer to the static method addExtendedState() if it exists // Usage: auto addExtendedStateFn = addExtendedStateFunction<NodeClass>(); using AddExtendedStateFunction = std::add_pointer<void(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)>::type; template <typename NodeType> constexpr auto _checkAddExtendedStateFunction(long) -> sfinae_false<NodeType>; 
template <typename NodeType> constexpr auto _checkAddExtendedStateFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().addExtendedState(std::declval<const NodeTypeObj&>(), std::declval<const char*>(), std::declval<const char*>(), std::declval<bool>(), std::declval<ExtendedAttributeType>()))>; template <typename NodeType> struct _hasAddExtendedStateFunction : decltype(_checkAddExtendedStateFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasAddExtendedStateFunction<NodeType>::value, bool>::type = 0> static inline AddExtendedStateFunction addExtendedStateFunction() { return &NodeType::addExtendedState; }; template <typename NodeType, typename std::enable_if<!_hasAddExtendedStateFunction<NodeType>::value, bool>::type = 0> static inline AddExtendedStateFunction addExtendedStateFunction() { return nullptr; }; // Template collection to provide a pointer to the static method hasState() if it exists // Usage: auto hasStateFn = hasStateFunction<NodeClass>(); using HasStateFunction = std::add_pointer<bool(const NodeTypeObj&)>::type; template <typename NodeType> constexpr auto _checkHasStateFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkHasStateFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().hasState(std::declval<const NodeTypeObj&>()))>; template <typename NodeType> struct _hasHasStateFunction : decltype(_checkHasStateFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasHasStateFunction<NodeType>::value, bool>::type = 0> static inline HasStateFunction hasStateFunction() { return &NodeType::hasState; }; template <typename NodeType, typename std::enable_if<!_hasHasStateFunction<NodeType>::value, bool>::type = 0> static inline HasStateFunction hasStateFunction() { return nullptr; }; // Template collection to provide a pointer to the static method registerTasks() if it exists // Usage: auto registerTasksFn = 
registerTasksFunction<NodeClass>(); using RegisterTasksFunction = std::add_pointer<void()>::type; template <typename NodeType> constexpr auto _checkRegisterTasksFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkRegisterTasksFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().registerTasks())>; template <typename NodeType> struct _hasRegisterTasksFunction : decltype(_checkRegisterTasksFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasRegisterTasksFunction<NodeType>::value, bool>::type = 0> static inline RegisterTasksFunction registerTasksFunction() { return &NodeType::registerTasks; }; template <typename NodeType, typename std::enable_if<!_hasRegisterTasksFunction<NodeType>::value, bool>::type = 0> static inline RegisterTasksFunction registerTasksFunction() { return nullptr; }; // Template collection to provide a pointer to the static method getAllMetadata() if it exists // Usage: auto getAllMetadataFn = getAllMetadataFunction<NodeClass>(); using GetAllMetadataFunction = std::add_pointer<size_t(const NodeTypeObj& nodeType, const char**, const char**, size_t)>::type; template <typename NodeType> constexpr auto _checkGetAllMetadataFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkGetAllMetadataFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().getAllMetadata( std::declval<const NodeTypeObj&>(), std::declval<const char**>(), std::declval<const char**>(), std::declval<size_t>()))>; template <typename NodeType> struct _hasGetAllMetadataFunction : decltype(_checkGetAllMetadataFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasGetAllMetadataFunction<NodeType>::value, bool>::type = 0> static inline GetAllMetadataFunction getAllMetadataFunction() { return &NodeType::getAllMetadata; }; template <typename NodeType, typename std::enable_if<!_hasGetAllMetadataFunction<NodeType>::value, 
bool>::type = 0> static inline GetAllMetadataFunction getAllMetadataFunction() { return nullptr; }; // Template collection to provide a pointer to the static method getMetadata() if it exists // Usage: auto getMetadataFn = getMetadataFunction<NodeClass>(); using GetMetadataFunction = std::add_pointer<const char*(const NodeTypeObj& nodeType, const char*)>::type; template <typename NodeType> constexpr auto _checkGetMetadataFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkGetMetadataFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().getMetadata(std::declval<const NodeTypeObj&>(), std::declval<const char*>()))>; template <typename NodeType> struct _hasGetMetadataFunction : decltype(_checkGetMetadataFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasGetMetadataFunction<NodeType>::value, bool>::type = 0> static inline GetMetadataFunction getMetadataFunction() { return &NodeType::getMetadata; }; template <typename NodeType, typename std::enable_if<!_hasGetMetadataFunction<NodeType>::value, bool>::type = 0> static inline GetMetadataFunction getMetadataFunction() { return nullptr; }; // Template collection to provide a pointer to the static method getMetadataCount() if it exists // Usage: auto getMetadataCountFn = getMetadataCountFunction<NodeClass>(); using GetMetadataCountFunction = std::add_pointer<size_t(const NodeTypeObj& nodeType)>::type; template <typename NodeType> constexpr auto _checkGetMetadataCountFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkGetMetadataCountFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().getMetadataCount(std::declval<const NodeTypeObj&>()))>; template <typename NodeType> struct _hasGetMetadataCountFunction : decltype(_checkGetMetadataCountFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasGetMetadataCountFunction<NodeType>::value, bool>::type = 0> 
static inline GetMetadataCountFunction getMetadataCountFunction() { return &NodeType::getMetadataCount; }; template <typename NodeType, typename std::enable_if<!_hasGetMetadataCountFunction<NodeType>::value, bool>::type = 0> static inline GetMetadataCountFunction getMetadataCountFunction() { return nullptr; }; // Template collection to provide a pointer to the static method setMetadata() if it exists // Usage: auto setMetadataFn = setMetadataFunction<NodeClass>(); using SetMetadataFunction = std::add_pointer<void(const NodeTypeObj& nodeType, const char*, const char*)>::type; template <typename NodeType> constexpr auto _checkSetMetadataFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkSetMetadataFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().setMetadata( std::declval<const NodeTypeObj&>(), std::declval<const char*>(), std::declval<const char*>()))>; template <typename NodeType> struct _hasSetMetadataFunction : decltype(_checkSetMetadataFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasSetMetadataFunction<NodeType>::value, bool>::type = 0> static inline SetMetadataFunction setMetadataFunction() { return &NodeType::setMetadata; }; template <typename NodeType, typename std::enable_if<!_hasSetMetadataFunction<NodeType>::value, bool>::type = 0> static inline SetMetadataFunction setMetadataFunction() { return nullptr; }; // Template collection to provide a pointer to the static method getScheduleNodeCount() if it exists // Usage: auto getScheduleNodeCountFn = getScheduleNodeCountFunction<NodeClass>(); using GetScheduleNodeCountFunction = std::add_pointer<size_t(const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t)>::type; template <typename NodeType> constexpr auto _checkGetScheduleNodeCountFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkGetScheduleNodeCountFunction(int) -> sfinae_true<decltype(std::declval<const 
NodeType&>().getScheduleNodeCount(std::declval<const GraphContextObj&>(), std::declval<const NodeObj&>(), std::declval<const ScheduleNodeObj*>(), std::declval<size_t>()))>; template <typename NodeType> struct _hasGetScheduleNodeCountFunction : decltype(_checkGetScheduleNodeCountFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasGetScheduleNodeCountFunction<NodeType>::value, bool>::type = 0> static inline GetScheduleNodeCountFunction getScheduleNodeCountFunction() { return &NodeType::getScheduleNodeCount; }; template <typename NodeType, typename std::enable_if<!_hasGetScheduleNodeCountFunction<NodeType>::value, bool>::type = 0> static inline GetScheduleNodeCountFunction getScheduleNodeCountFunction() { return nullptr; }; // Template collection to provide a pointer to the static method getScheduleNodes() if it exists // Usage: auto getScheduleNodesFn = getScheduleNodesFunction<NodeClass>(); using GetScheduleNodesFunction = std::add_pointer<void( const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t, ScheduleNodeObj*, size_t)>::type; template <typename NodeType> constexpr auto _checkGetScheduleNodesFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkGetScheduleNodesFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().getScheduleNodes(std::declval<const GraphContextObj&>(), std::declval<const NodeObj&>(), std::declval<const ScheduleNodeObj*>(), std::declval<size_t>(), std::declval<ScheduleNodeObj*>(), std::declval<size_t>()))>; template <typename NodeType> struct _hasGetScheduleNodesFunction : decltype(_checkGetScheduleNodesFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasGetScheduleNodesFunction<NodeType>::value, bool>::type = 0> static inline GetScheduleNodesFunction getScheduleNodesFunction() { return &NodeType::getScheduleNodes; }; template <typename NodeType, typename 
std::enable_if<!_hasGetScheduleNodesFunction<NodeType>::value, bool>::type = 0> static inline GetScheduleNodesFunction getScheduleNodesFunction() { return nullptr; }; // Template collection to provide a pointer to the static method onConnectionMade() if it exists // Usage: auto onConnectionTypeResolveFn = onConnectionTypeResolve<NodeClass>(); using OnConnectionTypeResolveFunction = std::add_pointer<void(const NodeObj&)>::type; template <typename NodeType> constexpr auto _checkOnConnectionTypeResolveFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkOnConnectionTypeResolveFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().onConnectionTypeResolve( std::declval<const NodeObj&>()))>; template <typename NodeType> struct _hasOnConnectionTypeResolveFunction : decltype(_checkOnConnectionTypeResolveFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasOnConnectionTypeResolveFunction<NodeType>::value, bool>::type = 0> static inline OnConnectionTypeResolveFunction onConnectionTypeResolveFunction() { return &NodeType::onConnectionTypeResolve; }; template <typename NodeType, typename std::enable_if<!_hasOnConnectionTypeResolveFunction<NodeType>::value, bool>::type = 0> static inline OnConnectionTypeResolveFunction onConnectionTypeResolveFunction() { return nullptr; }; // Template collection to provide a pointer to the static method inspect() if it exists // Usage: auto inspectFn = inspectFunction<NodeClass>(); using InspectFunction = std::add_pointer<bool(const NodeTypeObj& nodeType, inspect::IInspector*)>::type; template <typename NodeType> constexpr auto _checkInspectFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkInspectFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().inspect(std::declval<const NodeTypeObj&>(), std::declval<inspect::IInspector*>()))>; template <typename NodeType> struct _hasInspectFunction : 
decltype(_checkInspectFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasInspectFunction<NodeType>::value, bool>::type = 0> static inline InspectFunction inspectFunction() { return &NodeType::inspect; }; template <typename NodeType, typename std::enable_if<!_hasInspectFunction<NodeType>::value, bool>::type = 0> static inline InspectFunction inspectFunction() { return nullptr; }; // Template collection to provide a pointer to the static method computeVectorized() if it exists // Usage: auto computeVectorizedFn = computeVectorizedFunction<NodeClass>(); using ComputeVectorizedFunction = std::add_pointer<size_t(const GraphContextObj&, const NodeObj&, size_t)>::type; template <typename NodeType> constexpr auto _checkComputeVectorizedFunction(long) -> sfinae_false<NodeType>; template <typename NodeType> constexpr auto _checkComputeVectorizedFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().computeVectorized(std::declval<const GraphContextObj&>(), std::declval<const NodeObj&>(), std::declval<size_t>()))>; template <typename NodeType> struct _hasComputeVectorizedFunction : decltype(_checkComputeVectorizedFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasComputeVectorizedFunction<NodeType>::value, bool>::type = 0> static inline ComputeVectorizedFunction computeVectorizedFunction() { return &NodeType::computeVectorized; }; template <typename NodeType, typename std::enable_if<!_hasComputeVectorizedFunction<NodeType>::value, bool>::type = 0> static inline ComputeVectorizedFunction computeVectorizedFunction() { return nullptr; }; // Template collection to provide a pointer to the static method releaseInstance() if it exists // Usage: auto releaseInstanceFn = releaseInstanceFunction<NodeClass>(); using ReleaseInstanceFunction = std::add_pointer<void(const NodeObj&, NameToken)>::type; template <typename NodeType> constexpr auto _checkReleaseInstanceFunction(long) -> sfinae_false<NodeType>; 
template <typename NodeType> constexpr auto _checkReleaseInstanceFunction(int) -> sfinae_true<decltype(std::declval<const NodeType&>().releaseInstance( std::declval<const NodeObj&>(), std::declval<NameToken>()))>; template <typename NodeType> struct _hasReleaseInstanceFunction : decltype(_checkReleaseInstanceFunction<NodeType>(0)) { }; template <typename NodeType, typename std::enable_if<_hasReleaseInstanceFunction<NodeType>::value, bool>::type = 0> static inline ReleaseInstanceFunction releaseInstanceFunction() { return &NodeType::releaseInstance; }; template <typename NodeType, typename std::enable_if<!_hasReleaseInstanceFunction<NodeType>::value, bool>::type = 0> static inline ReleaseInstanceFunction releaseInstanceFunction() { return nullptr; };
31,955
C
49.245283
126
0.704084
omniverse-code/kit/include/omni/graph/core/ISchedulingHints2.gen.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL //! Interface extension for ISchedulingHints that adds a new "pure" hint template <> class omni::core::Generated<omni::graph::core::ISchedulingHints2_abi> : public omni::graph::core::ISchedulingHints2_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::ISchedulingHints2") /** * Get the flag describing the node's purity state. * * @returns Value of the PurityStatus flag. */ omni::graph::core::ePurityStatus getPurityStatus() noexcept; /** * Set the flag describing the node's purity status. * * @param[in] newPurityStatus New value of the PurityStatus flag. */ void setPurityStatus(omni::graph::core::ePurityStatus newPurityStatus) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline omni::graph::core::ePurityStatus omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>::getPurityStatus() noexcept { return getPurityStatus_abi(); } inline void omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>::setPurityStatus( omni::graph::core::ePurityStatus newPurityStatus) noexcept { setPurityStatus_abi(newPurityStatus); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
2,017
C
28.67647
131
0.730293
omniverse-code/kit/include/omni/graph/core/IDirtyID.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "dirtyid/IDirtyID1.h" #include "dirtyid/IDirtyID2.h"
516
C
38.769228
77
0.790698
omniverse-code/kit/include/omni/graph/core/INodeCategories.gen.h
// Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // //! @file //! //! @brief This file was generated by <i>omni.bind</i>. #include <omni/core/Interface.h> #include <omni/core/OmniAttr.h> #include <omni/core/ResultError.h> #include <functional> #include <type_traits> #include <utility> #ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL /** Interface to the list of categories that a node type can belong to */ template <> class omni::core::Generated<omni::graph::core::INodeCategories_abi> : public omni::graph::core::INodeCategories_abi { public: OMNI_PLUGIN_INTERFACE("omni::graph::core::INodeCategories") /** * Get the number of categories available * * @returns Count of fixed category types */ size_t getCategoryCount() noexcept; /** * Get the list of available categories and their descriptions. * * The caller is responsible for allocating and destroying buffers large enough to hold "bufferSize" results. * If bufferSize > getCategoryCount() then the entries at the ends of the buffers will be filled with nullptr. 
* * @param[in] categoryNameBuffer List of category names * @param[in] categoryDescriptionBuffer List of category descriptions corresponding to the names * @param[in] bufferSize Number of entries to fill in the buffers * * @return true if the category buffer was successfully filled and the bufferSize matched the category count */ bool getCategories(const char** categoryNameBuffer, const char** categoryDescriptionBuffer, size_t bufferSize) noexcept; /** * Define a new category * * @param[in] categoryName Name of the new category * @param[in] categoryDescription Description of the category * * @return false if there was already a category with the given name */ bool defineCategory(const char* categoryName, const char* categoryDescription) noexcept; /** * Remove an existing category, mainly to manage the ones created by a node type for itself * * @param[in] categoryName Name of the category to remove * * @return false if there was no category with the given name */ bool removeCategory(const char* categoryName) noexcept; }; #endif #ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL inline size_t omni::core::Generated<omni::graph::core::INodeCategories_abi>::getCategoryCount() noexcept { return getCategoryCount_abi(); } inline bool omni::core::Generated<omni::graph::core::INodeCategories_abi>::getCategories( const char** categoryNameBuffer, const char** categoryDescriptionBuffer, size_t bufferSize) noexcept { return getCategories_abi(categoryNameBuffer, categoryDescriptionBuffer, bufferSize); } inline bool omni::core::Generated<omni::graph::core::INodeCategories_abi>::defineCategory( const char* categoryName, const char* categoryDescription) noexcept { return defineCategory_abi(categoryName, categoryDescription); } inline bool omni::core::Generated<omni::graph::core::INodeCategories_abi>::removeCategory(const char* categoryName) noexcept { return removeCategory_abi(categoryName); } #endif #undef OMNI_BIND_INCLUDE_INTERFACE_DECL #undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
3,679
C
34.047619
124
0.730905
omniverse-code/kit/include/omni/graph/core/BundlePrimsImpl.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "BundlePrims.h" namespace omni { namespace graph { namespace core { // ==================================================================================================== // // Bundle Attribute // // Because entire Bundle Prims is inlined, we have to put definition of those functions // after declaration of ConstBundlePrim and ConstBundlePrims. // ==================================================================================================== inline BundleAttrib::BundleAttrib(ConstBundlePrim& prim, omni::graph::core::NameToken name) noexcept { using namespace omni::graph::core; // Get attribute handle and attribute properties IConstBundle2* bundle = prim.getConstBundlePtr(); ConstAttributeDataHandle attributeHandle = bundle->getConstAttributeByName(name); if(!attributeHandle.isValid()) { return; } GraphContextObj const& context = prim.getConstBundlePrims()->context(); m_bundlePrim = &prim; m_name = name; m_type = omni::fabric::TypeC(context.iAttributeData->getType(context, attributeHandle)); // Read attribute properties. 
ConstAttributeDataHandle propertyAttributeHandle; propertyAttributeHandle = bundle->getConstAttributeMetadataByName(name, detail::getAttrInterpolationDefinition().token); if(propertyAttributeHandle.isValid()) { m_interpolation = *getDataR<NameToken>(context, propertyAttributeHandle); } propertyAttributeHandle = bundle->getConstAttributeMetadataByName(name, detail::getAttrSourceDefinition().token); if(propertyAttributeHandle.isValid()) { m_source = static_cast<Source>(*getDataR<SourceType>(context, propertyAttributeHandle)); } } inline BundleAttrib::BundleAttrib(BundlePrim& prim, omni::graph::core::NameToken name, omni::graph::core::Type type, size_t arrayElementCount, Source source) noexcept : BundleAttrib{ prim, name } { using namespace omni::graph::core; // Attribute exists! if (m_bundlePrim) { return; } // Attribute does not exist. IBundle2* bundle = prim.getBundlePtr(); GraphContextObj const& context = prim.getConstBundlePrims()->context(); auto handle = bundle->createAttribute(name, type, arrayElementCount); omni::graph::core::getDataW<void*>(context, handle); // remove after OM-50059 is merged. m_bundlePrim = &prim; m_name = name; m_type = omni::fabric::TypeC(type); // Interpolation is optional. 
// Source of the attribute identifies "data" or "relationship" setSource(source); } inline BundlePrim* BundleAttrib::getBundlePrim() noexcept { IConstBundle2* constBundle = getConstBundlePtr(); if(auto bundle = omni::cast<IBundle2>(constBundle)) { return static_cast<BundlePrim*>(m_bundlePrim); } return nullptr; } inline omni::graph::core::IConstBundle2* BundleAttrib::getConstBundlePtr() const noexcept { ConstBundlePrim* bundlePrim = getBundlePrim(); return bundlePrim->getConstBundlePtr(); } inline omni::graph::core::IBundle2* BundleAttrib::getBundlePtr() noexcept { BundlePrim* bundlePrim = getBundlePrim(); return bundlePrim->getBundlePtr(); } inline DirtyIDType BundleAttrib::dirtyID() const noexcept { auto const context = getConstBundlePtr()->getContext(); auto id = carb::getCachedInterface<omni::graph::core::ComputeGraph>()->getDirtyIDInterfacePtr(context); return id->getForAttribute(this->handle()); } // ==================================================================================================== // // Bundle Primitive // // ==================================================================================================== inline BundlePrim::BundlePrim(BundlePrims& bundlePrims, omni::core::ObjectPtr<IBundle2> bundle) : ConstBundlePrim{ bundlePrims, std::move(bundle) } { } inline void BundlePrim::setPath(NameToken path) noexcept { const detail::AttrDefinition& attrDef = detail::getPrimPathDefinition(); AttributeDataHandle pathAttr = getBundlePtr()->getAttributeByName(attrDef.token); if (!pathAttr.isValid()) { pathAttr = getBundlePtr()->createAttribute(attrDef.token, attrDef.type); } BundlePrims* bundlePrims = getBundlePrims(); *getDataW<NameToken>(bundlePrims->context(), pathAttr) = path; } inline void BundlePrim::setType(NameToken type) noexcept { const detail::AttrDefinition& attrDef = detail::getPrimTypeDefinition(); AttributeDataHandle typeAttr = getBundlePtr()->getAttributeByName(attrDef.token); if (!typeAttr.isValid()) { typeAttr = 
getBundlePtr()->createAttribute(attrDef.token, attrDef.type); } BundlePrims* bundlePrims = getBundlePrims(); *getDataW<NameToken>(bundlePrims->context(), typeAttr) = type; } inline BundleAttrib* BundlePrim::addAttr(omni::graph::core::NameToken attrName, omni::graph::core::Type type, size_t arrayElementCount, BundleAttribSource source) noexcept { using namespace omni::graph::core; auto& attrs = getAttributes(); // Erase existing attribute. auto it = attrs.find(attrName); if (it != attrs.end()) { it->second->clearContents(); attrs.erase(it); } auto attr = new BundleAttrib{ *this, attrName, type, arrayElementCount, source }; attrs.emplace(attrName, attr); return attr; } inline BundleAttrib* BundlePrim::addRelationship(omni::graph::core::NameToken name, size_t targetCount) noexcept { return addAttr(name, detail::s_relationshipType, targetCount, BundleAttribSource::Relationship); } inline bool BundlePrim::addAttrs(std::vector<BundlePrim::AddAttrInfo> const& attrList) noexcept { using namespace omni::graph::core; IBundle2* bundle = getBundlePtr(); auto& attrs = getAttributes(); // Remove attributes that exists but properties are different. std::vector<BundlePrim::AddAttrInfo> attrToCreate; attrToCreate.reserve(attrList.size()); for (auto const& newAttr : attrList) { auto it = attrs.find(newAttr.attrName); if (it == attrs.end()) { attrToCreate.push_back(newAttr); continue; } BundleAttrib const* attr = it->second.get(); if (attr->type() != newAttr.type || attr->size() != newAttr.arrayElementCount || attr->source() != newAttr.source) { it->second->clearContents(); attrs.erase(it); attrToCreate.push_back(newAttr); } // attribute is the same nothing to do. } // Create attributes that require instantiation. 
for (auto const& tmp : attrToCreate) { auto attr = new BundleAttrib{ *this, tmp.attrName, tmp.type, tmp.arrayElementCount, tmp.source }; attrs.emplace(tmp.attrName, attr); } return true; } inline void BundlePrim::removeAttr(omni::graph::core::NameToken attrName) noexcept { using namespace omni::graph::core; // Remove attribute from internal member. auto& attrs = getAttributes(); auto it = attrs.find(attrName); if (it != attrs.end()) { it->second->clearContents(); attrs.erase(it); } } inline void BundlePrim::clearContents() noexcept { auto& attrs = getAttributes(); for (auto& attr : attrs) { attr.second->clearContents(); } getAttributes().clear(); } inline void BundlePrim::copyContentsFrom(ConstBundlePrim const& source, bool removeAttrsNotInSource /* = true*/) noexcept { return copyContentsFrom(const_cast<ConstBundlePrim&>(source), removeAttrsNotInSource); } inline void BundlePrim::copyContentsFrom(ConstBundlePrim& source, bool removeAttrsNotInSource /* = true*/) noexcept { CARB_IGNOREWARNING_MSC_WITH_PUSH(4996) CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations") // Nothing to do if they're already equal. if (dirtyID() == source.dirtyID()) return; BundlePrims* bundlePrims = getBundlePrims(); // Add/set any attributes from source, if the dirty IDs are different, being sure to copy the dirty IDs. // first we batch add them, then we copy the contents std::vector<BundlePrim::AddAttrInfo> attrsToAdd; attrsToAdd.reserve(source.attrCount()); for (auto const& sourceAttr : source) { NameToken name = sourceAttr.name(); // NOTE: Request a const attribute, to avoid bumping its dirty ID. 
BundleAttrib const* constDestAttr = getConstAttr(name); if (constDestAttr != nullptr && constDestAttr->dirtyID() == sourceAttr.dirtyID()) { continue; } if (constDestAttr == nullptr) { attrsToAdd.push_back( { sourceAttr.m_name, Type(sourceAttr.m_type), 0, sourceAttr.m_source }); } } // add the attributes addAttrs(attrsToAdd); // copy the data for (auto const& sourceAttr : source) { NameToken name = sourceAttr.name(); // NOTE: Request a const attribute, to avoid bumping its dirty ID. BundleAttrib const* constDestAttr = getConstAttr(name); CARB_ASSERT(constDestAttr != nullptr); if (constDestAttr == nullptr || constDestAttr->dirtyID() == sourceAttr.dirtyID()) { continue; } const_cast<BundleAttrib*>(constDestAttr)->copyContentsFrom(sourceAttr); } CARB_ASSERT(attrCount() >= source.attrCount()); // If there are more attributes in this than in source, remove any that aren't in source. auto& attrMap = getAttributes(); if (attrCount() > source.attrCount() && removeAttrsNotInSource) { std::vector<NameToken> attrsToRemove; for (auto it = attrMap.begin(); it != attrMap.end();) { if (source.getConstAttr(it->second->name()) == nullptr) { it->second->clearContents(); it = attrMap.erase(it); } else { ++it; } } } CARB_IGNOREWARNING_GNUC_POP CARB_IGNOREWARNING_MSC_POP } inline BundleAttrib* BundlePrim::getAttr(omni::graph::core::NameToken attrName) noexcept { auto& attrs = getAttributes(); auto it = attrs.find(attrName); if (it == attrs.end()) { return nullptr; } BundleAttrib* attr = it->second.get(); return attr; } inline omni::graph::core::BundleHandle BundlePrim::handle() noexcept { return getBundlePtr()->getHandle(); } inline BundlePrims* BundlePrim::getBundlePrims() noexcept { omni::graph::core::IBundle2* bundle = getBundlePtr(); if (bundle) { ConstBundlePrims* bundlePrims = ConstBundlePrim::getConstBundlePrims(); return static_cast<BundlePrims*>(bundlePrims); } return nullptr; } inline BundlePrims* BundlePrim::bundlePrims() noexcept { return getBundlePrims(); } inline 
BundlePrimAttrIterator BundlePrim::begin() noexcept { return BundlePrimAttrIterator(*this, getAttributes().begin()); } inline BundlePrimAttrIterator BundlePrim::end() noexcept { return BundlePrimAttrIterator(*this, getAttributes().end()); } inline ConstBundlePrimAttrIterator BundlePrim::cbegin() noexcept { return ConstBundlePrim::begin(); } inline ConstBundlePrimAttrIterator BundlePrim::cend() noexcept { return ConstBundlePrim::end(); } inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr(omni::graph::core::IConstBundle2* constBundle) noexcept { auto bundle = omni::cast<omni::graph::core::IBundle2>(constBundle); return bundle.get(); } inline omni::graph::core::IBundle2* BundlePrim::getBundlePtr() noexcept { using namespace omni::graph::core; IConstBundle2* constBundle = getConstBundlePtr(); IBundle2* bundle = getBundlePtr(constBundle); return bundle; } // ==================================================================================================== // // Bundle Primitives // // ==================================================================================================== inline BundlePrims::~BundlePrims() noexcept { detach(); } inline omni::graph::core::BundleHandle BundlePrims::handle() noexcept { using namespace omni::graph::core; if (IBundle2* bundle = getBundlePtr()) { return bundle->getHandle(); } return BundleHandle{ BundleHandle::invalidValue() }; } inline void BundlePrims::ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept { } inline BundlePrims::BundlePrims() : ConstBundlePrims() { } inline BundlePrims::BundlePrims(omni::graph::core::GraphContextObj const& context, omni::graph::core::BundleHandle const& bundle) : BundlePrims() { attach(context, bundle); } inline void BundlePrims::attach(omni::graph::core::GraphContextObj const& context, omni::graph::core::BundleHandle const& bundleHandle) noexcept { using namespace omni::graph::core; ComputeGraph* computeGraph = carb::getCachedInterface<ComputeGraph>(); 
omni::core::ObjectPtr<IBundleFactory> factoryPtr = computeGraph->getBundleFactoryInterfacePtr(); omni::core::ObjectPtr<IBundle2> bundlePtr = factoryPtr->getBundle(context, bundleHandle); ConstBundlePrims::attach(std::move(factoryPtr), std::move(bundlePtr)); IBundle2* bundle = getBundlePtr(); auto& bundlePrimIndexOffsetDef = detail::getBundlePrimIndexOffsetDefinition(); m_bundlePrimIndexOffsetAttr = bundle->getBundleMetadataByName(bundlePrimIndexOffsetDef.token); } inline void BundlePrims::detach() noexcept { using omni::graph::core::AttributeDataHandle; // // Bundle Level Attributes // m_bundlePrimIndexOffsetAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() }; ConstBundlePrims::detach(); } inline BundlePrim* BundlePrims::getPrim(BundlePrimIndex primIndex) noexcept { using namespace omni::graph::core; auto createSortedBundlePrims = [this, &bundlePrims = *this]() -> BundlePrimArray { const size_t childBundleCount = getBundlePtr()->getChildBundleCount(); std::vector<BundleHandle> handles(childBundleCount); getBundlePtr()->getChildBundles(handles.data(), handles.size()); const GraphContextObj& graphContext = context(); BundlePrimArray prims(childBundleCount); BundlePrimArray nonIndexedPrims; for (BundleHandle& handle : handles) { auto childBundle = getBundleFactoryPtr()->getBundle(graphContext, handle); BundlePrim* prim = new BundlePrim(bundlePrims, childBundle); BundlePrimIndex index = prim->primIndex(); CARB_ASSERT(index < childBundleCount || index == kInvalidBundlePrimIndex); if (index < childBundleCount) { prims[index].reset(prim); } else { nonIndexedPrims.emplace_back(prim); } } // Merge non-indexed prims into the sorted array. 
if (!nonIndexedPrims.empty()) { BundlePrimIndex index = 0; for (ConstBundlePrimPtr& nonIndexedPrim : nonIndexedPrims) { while (index < childBundleCount) { ConstBundlePrimPtr& prim = prims[index++]; if (!prim) { prim = std::move(nonIndexedPrim); break; } } } } return prims; }; // Since we acquire BundlePrim instance through BundlePrims interface, // we are required to bump dirty id of this prim because intention is to modify it. auto bundlePrim = static_cast<BundlePrim*>(ConstBundlePrims::getConstPrim(primIndex, createSortedBundlePrims)); return bundlePrim; } inline BundlePrim* BundlePrims::getClearedPrim(BundlePrimIndex primIndex) noexcept { BundlePrim* bundlePrim = getPrim(primIndex); if(!bundlePrim) { return nullptr; } bundlePrim->clearContents(); return bundlePrim; } inline BundlePrim& BundlePrims::getCommonAttrs() noexcept { ConstBundlePrim& commonAttributes = ConstBundlePrims::getConstCommonAttrs(); return static_cast<BundlePrim&>(commonAttributes); } inline omni::graph::core::IBundle2* BundlePrims::getBundlePtr() noexcept { using namespace omni::graph::core; auto constBundle = getConstBundlePtr(); auto bundle = omni::cast<IBundle2>(constBundle); return bundle.get(); } inline void BundlePrims::clearContents() noexcept { for (BundlePrimIndex primIndex = getPrimCount(); primIndex != 0;) { --primIndex; removePrim(primIndex); } // Delete all attributes from this bundle. BundlePrim& thisBundle = getCommonAttrs(); thisBundle.clearContents(); // remove internal data IBundle2* bundle = getBundlePtr(); if (m_bundlePrimIndexOffsetAttr.isValid()) { const detail::AttrDefinition& attrDef = detail::getBundlePrimIndexOffsetDefinition(); bundle->removeBundleMetadata(attrDef.token); m_bundlePrimIndexOffsetAttr = AttributeDataHandle{ AttributeDataHandle::invalidValue() }; } // Clearing bundle prims internal attributes such as bundleDirtyID and others causes downstream problems. // Initial implementation never cleared those attributes. 
#if 0 auto bundlePrimsInternalAttributes = { std::ref(m_bundleDirtyIDAttr), // std::ref(m_primIndexAttr), // }; for (auto& internalAttribute : bundlePrimsInternalAttributes) { if (internalAttribute.get().isValid()) { bundle->removeAttribute(internalAttribute.get()); } internalAttribute.get() = AttributeDataHandle{ AttributeDataHandle::invalidValue() }; } #endif } inline bool BundlePrims::removePrim(ConstBundlePrim* prim) noexcept { return removePrim(prim->primIndex()); } inline bool BundlePrims::removePrim(BundlePrimIndex primIndex) noexcept { using namespace omni::graph::core; IBundle2* bundle = getBundlePtr(); auto& context = this->context(); auto& prims = getPrimitives(); // remove children and attributes BundlePrim* childBundlePrim = getPrim(primIndex); if (!childBundlePrim) { return false; } // clear contents and remove bundle from a map childBundlePrim->clearContents(); bundle->removeChildBundle(childBundlePrim->handle()); // If removed primitive is not the last one, // swap last one with removed one and update index. 
size_t const newPrimCount = prims.size() - 1; if (primIndex != newPrimCount) { prims[primIndex] = std::move(prims[newPrimCount]); childBundlePrim = getPrim(primIndex); IBundle2* childBundle = childBundlePrim->getBundlePtr(); CARB_ASSERT(childBundle); if (childBundle) { const detail::AttrDefinition& attrDef = detail::getPrimIndexDefinition(); AttributeDataHandle primIndexAttr = childBundle->getBundleMetadataByName(attrDef.token); CARB_ASSERT(primIndexAttr.isValid()); if (primIndexAttr.isValid()) { *getDataW<uint64_t>(context, primIndexAttr) = primIndex; } } } prims.resize(newPrimCount); return true; } inline size_t BundlePrims::addPrims(size_t primCountToAdd) noexcept { using namespace omni::graph::core; size_t oldPrimCount = getConstBundlePtr()->getChildBundleCount(); if (primCountToAdd == 0) { return oldPrimCount; } size_t const newPrimCount = oldPrimCount + primCountToAdd; CARB_ASSERT(newPrimCount > oldPrimCount); IBundle2* bundle = getBundlePtr(); IBundleFactory* factory = getBundleFactoryPtr(); auto& context = this->context(); if (!m_bundlePrimIndexOffsetAttr.isValid()) { auto& attrDef = detail::getBundlePrimIndexOffsetDefinition(); m_bundlePrimIndexOffsetAttr = bundle->getBundleMetadataByName(attrDef.token); if (!m_bundlePrimIndexOffsetAttr.isValid()) { m_bundlePrimIndexOffsetAttr = bundle->createBundleMetadata(attrDef.token, attrDef.type); *getDataW<uint64_t>(context, m_bundlePrimIndexOffsetAttr) = 0; } } uint64_t* bundlePrimIndexOffsetData = getDataW<uint64_t>(context, m_bundlePrimIndexOffsetAttr); auto& primIndexDef = detail::getPrimIndexDefinition(); // Create new child bundles. // All children are called 'prim' + primIndex, because IBundle2 interface does not allow sparse hierarchy. // Then child paths are stored as an attribute. 
BundlePrimArray& prims = getPrimitives(); prims.resize(newPrimCount); std::string primPathStr; for (BundlePrimIndex primIndex = oldPrimCount; primIndex < newPrimCount; ++primIndex) { primPathStr = "prim" + std::to_string(*bundlePrimIndexOffsetData + primIndex - oldPrimCount); NameToken primName = context.iToken->getHandle(primPathStr.data()); BundleHandle childHandle = bundle->createChildBundle(primName); auto childBundle = factory->getBundle(context, childHandle); CARB_ASSERT(childBundle); // A metadata attribute is created for each child bundle to store its prim index, so that each BundlePrims or // ConstBundlePrims instance attached to this bundle can have consistent prim indices. if (childBundle) { AttributeDataHandle primIndexAttr = childBundle->createBundleMetadata(primIndexDef.token, primIndexDef.type); *getDataW<uint64_t>(context, primIndexAttr) = primIndex; } auto newPrim = new BundlePrim(*this, std::move(childBundle)); prims[primIndex].reset(newPrim); } *bundlePrimIndexOffsetData += primCountToAdd; // Update prim index offset. 
return oldPrimCount; } inline BundlePrimIterator BundlePrims::begin() noexcept { return BundlePrimIterator(*this); } inline BundlePrimIterator BundlePrims::end() noexcept { return BundlePrimIterator(*this, getPrimCount()); } inline ConstBundlePrimIterator BundlePrims::cbegin() noexcept { return ConstBundlePrims::begin(); } inline ConstBundlePrimIterator BundlePrims::cend() noexcept { return ConstBundlePrims::end(); } // ==================================================================================================== // // Bundle Primitive Iterator // // ==================================================================================================== inline BundlePrimIterator::BundlePrimIterator(BundlePrims& bundlePrims, BundlePrimIndex primIndex) noexcept : m_bundlePrims(&bundlePrims), m_primIndex(primIndex) { } inline bool BundlePrimIterator::operator==(BundlePrimIterator const& that) const noexcept { return m_bundlePrims == that.m_bundlePrims && m_primIndex == that.m_primIndex; } inline bool BundlePrimIterator::operator!=(BundlePrimIterator const& that) const noexcept { return !(*this == that); } inline BundlePrim& BundlePrimIterator::operator*() noexcept { return *(m_bundlePrims->getPrim(m_primIndex)); } inline BundlePrim* BundlePrimIterator::operator->() noexcept { return m_bundlePrims->getPrim(m_primIndex); } inline BundlePrimIterator& BundlePrimIterator::operator++() noexcept { ++m_primIndex; return *this; } // ==================================================================================================== // // Bundle Primitive Attribute Iterator // // ==================================================================================================== inline BundlePrimAttrIterator::BundlePrimAttrIterator(BundlePrim& bundlePrim, BundlePrim::AttrMapIteratorType attrIter) noexcept : m_bundlePrim(&bundlePrim), m_attrIter(attrIter) { } inline bool BundlePrimAttrIterator::operator==(BundlePrimAttrIterator const& that) const noexcept { return m_bundlePrim 
== that.m_bundlePrim && m_attrIter == that.m_attrIter; } inline bool BundlePrimAttrIterator::operator!=(BundlePrimAttrIterator const& that) const noexcept { return !(*this == that); } inline BundleAttrib const* BundlePrimAttrIterator::getConst() noexcept { CARB_ASSERT(m_bundlePrim != nullptr); CARB_ASSERT(m_attrIter->second); BundleAttrib* attr = m_attrIter->second.get(); return attr; } inline BundleAttrib& BundlePrimAttrIterator::operator*() noexcept { CARB_ASSERT(m_bundlePrim != nullptr); CARB_ASSERT(m_attrIter->second); BundleAttrib* attr = m_attrIter->second.get(); return *attr; } inline BundleAttrib* BundlePrimAttrIterator::operator->() noexcept { CARB_ASSERT(m_bundlePrim != nullptr); CARB_ASSERT(m_attrIter->second); BundleAttrib* attr = m_attrIter->second.get(); return attr; } inline BundlePrimAttrIterator& BundlePrimAttrIterator::operator++() noexcept { ++m_attrIter; return *this; } } // namespace core } // namespace graph } // namespace omni
25,740
C
30.429792
166
0.645027
omniverse-code/kit/include/omni/graph/core/PyIBundle.gen.h
// Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindIBundle2(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::IBundle2_abi>, omni::core::ObjectPtr<omni::core::Generated<omni::graph::core::IBundle2_abi>>, omni::core::Api<omni::graph::core::IConstBundle2_abi>> clsParent(m, "_IBundle2"); py::class_<omni::graph::core::IBundle2, omni::core::Generated<omni::graph::core::IBundle2_abi>, omni::core::ObjectPtr<omni::graph::core::IBundle2>, omni::core::Api<omni::graph::core::IConstBundle2_abi>> cls(m, "IBundle2", R"OMNI_BIND_RAW_(Provide read write access to recursive bundles.)OMNI_BIND_RAW_"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::IBundle2>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::IBundle2>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::IBundle2 instantiation"); } return tmp; })); return omni::python::PyBind<omni::graph::core::IBundle2>::bind(cls); }
2,263
C
40.163636
121
0.623951
omniverse-code/kit/include/omni/graph/core/PyIVariable.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // // --------- Warning: This is a build system generated file. ---------- // #pragma once #include <omni/core/ITypeFactory.h> #include <omni/python/PyBind.h> #include <omni/python/PyString.h> #include <omni/python/PyVec.h> #include <sstream> auto bindeVariableScope(py::module& m) { py::enum_<omni::graph::core::eVariableScope> e( m, "eVariableScope", R"OMNI_BIND_RAW_(Scope in which the variable has been made available)OMNI_BIND_RAW_"); e.value("E_PRIVATE", omni::graph::core::eVariableScope::ePrivate, R"OMNI_BIND_RAW_(Variable is accessible only to its graph )OMNI_BIND_RAW_"); e.value("E_READ_ONLY", omni::graph::core::eVariableScope::eReadOnly, R"OMNI_BIND_RAW_(Variable can be read by other graphs )OMNI_BIND_RAW_"); e.value("E_PUBLIC", omni::graph::core::eVariableScope::ePublic, R"OMNI_BIND_RAW_(Variable can be read/written by other graphs )OMNI_BIND_RAW_"); return e; } auto bindIVariable(py::module& m) { // hack around pybind11 issues with C++17 // - https://github.com/pybind/pybind11/issues/2234 // - https://github.com/pybind/pybind11/issues/2666 // - https://github.com/pybind/pybind11/issues/2856 py::class_<omni::core::Generated<omni::graph::core::IVariable_abi>, omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::IVariable_abi>>, omni::core::IObject> clsParent(m, "_IVariable"); py::class_<omni::graph::core::IVariable, omni::core::Generated<omni::graph::core::IVariable_abi>, omni::python::detail::PyObjectPtr<omni::graph::core::IVariable>, omni::core::IObject> cls(m, "IVariable", R"OMNI_BIND_RAW_(Object that 
contains a value that is local to a graph, available from anywhere in the graph)OMNI_BIND_RAW_"); cls.def(py::init( [](const omni::core::ObjectPtr<omni::core::IObject>& obj) { auto tmp = omni::core::cast<omni::graph::core::IVariable>(obj.get()); if (!tmp) { throw std::runtime_error("invalid type conversion"); } return tmp; })); cls.def(py::init( []() { auto tmp = omni::core::createType<omni::graph::core::IVariable>(); if (!tmp) { throw std::runtime_error("unable to create omni::graph::core::IVariable instantiation"); } return tmp; })); cls.def_property_readonly("name", &omni::graph::core::IVariable::getName); cls.def_property_readonly("source_path", &omni::graph::core::IVariable::getSourcePath); cls.def_property("category", &omni::graph::core::IVariable::getCategory, [](omni::graph::core::IVariable* self, const char* category) { self->setCategory(category); }); cls.def_property("display_name", &omni::graph::core::IVariable::getDisplayName, [](omni::graph::core::IVariable* self, const char* displayName) { self->setDisplayName(displayName); }); cls.def_property("tooltip", &omni::graph::core::IVariable::getTooltip, [](omni::graph::core::IVariable* self, const char* toolTip) { self->setTooltip(toolTip); }); cls.def_property("scope", &omni::graph::core::IVariable::getScope, &omni::graph::core::IVariable::setScope); cls.def_property_readonly("valid", &omni::graph::core::IVariable::isValid); return omni::python::PyBind<omni::graph::core::IVariable>::bind(cls); }
3,955
C
48.449999
138
0.639191
omniverse-code/kit/include/omni/graph/core/ConstBundlePrims.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ==================================================================================================== /* _____ _ _ _ _ _ | __ \ | \ | | | | | | | | | | | | ___ | \| | ___ | |_ | | | |___ ___ | | | |/ _ \ | . ` |/ _ \| __| | | | / __|/ _ \ | |__| | (_) | | |\ | (_) | |_ | |__| \__ \ __/ |_____/ \___/ |_| \_|\___/ \__| \____/|___/\___| This is a temporary interface that can change at any time. */ // ==================================================================================================== #include "BundleAttrib.h" #include <omni/graph/core/IBundleFactory.h> #include <unordered_map> #include <memory> #include <vector> namespace omni { namespace graph { namespace core { class ConstBundlePrims; class ConstBundlePrimIterator; class ConstBundlePrimAttrIterator; /** * Index used to identify primitives in a bundle. */ using BundlePrimIndex = size_t; constexpr BundlePrimIndex kInvalidBundlePrimIndex = ~BundlePrimIndex(0); /** * Collection of read-only attributes in a primitive. * * Const Bundle Primitive is not movable, not copyable. It lifespan is managed by Const Bundle Primitives. 
*/ class ConstBundlePrim { public: using BundleAttributeMap = std::unordered_map<NameToken, std::unique_ptr<BundleAttrib>>; using AttrMapIteratorType = BundleAttributeMap::const_iterator; ConstBundlePrim(ConstBundlePrim const&) = delete; ConstBundlePrim(ConstBundlePrim&&) = delete; ConstBundlePrim& operator=(ConstBundlePrim const& that) = delete; ConstBundlePrim& operator=(ConstBundlePrim&&) = delete; /** * @return Bundle handle of this primitive. */ ConstBundleHandle getConstHandle() noexcept; /** * @return Parent bundle prims of this primitive. */ ConstBundlePrims* getConstBundlePrims() noexcept; /** * @return Number of attributes in this primitive. Does not include internal attributes. */ size_t attrCount() noexcept; /** * @return PrimAttribute if attribute with given name is found, nullptr otherwise. */ BundleAttrib const* getConstAttr(NameToken attrName) noexcept; /** * @return Index of this primitive in parent bundle. */ BundlePrimIndex primIndex() noexcept; /** * @return Path of this primitive. */ NameToken path() noexcept; /** * @return Type of this primitive. */ NameToken type() noexcept; [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]] DirtyIDType dirtyID() noexcept; /** * @return Attribute iterator pointing to the first attribute in this bundle. */ ConstBundlePrimAttrIterator begin() noexcept; /** * @return Attribute iterator pointing to the last attribute in this bundle. */ ConstBundlePrimAttrIterator end() noexcept; /*********************************************************************************************** * * TODO: Following methods might be deprecated in the future. * In the next iteration when real interface starts to emerge, we can retire those methods. * ***********************************************************************************************/ /** * @deprecated Do not use!. Use getConstAttr(). 
*/ [[deprecated("Use non const instead.")]] BundleAttrib const* getAttr(NameToken attrName) const noexcept; /** * @deprecated Do not use!. Use non-const variant of path(). */ [[deprecated("Use non const instead.")]] NameToken path() const noexcept; /** * @deprecated Do not use!. Use non-const variant of type(). */ [[deprecated("Use non const instead.")]] NameToken type() const noexcept; [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]] DirtyIDType dirtyID() const noexcept; /** * @deprecated Do not use!. Use non-const variant of begin(). */ [[deprecated("Use non const instead.")]] ConstBundlePrimAttrIterator begin() const noexcept; /** * @deprecated Do not use!. Use non-const variant of end(). */ [[deprecated("Use non const instead.")]] ConstBundlePrimAttrIterator end() const noexcept; protected: /** * Direct initialization with IConstBundle interface. * * ConstBundlePrim and BundlePrim take advantage of polymorphic relationship * between IConstBundle and IBundle interfaces. * In order to modify bundles, BundlePrim makes attempt to down cast IConstBundle * to IBundle interface. When this process is successful then, bundle can be modified. * * Only ConstBundlePrims is allowed to create instances of ConstBundlePrim. */ ConstBundlePrim(ConstBundlePrims& bundlePrims, omni::core::ObjectPtr<IConstBundle2> bundle); /** * @return IConstBundle interface for this bundle primitive. */ IConstBundle2* getConstBundlePtr() noexcept; /** * @return Get attribute used by ConstBundlePrims and BundlePrims. */ BundleAttributeMap& getAttributes() noexcept; /** * Reads public attributes from the bundle and caches them as BundleAttribs. */ void readAndCacheAttributes() noexcept; private: ConstBundlePrims* m_bundlePrims{ nullptr }; // Parent of this bundle prim. 
omni::core::ObjectPtr<IConstBundle2> m_bundle; ConstAttributeDataHandle m_primIndexAttr{ ConstAttributeDataHandle::invalidValue() }; ConstAttributeDataHandle m_pathAttr{ ConstAttributeDataHandle::invalidValue() }; ConstAttributeDataHandle m_typeAttr{ ConstAttributeDataHandle::invalidValue() }; BundleAttributeMap m_attributes; // Cached public attributes that belong to this primitive. friend class BundleAttrib; // Required to access IConstBundle interface. friend class BundlePrim; // Required to access primitive type. friend class BundlePrims; // Required to update internal indices. friend class ConstBundlePrims; // Required to call constructor. }; /** * Collection of read-only primitives in a bundle. * * Const Bundle Primitives is not movable, not copyable. It lifespan is managed by the user. */ class ConstBundlePrims { public: ConstBundlePrims(); ConstBundlePrims(GraphContextObj const& context, ConstBundleHandle const& bundle); ConstBundlePrims(ConstBundlePrims const&) = delete; ConstBundlePrims(ConstBundlePrims&&) = delete; ConstBundlePrims& operator=(ConstBundlePrims const&) = delete; ConstBundlePrims& operator=(ConstBundlePrims&&) = delete; /** * @return Bundle handle of this primitive. */ ConstBundleHandle getConstHandle() noexcept; /** * @return Number of primitives in this bundle of primitives. */ size_t getPrimCount() noexcept; /** * @return Get read only primitive under specified index. */ ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex) noexcept; [[deprecated("Dirty ID management has been moved to core. Use IBundleChanges.")]] DirtyIDType getBundleDirtyID() noexcept; /** * Common Attributes are attributes that are shared for entire bundle. * An example of a common attribute is "transform" attribute. * * @return ConstBundlePrims as ConstBundlePrim to access attributes. */ ConstBundlePrim& getConstCommonAttrs() noexcept; /** * @return Context where bundle primitives belongs to. 
*/ GraphContextObj const& context() noexcept; /** * @return Primitive iterator pointing to the first primitive in this bundle. */ ConstBundlePrimIterator begin() noexcept; /** * @return Primitive iterator pointing to the last primitive in this bundle. */ ConstBundlePrimIterator end() noexcept; /*********************************************************************************************** * * TODO: Following methods might be deprecated in the future. * In the next iteration when real interface starts to emerge, we can retire those methods. * ***********************************************************************************************/ /** * @deprecated Do not use! Use getConstPrim(). */ ConstBundlePrim* getPrim(BundlePrimIndex primIndex) noexcept; [[deprecated("Getting next DirtyID has no effect, Dirty ID management has been moved to core. Use IBundleChanges.")]] DirtyIDType getNextDirtyID() noexcept { return carb::getCachedInterface<IDirtyID>()->getNextDirtyID(); } /** * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims. * * @todo: There is no benefit of using this method. Cache has to be rebuild from scratch * whenever ConstBundlePrims is attached/detached. * It would be better to remove default constructor and enforce cache construction * through constructor with arguments. */ void attach(GraphContextObj const& context, ConstBundleHandle const& bundle) noexcept; /** * @deprecated Use appropriate constructor and heap allocate ConstBundlePrims. */ void detach() noexcept; /** * @deprecated Use getConstHandle. */ ConstBundleHandle handle() noexcept; /** * @deprecated Use getConstCommonAttrs. */ ConstBundlePrim& getCommonAttrs() noexcept; /** * @deprecated There is no need to separate attributes. Inherently IBundle2 interface keeps them separated. */ void separateAttrs() noexcept; /** * @deprecated Caching attributes is not needed. Calling this method doesn't do anything. 
*/ void ensurePrimAttrsCached(BundlePrimIndex primIndex) noexcept; protected: using ConstBundlePrimPtr = std::unique_ptr<ConstBundlePrim>; using BundlePrimArray = std::vector<ConstBundlePrimPtr>; /** * Get bundle primitives in this bundle. */ BundlePrimArray& getPrimitives() noexcept; /** * IConstBundle2 is a polymorphic base for IBundle2, thus passing bundle argument allows passing * version of the interface that allows mutations. */ void attach(omni::core::ObjectPtr<IBundleFactory>&& factory, omni::core::ObjectPtr<IConstBundle2>&& bundle) noexcept; /** * @return Factory to spawn instances of IBundle interface. */ IBundleFactory* getBundleFactoryPtr() noexcept; /** * @return IBundle instance of this bundle. */ IConstBundle2* getConstBundlePtr() noexcept; /** * Instances of BundlePrim are instantiated on demand. Argument create allows * instantiation mutable or immutable IConstBundle2 interface. */ template<typename FUNC> ConstBundlePrim* getConstPrim(BundlePrimIndex primIndex, FUNC create) noexcept; private: omni::core::ObjectPtr<IBundleFactory> m_factory; omni::core::ObjectPtr<IConstBundle2> m_bundle; GraphContextObj m_context; // Backward compatibility. /** * ConstBundlePrims is a bundle as well. To access attributes under this bundle we need to acquire * an instance of ConstBundlePrim for this bundle. Common attributes, with unfortunate name, * gives us ability to access those attributes. */ ConstBundlePrimPtr m_commonAttributes; BundlePrimArray m_primitives; // Cached instances of BundlePrim. friend class ConstBundlePrim; friend class BundlePrim; friend class BundleAttrib; }; /** * Primitives in Bundle iterator. 
*/ class ConstBundlePrimIterator { public: ConstBundlePrimIterator(ConstBundlePrims& bundlePrims, BundlePrimIndex primIndex = 0) noexcept; ConstBundlePrimIterator(ConstBundlePrimIterator const& that) noexcept = default; ConstBundlePrimIterator& operator=(ConstBundlePrimIterator const& that) noexcept = default; bool operator==(ConstBundlePrimIterator const& that) const noexcept; bool operator!=(ConstBundlePrimIterator const& that) const noexcept; ConstBundlePrim& operator*() noexcept; ConstBundlePrim* operator->() noexcept; ConstBundlePrimIterator& operator++() noexcept; private: ConstBundlePrims* m_bundlePrims; BundlePrimIndex m_primIndex; }; /** * Attributes in Primitive iterator. */ class ConstBundlePrimAttrIterator { public: ConstBundlePrimAttrIterator(ConstBundlePrim& bundlePrim, ConstBundlePrim::AttrMapIteratorType attrIter) noexcept; ConstBundlePrimAttrIterator(ConstBundlePrimAttrIterator const& that) noexcept = default; ConstBundlePrimAttrIterator& operator=(ConstBundlePrimAttrIterator const& that) noexcept = default; bool operator==(ConstBundlePrimAttrIterator const& that) const noexcept; bool operator!=(ConstBundlePrimAttrIterator const& that) const noexcept; BundleAttrib const& operator*() const noexcept; BundleAttrib const* operator->() const noexcept; ConstBundlePrimAttrIterator& operator++() noexcept; private: ConstBundlePrim* m_bundlePrim; ConstBundlePrim::AttrMapIteratorType m_attrIter; }; } // namespace core } // namespace graph } // namespace omni #include "ConstBundlePrimsImpl.h"
13,648
C
32.048426
121
0.659217
omniverse-code/kit/include/omni/graph/core/IBundleChanges.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include "bundle/IBundleChanges1.h"
483
C
42.999996
77
0.797101
omniverse-code/kit/include/omni/graph/core/iAttributeData.h
// Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/Handle.h> #include <omni/graph/core/Type.h> namespace omni { namespace graph { namespace core { using RawPtr = uint8_t*; //!< Type for casting byte arrays to actual values using ConstRawPtr = uint8_t const*; //!< Type for casting const byte arrays to actual values // ====================================================================== /** Interface to data belonging to a specific attribute */ struct IAttributeData { //! @private to avoid doxygen problems CARB_PLUGIN_INTERFACE("omni::graph::core::IAttributeData", 1, 6); /** * Gets the name of the attribute containing this attribute data. * * The attribute data handle may refer to attribute data from an attribute that is upstream * of the attribute from which the handle was retrieved, in which case, this will * return the upstream attribute's name. * * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] handle The handle to the attribute whose name is being requested * @return A NameToken representing the attribute's name, for which the text can be retrieved * using IToken::getText */ NameToken (CARB_ABI* getName)(const GraphContextObj& contextObj, ConstAttributeDataHandle handle); /** * Gets the name of the type of this attribute data. Use getType to get a representation of * the type that is easier to interpret in code. * * The actual attribute data may have a different type than an associated attribute on a node, * for example, if the upstream source of the data has a different type. 
* This function returns the type of the data, not the type of the associated attribute. * * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] handle The handle to the attribute data whose type name is being requested * @return A NameToken representing the attribute data's type name, for which the text can be retrieved * using IToken::getText */ NameToken (CARB_ABI* getTypeName)(const GraphContextObj& contextObj, ConstAttributeDataHandle handle); /** * Gets the type of this attribute data in a representation that is easily interpreted by code. * * The actual attribute data may have a different type than an associated attribute on a node, * for example, if the upstream source of the data has a different type. * This function returns the type of the data, not the type of the associated attribute. * * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] handle The handle to the attribute data whose type is being requested * @return A Type structure representing the attribute data's type, whose members provide * information about the type */ Type (CARB_ABI* getType)(const GraphContextObj& contextObj, ConstAttributeDataHandle handle); /** * Checks whether the type of this attribute data is an array type, i.e. array depth of 1 * (array) or 2 (array of arrays; not yet supported). * * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] handle The handle to the attribute data to check * @return true if the attribute data is an array type, else false */ bool isArray(const GraphContextObj& contextObj, ConstAttributeDataHandle handle) { return (*getType)(contextObj, handle).arrayDepth != 0; } /** deprecated function, do not use */ void(CARB_ABI* deprecated_0)(const void**, const GraphContextObj&, const ConstAttributeDataHandle*, size_t); /** * Gets GPU pointers to the read-only GPU data of some number of attributes. * * Any invalid attributes will have null pointers. 
Array attributes have an extra level of indirection. * For example, after requesting int attribute data, attrsOut will effectively be of type * const int** upon returning, but after requesting int array attribute data, attrsOut will effectively be * of type const int* const**. * * If the attribute data is not on the GPU at the time of this call, but is on the CPU, * it will be copied to the GPU before returning. * * Deprecated: Use getDataRGpuAt * * @param[out] attrsOut Array to be filled in with pointers to data of each attribute * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] attrHandles Array of attribute data handles whose data are being requested * @param[in] attrCount Number of attributes whose data are being requested */ void (CARB_ABI* getDataRGPU)(const void** attrsOut, const GraphContextObj& contextObj, const ConstAttributeDataHandle* attrHandles, size_t attrCount); /** * Gets CPU pointers to the writable CPU data of some number of attributes. * * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection. * For example, after requesting int attribute data, attrsOut will effectively be of type * int** upon returning, but after requesting int array attribute data, attrsOut will effectively be * of type int*const**. * * If the attribute data is not on the CPU at the time of this call, but is on the GPU, * it will be copied to the CPU and invalidated on the GPU before returning. 
* * @param[out] attrsOut Array to be filled in with pointers to data of each attribute * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] attrHandles Array of attribute data handles whose data are being requested * @param[in] attrCount Number of attributes whose data are being requested */ void (CARB_ABI* getDataW)(void** attrsOut, const GraphContextObj& contextObj, const AttributeDataHandle* attrHandles, size_t attrCount); /** * Gets GPU pointers to the writable GPU data of some number of attributes. * * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection. * For example, after requesting int attribute data, attrsOut will effectively be of type * int** upon returning, but after requesting int array attribute data, attrsOut will effectively be * of type int*const**. * * If the attribute data is not on the GPU at the time of this call, but is on the CPU, * it will be copied to the GPU and invalidated on the CPU before returning. * * Deprecated: Use getDataWGpuAt * * @param[out] attrsOut Array to be filled in with pointers to data of each attribute * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] attrHandles Array of attribute data handles whose data are being requested * @param[in] attrCount Number of attributes whose data are being requested */ void (CARB_ABI* getDataWGPU)(void** attrsOut, const GraphContextObj& contextObj, const AttributeDataHandle* attrHandles, size_t attrCount); /** Deprecated function, do not use */ void (CARB_ABI* deprecated_1)(const GraphContextObj&, const AttributeDataHandle*, size_t); /** * Gets the number of array elements in each of the specified attributes. * * Any invalid attributes will considered to have 0 elements. Attributes that are not * arrays will be considered to have 1 element. Array of array attributes are not * yet supported. 
* * @param[out] countOut Array to be filled in with number of elements in each attribute * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] attrHandles Array of attribute data handles whose element counts are being requested * @param[in] attrCount Number of attributes whose element counts are being requested */ void (CARB_ABI* getElementCount)(size_t* countOut, const GraphContextObj& contextObj, const ConstAttributeDataHandle* attrHandles, size_t attrCount); /** * Sets the number of array elements in the specified array attribute. * * The array attribute's data will not be resized until a pointer to its data is requested. * * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] handle Attribute data handle referring to an array attribute * @param[in] count Element count to which the array attribute data should be resized. */ void (CARB_ABI* setElementCount)(const GraphContextObj& contextObj, AttributeDataHandle handle, size_t count); /** * Copies the data from an existing attribute data value into this one. * As only data is being copied a name for the destination is not required and will remain unchanged. * * @param[in] destination Data location to be overwritten * @param[in] contextObj Location of both sets of attribute data * @param[in] source Data being copied */ void (CARB_ABI* copyData)(AttributeDataHandle destination, const GraphContextObj& contextObj, ConstAttributeDataHandle source); /** * Get the location and total number of bytes occupied by the readable attribute data on the CPU. * * If the data is not currently valid on the CPU the pointer returned will be nullptr. 
* * @param[in] contextObj Location of both sets of attribute data * @param[in] attrHandle Handle to the attribute whose size is to be returned * @param[out] refToData Resulting pointer to the attribute data * @param[out] refToSize Size of the data being pointed at */ void (CARB_ABI* getDataReferenceR)(const ConstAttributeDataHandle attrHandle, const GraphContextObj& contextObj, ConstRawPtr& refToData, size_t& refToSize); /** * Get the location and total number of bytes occupied by the readable attribute data on the GPU. * * If the data is not currently valid on the GPU the pointer returned will be nullptr. * If it is then it will point to GPU memory, and should not be dereferenced on the CPU side. * * Deprecated: Use getDataReferenceRGpuAt * * @param[in] contextObj Location of both sets of attribute data * @param[in] attrHandle Handle to the attribute whose size is to be returned * @param[out] refToData Resulting pointer to the attribute data * @param[out] refToSize Size of the data being pointed at */ void (CARB_ABI* getDataReferenceRGpu)(const ConstAttributeDataHandle attrHandle, const GraphContextObj& contextObj, ConstRawPtr& refToData, size_t& refToSize); /** * Get the location and total number of bytes occupied by the writable attribute data on the CPU. * * If the data is not currently valid on the CPU the pointer returned will be nullptr. * * @param[in] contextObj Location of both sets of attribute data * @param[in] attrHandle Handle to the attribute whose size is to be returned * @param[out] refToData Resulting pointer to the attribute data * @param[out] refToSize Size of the data being pointed at */ void (CARB_ABI* getDataReferenceW)(const AttributeDataHandle attrHandle, const GraphContextObj& contextObj, RawPtr& refToData, size_t& refToSize); /** * Get the location and total number of bytes occupied by the writable attribute data on the GPU. * * If the data is not currently valid on the GPU the pointer returned will be nullptr. 
* If it is then it will point to GPU memory, and should not be dereferenced on the CPU side. * * Deprecated: Use getDataReferenceWGpuAt * * @param[in] contextObj Location of both sets of attribute data * @param[in] attrHandle Handle to the attribute whose size is to be returned * @param[out] refToData Resulting pointer to the attribute data * @param[out] refToSize Size of the data being pointed at */ void (CARB_ABI* getDataReferenceWGpu)(const AttributeDataHandle attrHandle, const GraphContextObj& contextObj, RawPtr& refToData, size_t& refToSize); /** * Check if the cpu data of the given attribute is currently valid * * @param[in] contextObj Location of both sets of attribute data * @param[in] attrHandle Handle to the attribute */ bool (CARB_ABI* cpuValid)(const ConstAttributeDataHandle attrHandle, const GraphContextObj& contextObj); /** * Check if the gpu data of the given attribute is currently valid * * @param[in] contextObj Location of both sets of attribute data * @param[in] attrHandle Handle to the attribute */ bool (CARB_ABI* gpuValid)(const ConstAttributeDataHandle attrHandle, const GraphContextObj& contextObj); /** * Gets GPU pointers to the read-only GPU data of some number of attributes. * * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection. * For example, after requesting int attribute data, attrsOut will effectively be of type * const int** upon returning, but after requesting int array attribute data, attrsOut will effectively be * of type const int* const**. * * If the attribute data is not on the GPU at the time of this call, but is on the CPU, * it will be copied to the GPU before returning. 
* * @param[out] attrsOut Array to be filled in with pointers to data of each attribute * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] attrHandles Array of attribute data handles whose data are being requested * @param[in] attrCount Number of attributes whose data are being requested * @param[in] whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU */ void (CARB_ABI* getDataRGpuAt)(const void** attrsOut, const GraphContextObj& contextObj, const ConstAttributeDataHandle* attrHandles, size_t attrCount, omni::fabric::PtrToPtrKind whereGpuPtrs); /** * Gets GPU pointers to the writable GPU data of some number of attributes. * * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection. * For example, after requesting int attribute data, attrsOut will effectively be of type * int** upon returning, but after requesting int array attribute data, attrsOut will effectively be * of type int*const**. * * If the attribute data is not on the GPU at the time of this call, but is on the CPU, * it will be copied to the GPU and invalidated on the CPU before returning. * * @param[out] attrsOut Array to be filled in with pointers to data of each attribute * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] attrHandles Array of attribute data handles whose data are being requested * @param[in] attrCount Number of attributes whose data are being requested * @param[in] whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU */ void (CARB_ABI* getDataWGpuAt)(void** attrsOut, const GraphContextObj& contextObj, const AttributeDataHandle* attrHandles, size_t attrCount, omni::fabric::PtrToPtrKind whereGpuPtrs); /** * Get the location and total number of bytes occupied by the readable attribute data on the GPU. 
* * If the data is not currently valid on the GPU the pointer returned will be nullptr. * If it is then it will point to GPU memory, and should not be dereferenced on the CPU side. * * @param[in] attrHandle Handle to the attribute whose size is to be returned * @param[in] contextObj Location of both sets of attribute data * @param[in] whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU * @param[out] refToData Resulting pointer to the attribute data * @param[out] refToSize Size of the data being pointed at */ void (CARB_ABI* getDataReferenceRGpuAt)(const ConstAttributeDataHandle attrHandle, const GraphContextObj& contextObj, omni::fabric::PtrToPtrKind whereGpuPtrs, ConstRawPtr& refToData, size_t& refToSize); /** * Get the location and total number of bytes occupied by the writable attribute data on the GPU. * * If the data is not currently valid on the GPU the pointer returned will be nullptr. * If it is then it will point to GPU memory, and should not be dereferenced on the CPU side. * * @param[in] attrHandle Handle to the attribute whose size is to be returned * @param[in] contextObj Location of both sets of attribute data * @param[in] whereGpuPtrs For array data, the location of the array pointer - either on the CPU or on the GPU * @param[out] refToData Resulting pointer to the attribute data * @param[out] refToSize Size of the data being pointed at */ void (CARB_ABI* getDataReferenceWGpuAt)(AttributeDataHandle attrHandle, const GraphContextObj& contextObj, omni::fabric::PtrToPtrKind whereGpuPtrs, RawPtr& refToData, size_t& refToSize); /** * Perform a conversion between 2 data types * * The actual attribute data may have a different type than an associated attribute on a node, * for example, if the upstream source of the data has a different type. 
* This function can be used to perform the conversion from the actual attribute data, * to the provided buffer of the type of the attribute * * @param[out] dstDataOut A pointer to the destination buffer to be filled with the result of the conversion * @param[in] dstType The type the destination buffer * @param[in] srcDataIn A pointer to the actual attribute data * @param[in] srcType The type of the attribute data pointer * @return True if a conversion exists and succeeded, False otherwise. */ bool(CARB_ABI* performConversion)(void* dstDataOut, Type dstType, void* srcDataIn, Type srcType); /** * Gets CPU pointers to the read-only CPU data of some number of attributes. * * Any invalid attributes will have null pointers. Array attributes have an extra level of indirection. * For example, after requesting int attribute data, attrsOut will effectively be of type * const int** upon returning, but after requesting int array attribute data, attrsOut will effectively be * of type const int* const**. * * If the attribute data is not on the CPU at the time of this call, but is on the GPU, * it will be copied to the CPU before returning. * * @param[out] attrsOut Array to be filled in with pointers to data of each attribute * @param[in] contextObj The GraphContextObj containing the attribute data * @param[in] attrHandles Array of attribute data handles whose data are being requested * @param[in] attrCount Number of attributes whose data are being requested */ void (CARB_ABI* getDataR)(const void** attrsOut, const GraphContextObj& contextObj, const ConstAttributeDataHandle* attrHandles, size_t attrCount); /** * In a vectorized context, retrieve the write handle to another instance from a given one * * @param[in] contextObj Location of attribute data * @param[in] attrHandle The source handle to offset * @param[in] offset An offset to apply to the provided handle. Can be negative. 
* @return A handle to the instance located at the provided offset relative to the provided handle */ AttributeDataHandle(CARB_ABI* moveToAnotherInstanceW)(const GraphContextObj& contextObj, AttributeDataHandle attrHandle, int offset); /** * In a vectorized context, retrieve the read handle to another instance from a given one * * @param[in] contextObj Location of attribute data * @param[in] attrHandle The source handle to offset * @param[in] offset An offset to apply to the provided handle. Can be negative. * @return A handle to the instance located at the provided offset relative to the provided handle */ ConstAttributeDataHandle(CARB_ABI* moveToAnotherInstanceR)(const GraphContextObj& contextObj, ConstAttributeDataHandle attrHandle, int offset); }; // Update this every time a new ABI function is added, to ensure one isn't accidentally added in the middle STRUCT_INTEGRITY_CHECK(IAttributeData, moveToAnotherInstanceR, 24) } // namespace core } // namespace graph } // namespace omni
22,400
C
50.496552
114
0.670179
omniverse-code/kit/include/omni/graph/core/ogn/SimpleAttribute.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/CppWrappers.h> #include <omni/graph/core/ogn/TypeConversion.h> #include <omni/graph/core/ogn/Types.h> // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // SimpleInput Read-only wrapper for simple (POD and tuple) attribute values on CPU or GPU // SimpleOutput Writable data wrapper for simple (POD and tuple) attribute values on CPU or GPU // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= namespace omni { namespace graph { namespace core { namespace ogn { // ====================================================================== /** * Wrapper template that handles POD input attribute values. * Its unowned data points to the real data in the fabric. It provides * a consistent interface to the data with a isValid() method and an operator(), * as well as enforcing const-correctness with the values. * * Rather than split this into CPU and GPU versions some key methods are enabled by template based * on the template parameter. This avoids duplication of the common code and reduces clutter. 
* * The data in this class is owned by the OGN generated code, this class only maintains pointers to those references. * By doing this, those generated references can be updated from fabric and this class will automatically pick up * the change, avoiding the need for synchronization code. */ template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu> struct SimpleAttribute { // Make const-ness aware at compile time so that this class be used in const and non-const contexts static constexpr bool readOnly = (AttributeType == ogn::kOgnInput); using data_t = DataType; using handle_t = std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>; /** * Set up the accessor for input attributes with simple data * * @param[in] offset: A reference to the instance offset currently pointed by owning database * @param[in] role Attribute's role */ SimpleAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone) : m_role(role), m_offset(offset) { static_assert(readOnly == std::is_const<DataType>::value, "Cannot construct input attributes from non-const types"); } /** * Set up the accessor for input attributes with simple data where the data pointer is known at construction time * * @param[in] dataPtr Pointer to the attribute's data * @param[in] role Attribute's role */ SimpleAttribute(size_t const& offset, data_t* dataPtr, AttributeRole role = AttributeRole::eNone) : m_ptrToData{ dataPtr }, m_role(role), m_offset(offset) {} /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Reference to the raw fabric data extracted for use on the CPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type> const DataType& cpu(size_t idx = 0) const { // Cast is necessary to generically handle both const and non-const internal data m_ptrToData = (data_t*)getDataR<DataType>(*m_context, m_handle); return 
m_ptrToData[m_offset+idx]; } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Pointer to the raw fabric data extracted for use on the GPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type> const DataType* gpu(size_t idx = 0) const { // Cast is necessary to generically handle both const and non-const internal data m_ptrToData = (data_t*)getDataRGPU<DataType>(*m_context, m_handle); return m_ptrToData + m_offset + idx; } /** * Set the context. This is done to allow the same wrapper class to be used for * multiple evaluations in different contexts. * * @param[in] context The graph context to which the attribute belongs */ void setContext(const GraphContextObj& context) { m_context = &context; if (m_ptrToData) { OptionalMethod::setContext<data_t>(*m_ptrToData, context); } } /** * Set the attribute handle. This is done to allow the same wrapper class to be used for * multiple evaluations in different contexts. 
* * @param[in] handle Handle to the attribute to which the attribute belongs */ void setHandle(handle_t handle) { m_handle = handle; } /** * @return Role of the managed attribute */ AttributeRole role() const { return m_role; } /** * @return True if the underlying attribute data is valid for accessing */ template <eMemoryType Type = MemoryType> bool isValid() const { return m_handle.isValid(); } protected: mutable data_t* m_ptrToData{ nullptr }; //!< Cached pointer to fabric data AttributeRole m_role{ AttributeRole::eNone }; //!< Role interpretation for the attribute this struct manages const GraphContextObj* m_context{ nullptr }; //!< ABI OmniGraph object, for JIT access to data handle_t m_handle{ handle_t::invalidValue() }; //!< Handle to this attribute's data, for JIT access to data mutable Type m_originalDataType{ BaseDataType::eUnknown }; //!< The actual type of the underlying data in fabric //!< (used for auto conversion) size_t const& m_offset; //!< An offset (in terms of object count) to apply to the pointer to access the object }; // ====================================================================== /** * Wrapper template that handles POD input attribute values. 
*/ template <typename DataType, eMemoryType MemoryType = kCpu> struct SimpleInput : public SimpleAttribute<std::add_const_t<DataType>, kOgnInput, MemoryType> { using parent_t = SimpleAttribute<DataType, kOgnInput, MemoryType>; using data_t = typename parent_t::data_t; using handle_t = typename parent_t::handle_t; /** * Set up the accessor for input attributes with simple data * * @param[in] offset: A reference to the instance offset currently pointed by owning database * @param[in] role: Attribute's role */ SimpleInput(size_t const& offset, AttributeRole role = AttributeRole::eNone) : parent_t(offset, role) { } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Reference to the raw fabric data; only enabled when it lives on the CPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> const DataType& operator()(size_t idx = 0) const { auto& data = this->m_ptrToData == nullptr ? this->template cpu<Type>(idx) : this->m_ptrToData[idx+this->m_offset]; if (this->m_originalDataType.baseType == BaseDataType::eUnknown) { if (this->m_context) this->m_originalDataType = this->m_context->iAttributeData->getType(*this->m_context, this->m_handle); else return data; } return *converter.convertValue(&data, this->m_originalDataType); } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Pointer to the raw flatcache data (or the converted value); only enabled when it lives on the GPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCuda>::type> const DataType* operator()(size_t idx = 0) const { if (!this->m_context->iAttributeData->gpuValid(this->m_handle, *this->m_context)) this->m_ptrToData = nullptr; return this->m_ptrToData == nullptr ? 
this->template gpu<Type>(idx) : (this->m_ptrToData + idx + this->m_offset); } /** * @return True if the attribute can be accessed for vectorized compute */ bool const canVectorize() const { if (this->m_originalDataType.baseType == BaseDataType::eUnknown) { if (this->m_context) this->m_originalDataType = this->m_context->iAttributeData->getType(*this->m_context, this->m_handle); else return false; } return converter.willConvert(this->m_originalDataType) == false; } /** * @param[in] count: The number of instances available for vectorized access * @return A span for the vectorized range if available. If not available, user must call operator() in a loop with incremented indices instead */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> gsl::span<DataType const> vectorized(size_t count) const { auto& data = this->m_ptrToData == nullptr ? this->template cpu<Type>() : this->m_ptrToData[this->m_offset]; if (this->m_originalDataType.baseType == BaseDataType::eUnknown) { if (this->m_context) this->m_originalDataType = this->m_context->iAttributeData->getType(*this->m_context, this->m_handle); else return { &data, count }; } if (converter.willConvert(this->m_originalDataType)) { if (count != 1) return {}; return { converter.convertValue(&data, this->m_originalDataType), count /*= 1*/ }; } return { &data, count }; } /** * @param[in] index: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return A copy of the underlying ABI data handle for the attribute */ handle_t abi_handle(size_t index = 0) const { size_t const idx = this->m_offset + index; return idx == 0 ? this->m_handle : this->m_context->iAttributeData->moveToAnotherInstanceR(*this->m_context, this->m_handle, (int)idx); } private: Converter<DataType> converter; }; // ====================================================================== /** * Wrapper template that handles POD output attribute values. 
* It adds methods that provide write access to the underlying attribute data onto the functionality of SimpleInput. */ template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu> struct SimpleWritableAttribute : public SimpleAttribute<DataType, AttributeType, MemoryType> { using parent_t = SimpleAttribute<DataType, AttributeType, MemoryType>; using data_t = typename parent_t::data_t; using handle_t = typename parent_t::handle_t; /** * Set up the accessor for output attributes with simple data * * @param[in] offset: A reference to the instance offset currently pointed by owning database * @param[in] Attribute's role */ SimpleWritableAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone) : parent_t(offset, role) { } /** * Query if the attribute can be accessed in a vectorized manner * @return True: SimpleWritableAttribute can always vectorize */ bool const canVectorize() const { return true; } /** * @return A span for the vectorized range */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> gsl::span<DataType> vectorized(size_t count) { if (this->m_ptrToData) return { this->m_ptrToData + this->m_offset, count }; return { &cpu(), count }; } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Reference to the raw fabric data; only enabled when it lives on the CPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> DataType& operator()(size_t idx = 0) const { if (this->m_ptrToData) return this->m_ptrToData[idx+this->m_offset]; return cpu(idx); } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Pointer to the raw fabric data; only enabled when it lives on the GPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type 
== kCuda>::type> DataType* operator()(size_t idx = 0) const { if (!this->m_context->iAttributeData->gpuValid(this->m_handle, *this->m_context)) this->m_ptrToData = nullptr; if (this->m_ptrToData) return this->m_ptrToData + idx + this->m_offset; return gpu(idx); } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Reference to the raw fabric data extracted for use on the CPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type> DataType& cpu(size_t idx = 0) const { this->m_ptrToData = getDataW<DataType>(*this->m_context, this->m_handle); return this->m_ptrToData[idx+this->m_offset]; } /** * @param[in] idx: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return Pointer to the raw fabric data extracted for use on the GPU */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type> DataType* gpu(size_t idx = 0) const { this->m_ptrToData = getDataWGPU<DataType>(*this->m_context, this->m_handle); return this->m_ptrToData + idx + this->m_offset; } /** * @param[in] index: For vectorized compute, the instance index/offset relative to the one currently pointed by the owning database * @return A copy of the underlying ABI data handle for the attribute */ handle_t abi_handle(size_t index = 0) const { size_t const idx = this->m_offset + index; return idx == 0 ? 
this->m_handle : this->m_context->iAttributeData->moveToAnotherInstanceW(*this->m_context, this->m_handle, (int)idx); } }; // Typedefs to differentiate state from output types template <typename DataType, eMemoryType MemoryType = kCpu> using SimpleOutput = SimpleWritableAttribute<DataType, kOgnOutput, MemoryType>; template <typename DataType, eMemoryType MemoryType = kCpu> using SimpleState = SimpleWritableAttribute<DataType, kOgnState, MemoryType>; // Backward compatibility for previously existing data types template <typename DataType> using DualInput = SimpleInput<DataType, kAny>; template <typename DataType> using DualOutput = SimpleOutput<DataType, kAny>; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
16,062
C
41.160105
147
0.651289
omniverse-code/kit/include/omni/graph/core/ogn/ArrayAttribute.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // ArrayInput Read-only wrapper for attributes that are arrays of values on CPU or GPU // ArrayOutput Writable wrapper for attributes that are arrays of values on CPU or GPU // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/CppWrappers.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/ogn/array.h> #include <omni/graph/core/ogn/string.h> #include <carb/InterfaceUtils.h> #include <omni/fabric/Enums.h> using omni::fabric::PtrToPtrKind; namespace omni { namespace graph { namespace core { namespace ogn { // ============================================================================================================== /** House the shared data types that will be used by all array type accessors. * Provides typedefs for data access with similar but subtly different types, e.g. const versus non-const * Its unowned data points to the real data in the fabric. 
It provides a consistent interface to the data, * with an isValid() method to use for compute validation and an appropriate operator() for data extraction. */ template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct ArrayAttribute { // Make const-ness aware at compile time so that this class be used in const and non-const contexts static constexpr bool readOnly = (AttributeType == ogn::kOgnInput); using this_t = ArrayAttribute<DataType, AttributeType, MemoryType, GpuPtrType>; using data_t = DataType; using array_t = std::conditional_t< readOnly, std::conditional_t<std::is_same<const char, DataType>::value, const_string, const_array<std::remove_const_t<DataType>>>, std::conditional_t<std::is_same<char, DataType>::value, string, array<DataType>> >; using handle_t = std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>; /** * Set up the accessor for attributes with array data * * @param[in] offset: A reference to the instance offset currently pointed by owning database * @param[in] role Attribute's role */ ArrayAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone) : m_role(role), m_offset(offset), m_currentOffset(offset) { static_assert(readOnly == std::is_const<DataType>::value, "Cannot construct input attributes from non-const types"); } /** * @return Role of the managed attribute */ AttributeRole role() const { return m_role; } /** * Set the context. This is done to allow the same wrapper class to be used for * multiple evaluations in different contexts. * * @param[in] context The graph context to which the array belongs */ void setContext(const GraphContextObj& context) { m_arrayData.setContext(context); } /** * Set the attribute handle. This is done to allow the same wrapper class to be used for * multiple evaluations in different contexts. 
* * @param[in] handle Handle to the attribute to which the array belongs */ void setHandle(handle_t handle) { m_arrayData.setHandle(handle); } /** * Perform any action necessary before computation happens */ void preCompute() { //array data needs to be re-fetched every frame, // as any external change of the value might have invalidated the pointers m_arrayData.setDirty(); } /** * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database * @return Reference to an array wrapper around the raw fabric CPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> const array_t& operator()(size_t index = 0) const { return cpu(index); } /** * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database * @return Reference to an array wrapper around the raw fabric GPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCuda>::type> const DataType** operator()(size_t index = 0) const { return gpu(index); } /** * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database * @return Reference to an array wrapper around the raw fabric CPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type> const array_t& cpu(size_t index = 0) const { adjustHandle(index); return m_arrayData; } /** * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database * @return Reference to GPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type> auto gpu(size_t index = 0) const { return gpuGet(abi_handle(index), context()); } /** * Query the size of the underlying array * @param[in] index: For vectorized compute, the instance index relative to the one currently 
pointed by the owning database * @return the size of the underlying array */ const size_t size(size_t index = 0) const { size_t count = 0; ConstAttributeDataHandle chdl = abi_handle(index); context()->iAttributeData->getElementCount(&count, *context(), &chdl, 1); return count; } /** * @return True if the underlying attribute data is valid for accessing */ bool isValid() const { return m_arrayData.isValid(); } /** * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database * @return A copy of the underlying ABI data handle for the attribute */ handle_t abi_handle(size_t index = 0) const { adjustHandle(index); return m_arrayData.m_handle; } //@deprecated use abi_handle instead [[deprecated("Calling handle() is deprecated. Use abi_handle() instead")]] inline handle_t handle(size_t index = 0) const { return abi_handle(index);} /** * @return A copy of the underlying ABI data handle for the attribute */ GraphContextObj const* context() const { return m_arrayData.m_context; } /** * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database * @param[in] defValue: The default value to return if the array is empty * @return the first item in the array, or the default value if the array is empty */ const DataType& firstOrDefault(size_t index = 0, const DataType& defValue = DataType()) { return size(index) ? (*this)(index)[0] : defValue; } protected: /** * Make the handle point to the proper instance, referred by its index, in a vectorized context * @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database */ void adjustHandle(size_t index) const { if (m_arrayData.isValid()) { size_t wantedIndex = m_offset + index; if (m_currentOffset != wantedIndex) { const_cast<array_t&>(m_arrayData).adjustHandle(wantedIndex - m_currentOffset); m_currentOffset = wantedIndex; } } } protected: //! 
Role interpretation for the attribute this struct manages AttributeRole m_role{ AttributeRole::eNone }; //! Helper for accessing array data (last as it uses the others in initializing) array_t m_arrayData; //! Offset (in terms of objects) to apply to the fabric pointer to access the current object size_t const& m_offset; //! Offset at which the array data is currently configured size_t mutable m_currentOffset{ 0 }; private: //select appropriate ABI function based on handle type static const DataType** gpuGet(ConstAttributeDataHandle const& hdl, GraphContextObj const* ctx) { const DataType** ptrToData{ nullptr }; ctx->iAttributeData->getDataRGpuAt((const void**)&ptrToData, *ctx, &hdl, 1, GpuPtrType); return ptrToData; } static DataType** gpuGet(AttributeDataHandle const& hdl, GraphContextObj const* ctx) { DataType** ptrToData{ nullptr }; ctx->iAttributeData->getDataWGpuAt((void**)&ptrToData, *ctx, &hdl, 1, GpuPtrType); return ptrToData; } }; // ====================================================================== /** * Wrapper template that handles arrays of input attribute values. */ template <typename DataType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> using ArrayInput = ArrayAttribute<std::add_const_t<DataType>, kOgnInput, MemoryType, GpuPtrType>; // ====================================================================== /** * Handle arrays of output attribute values. * It adds methods that provide write access to the underlying attribute data onto the functionality of ArrayInput. 
*/ template <typename DataType, eAttributeType AttributeType, eMemoryType MemoryType = kCpu, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct ArrayWritableAttribute : public ArrayAttribute<DataType, AttributeType, MemoryType, GpuPtrType> { // Make const-ness aware at compile time so that this class be used in const and non-const contexts static constexpr bool readOnly = (AttributeType == ogn::kOgnInput); using parent_t = ArrayAttribute<DataType, AttributeType, MemoryType, GpuPtrType>; using data_t = typename parent_t::data_t; using handle_t = typename parent_t::handle_t; using array_t = typename parent_t::array_t; using this_t = ArrayWritableAttribute<DataType, AttributeType, MemoryType, GpuPtrType>; /** * Set up the accessor for output attributes with array data * * @param[in] offset: A reference to the instance offset currently pointed by owning database * @param[in] role Attribute's role */ ArrayWritableAttribute(size_t const& offset, AttributeRole role = AttributeRole::eNone) : parent_t(offset, role) { } /** * @param[in] offset: A reference to the instance offset currently pointed by owning database * @return Reference to an array wrapper around the raw fabric CPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCpu>::type> array_t& operator()(size_t index = 0) { //always go through fetch for array as it has special treatment in datamodel return cpu(index); } /** * @param[in] offset: A reference to the instance offset currently pointed by owning database * @return Reference to an array wrapper around the raw fabric GPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type == kCuda>::type> data_t** operator()(size_t index = 0) { // always go through fetch for array as it has special treatment in datamodel return gpu(index); } /** * @param[in] offset: A reference to the instance offset currently pointed by owning database * @return Reference to an array wrapper around the 
raw fabric CPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCuda>::type> array_t& cpu(size_t index = 0) { auto const* const_this = this; auto const& ret = const_this->parent_t::cpu(index); return const_cast<array_t&>(ret); } /** * @param[in] offset: A reference to the instance offset currently pointed by owning database * @return Reference to an array wrapper around the raw fabric GPU data */ template <eMemoryType Type = MemoryType, typename = typename std::enable_if<Type != kCpu>::type> data_t** gpu(size_t index = 0) { auto const* const_this = this; auto ret = const_this->parent_t::gpu(index); return const_cast<data_t**>(ret); } /** * Resize of the underlying array * @input newSize: the size to set for the underlying array * @input index: the index of the instance to query in a vectorized context */ const void resize(size_t newSize, size_t index = 0) { auto& ctx = *this->context(); ctx.iAttributeData->setElementCount(ctx, this->abi_handle(index), newSize); this->m_arrayData.setDirty(); } /** * Copy some array data to another through the ABI * Will handle all the underlying optimizations (such as CoW or DataStealing) * * @return Reference to itself */ this_t& operator=(const ArrayInput<DataType, MemoryType, GpuPtrType>& toBeCopied) { return shallowCopy(toBeCopied);} // @param[in] index: For vectorized compute, the instance index relative to the one currently pointed by the owning database this_t& shallowCopy(const ArrayInput<DataType, MemoryType, GpuPtrType>& toBeCopied, size_t index = 0) { const IAttributeData& iData = *(this->context()->iAttributeData); iData.copyData(this->abi_handle(index), *this->context(), toBeCopied.abi_handle(index)); this->m_arrayData.setDirty(); return *this; } }; // Convenience types for distinguishing output and state attributes template <typename DataType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> using ArrayOutput = ArrayWritableAttribute<DataType, 
kOgnOutput, MemoryType, GpuPtrType>; template <typename DataType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> using ArrayState = ArrayWritableAttribute<DataType, kOgnState, MemoryType, GpuPtrType>; // Backward compatibility for previously existing data types template <typename DataType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> using DualArrayInput = ArrayInput<DataType, kAny, GpuPtrType>; template <typename DataType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> using DualArrayOutput = ArrayOutput<DataType, kAny, GpuPtrType>; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
15,510
C
39.393229
145
0.663701
omniverse-code/kit/include/omni/graph/core/ogn/UsdTypes.h
// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // This file contains helper utilities for managing casting and interpretation of the USD // data types. Keeping this separate enables the ability for any other libraries to // provide their own type casting, so that the OGN code can use native types only. #include <omni/graph/core/PreUsdInclude.h> #include <pxr/base/gf/half.h> #include <pxr/base/gf/matrix2d.h> #include <pxr/base/gf/matrix3d.h> #include <pxr/base/gf/matrix4d.h> #include <pxr/base/gf/vec2d.h> #include <pxr/base/gf/vec2f.h> #include <pxr/base/gf/vec2h.h> #include <pxr/base/gf/vec2i.h> #include <pxr/base/gf/vec3d.h> #include <pxr/base/gf/vec3f.h> #include <pxr/base/gf/vec3h.h> #include <pxr/base/gf/vec3i.h> #include <pxr/base/gf/vec4d.h> #include <pxr/base/gf/vec4f.h> #include <pxr/base/gf/vec4h.h> #include <pxr/base/gf/vec4i.h> #include <pxr/base/gf/quatd.h> #include <pxr/base/gf/quatf.h> #include <pxr/base/gf/quath.h> #include <pxr/usd/sdf/path.h> #include <pxr/usd/sdf/timeCode.h> #include <pxr/base/tf/token.h> #include <omni/graph/core/PostUsdInclude.h> #include <omni/fabric/IPath.h> #include <omni/fabric/IToken.h> #include <omni/graph/core/Type.h> #include <omni/graph/core/ogn/TypeTraits.h> namespace omni { namespace graph { namespace core { namespace ogn { // GfHalf, TfToken, SdfPath, and SdfTimeCode are the base types that allow special casting template <> struct attribute_base_t<pxr::GfHalf> { static constexpr BaseDataType value = BaseDataType::eHalf; }; template <> struct attribute_base_t<pxr::TfToken> { static constexpr BaseDataType value = 
BaseDataType::eToken; }; template <> struct attribute_base_t<pxr::SdfPath> { static constexpr BaseDataType value = BaseDataType::eRelationship; }; template <> struct attribute_base_t<pxr::SdfTimeCode> { static constexpr BaseDataType value = BaseDataType::eDouble; }; // All of the matrix and vector types have USD implementations template <> struct attribute_base_t<pxr::GfMatrix2d> { static constexpr BaseDataType value = BaseDataType::eDouble; }; template <> struct attribute_base_t<pxr::GfMatrix3d> { static constexpr BaseDataType value = BaseDataType::eDouble; }; template <> struct attribute_base_t<pxr::GfMatrix4d> { static constexpr BaseDataType value = BaseDataType::eDouble; }; template <> struct attribute_base_t<pxr::GfVec2d> { static constexpr BaseDataType value = BaseDataType::eDouble; }; template <> struct attribute_base_t<pxr::GfVec2f> { static constexpr BaseDataType value = BaseDataType::eFloat; }; template <> struct attribute_base_t<pxr::GfVec2h> { static constexpr BaseDataType value = BaseDataType::eHalf; }; template <> struct attribute_base_t<pxr::GfVec2i> { static constexpr BaseDataType value = BaseDataType::eInt; }; template <> struct attribute_base_t<pxr::GfVec3d> { static constexpr BaseDataType value = BaseDataType::eDouble; }; template <> struct attribute_base_t<pxr::GfVec3f> { static constexpr BaseDataType value = BaseDataType::eFloat; }; template <> struct attribute_base_t<pxr::GfVec3h> { static constexpr BaseDataType value = BaseDataType::eHalf; }; template <> struct attribute_base_t<pxr::GfVec3i> { static constexpr BaseDataType value = BaseDataType::eInt; }; template <> struct attribute_base_t<pxr::GfVec4d> { static constexpr BaseDataType value = BaseDataType::eDouble; }; template <> struct attribute_base_t<pxr::GfVec4f> { static constexpr BaseDataType value = BaseDataType::eFloat; }; template <> struct attribute_base_t<pxr::GfVec4h> { static constexpr BaseDataType value = BaseDataType::eHalf; }; template <> struct attribute_base_t<pxr::GfVec4i> { 
static constexpr BaseDataType value = BaseDataType::eInt; }; template <> struct attribute_base_t<pxr::GfQuatd> { static constexpr BaseDataType value = BaseDataType::eDouble; }; template <> struct attribute_base_t<pxr::GfQuatf> { static constexpr BaseDataType value = BaseDataType::eFloat; }; template <> struct attribute_base_t<pxr::GfQuath> { static constexpr BaseDataType value = BaseDataType::eHalf; }; } // namespace ogn } // namespace core } // namespace graph } // namespace omni // Simple type casting from the internal Fabric types to their equivalent USD types inline const pxr::TfToken& asTfToken(const omni::fabric::TokenC& token) { return reinterpret_cast<const pxr::TfToken&>(token); } inline pxr::TfToken& asTfToken(omni::fabric::TokenC& token) { return reinterpret_cast<pxr::TfToken&>(token); } inline const pxr::SdfPath& asSdfPath(const omni::fabric::PathC& path) { return reinterpret_cast<const pxr::SdfPath&>(path); } inline pxr::SdfPath& asSdfPath(omni::fabric::PathC& path) { return reinterpret_cast<pxr::SdfPath&>(path); } namespace omni { namespace graph { namespace core { namespace ogn { // ============================================================================================================== // Specializing the attribute_type_traits gives more options for casting data extracted from RuntimeAttributes. 
// // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfHalf>(); template <> struct attribute_type_traits<pxr::GfHalf> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfHalf>::value; using actual_t = pxr::GfHalf; using element_t = pxr::GfHalf; using data_t = pxr::GfHalf; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfHalf[]>(); template <> struct attribute_type_traits<pxr::GfHalf[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfHalf>::value; using actual_t = pxr::GfHalf[]; using element_t = pxr::GfHalf; using data_t = pxr::GfHalf; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::TfToken>(); template <> struct attribute_type_traits<pxr::TfToken> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::TfToken>::value; using actual_t = pxr::TfToken; using element_t = pxr::TfToken; 
using data_t = pxr::TfToken; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::TfToken[]>(); template <> struct attribute_type_traits<pxr::TfToken[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::TfToken>::value; using actual_t = pxr::TfToken[]; using element_t = pxr::TfToken; using data_t = pxr::TfToken; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfPath>(); template <> struct attribute_type_traits<pxr::SdfPath> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::SdfPath>::value; using actual_t = pxr::SdfPath; using element_t = pxr::SdfPath; using data_t = pxr::SdfPath; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfPath[]>(); template <> struct attribute_type_traits<pxr::SdfPath[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = 
attribute_base_t<pxr::SdfPath>::value; using actual_t = pxr::SdfPath[]; using element_t = pxr::SdfPath; using data_t = pxr::SdfPath; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfTimeCode>(); template <> struct attribute_type_traits<pxr::SdfTimeCode> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = true; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::SdfTimeCode>::value; using actual_t = pxr::SdfTimeCode; using element_t = pxr::SdfTimeCode; using data_t = pxr::SdfTimeCode; static constexpr int tupleCount = 1; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::SdfTimeCode[]>(); template <> struct attribute_type_traits<pxr::SdfTimeCode[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = true; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::SdfTimeCode>::value; using actual_t = pxr::SdfTimeCode[]; using element_t = pxr::SdfTimeCode; using data_t = pxr::SdfTimeCode; static constexpr int tupleCount = 1; };// -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix2d>(); template <> struct attribute_type_traits<pxr::GfMatrix2d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; 
static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix2d>::value; using actual_t = pxr::GfMatrix2d; using element_t = double; using data_t = pxr::GfMatrix2d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix2d[]>(); template <> struct attribute_type_traits<pxr::GfMatrix2d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix2d>::value; using actual_t = pxr::GfMatrix2d[]; using element_t = double; using data_t = pxr::GfMatrix2d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix3d>(); template <> struct attribute_type_traits<pxr::GfMatrix3d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix3d>::value; using actual_t = pxr::GfMatrix3d; using element_t = double; using data_t = pxr::GfMatrix3d; static constexpr int tupleCount = 9; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix3d[]>(); template <> struct attribute_type_traits<pxr::GfMatrix3d[]> { static constexpr bool isArray = true; static constexpr 
int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix3d>::value; using actual_t = pxr::GfMatrix3d[]; using element_t = double; using data_t = pxr::GfMatrix3d; static constexpr int tupleCount = 9; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix4d>(); template <> struct attribute_type_traits<pxr::GfMatrix4d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix4d>::value; using actual_t = pxr::GfMatrix4d; using element_t = double; using data_t = pxr::GfMatrix4d; static constexpr int tupleCount = 16; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfMatrix4d[]>(); template <> struct attribute_type_traits<pxr::GfMatrix4d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfMatrix4d>::value; using actual_t = pxr::GfMatrix4d[]; using element_t = double; using data_t = pxr::GfMatrix4d; static constexpr int tupleCount = 16; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2d>(); template <> 
struct attribute_type_traits<pxr::GfVec2d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2d>::value; using actual_t = pxr::GfVec2d; using element_t = double; using data_t = pxr::GfVec2d; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2d[]>(); template <> struct attribute_type_traits<pxr::GfVec2d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2d>::value; using actual_t = pxr::GfVec2d[]; using element_t = double; using data_t = pxr::GfVec2d; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2f>(); template <> struct attribute_type_traits<pxr::GfVec2f> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2f>::value; using actual_t = pxr::GfVec2f; using element_t = float; using data_t = pxr::GfVec2f; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto 
value = runtimeAttribute.get<pxr::GfVec2f[]>(); template <> struct attribute_type_traits<pxr::GfVec2f[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2f>::value; using actual_t = pxr::GfVec2f[]; using element_t = float; using data_t = pxr::GfVec2f; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2h>(); template <> struct attribute_type_traits<pxr::GfVec2h> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2h>::value; using actual_t = pxr::GfVec2h; using element_t = pxr::GfHalf; using data_t = pxr::GfVec2h; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2h[]>(); template <> struct attribute_type_traits<pxr::GfVec2h[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2h>::value; using actual_t = pxr::GfVec2h[]; using element_t = pxr::GfHalf; using data_t = pxr::GfVec2h; static constexpr int tupleCount = 2; }; // 
-------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2i>(); template <> struct attribute_type_traits<pxr::GfVec2i> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2i>::value; using actual_t = pxr::GfVec2i; using element_t = int; using data_t = pxr::GfVec2i; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec2i[]>(); template <> struct attribute_type_traits<pxr::GfVec2i[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec2i>::value; using actual_t = pxr::GfVec2i[]; using element_t = int; using data_t = pxr::GfVec2i; static constexpr int tupleCount = 2; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3d>(); template <> struct attribute_type_traits<pxr::GfVec3d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3d>::value; using actual_t = pxr::GfVec3d; using element_t = double; using data_t = 
pxr::GfVec3d; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3d[]>(); template <> struct attribute_type_traits<pxr::GfVec3d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3d>::value; using actual_t = pxr::GfVec3d[]; using element_t = double; using data_t = pxr::GfVec3d; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3f>(); template <> struct attribute_type_traits<pxr::GfVec3f> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3f>::value; using actual_t = pxr::GfVec3f; using element_t = float; using data_t = pxr::GfVec3f; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3f[]>(); template <> struct attribute_type_traits<pxr::GfVec3f[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3f>::value; using 
actual_t = pxr::GfVec3f[]; using element_t = float; using data_t = pxr::GfVec3f; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3h>(); template <> struct attribute_type_traits<pxr::GfVec3h> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3h>::value; using actual_t = pxr::GfVec3h; using element_t = pxr::GfHalf; using data_t = pxr::GfVec3h; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3h[]>(); template <> struct attribute_type_traits<pxr::GfVec3h[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3h>::value; using actual_t = pxr::GfVec3h[]; using element_t = pxr::GfHalf; using data_t = pxr::GfVec3h; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3i>(); template <> struct attribute_type_traits<pxr::GfVec3i> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr 
BaseDataType baseType = attribute_base_t<pxr::GfVec3i>::value; using actual_t = pxr::GfVec3i; using element_t = int; using data_t = pxr::GfVec3i; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec3i[]>(); template <> struct attribute_type_traits<pxr::GfVec3i[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec3i>::value; using actual_t = pxr::GfVec3i[]; using element_t = int; using data_t = pxr::GfVec3i; static constexpr int tupleCount = 3; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4d>(); template <> struct attribute_type_traits<pxr::GfVec4d> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4d>::value; using actual_t = pxr::GfVec4d; using element_t = double; using data_t = pxr::GfVec4d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4d[]>(); template <> struct attribute_type_traits<pxr::GfVec4d[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static 
constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4d>::value; using actual_t = pxr::GfVec4d[]; using element_t = double; using data_t = pxr::GfVec4d; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4f>(); template <> struct attribute_type_traits<pxr::GfVec4f> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4f>::value; using actual_t = pxr::GfVec4f; using element_t = float; using data_t = pxr::GfVec4f; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4f[]>(); template <> struct attribute_type_traits<pxr::GfVec4f[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4f>::value; using actual_t = pxr::GfVec4f[]; using element_t = float; using data_t = pxr::GfVec4f; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4h>(); template <> struct attribute_type_traits<pxr::GfVec4h> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = 
true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4h>::value; using actual_t = pxr::GfVec4h; using element_t = pxr::GfHalf; using data_t = pxr::GfVec4h; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4h[]>(); template <> struct attribute_type_traits<pxr::GfVec4h[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4h>::value; using actual_t = pxr::GfVec4h[]; using element_t = pxr::GfHalf; using data_t = pxr::GfVec4h; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4i>(); template <> struct attribute_type_traits<pxr::GfVec4i> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4i>::value; using actual_t = pxr::GfVec4i; using element_t = int; using data_t = pxr::GfVec4i; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfVec4i[]>(); template <> struct attribute_type_traits<pxr::GfVec4i[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static 
constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfVec4i>::value; using actual_t = pxr::GfVec4i[]; using element_t = int; using data_t = pxr::GfVec4i; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatd>(); template <> struct attribute_type_traits<pxr::GfQuatd> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatd>::value; using actual_t = pxr::GfQuatd; using element_t = double; using data_t = pxr::GfQuatd; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatd[]>(); template <> struct attribute_type_traits<pxr::GfQuatd[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatd>::value; using actual_t = pxr::GfQuatd[]; using element_t = double; using data_t = pxr::GfQuatd; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatf>(); template <> struct attribute_type_traits<pxr::GfQuatf> { static constexpr bool 
isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatf>::value; using actual_t = pxr::GfQuatf; using element_t = float; using data_t = pxr::GfQuatf; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuatf[]>(); template <> struct attribute_type_traits<pxr::GfQuatf[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuatf>::value; using actual_t = pxr::GfQuatf[]; using element_t = float; using data_t = pxr::GfQuatf; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuath>(); template <> struct attribute_type_traits<pxr::GfQuath> { static constexpr bool isArray = false; static constexpr int arrayDepth = 0; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = true; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = false; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuath>::value; using actual_t = pxr::GfQuath; using element_t = pxr::GfHalf; using data_t = pxr::GfQuath; static constexpr int tupleCount = 4; }; // -------------------------------------------------------------------------------------------------------------- // auto value = runtimeAttribute.get<pxr::GfQuath[]>(); template <> 
struct attribute_type_traits<pxr::GfQuath[]> { static constexpr bool isArray = true; static constexpr int arrayDepth = 1; static constexpr bool isSimpleType = false; static constexpr bool isTupleType = false; static constexpr bool isArrayType = false; static constexpr bool isTupleArrayType = true; static constexpr BaseDataType baseType = attribute_base_t<pxr::GfQuath>::value; using actual_t = pxr::GfQuath[]; using element_t = pxr::GfHalf; using data_t = pxr::GfQuath; static constexpr int tupleCount = 4; }; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
37,767
C
42.712963
121
0.622792
omniverse-code/kit/include/omni/graph/core/ogn/OmniGraphNodeABI.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/iComputeGraph.h> #include <omni/graph/core/ComputeGraph.h> #include <omni/graph/core/IGraphRegistry.h> #include <omni/graph/core/TemplateUtils.h> #include <omni/graph/core/ogn/Database.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/unstable/INodeTypeForwarding.h> #include <carb/profiler/Profile.h> #include <carb/Framework.h> #include <carb/logging/Log.h> // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // OmniGraphNode_ABI Templated base class for generated OmniGraph node type definitions // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= // Uncomment the first line to see debug output, the second to see nothing // #define OGN_DBG #define OGN_DBG if (false) #include <iostream> namespace omni { namespace graph { namespace core { namespace ogn { // ====================================================================== // Function declarations for registering and deregistering node types. Used so that a node type // can either use the static functions available within the core or the ABI functions outside of it. 
using OmniGraphNodeRegisterFn = void (*)(const omni::graph::core::INodeType&, int);
using OmniGraphNodeDeregisterFn = void (*)(const char*);
using OmniGraphNodeRegisterAliasFn = void (*)(const omni::graph::core::INodeType&, const char*);

// ======================================================================
// The has_XXX templated types are a set of metaprograms that use type dispatching in conjunction with the
// is_detected template to resolve to a std::true_type when a class contains a certain function and std::false_type
// when it does not. This allows compile time choice of two overloaded versions of each of the ABI functions from
// the call_XXX() version of them.
//
// See the description of is_detected in TemplateUtils.h for more details on how the template match occurs.
//
// The functions defined are the ones that are used as part of the interface to the OmniGraph Node C ABI.
// The formatting is consistent to make it easier to recognize and add new types:
//     Line 1. The template declaration
//     Line 2. The typename declaration, with the expected return type of the function
//     Line 3. The function name declaration
//     Lines 4+. Declarations for each of the function parameters
//
// NOTE(review): the "expected return type" passed as the first argument of std::is_same is not actually
// enforced. std::is_same<R, T>::value_type is always 'bool' (it is the value_type of the underlying
// std::integral_constant), whether or not R matches T. The SFINAE match therefore only detects that the
// member function EXISTS and is callable with the listed arguments; a mismatched return type still counts
// as "detected". The signature comments below have been corrected to match what the code really checks
// and what the call_XXX() call sites expect.
// clang-format off
// ----------------------------------------------------------------------------------------------------
// static void addInput(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)
template <class NodeTypeClass>
using has_addInput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addInput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<const void*>(),
        std::declval<const size_t*>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void addOutput(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)
template <class NodeTypeClass>
using has_addOutput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addOutput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<const void*>(),
        std::declval<const size_t*>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void addState(const NodeTypeObj&, const char*, const char*, bool, const void*, const size_t*)
template <class NodeTypeClass>
using has_addState = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addState(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<const void*>(),
        std::declval<const size_t*>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void addExtendedInput(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)
template <class NodeTypeClass>
using has_addExtendedInput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addExtendedInput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<ExtendedAttributeType>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void addExtendedOutput(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)
template <class NodeTypeClass>
using has_addExtendedOutput = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addExtendedOutput(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<ExtendedAttributeType>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void addExtendedState(const NodeTypeObj&, const char*, const char*, bool, ExtendedAttributeType)
template <class NodeTypeClass>
using has_addExtendedState = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addExtendedState(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>(),
        std::declval<bool>(),
        std::declval<ExtendedAttributeType>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static bool hasState(const NodeTypeObj&)
template <class NodeTypeClass>
using has_hasState = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().hasState(
        std::declval<const NodeTypeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void setHasState(const NodeTypeObj&, bool)
template <class NodeTypeClass>
using has_setHasState = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().setHasState(
        std::declval<const NodeTypeObj&>(),
        std::declval<bool>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static bool compute(const GraphContextObj&, const NodeObj&)
template <class NodeTypeClass>
using has_computeABI = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().compute(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static bool compute(NodeTypeDataClass&)
// Note the non-const NodeTypeClass here: the OGN-style compute is detected on a mutable object.
template <class NodeTypeClass, class NodeTypeDataClass>
using has_computeOGNT = typename std::is_same<bool,
    decltype(std::declval<NodeTypeClass&>().compute(
        std::declval<NodeTypeDataClass&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static size_t computeVectorized(const GraphContextObj&, const NodeObj&, size_t)
template <class NodeTypeClass>
using has_computeVectorizedABI = typename std::is_same<size_t,
    decltype(std::declval<const NodeTypeClass&>().computeVectorized(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<size_t>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static size_t computeVectorized(NodeTypeDataClass&, size_t)
template <class NodeTypeClass, class NodeTypeDataClass>
using has_computeVectorizedOGNT = typename std::is_same<size_t,
    decltype(std::declval<NodeTypeClass&>().computeVectorized(
        std::declval<NodeTypeDataClass&>(),
        std::declval<size_t>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static const char* getNodeType()
template <class NodeTypeClass>
using has_getNodeType = typename std::is_same<const char*,
    decltype(std::declval<const NodeTypeClass&>().getNodeType(
        ))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static const char* getTypeName(const NodeTypeObj&)
template <class NodeTypeClass>
using has_getTypeName = typename std::is_same<const char*,
    decltype(std::declval<const NodeTypeClass&>().getTypeName(
        std::declval<const NodeTypeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static size_t getScheduleNodeCount(const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t)
// (deprecated; the call_XXX site expects a size_t return even though the is_same target here is void)
template <class NodeTypeClass>
using has_getScheduleNodeCount = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().getScheduleNodeCount(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<const ScheduleNodeObj*>(),
        std::declval<size_t>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void getScheduleNodes(const GraphContextObj&, const NodeObj&, const ScheduleNodeObj*, size_t,
//                              ScheduleNodeObj*, size_t) (deprecated)
template <class NodeTypeClass>
using has_getScheduleNodes = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().getScheduleNodes(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<const ScheduleNodeObj*>(),
        std::declval<size_t>(),
        std::declval<ScheduleNodeObj*>(),
        std::declval<size_t>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void initialize(const GraphContextObj&, const NodeObj&)
template <class NodeTypeClass>
using has_initialize = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().initialize(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void initializeType(const NodeTypeObj&)
template <class NodeTypeClass>
using has_initializeType = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().initializeType(
        std::declval<const NodeTypeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void registerTasks()
template <class NodeTypeClass>
using has_registerTasks = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().registerTasks(
        ))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void release(const NodeObj&)
template <class NodeTypeClass>
using has_release = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().release(
        std::declval<const NodeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static bool updateNodeVersion(const GraphContextObj&, const NodeObj&, int, int)
template <class NodeTypeClass>
using has_updateNodeVersion = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().updateNodeVersion(
        std::declval<const GraphContextObj&>(),
        std::declval<const NodeObj&>(),
        std::declval<int>(),
        std::declval<int>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static size_t getAllMetadata(const NodeTypeObj&, const char**, const char**, size_t)
template <class NodeTypeClass>
using has_getAllMetadata = typename std::is_same<size_t,
    decltype(std::declval<const NodeTypeClass&>().getAllMetadata(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char**>(),
        std::declval<const char**>(),
        std::declval<size_t>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static const char* getMetadata(const NodeTypeObj&, const char*)
template <class NodeTypeClass>
using has_getMetadata = typename std::is_same<const char*,
    decltype(std::declval<const NodeTypeClass&>().getMetadata(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static size_t getMetadataCount(const NodeTypeObj&)
template <class NodeTypeClass>
using has_getMetadataCount = typename std::is_same<size_t,
    decltype(std::declval<const NodeTypeClass&>().getMetadataCount(
        std::declval<const NodeTypeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void setMetadata(const NodeTypeObj&, const char*, const char*)
template <class NodeTypeClass>
using has_setMetadata = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().setMetadata(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const char*>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void addSubNodeType(const NodeTypeObj&, const char*, const NodeTypeObj&)
template <class NodeTypeClass>
using has_addSubNodeType = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().addSubNodeType(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>(),
        std::declval<const NodeTypeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static NodeTypeObj getSubNodeType(const NodeTypeObj&, const char*)
template <class NodeTypeClass>
using has_getSubNodeType = typename std::is_same<NodeTypeObj,
    decltype(std::declval<const NodeTypeClass&>().getSubNodeType(
        std::declval<const NodeTypeObj&>(),
        std::declval<const char*>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static NodeTypeObj createNodeType(const char*, int)
template <class NodeTypeClass>
using has_createNodeType = typename std::is_same<NodeTypeObj,
    decltype(std::declval<const NodeTypeClass&>().createNodeType(
        std::declval<const char*>(),
        std::declval<int>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static void onConnectionTypeResolve(const NodeObj&)
// (comment previously said NodeTypeObj; the code detects a NodeObj parameter)
template <class NodeTypeClass>
using has_onConnectionTypeResolve = typename std::is_same<void,
    decltype(std::declval<const NodeTypeClass&>().onConnectionTypeResolve(
        std::declval<const NodeObj&>()))>::value_type;
// ----------------------------------------------------------------------------------------------------
// static bool inspect(const NodeTypeObj&, inspect::IInspector*)
template <class NodeTypeClass>
using has_inspect = typename std::is_same<bool,
    decltype(std::declval<const NodeTypeClass&>().inspect(
        std::declval<const NodeTypeObj&>(),
        std::declval<inspect::IInspector*>()))>::value_type;
// clang-format on

// ==============================================================================================================
/**
 * @brief Common base class for all node type implementation definitions, so that they can be in a common container
 *
 * NOTE(review): this class is polymorphic (pure virtual populateNodeTypeInterface) but declares no virtual
 * destructor, so deleting a derived object through a NodeTypeABI* would be undefined behavior — confirm that
 * instances are never destroyed through a base pointer before relying on that.
 */
class NodeTypeABI
{
protected:
    // Remembering these values allow the node type to be registered and deregistered at will
    const char* m_nodeTypeName{ nullptr }; //!< Unique name of the node type
    int m_version{ 1 }; //!< Current version of the node type
    const char* m_extensionName{ nullptr }; //!< Extension to which this node type belongs

    /**
     * @brief Construct a new NodeTypeABI object
     *
     * @param nodeTypeName Unique name of the node type
     * @param version Version number of the node type
     * @param extensionName Extension to which this node type belongs
     */
    NodeTypeABI(const char* nodeTypeName, int version, const char* extensionName)
        : m_nodeTypeName{nodeTypeName}
        , m_version(version)
        , m_extensionName(extensionName)
    {}

public:
    // --------------------------------------------------------------------------------------------------------------
    /**
     * @brief Populate an INodeType interface with the functions that implement this particular templated node type
     *
     * @param[out] nodeTypeInterface Interface to be populated
     */
    virtual void populateNodeTypeInterface(INodeType& nodeTypeInterface) const = 0;

    // --------------------------------------------------------------------------------------------------------------
    /**
     * @brief Register the node type encapsulated in this description.
     *
     * Fills an INodeType function table via populateNodeTypeInterface() and registers it with the graph
     * registry under the stored version. Also sets up name forwarding when the node overrides getNodeType()
     * with a name different from the .ogn-generated one, and warns once about the deprecated scheduling ABI.
     */
    void registerNodeType(IGraphRegistry& iGraphRegistry)
    {
        INodeType iNodeType{};
        populateNodeTypeInterface(iNodeType);
        OGN_DBG std::cout << "DBG: ABI Registration of " << m_nodeTypeName << ", version " << m_version << " from "
                          << m_extensionName << std::endl;
        iGraphRegistry.registerNodeTypeInterface(iNodeType, m_version, sizeof(INodeType));
        // One potential source of node type forwarding is if the node has overridden the getNodeType() method and
        // supplied a different name than the one generated through the .ogn file. Add that one here.
        if (iNodeType.getNodeType)
        {
            const char* overriddenTypeName = iNodeType.getNodeType();
            if (strcmp(overriddenTypeName, m_nodeTypeName) != 0)
            {
                auto iNodeTypeForwarding = carb::getCachedInterface<ComputeGraph>()->getNodeTypeForwardingInterfacePtr();
                if (iNodeTypeForwarding)
                {
                    // Forward the .ogn-generated name to the overridden name at the same version
                    iNodeTypeForwarding->defineForward(m_nodeTypeName, m_version, overriddenTypeName, m_version, m_extensionName);
                }
            }
        }
        if (iNodeType.getScheduleNodeCount || iNodeType.getScheduleNodes)
        {
            CARB_LOG_WARN_ONCE(
                "%s: getScheduleNodeCount() and getScheduleNodes() are deprecated, please remove", m_nodeTypeName);
        }
    }

    // --------------------------------------------------------------------------------------------------------------
    /**
     * @brief Deregister the node type encapsulated in this description.
     */
    void deregisterNodeType(IGraphRegistry& iGraphRegistry)
    {
        iGraphRegistry.unregisterNodeType(m_nodeTypeName);
    }
};

// ======================================================================
/**
 * @brief ABI proxy class for OGN generated nodes. It provides implementations for all of the
 * INodeType ABI functions which will call the actual node's versions of those functions
 * if the node defines them.
* * This class uses a technique called "tag dispatching", which is a compile-time switch that decides * which version of a method will be called. By defining overloaded methods taking either the * `std::true_type` or `std::false_type` type as the first parameter, the version that is called can * be decided at compile time by using a template that instantiates one of the two. * * In this template class the methods "X()" use tag dispatching to decide which two versions of the `call_X()` * method to call. The version accepting `std::true_type` is called when an override of `X()` is detected and calls * the override directly. The version accepting std::false_type performs the default version of `X()`. * * Each of the INode ABI functions is implemented with these three functions. Here is an annotated example * of how this works for a fictional ABI method X that takes a single int argument: * * @code{.cpp} * // By making this a template it can change types at compile-time * template <class NodeTypeClass> * // std::is_same will check to see if the named type matches the return type of the declared function * using has_X = typename std::is_same<void, * // decltype extracts the type of the declared entity * // declval gives a compile-time stand-in for the declared entity * // This statement says "get the type of the value returned by a NodeTypeClass function named X" * decltype(std::declval<const NodeTypeClass&>().X( * // This line adds the requirement that the function X takes an integer argument * std::declval<int>()) * // The value_type trait gets the return type of the is_same template (std::true_type/std::false_type) * )>::value_type; * // These are two overloads of the same method * // Since the last parameters are incompatible only one can be chosen * void call_X(int value, std::true_type) {} * void call_X(int value, std::false_type) {} * // This is the public method called from outside the class * void X(int value) { * // If NodeTypeClass::X exists then 
is_detected<> selects the std::true_type variation * // for the first argument. Subsequent arguments are passed through from this method's arguments. * call_X(is_detected<has_X, NodeTypeClass>(), value); * } * @endcode * * For a full description of the SFINAE technique in general and the tag dispatching implementation see * https://www.bfilipek.com/2016/02/notes-on-c-sfinae.html#tag-dispatching * * The net result of this metaprogramming is that OmniGraphNode_ABI<MyNode, MyNodeDatabase> instantiates * an ABI-compatible class that calls overrides in MyNode where available and the default where not. This * is functionally equivalent to acquiring and extracting the Node ABI interface and calling the methods * on it. MyNodeDatabase is automatically generated by the .ogn processor and MyNode is the class the node * writer provides, giving them full control over the ABI implementation if they wish, and use of the * default implementation and generated helper classes if not. * * @tparam NodeTypeClass Class the user has defined for implementing the custom parts of the node type interface * @tparam NodeTypeDataClass Generated database class for @p NodeTypeClass */ template <typename NodeTypeClass, typename NodeTypeDataClass> class OmniGraphNode_ABI : public NodeTypeABI { static void call_addInput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Override(addInput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addInput(nodeType, name, typeName, required, defaultValuePtr, defaultElemCountPtr); } static void call_addInput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Default(addInput " << name << ", " << typeName << ")" << std::endl; return; } // 
---------------------------------------------------------------------- static void call_addOutput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Override(addOutput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addOutput(nodeType, name, typeName, required, defaultValuePtr, defaultElemCountPtr); } static void call_addOutput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Default(addOutput " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addState(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Override(addState " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addState(nodeType, name, typeName, required, defaultValuePtr, defaultElemCountPtr); } static void call_addState(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, const void* defaultValuePtr, const size_t* defaultElemCountPtr) noexcept { OGN_DBG std::cout << "DBG: Default(addState " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addExtendedInput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Override(addExtendedInput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addExtendedInput(nodeType, name, typeName, required, 
extendedType); } static void call_addExtendedInput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Default(addExtendedInput " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addExtendedOutput(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Override(addExtendedOutput " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addExtendedOutput(nodeType, name, typeName, required, extendedType); } static void call_addExtendedOutput(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType) noexcept { OGN_DBG std::cout << "DBG: Default(addExtendedOutput " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static void call_addExtendedState(std::true_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Override(addExtendedState " << name << ", " << typeName << ")" << std::endl; NodeTypeClass::addExtendedState(nodeType, name, typeName, required, extendedType); } static void call_addExtendedState(std::false_type, const NodeTypeObj& nodeType, const char* name, const char* typeName, bool required, ExtendedAttributeType extendedType) noexcept { OGN_DBG std::cout << "DBG: Default(addExtendedState " << name << ", " << typeName << ")" << std::endl; return; } // ---------------------------------------------------------------------- static bool call_hasState(std::true_type, const NodeTypeObj& nodeType) noexcept { OGN_DBG std::cout << "DBG: Override(hasState)" << std::endl; 
return NodeTypeClass::hasState(nodeType); } static bool call_hasState(std::false_type, const NodeTypeObj&) noexcept { OGN_DBG std::cout << "DBG: Default(hasState)" << std::endl; return false; } // ---------------------------------------------------------------------- static void call_setHasState(std::true_type, const NodeTypeObj& nodeType, bool hasState) noexcept { OGN_DBG std::cout << "DBG: Override(setHasState)" << std::endl; NodeTypeClass::setHasState(nodeType, hasState); } static void call_setHasState(std::false_type, const NodeTypeObj&, bool) noexcept { OGN_DBG std::cout << "DBG: Default(setHasState)" << std::endl; } // ---------------------------------------------------------------------- template<typename FUNC> static bool call_computeCommonOGN(const GraphContextObj& context, const NodeObj& node, FUNC const& compute, size_t batchCount) { auto create = [](GraphContextObj const* contexts, NodeObj const* nodes, size_t count) -> ogn::OmniGraphDatabase* { return new NodeTypeDataClass(contexts, nodes, count); }; NodeTypeDataClass* nodeDataPtr = reinterpret_cast<NodeTypeDataClass*>(node.iNode->getOgnDatabase(node, create)); if (nodeDataPtr == nullptr || nodeDataPtr->validate() == false) return false; //warn the node we're about to compute node.iNode->increaseComputeCount(node, batchCount); // Call compute nodeDataPtr->preCompute(); bool result = compute(*nodeDataPtr); return result; } // ---------------------------------------------------------------------- template <typename T> using has_computeOGN = has_computeOGNT<T, NodeTypeDataClass>; template <typename T> using has_computeVectorizedOGN = has_computeVectorizedOGNT<T, NodeTypeDataClass>; using regOGN = is_detected<has_computeOGN, NodeTypeClass>; using regABI = is_detected<has_computeABI, NodeTypeClass>; using vecOGN = is_detected<has_computeVectorizedOGN, NodeTypeClass>; using vecABI = is_detected<has_computeVectorizedABI, NodeTypeClass>; static_assert((regOGN() || regABI()) ^ (vecOGN() || vecABI()), 
"Implements 'compute' xor 'computeVectorized' (ie. not both)"); //=============== template <typename UNSED1, typename UNSED2, typename UNSED3> //regOGN, regABI, vecOGN, vecABI static bool call_compute(std::true_type, UNSED1, UNSED2, UNSED3, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[Compute][OGN] %s", node.iNode->getPrimPath(node)); return call_computeCommonOGN(context, node, NodeTypeClass::compute, 1); } //=============== template <typename UNSED1, typename UNSED2> // regOGN, regABI, vecOGN, vecABI static bool call_compute( std::false_type, std::true_type, UNSED1, UNSED2, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[Compute][ABI] %s", node.iNode->getPrimPath(node)); node.iNode->increaseComputeCount(node, 1); return NodeTypeClass::compute(context, node); } //=============== template <typename UNSED1> // regOGN, regABI, vecOGN, vecABI static bool call_compute(std::false_type,std::false_type, std::true_type, UNSED1, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][OGN][x1] %s", node.iNode->getPrimPath(node)); auto adapter = [](NodeTypeDataClass& db) { return NodeTypeClass::computeVectorized(db, 1) != 0; }; return call_computeCommonOGN(context, node, adapter, 1); } //=============== // regOGN, regABI, vecOGN, vecABI static bool call_compute(std::false_type,std::false_type, std::false_type, std::true_type, const GraphContextObj& context, const NodeObj& node) noexcept { CARB_PROFILE_ZONE( carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][ABI][x1] %s", node.iNode->getPrimPath(node)); node.iNode->increaseComputeCount(node, 1); return NodeTypeClass::computeVectorized(context, node, 1) != 0; } // ---------------------------------------------------------------------- //=============== template <typename UNSED1, typename 
UNSED2, typename UNSED3> //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized( UNSED1, UNSED2, std::true_type, UNSED3, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { // user has implemented the OGN version size_t ret = 0; auto adapter = [count, &ret, &node](NodeTypeDataClass& db) { db.resetToFirstInstance(); if (db.canVectorize()) // auto conversion for instance might prevent vectorization { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][OGN][x%d] %s", (int)count, node.iNode->getPrimPath(node)); ret = NodeTypeClass::computeVectorized(db, count); return true; } CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][OGN][1 by 1][x%d] %s", (int)count, node.iNode->getPrimPath(node)); auto remaining = count; db.resetToFirstInstance(); while (remaining--) { if (NodeTypeClass::computeVectorized(db, 1) != 0) ++ret; db.moveToNextInstance(); } db.resetToFirstInstance(); return true; }; call_computeCommonOGN(context, node, adapter, count); return ret; } //=============== template <typename UNSED1, typename UNSED2> //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized( UNSED1, UNSED2, std::false_type, std::true_type, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[ComputeVectorized][ABI][x%d] %s", (int)count, node.iNode->getPrimPath(node)); // user has implemented the ABI version node.iNode->increaseComputeCount(node, count); return NodeTypeClass::computeVectorized(context, node, count); } //=============== template <typename UNSED1> //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized( std::true_type, UNSED1, std::false_type, std::false_type, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { // user has implemented the OGN version size_t ret = 0; auto adapter = [count, &ret, &node](NodeTypeDataClass& db) { 
CARB_PROFILE_ZONE(carb::profiler::kCaptureMaskDefault, "[Compute][OGN][1 by 1][x%d] %s", (int)count, node.iNode->getPrimPath(node)); auto remaining = count; db.resetToFirstInstance(); while (remaining--) { if (NodeTypeClass::compute(db)) ret++; db.moveToNextInstance(); } db.resetToFirstInstance(); return true; }; call_computeCommonOGN(context, node, adapter, count); return ret; } //=============== //regOGN, regABI, vecOGN, vecABI static size_t call_computeVectorized(std::false_type, std::true_type, std::false_type, std::false_type, const GraphContextObj& context, const NodeObj& node, size_t count) noexcept { //unreachable CARB_LOG_FATAL("reaching abnormal code path in OmniGraphNodeABI"); return 0; } // ---------------------------------------------------------------------- static const char* call_getNodeType(std::true_type) noexcept { OGN_DBG std::cout << "DBG: Override(getNodeType)" << std::endl; return NodeTypeClass::getNodeType(); } static const char* call_getNodeType(std::false_type) noexcept { // The node type name must always be available, usually as the one the .ogn file specified OGN_DBG std::cout << "DBG: Default(getNodeType)" << std::endl; return sm_nodeTypeName; } // ---------------------------------------------------------------------- static const char* call_getTypeName(std::true_type, const NodeTypeObj& nodeType) noexcept { OGN_DBG std::cout << "DBG: Override(getTypeName)" << std::endl; return NodeTypeClass::getTypeName(nodeType); } static const char* call_getTypeName(std::false_type, const NodeTypeObj& nodeType) noexcept { OGN_DBG std::cout << "DBG: Default(getTypeName)" << std::endl; return sm_nodeTypeName; } // ---------------------------------------------------------------------- static size_t call_getScheduleNodeCount(std::true_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize) noexcept { OGN_DBG std::cout << "DBG: Override(getScheduleNodeCount)" << std::endl; 
return NodeTypeClass::getScheduleNodeCount(context, node, upstreamScheduleNodesBuf, upstreamBufferSize); } static size_t call_getScheduleNodeCount(std::false_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize) noexcept { OGN_DBG std::cout << "DBG: Default(getScheduleNodeCount)" << std::endl; return 0; } // ---------------------------------------------------------------------- static void call_getScheduleNodes(std::true_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize, ScheduleNodeObj* scheduleNodesBuf, size_t bufferSize) noexcept { OGN_DBG std::cout << "DBG: Override(getScheduleNodes)" << std::endl; NodeTypeClass::getScheduleNodes( context, node, upstreamScheduleNodesBuf, upstreamBufferSize, scheduleNodesBuf, bufferSize); } static void call_getScheduleNodes(std::false_type, const GraphContextObj& context, const NodeObj& node, const ScheduleNodeObj* upstreamScheduleNodesBuf, size_t upstreamBufferSize, ScheduleNodeObj* scheduleNodesBuf, size_t bufferSize) noexcept { OGN_DBG std::cout << "DBG: Default(getScheduleNodes)" << std::endl; return; } // ---------------------------------------------------------------------- // The generated database class may have overrides for the initialize() method if attribute metadata is present // so this function has two layers of calls. 
// ----------------------------------------------------------------------
// Tag-dispatch helpers: each pair of call_* overloads selects between the node
// type's own override (std::true_type, chosen when is_detected<has_X, ...> is
// true) and a default behavior (std::false_type).  The OGN_DBG-guarded prints
// trace which branch was taken.
static void call_database_initialize(std::true_type, const GraphContextObj& context, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Database Override(initialize)" << std::endl;
    NodeTypeDataClass::initialize(context, node);
}
static void call_database_initialize(std::false_type, const GraphContextObj& context, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Default(initialize)" << std::endl;
    return;
}
static void call_initialize(std::true_type, const GraphContextObj& context, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Override(initialize)" << std::endl;
    // The database initialize (attribute metadata etc.) runs before the node's own initialize
    call_database_initialize(is_detected<has_initialize, NodeTypeDataClass>(), context, node);
    NodeTypeClass::initialize(context, node);
}
static void call_initialize(std::false_type, const GraphContextObj& context, const NodeObj& node) noexcept
{
    call_database_initialize(is_detected<has_initialize, NodeTypeDataClass>(), context, node);
    return;
}
// ----------------------------------------------------------------------
static void call_initializeType(std::true_type, const NodeTypeObj& nodeType) noexcept
{
    OGN_DBG std::cout << "DBG: Override(initializeType)" << std::endl;
    // Rely on the database to handle the case of state attributes, this is just for internal state data
    if (!std::is_empty<NodeTypeClass>::value)
    {
        nodeType.iNodeType->setHasState(nodeType, true);
    }
    NodeTypeDataClass::initializeType(nodeType);
    NodeTypeClass::initializeType(nodeType);
}
static void call_initializeType(std::false_type, const NodeTypeObj& nodeType) noexcept
{
    OGN_DBG std::cout << "DBG: Default(initializeType)" << std::endl;
    // Rely on the database to handle the case of state attributes, this is just for internal state data
    if (!std::is_empty<NodeTypeClass>::value)
    {
        nodeType.iNodeType->setHasState(nodeType, true);
    }
    NodeTypeDataClass::initializeType(nodeType);
}
// ----------------------------------------------------------------------
static void call_registerTasks(std::true_type) noexcept
{
    OGN_DBG std::cout << "DBG: Override(registerTasks)" << std::endl;
    NodeTypeClass::registerTasks();
}
static void call_registerTasks(std::false_type) noexcept
{
    OGN_DBG std::cout << "DBG: Default(registerTasks)" << std::endl;
    return;
}
// ----------------------------------------------------------------------
static void call_release(std::true_type, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Override(release)" << std::endl;
    NodeTypeClass::release(node);
}
static void call_release(std::false_type, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Default(release)" << std::endl;
    return;
}
// ----------------------------------------------------------------------
static bool call_updateNodeVersion(
    std::true_type, const GraphContextObj& context, const NodeObj& node, int oldVersion, int newVersion) noexcept
{
    OGN_DBG std::cout << "DBG: Override(updateNodeVersion)" << std::endl;
    return NodeTypeClass::updateNodeVersion(context, node, oldVersion, newVersion);
}
static bool call_updateNodeVersion(
    std::false_type, const GraphContextObj& context, const NodeObj& node, int oldVersion, int newVersion) noexcept
{
    OGN_DBG std::cout << "DBG: Default(updateNodeVersion)" << std::endl;
    // Default: report the version update as handled
    return true;
}
// ----------------------------------------------------------------------
static size_t call_getAllMetadata(
    std::true_type, const NodeTypeObj& nodeType, const char** keyBuf, const char** valueBuf, size_t bufSize) noexcept
{
    OGN_DBG std::cout << "DBG: Override(getAllMetadata)" << std::endl;
    return NodeTypeClass::getAllMetadata(nodeType, keyBuf, valueBuf, bufSize);
}
static size_t call_getAllMetadata(
    std::false_type, const NodeTypeObj& nodeType, const char** keyBuf, const char** valueBuf, size_t bufSize) noexcept
{
    OGN_DBG std::cout << "DBG: Default(getAllMetadata)" << std::endl;
    return 0;
}
// ----------------------------------------------------------------------
static const char* call_getMetadata(std::true_type, const NodeTypeObj& nodeType, const char* key) noexcept
{
    OGN_DBG std::cout << "DBG: Override(getMetadata)" << std::endl;
    return NodeTypeClass::getMetadata(nodeType, key);
}
static const char* call_getMetadata(std::false_type, const NodeTypeObj& nodeType, const char* key) noexcept
{
    OGN_DBG std::cout << "DBG: Default(getMetadata)" << std::endl;
    return nullptr;
}
// ----------------------------------------------------------------------
static size_t call_getMetadataCount(std::true_type, const NodeTypeObj& nodeType) noexcept
{
    OGN_DBG std::cout << "DBG: Override(getMetadataCount)" << std::endl;
    return NodeTypeClass::getMetadataCount(nodeType);
}
static size_t call_getMetadataCount(std::false_type, const NodeTypeObj& nodeType) noexcept
{
    OGN_DBG std::cout << "DBG: Default(getMetadataCount)" << std::endl;
    return 0;
}
// ----------------------------------------------------------------------
static void call_setMetadata(std::true_type, const NodeTypeObj& nodeType, const char* key, const char* value) noexcept
{
    OGN_DBG std::cout << "DBG: Override(setMetadata)" << std::endl;
    NodeTypeClass::setMetadata(nodeType, key, value);
}
static void call_setMetadata(std::false_type, const NodeTypeObj& nodeType, const char* key, const char* value) noexcept
{
    OGN_DBG std::cout << "DBG: Default(setMetadata)" << std::endl;
}
// ----------------------------------------------------------------------
static void call_addSubNodeType(std::true_type,
                                const NodeTypeObj& nodeType,
                                const char* subNodeTypeName,
                                const NodeTypeObj& subNodeType) noexcept
{
    OGN_DBG std::cout << "DBG: Override(addSubNodeType)" << std::endl;
    NodeTypeClass::addSubNodeType(nodeType, subNodeTypeName, subNodeType);
}
static void call_addSubNodeType(std::false_type,
                                const NodeTypeObj& nodeType,
                                const char* subNodeTypeName,
                                const NodeTypeObj& subNodeType) noexcept
{
    OGN_DBG std::cout << "DBG: Default(addSubNodeType)" << std::endl;
}
// ----------------------------------------------------------------------
static NodeTypeObj call_getSubNodeType(std::true_type, const NodeTypeObj& nodeType, const char* subNodeTypeName) noexcept
{
    OGN_DBG std::cout << "DBG: Override(getSubNodeType)" << std::endl;
    return NodeTypeClass::getSubNodeType(nodeType, subNodeTypeName);
}
static NodeTypeObj call_getSubNodeType(std::false_type, const NodeTypeObj& nodeType, const char* subNodeTypeName) noexcept
{
    OGN_DBG std::cout << "DBG: Default(getSubNodeType)" << std::endl;
    return NodeTypeObj();
}
// ----------------------------------------------------------------------
static NodeTypeObj call_createNodeType(std::true_type, const char* nodeTypeName, int version) noexcept
{
    OGN_DBG std::cout << "DBG: Override(createNodeType)" << std::endl;
    return NodeTypeClass::createNodeType(nodeTypeName, version);
}
static NodeTypeObj call_createNodeType(std::false_type, const char* nodeTypeName, int version) noexcept
{
    OGN_DBG std::cout << "DBG: Default(createNodeType)" << std::endl;
    return NodeTypeObj();
}
// ----------------------------------------------------------------------
static void call_database_onConnectionTypeResolve(std::true_type, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Database Override(onConnectionTypeResolve)" << std::endl;
    NodeTypeDataClass::onConnectionTypeResolve(node);
}
static void call_database_onConnectionTypeResolve(std::false_type, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Database Default(onConnectionTypeResolve)" << std::endl;
}
static void call_onConnectionTypeResolve(std::true_type, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Override(onConnectionTypeResolve)" << std::endl;
    // The node's override runs first, then the database gets a chance to react
    NodeTypeClass::onConnectionTypeResolve(node);
    call_database_onConnectionTypeResolve(is_detected<has_onConnectionTypeResolve, NodeTypeDataClass>(), node);
}
static void call_onConnectionTypeResolve(std::false_type, const NodeObj& node) noexcept
{
    OGN_DBG std::cout << "DBG: Default(onConnectionTypeResolve)" << std::endl;
    call_database_onConnectionTypeResolve(is_detected<has_onConnectionTypeResolve, NodeTypeDataClass>(), node);
}
// ----------------------------------------------------------------------
static bool call_inspect(std::true_type, const NodeTypeObj& nodeType, inspect::IInspector* inspector) noexcept
{
    OGN_DBG std::cout << "DBG: Override(inspect)" << std::endl;
    return NodeTypeClass::inspect(nodeType, inspector);
}
static bool call_inspect(std::false_type, const NodeTypeObj& nodeType, inspect::IInspector* inspector) noexcept
{
    OGN_DBG std::cout << "DBG: Default(inspect)" << std::endl;
    return false;
}

public:
    // ----------------------------------------------------------------------
    // These are the ABI implementations, which call the right version of the above call_* functions
    // based on whether the node class has custom implementations of the ABI functions or not.

    /**
     * Implementation of omni::graph::core::INodeType::addInput to use as part of a node type definition
     */
    static void addInput(const NodeTypeObj& nodeType,
                         const char* name,
                         const char* typeName,
                         bool required,
                         const void* defaultValuePtr,
                         const size_t* defaultElemCountPtr) noexcept
    {
        return call_addInput(is_detected<has_addInput, NodeTypeClass>(), nodeType, name, typeName, required,
                             defaultValuePtr, defaultElemCountPtr);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addOutput to use as part of a node type definition
     */
    static void addOutput(const NodeTypeObj& nodeType,
                          const char* name,
                          const char* typeName,
                          bool required,
                          const void* defaultValuePtr,
                          const size_t* defaultElemCountPtr) noexcept
    {
        return call_addOutput(is_detected<has_addOutput, NodeTypeClass>(), nodeType, name, typeName, required,
                              defaultValuePtr, defaultElemCountPtr);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addState to use as part of a node type definition
     */
    static void addState(const NodeTypeObj& nodeType,
                         const char* name,
                         const char* typeName,
                         bool required,
                         const void* defaultValuePtr,
                         const size_t* defaultElemCountPtr) noexcept
    {
        return call_addState(is_detected<has_addState, NodeTypeClass>(), nodeType, name, typeName, required,
                             defaultValuePtr, defaultElemCountPtr);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addExtendedInput to use as part of a node type definition
     */
    static void addExtendedInput(const NodeTypeObj& nodeType,
                                 const char* name,
                                 const char* typeName,
                                 bool required,
                                 ExtendedAttributeType extendedType) noexcept
    {
        return call_addExtendedInput(
            is_detected<has_addExtendedInput, NodeTypeClass>(), nodeType, name, typeName, required, extendedType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addExtendedOutput to use as part of a node type definition
     */
    static void addExtendedOutput(const NodeTypeObj& nodeType,
                                  const char* name,
                                  const char* typeName,
                                  bool required,
                                  ExtendedAttributeType extendedType) noexcept
    {
        return call_addExtendedOutput(
            is_detected<has_addExtendedOutput, NodeTypeClass>(), nodeType, name, typeName, required, extendedType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addExtendedState to use as part of a node type definition
     */
    static void addExtendedState(const NodeTypeObj& nodeType,
                                 const char* name,
                                 const char* typeName,
                                 bool required,
                                 ExtendedAttributeType extendedType) noexcept
    {
        return call_addExtendedState(
            is_detected<has_addExtendedState, NodeTypeClass>(), nodeType, name, typeName, required, extendedType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::hasState to use as part of a node type definition
     */
    static bool hasState(const NodeTypeObj& nodeType) noexcept
    {
        return call_hasState(is_detected<has_hasState, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::setHasState to use as part of a node type definition
     */
    static void setHasState(const NodeTypeObj& nodeType, bool hasState) noexcept
    {
        call_setHasState(is_detected<has_setHasState, NodeTypeClass>(), nodeType, hasState);
    }
    // If the compute ABI function is overridden the user loses all of the nice setup
    // we've done with the data class, however we won't disallow it as there may be reasons
    // for doing it that aren't currently apparent.
    /**
     * Implementation of omni::graph::core::INodeType::compute to use as part of a node type definition
     */
    static bool compute(const GraphContextObj& context, const NodeObj& node) noexcept
    {
        return call_compute(regOGN(), regABI(), vecOGN(), vecABI(), context, node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::computeVectorized to use as part of a node type definition
     */
    static size_t computeVectorized(const GraphContextObj& context, const NodeObj& node, size_t count) noexcept
    {
        return call_computeVectorized(regOGN(), regABI(), vecOGN(), vecABI(), context, node, count);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getNodeType to use as part of a node type definition
     */
    static const char* getNodeType()
    {
        return call_getNodeType(is_detected<has_getNodeType, NodeTypeClass>());
    }
    /**
     * Implementation of omni::graph::core::INodeType::getTypeName to use as part of a node type definition
     */
    static const char* getTypeName(const NodeTypeObj& nodeType)
    {
        return call_getTypeName(is_detected<has_getTypeName, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getScheduleNodeCount to use as part of a node type definition
     */
    static size_t getScheduleNodeCount(const GraphContextObj& context,
                                       const NodeObj& node,
                                       const ScheduleNodeObj* upstreamScheduleNodesBuf,
                                       size_t upstreamBufferSize) noexcept
    {
        return call_getScheduleNodeCount(is_detected<has_getScheduleNodeCount, NodeTypeClass>(), context, node,
                                         upstreamScheduleNodesBuf, upstreamBufferSize);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getScheduleNodes to use as part of a node type definition
     */
    static void getScheduleNodes(const GraphContextObj& context,
                                 const NodeObj& node,
                                 const ScheduleNodeObj* upstreamScheduleNodesBuf,
                                 size_t upstreamBufferSize,
                                 ScheduleNodeObj* scheduleNodesBuf,
                                 size_t bufferSize) noexcept
    {
        call_getScheduleNodes(is_detected<has_getScheduleNodes, NodeTypeClass>(), context, node,
                              upstreamScheduleNodesBuf, upstreamBufferSize, scheduleNodesBuf, bufferSize);
    }
    /**
     * Implementation of omni::graph::core::INodeType::initialize to use as part of a node type definition
     */
    static void initialize(const GraphContextObj& context, const NodeObj& node) noexcept
    {
        call_initialize(is_detected<has_initialize, NodeTypeClass>(), context, node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::initializeType to use as part of a node type definition
     */
    static void initializeType(const NodeTypeObj& nodeType) noexcept
    {
        call_initializeType(is_detected<has_initializeType, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::registerTasks to use as part of a node type definition
     */
    static void registerTasks() noexcept
    {
        call_registerTasks(is_detected<has_registerTasks, NodeTypeClass>());
    }
    /**
     * Implementation of omni::graph::core::INodeType::release to use as part of a node type definition
     */
    static void release(const NodeObj& node) noexcept
    {
        call_release(is_detected<has_release, NodeTypeClass>(), node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::releaseInstance to use as part of a node type definition
     */
    static void releaseInstance(const NodeObj& node, NameToken instanceID) noexcept
    {
        NodeTypeDataClass::release(node, instanceID);
    }
    /**
     * Implementation of omni::graph::core::INodeType::destroyDB to use as part of a node type definition
     */
    static void destroyDB(const NodeObj& node, ogn::OmniGraphDatabase* db)
    {
        // The database object was allocated as the concrete generated type
        delete static_cast<NodeTypeDataClass*>(db);
    }
    /**
     * Implementation of omni::graph::core::INodeType::notifyTypeResolution to use as part of a node type definition
     */
    static void notifyTypeResolution(AttributeObj const& attrib, ogn::OmniGraphDatabase* db)
    {
        static_cast<NodeTypeDataClass*>(db)->onTypeResolutionChanged(attrib);
    }
    /**
     * Implementation of omni::graph::core::INodeType::notifyDynamicAttributeChanged to use as part of a node type definition
     */
    static void notifyDynamicAttributeChanged(ogn::OmniGraphDatabase* db, AttributeObj const& attr, bool isAttributeCreated)
    {
        static_cast<NodeTypeDataClass*>(db)->onDynamicAttributesChanged(attr, isAttributeCreated);
    }
    /**
     * Implementation of omni::graph::core::INodeType::updateNodeVersion to use as part of a node type definition
     */
    static bool updateNodeVersion(const GraphContextObj& context, const NodeObj& node, int oldVersion, int newVersion) noexcept
    {
        return call_updateNodeVersion(
            is_detected<has_updateNodeVersion, NodeTypeClass>(), context, node, oldVersion, newVersion);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getAllMetadata to use as part of a node type definition
     */
    static size_t getAllMetadata(const NodeTypeObj& nodeType, const char** keyBuf, const char** valueBuf, size_t bufSize) noexcept
    {
        return call_getAllMetadata(is_detected<has_getAllMetadata, NodeTypeClass>(), nodeType, keyBuf, valueBuf, bufSize);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getMetadata to use as part of a node type definition
     */
    static const char* getMetadata(const NodeTypeObj& nodeType, const char* key) noexcept
    {
        return call_getMetadata(is_detected<has_getMetadata, NodeTypeClass>(), nodeType, key);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getMetadataCount to use as part of a node type definition
     */
    static size_t getMetadataCount(const NodeTypeObj& nodeType) noexcept
    {
        return call_getMetadataCount(is_detected<has_getMetadataCount, NodeTypeClass>(), nodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::setMetadata to use as part of a node type definition
     */
    static void setMetadata(const NodeTypeObj& nodeType, const char* key, const char* value) noexcept
    {
        call_setMetadata(is_detected<has_setMetadata, NodeTypeClass>(), nodeType, key, value);
    }
    /**
     * Implementation of omni::graph::core::INodeType::addSubNodeType to use as part of a node type definition
     */
    static void addSubNodeType(const NodeTypeObj& nodeType, const char* subNodeTypeName, const NodeTypeObj& subNodeType)
    {
        call_addSubNodeType(is_detected<has_addSubNodeType, NodeTypeClass>(), nodeType, subNodeTypeName, subNodeType);
    }
    /**
     * Implementation of omni::graph::core::INodeType::getSubNodeType to use as part of a node type definition
     */
    static NodeTypeObj getSubNodeType(const NodeTypeObj& nodeType, const char* subNodeTypeName)
    {
        return call_getSubNodeType(is_detected<has_getSubNodeType, NodeTypeClass>(), nodeType, subNodeTypeName);
    }
    /**
     * Implementation of omni::graph::core::INodeType::createNodeType to use as part of a node type definition
     */
    static NodeTypeObj createNodeType(const char* nodeTypeName, int version)
    {
        return call_createNodeType(is_detected<has_createNodeType, NodeTypeClass>(), nodeTypeName, version);
    }
    /**
     * Implementation of omni::graph::core::INodeType::onConnectionTypeResolve to use as part of a node type definition
     */
    static void onConnectionTypeResolve(const NodeObj& node)
    {
        call_onConnectionTypeResolve(is_detected<has_onConnectionTypeResolve, NodeTypeClass>(), node);
    }
    /**
     * Implementation of omni::graph::core::INodeType::inspect to use as part of a node type definition
     */
    static bool inspect(const NodeTypeObj& nodeType, inspect::IInspector* inspector)
    {
        return call_inspect(is_detected<has_inspect, NodeTypeClass>(), nodeType, inspector);
    }

public:
    /**
     * @brief Constructor with the basic information that is needed to identify a node type
     *
     * @param nodeTypeName Unique name of the node type
     * @param nodeTypeVersion Version of the node type being defined
     * @param nodeTypeExtension Extension owning the node type
     */
    OmniGraphNode_ABI(const char* nodeTypeName, int nodeTypeVersion, const char* nodeTypeExtension)
        : NodeTypeABI(nodeTypeName, nodeTypeVersion, nodeTypeExtension)
    {
        // Cached statically so that the static getNodeType() above can return it through the ABI
        sm_nodeTypeName = nodeTypeName;
    }

    /**
     * @brief Populate an INodeType interface with the functions that implement this particular templated node type
     *
     * @param[out] nodeTypeInterface Interface to be populated
     */
    void populateNodeTypeInterface(INodeType& nodeTypeInterface) const override
    {
        // Any functions required in order for the interface to work are left as nullptr when
        // there is no implementation of them overridden by the node so that the defaults can be used.
        nodeTypeInterface.addInput = is_detected<has_addInput, NodeTypeClass>::value ? addInput : nullptr;
        nodeTypeInterface.addOutput = is_detected<has_addOutput, NodeTypeClass>::value ? addOutput : nullptr;
        nodeTypeInterface.addState = is_detected<has_addState, NodeTypeClass>::value ? addState : nullptr;
        nodeTypeInterface.addExtendedInput =
            is_detected<has_addExtendedInput, NodeTypeClass>::value ? addExtendedInput : nullptr;
        nodeTypeInterface.addExtendedOutput =
            is_detected<has_addExtendedOutput, NodeTypeClass>::value ? addExtendedOutput : nullptr;
        nodeTypeInterface.addExtendedState =
            is_detected<has_addExtendedState, NodeTypeClass>::value ? addExtendedState : nullptr;
        nodeTypeInterface.hasState = is_detected<has_hasState, NodeTypeClass>::value ? hasState : nullptr;
        nodeTypeInterface.setHasState = is_detected<has_setHasState, NodeTypeClass>::value ? setHasState : nullptr;
        // These entries always have a valid implementation (override or default)
        nodeTypeInterface.getNodeType = getNodeType;
        nodeTypeInterface.getTypeName = getTypeName;
        nodeTypeInterface.initialize = initialize;
        nodeTypeInterface.initializeType = initializeType;
        nodeTypeInterface.registerTasks = registerTasks;
        nodeTypeInterface.release = release;
        nodeTypeInterface.updateNodeVersion = updateNodeVersion;
        nodeTypeInterface.getAllMetadata = is_detected<has_getAllMetadata, NodeTypeClass>::value ? getAllMetadata : nullptr;
        nodeTypeInterface.getMetadata = is_detected<has_getMetadata, NodeTypeClass>::value ? getMetadata : nullptr;
        nodeTypeInterface.getMetadataCount =
            is_detected<has_getMetadataCount, NodeTypeClass>::value ? getMetadataCount : nullptr;
        nodeTypeInterface.setMetadata = is_detected<has_setMetadata, NodeTypeClass>::value ? setMetadata : nullptr;
        nodeTypeInterface.addSubNodeType = is_detected<has_addSubNodeType, NodeTypeClass>::value ? addSubNodeType : nullptr;
        nodeTypeInterface.getScheduleNodeCount =
            is_detected<has_getScheduleNodeCount, NodeTypeClass>::value ? getScheduleNodeCount : nullptr;
        nodeTypeInterface.getScheduleNodes =
            is_detected<has_getScheduleNodes, NodeTypeClass>::value ? getScheduleNodes : nullptr;
        nodeTypeInterface.getSubNodeType = is_detected<has_getSubNodeType, NodeTypeClass>::value ? getSubNodeType : nullptr;
        nodeTypeInterface.createNodeType = is_detected<has_createNodeType, NodeTypeClass>::value ? createNodeType : nullptr;
        nodeTypeInterface.onConnectionTypeResolve = onConnectionTypeResolve;
        nodeTypeInterface.inspect = is_detected<has_inspect, NodeTypeClass>::value ? inspect : nullptr;
        // compute/computeVectorized are registered only when some form of compute exists
        nodeTypeInterface.compute = regOGN() || regABI() || vecOGN() || vecABI() ? compute : nullptr;
        nodeTypeInterface.computeVectorized = regOGN() || vecOGN() || vecABI() ? computeVectorized : nullptr;
        nodeTypeInterface.releaseInstance = releaseInstance;
        nodeTypeInterface.destroyDB = destroyDB;
        nodeTypeInterface.notifyTypeResolution = notifyTypeResolution;
        nodeTypeInterface.notifyDynamicAttributeChanged = notifyDynamicAttributeChanged;
        nodeTypeInterface.getCarbABIVersion = []() { return INodeType::getInterfaceDesc().version; };
    }

    static const char* sm_nodeTypeName; //!< Name of node type, to allow passing a static function to ABI
};

template <typename NodeTypeClass, typename NodeTypeDataClass>
const char* OmniGraphNode_ABI<NodeTypeClass, NodeTypeDataClass>::sm_nodeTypeName{ nullptr };

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
70,145
C
48.294448
148
0.584774
omniverse-code/kit/include/omni/graph/core/ogn/ComputeHelpersDynamicInputsDetails.h
namespace Private { template <size_t TUPLE_SIZE, typename ComputeType, typename InputType> inline bool validateInputsAndOutput(gsl::span<InputType const> const inputs, ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result) { if (TUPLE_SIZE == 1) { if (result.type().arrayDepth == 0) { // handle single values auto resultValue = result.template get<ComputeType>(); if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue)) return false; for (auto const& input : inputs) { auto const inputValue = input.template get<ComputeType>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } else { // handle arrays auto resultValue = result.template get<ComputeType[]>(); if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue)) return false; for (auto const& input : inputs) { if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isArray(input)) { auto const inputValue = input.template get<ComputeType[]>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } else { auto const inputValue = input.template get<ComputeType>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } } } else { using ComputeTypeTuple = ComputeType[TUPLE_SIZE]; if (result.type().componentCount != TUPLE_SIZE) return false; if (result.type().arrayDepth == 0) { // handle tuple values auto resultValue = result.get<ComputeTypeTuple>(); if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue)) return false; for (auto const& input : inputs) { if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isTuple(input)) { auto const inputValue = input.template get<ComputeTypeTuple>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } else { auto const inputValue = input.template get<ComputeType>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } } else { 
// handle arrays of tuples auto resultValue = result.get<ComputeTypeTuple[]>(); if (!TryComputeHelper<decltype(resultValue)>::testValid(resultValue)) return false; for (auto const& input : inputs) { if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isArray(input)) { if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isTuple(input)) { auto const inputValue = input.template get<ComputeTypeTuple[]>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } else { auto const inputValue = input.template get<ComputeType[]>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } else { if (RuntimeAttribHelper<TUPLE_SIZE, typename remove_const_ref<decltype(input)>::type>::isTuple(input)) { auto const inputValue = input.template get<ComputeTypeTuple>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } else { auto const inputValue = input.template get<ComputeType>(); if (!TryComputeHelper<decltype(inputValue)>::testValid(inputValue)) return false; } } } } } return true; } template <typename ComputeType, typename InputType, typename Functor> inline bool tryComputeInputsWithArrayBroadcasting(gsl::span<InputType const> const inputs, ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result, Functor functor, size_t count) { if (!validateInputsAndOutput<1, ComputeType, InputType>(inputs, result)) return false; if (result.type().arrayDepth == 0) { // the output is not an array auto resultValue = result.get<ComputeType>(); using TResultValue = typename remove_const_ref<decltype(*resultValue)>::type; auto inputIt = inputs.begin(); // initialize the result { auto const inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t instance = 0; instance < count; ++instance) { auto const& inputData = ArrayHelper<1, 
TInputValue>::accessArgConst(*inputValue, 0, 0, instance); auto& resultData = ArrayHelper<1, TResultValue>::accessArg(*resultValue, 0, 0, instance); resultData = inputData; } } // accumulate the result by iterating over the remaining inputs ++inputIt; for (; inputIt != inputs.end(); ++inputIt) { auto const inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t instance = 0; instance < count; ++instance) { functor(ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance), ArrayHelper<1, TResultValue>::accessArg(*resultValue, 0, 0, instance)); } } return true; } else { // cache for the array data, to avoid having to get it from fabric multiple times // (once for getting the size, a second time for computing the result) std::vector<ArrayDataReadOnly<ComputeType[], ogn::kCpu>> arrayDataCache; arrayDataCache.reserve(inputs.size()); // the output is an array of single values // result.get<ComputeType[]>() returns a temporary value, so it has to be cached. 
auto arrayObj = result.template get<ComputeType[]>(); auto& resultArray = *arrayObj; using TResultValue = typename remove_const_ref<decltype(resultArray)>::type; for (size_t instance = 0; instance < count; ++instance) { // find the output length size_t len = 1; for (const auto& input : inputs) { using TInput = typename remove_const_ref<decltype(input)>::type; if (RuntimeAttribHelper<1, TInput>::isArray(input)) { arrayDataCache.emplace_back(input.template get<ComputeType[]>()); auto const& array = *arrayDataCache.back(); auto s = ArrayHelper<1, decltype(array)>::getArgsLengthAndAdjustHandle(array, instance); len = std::max(len, s); } } if (instance) resultArray.adjustHandle(1); resultArray.resize(len); auto arrayDataCacheIt = arrayDataCache.cbegin(); auto inputIt = inputs.begin(); using TInput = typename remove_const_ref<decltype(*inputIt)>::type; if (RuntimeAttribHelper<1, TInput>::isArray(*inputIt)) { const auto& inputValue = *arrayDataCacheIt++; using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t idx = 0; idx < len; ++idx) { auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, idx, 0, instance); auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance); resultData = inputData; } } else { auto const inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance); for (size_t idx = 0; idx < len; ++idx) { auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance); resultData = inputData; } } ++inputIt; for (; inputIt != inputs.end(); ++inputIt) { using TInput = typename remove_const_ref<decltype(*inputIt)>::type; if (RuntimeAttribHelper<1, TInput>::isArray(*inputIt)) { auto const& inputValue = *arrayDataCacheIt++; using TInputValue = typename 
remove_const_ref<decltype(*inputValue)>::type; for (size_t idx = 0; idx < len; ++idx) { auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, idx, 0, instance); auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance); functor(inputData, resultData); } } else { auto const inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance); for (size_t idx = 0; idx < len; ++idx) { auto& resultData = ArrayHelper<1, TResultValue>::accessArg(resultArray, idx, 0, instance); functor(inputData, resultData); } } } } return true; } return false; } template<size_t N, typename InputIterator> inline bool isTuple(InputIterator it) { return N != 1 && it->type().componentCount == N; } template<typename InputIterator> inline bool isArray(InputIterator it) { return it->type().arrayDepth != 0; } template <size_t N, typename ComputeType, typename InputType, typename Functor> inline bool tryComputeInputsWithTupleBroadcasting(gsl::span<InputType const> const inputs, ogn::RuntimeAttribute<ogn::kOgnOutput, ogn::kCpu> result, Functor functor, size_t count) { if (!validateInputsAndOutput<N, ComputeType, InputType>(inputs, result)) return false; using ComputeTypeTuple = ComputeType[N]; if (result.type().arrayDepth == 0) { // the output is not an array auto resultValue = result.get<ComputeTypeTuple>(); using TResultValue = typename remove_const_ref<decltype(*resultValue)>::type; auto inputIt = inputs.begin(); // initialize the result using TInput = typename remove_const_ref<decltype(*inputIt)>::type; if (RuntimeAttribHelper<N, TInput>::isTuple(*inputIt)) { auto const inputValue = inputIt->template get<ComputeTypeTuple>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t instance = 0; instance < count; ++instance) { for (uint8_t t = 0; t < N; 
++t) { auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance); auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance); resultData = inputData; } } } else { auto const inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t instance = 0; instance < count; ++instance) { auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance); for (uint8_t t = 0; t < N; ++t) { auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance); resultData = inputData; } } } ++inputIt; for (; inputIt != inputs.end(); ++inputIt) { using TInput = typename remove_const_ref<decltype(*inputIt)>::type; if (RuntimeAttribHelper<N, TInput>::isTuple(*inputIt)) { auto const inputValue = inputIt->template get<ComputeTypeTuple>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t instance = 0; instance < count; ++instance) { for (uint8_t t = 0; t < N; ++t) { auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance); auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance); functor(inputData, resultData); } } } else { auto const& inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t instance = 0; instance < count; ++instance) { auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance); for (uint8_t t = 0; t < N; ++t) { auto& resultData = ArrayHelper<N, TResultValue>::accessArg(*resultValue, 0, t, instance); functor(inputData, resultData); } } } } return true; } else // result.type().arrayDepth > 0 { // cache for the array data, to avoid having to get it from fabric multiple times // (once for getting the size, a second time for computing the result) // 
Note: we need different caches for tuple and non tuple arrays std::vector<ArrayDataReadOnly<ComputeType[], ogn::kCpu>> arrayDataCache; arrayDataCache.reserve(inputs.size()); std::vector<TupleArrayDataReadOnly<ComputeTypeTuple[], ogn::kCpu>> tupleArrayDataCache; tupleArrayDataCache.reserve(inputs.size()); // result.get<ComputeTypeTuple[]>() returns a temporary value, so it has to be cached. auto arrayObj = result.template get<ComputeTypeTuple[]>(); auto& resultArray = *arrayObj; using TResultValue = typename remove_const_ref<decltype(resultArray)>::type; for (size_t instance = 0; instance < count; ++instance) { size_t len = 1; for (auto const& input : inputs) { using TInput = typename remove_const_ref<decltype(input)>::type; if (RuntimeAttribHelper<N, TInput>::isArray(input)) { tupleArrayDataCache.emplace_back(input.template get<ComputeTypeTuple[]>()); auto const& array = *tupleArrayDataCache.back(); auto s = ArrayHelper<N, decltype(array)>::getArgsLengthAndAdjustHandle(array, instance); len = std::max(len, s); } else if (RuntimeAttribHelper<1, TInput>::isArray(input)) { arrayDataCache.emplace_back(input.template get<ComputeType[]>()); auto const& array = *arrayDataCache.back(); auto s = ArrayHelper<1, decltype(array)>::getArgsLengthAndAdjustHandle(array, instance); len = std::max(len, s); } } if (instance) resultArray.adjustHandle(1); resultArray.resize(len); auto arrayDataCacheIt = arrayDataCache.cbegin(); auto tupleArrayDataCacheIt = tupleArrayDataCache.cbegin(); auto inputIt = inputs.begin(); if (!isArray(inputIt)) { if (isTuple<N>(inputIt)) { auto const inputValue = inputIt->template get<ComputeTypeTuple>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t idx = 0; idx < len; ++idx) { for (uint8_t t = 0; t < N; ++t) { auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance); auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); resultData = 
inputData; } } } else { auto const inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance); for (size_t idx = 0; idx < len; ++idx) { for (uint8_t t = 0; t < N; ++t) { auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); resultData = inputData; } } } } else { if (isTuple<N>(inputIt)) { auto const& inputValue = *tupleArrayDataCacheIt++; using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t idx = 0; idx < len; ++idx) { for (uint8_t t = 0; t < N; ++t) { auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, idx, t, instance); auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); resultData = inputData; } } } else { const auto& inputValue = *arrayDataCacheIt++; using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t idx = 0; idx < len; ++idx) { auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, idx, 0, instance); for (uint8_t t = 0; t < N; ++t) { auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); resultData = inputData; } } } } ++inputIt; for (; inputIt != inputs.end(); ++inputIt) { if (!isArray(inputIt)) { if (isTuple<N>(inputIt)) { auto const inputValue = inputIt->template get<ComputeTypeTuple>(); using TInputValue = typename remove_const_ref<decltype(*inputValue)>::type; for (size_t idx = 0; idx < len; ++idx) { for (uint8_t t = 0; t < N; ++t) { auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValue, 0, t, instance); auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); functor(inputData, resultData); } } } else { auto const inputValue = inputIt->template get<ComputeType>(); using TInputValue = typename 
remove_const_ref<decltype(*inputValue)>::type; auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValue, 0, 0, instance); for (size_t idx = 0; idx < len; ++idx) { for (uint8_t t = 0; t < N; ++t) { auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); functor(inputData, resultData); } } } } else { if (isTuple<N>(inputIt)) { auto const& inputValueArray = *tupleArrayDataCacheIt++; using TInputValue = typename remove_const_ref<decltype(*inputValueArray)>::type; for (size_t idx = 0; idx < len; ++idx) { for (uint8_t t = 0; t < N; ++t) { auto const& inputData = ArrayHelper<N, TInputValue>::accessArgConst(*inputValueArray, idx, t, instance); auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); functor(inputData, resultData); } } } else { auto const& inputValueArray = *arrayDataCacheIt++; using TInputValue = typename remove_const_ref<decltype(*inputValueArray)>::type; for (size_t idx = 0; idx < len; ++idx) { auto const& inputData = ArrayHelper<1, TInputValue>::accessArgConst(*inputValueArray, idx, 0, instance); for (uint8_t t = 0; t < N; ++t) { auto& resultData = ArrayHelper<N, TResultValue>::accessArg(resultArray, idx, t, instance); functor(inputData, resultData); } } } } } } return true; } return false; } }//namespace private
24,309
C
43.119782
136
0.495454
omniverse-code/kit/include/omni/graph/core/ogn/Types.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/Handle.h> #include <omni/graph/core/TemplateUtils.h> #include <omni/fabric/IPath.h> #include <omni/fabric/IToken.h> namespace omni { namespace graph { namespace core { namespace ogn { // ====================================================================== /** * Enumeration of the memory locations an attribute's data might have. The data location will determine which of * the data accessors will be used, and what type of data they will return. (GPU data will always be returned * as raw pointers since the CPU cannot access that memory.) * * This type will be used as a template parameter to adjust the behaviour of OGN wrapper classes. */ enum eMemoryType { kCpu, //!< The attribute's data is always on the CPU kCuda, //!< The attribute's data is always on the GPU kAny //!< The attribute's data location can be either, decided at runtime }; // ====================================================================== /** * Enumeration of an attribute's access type. In order to provide information to the scheduler about how * fabric data will be accessed, one of these access types is associated with all generated attributes. * * This type will be used as a template parameter to adjust the behaviour of OGN wrapper classes. */ enum eAttributeType { kOgnInput, kOgnOutput, kOgnState }; // ====================================================================== /** * Severity level for logging messages. 
*/ enum class Severity : uint8_t { eInfo = 0, eWarning, eError, eCount }; // ====================================================================== /** * When templating methods by data type the template types must be unique. The implementation of * NameToken in iComputeGraph defines it as a simple uint64_t, which is also a raw data type used by OGN. * To allow different templates to be instantiated for these two data types, identical in implementation but * vastly different in semantics, this wrapper can be used instead as a drop-in replacement for NameToken. * * Thus these two template instantiations will be determined to be unique by the compiler, but the data * passed in will be identical, to be interpreted in the correct way by the function: * * template <typename DataType> void myData(DataType& value); * myData(OgnToken&); // Receives an OgnToken, which is directly convertible to NameToken * myData(uint64_t&); // Receives a raw uint64_t * * This type will be used as a template parameter to adjust the behaviour of OGN wrapper classes. */ using Token = omni::fabric::Token; static_assert(::std::is_convertible<Token, NameToken>::value, "ogn::Token must be equivalent to NameToken"); using Path = omni::fabric::Path; static_assert(::std::is_convertible<Path, TargetPath>::value, "ogn::Path must be equivalent to TargetPath"); } // namespace ogn } // namespace core } // namespace graph } // namespace omni // Declare this outside of the namespaces, relying on the uniqueness of the name to provide easy access using OgnToken = omni::graph::core::ogn::Token; using OgnPath = omni::graph::core::ogn::Path; // Generated code is kept smaller by assuming this namespace is active. The alternative would be to explicitly // reference all types and values used by the generated code, which would end up amounting to the same thing. // Core types can be accessed directly (e.g. getDataW()) and OGN types use the shortened "ogn::string" using namespace omni::graph::core;
3,952
C
39.336734
112
0.690283
omniverse-code/kit/include/omni/graph/core/ogn/array.h
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // ogn::const_array Read-only array of fabric data // ogn::array Array of fabric data, with writing and resizing capabilities // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= #include <gsl/span> #include <cstring> #include <stdexcept> #include <type_traits> #include <omni/graph/core/Handle.h> #include <omni/graph/core/Type.h> #include <omni/graph/core/CudaUtils.h> #include <omni/graph/core/iAttributeData.h> using omni::fabric::PtrToPtrKind; namespace omni { namespace graph { namespace core { namespace ogn { // ================================================================================================================= /** * std::span-like wrapper class for array attribute data in the Ogn Database. * * In attribute terms an array is a variable sized collection of data of a single type, not to be confused with * the C++ notion of an array, which is a fixed sized collection of data of a single type. * * This wrapper operates by using the Fabric interface to interact with array data it has stored. 
* The base array class provides common operations common to both const and non-const data. * * @tparam BaseDataType Type of data contained within the array * @tparam HandleType Attribute handle used to access the underlying Fabric copy of the data */ template <typename BaseDataType, typename HandleType> class base_array { //from regular attributes template <typename, eAttributeType, eMemoryType, PtrToPtrKind> friend struct ArrayAttribute; //from runtime attributes template <typename, bool, eMemoryType, PtrToPtrKind > friend struct ArrayData; public: /** const version of the BaseDataType */ using ConstBaseDataType = const typename std::remove_const<BaseDataType>::type; /** Type definition of this class */ using this_t = base_array<BaseDataType, HandleType>; // Pass through the span iterator so that this class can iterate over it transparently /** Iterator over the array contents */ using iterator = typename gsl::span<BaseDataType>::iterator; /** Reverse iterator over the array contents */ using reverse_iterator = typename gsl::span<BaseDataType>::reverse_iterator; /** * Constructor */ base_array() = default; /** * Confirm that the data values are safe for accessing * * @return true if the context and attribute handle values are both valid */ bool isValid() const { return context() && (AttrKey)handle() != handle().invalidValue(); } /** * @return The number of elements in the currently managed array */ size_t size() const { return span().size();} /** * @return Is the currently managed array empty? 
*/ bool empty() const { return span().empty(); } /** * @return Pointer to the raw data in the array (first element) */ ConstBaseDataType* data() const { return span().data();} /** * @return Iterator pointing to the beginning of the array */ const iterator begin() const { return span().begin(); } /** * @return Iterator pointing past the end of the array */ const iterator end() const { return span().end(); } /** * @return Iterator pointing to the end of the array */ const reverse_iterator rbegin() const { return span().rbegin(); } /** * @return Iterator pointing before the beginning of the array */ const reverse_iterator rend() const { return span().rend(); } /** * Access a specific element of the array. No bounds checking is performed. * * @param[in] index Element index into the array * @return Reference to the index'th element of the array */ ConstBaseDataType& operator[](size_t index) const { return span()[index]; } /** * Access a specific element of the array with bounds checking. * * @param[in] index Element index into the array * @return Reference to the index'th element of the array * @exception std::out_of_range if there is no data for the given index */ ConstBaseDataType& at(size_t index) const { auto const& spanObj = span(); if (!spanObj.data() || (spanObj.size() <= index)) { std::string rangeMessage("Attempt to access out of range index "); rangeMessage += std::to_string(index); throw std::out_of_range(rangeMessage); } return spanObj[index]; } /** * Access the underlying span that allows to access the array data * * @return A reference to the underlying span */ gsl::span<BaseDataType> const& span() const { if (m_dirty) const_cast<this_t*>(this)->reset(); m_dirty = false; return m_span; } /** * @brief Access the context to which this array belongs * * @returns Pointer to the context to which this array belongs */ GraphContextObj const* context() const { return m_context;} /** * @brief Access the attribute handle used to access the array data in Fabric * * 
@returns Pointer to the attribute handle used to access the array data in Fabric */ HandleType const& handle() const { return m_handle;} /** * Flag the span data as not being up2Date * */ void setDirty() const { m_dirty = true; } /** * In a vectorized context, move forward the current handle * * @param[in] idx The amount of instance(s) to offset the current handle */ void adjustHandle(size_t idx) { const IAttributeData& iData = *(context()->iAttributeData); moveHdl(iData, idx, m_handle); setDirty(); } protected: /** * Reset the internal span to point to new data. * */ void reset() { if (isValid()) { const IAttributeData& iData = *(m_context->iAttributeData); size_t count = 0; ConstAttributeDataHandle chdl = m_handle; iData.getElementCount(&count, *m_context, &chdl, 1); BaseDataType** ptrToData = getData(iData, m_handle); this->m_span = gsl::span<BaseDataType>(*ptrToData, count); } else { this->m_span = gsl::span<BaseDataType>(); } } /** * @brief Get a pointer to the array data from a specific const data handle * * @param iData Interface class containing the accessor functions * @param hdl Handle to the attribute data whose value is being retrieved * @return BaseDataType** Pointer to the array of retrieved data */ BaseDataType** getData(const IAttributeData& iData, ConstAttributeDataHandle const& hdl) { BaseDataType** ptrToData = nullptr; iData.getDataR((const void**)&ptrToData, *m_context, &hdl, 1); return ptrToData; } /** * @brief Get a pointer to the array data from a specific mutable data handle * * @param iData Interface class containing the accessor functions * @param hdl Handle to the attribute data whose value is being retrieved * @return BaseDataType** Pointer to the array of retrieved data */ BaseDataType** getData(const IAttributeData& iData, AttributeDataHandle const& hdl) { BaseDataType** ptrToData = nullptr; iData.getDataW((void**)&ptrToData, *m_context, &hdl, 1); return ptrToData; } /** * Set the context. 
This is done to allow the same wrapper class to be used for * multiple evaluations in different contexts. * * @param[in] context The graph context to which the array belongs */ void setContext(const GraphContextObj& context) { m_context = &context; setDirty(); } /** * Set the attribute handle. This is done to allow the same wrapper class to be used for * multiple evaluations in different contexts. * * @param[in] handle Handle to the attribute to which the array belongs */ void setHandle(HandleType handle) { m_handle = handle; setDirty(); } private: /** * helper that allows to make the proper call depending on the type of the handle */ void moveHdl(const IAttributeData& iData, size_t index, ConstAttributeDataHandle& hdl) const { hdl = iData.moveToAnotherInstanceR(*m_context, hdl, (int)index); } void moveHdl(const IAttributeData& iData, size_t index, AttributeDataHandle& hdl) const { hdl = iData.moveToAnotherInstanceW(*m_context, hdl, (int)index); } const GraphContextObj* m_context{ nullptr }; //!< The graph context to which the array belongs HandleType m_handle{ HandleType::invalidValue() }; //!< Handle to the attribute data bool mutable m_dirty { true }; //!< whether the span is uptodate or not gsl::span<BaseDataType> m_span; //!< Iterable managed array data }; // ================================================================================================================= /** * std::vector-like wrapper class for constant array attribute data in the Ogn Database. * It operates by using the Fabric interface to interact with array data it has stored. * This const version of the array wrapper should be used for input attributes, whose data cannot be changed. * (The naming is "array" for consistency with how attribute types are named, even though it doesn't * behave like a std::array, whose content size is determined at compile time.) 
* * @tparam BaseDataType Type of data contained within the array */ template <typename BaseDataType> class const_array : public base_array<const BaseDataType, ConstAttributeDataHandle> { public: /** * Constructor */ const_array() = default; }; // ================================================================================================================= /** * std::vector-like wrapper class for array attribute data in the Ogn Database. * It operates by using the Fabric interface to interact with array data it has stored. * This non-const version of the array wrapper should be used for output attributes, whose data will be changed. * (The naming is "array" for consistency with how attribute types are named, even though it doesn't * behave like a std::array, whose content size is determined at compile time.) * * @tparam BaseDataType Type of data contained within the array */ template <typename BaseDataType> class array : public base_array<BaseDataType, AttributeDataHandle> { public: /** The type of the parent class */ using parent_t = base_array<BaseDataType, AttributeDataHandle>; // Make non-templated functions available to pass 1 of template resolution // http://www.gotw.ca/gotw/087.htm using parent_t::data; using parent_t::size; // Pass through the span iterator so that this class can iterate over it transparently /** Iterator over the array contents */ using iterator = typename gsl::span<BaseDataType>::iterator; /** Reverse iterator over the array contents */ using reverse_iterator = typename gsl::span<BaseDataType>::reverse_iterator; /** * Constructor */ array() = default; /** * Assignment operator: performs a shallow copy * * @param[in] rhs The array being copied in * @return Reference to this */ array& operator=(array<BaseDataType> const& rhs) { return shallowCopy(reinterpret_cast<const_array<BaseDataType> const&>(rhs)); } /** * Assignment operator: performs a shallow copy of a const array of const data * * @param[in] rhs The array being copied in * 
@return Reference to this */ array& operator=(const_array<const BaseDataType> const& rhs) { return shallowCopy(reinterpret_cast<const_array<BaseDataType> const&>(rhs)); } /** * Assignment operator: performs a shallow copy of a const array of non-const data * * @param[in] rhs The array being copied in * @return Reference to this */ array& operator=(const_array<BaseDataType> const& rhs) { return shallowCopy(rhs); } /** * Performs a shallow copy the provided object over this object * * This will create a reference for this object that points to the provided object * * @param[in] from The array being copied in * @return Reference to this */ array& shallowCopy(const_array<BaseDataType> const& from) { this->context()->iAttributeData->copyData(this->handle(), *this->context(), from.handle()); this->setDirty(); return *this; } /** * Overwrite this object data with data from the provided object * * This will create a duplicate of all array members and reset the references * * @param[in] rhs The array being copied in * @return Reference to this */ array& deepCopy(parent_t const& rhs) { // Resize this array first so that it has the proper space to receive the new data resize(rhs.size()); if (rhs.size() == 0) { // No work to do when the new array is empty return *this; } // Get the raw arrays and walk them directly for the copy to minimize friction. rawCopyFrom(rhs.data(), std::conditional_t<std::is_assignable<BaseDataType, std::add_const_t<BaseDataType>>::value, std::true_type, std::false_type>()); return *this; } /** * Set the size of the array data to a new value. * * This may or may not relocate memory. If anything is holding a raw pointer from contents() * that pointer should be refreshed by calling contents() again when this method returns. * * @param[in] newCount New element count of the array. 
*/ void resize(size_t newCount) { CUDA_SAFE_ASSERT(this->isValid()); const IAttributeData& iData = *(this->context()->iAttributeData); iData.setElementCount(*this->context(), this->handle(), newCount); this->setDirty(); } /** * @return Non-const pointer to the raw data in the array (first element) */ BaseDataType* data() { return const_cast<BaseDataType*>(this->parent_t::data()); } /** * Access a specific element of the array. No bounds checking is performed. * * @param[in] index Element index into the array * @return Reference to the index'th element of the array */ BaseDataType& operator[](size_t index) { return const_cast<BaseDataType&>(this->parent_t::operator[](index)); } /** * Access a specific element of the array with bounds checking. * * @param[in] index Element index into the array * @return Reference to the index'th element of the array * @exception std::out_of_range if there is no data for the given index */ BaseDataType& at(size_t index) { return const_cast<BaseDataType&>(this->parent_t::at(index)); } private: /** * Safe copy; one version for when the array members are assignable, the other as a fallback to do raw memcpy * * @param[in] srcArray The location of the raw data to be copied in (already vetted for size compatibility) * @param[in] assignable Overload selected when the base data type can or cannot be assigned */ void rawCopyFrom(const BaseDataType* srcArray, std::true_type assignable) { auto dstArray = data(); for (size_t i = 0; i < size(); ++i) { dstArray[i] = srcArray[i]; } } void rawCopyFrom(const BaseDataType* srcArray, std::false_type assignable) { memcpy(data(), srcArray, sizeof(BaseDataType) * size()); } }; /** Default trait indicating if the class is one of our array types * @tparam T Class type to check for being an array */ template<class T> struct is_array : std::false_type {}; /** Trait indicating that specific templated types are array types * @tparam T Class type to check for being an array * @tparam HandleType Attribute data 
handle type for the array */ template<class T, typename HandleType> struct is_array<base_array<T, HandleType>> : std::true_type {}; /** Trait indicating that mutable templated types are array types * @tparam T Class type to check for being an array */ template<class T> struct is_array<array<T>> : std::true_type {}; /** Trait indicating that constant templated types are array types * @tparam T Class type to check for being an array */ template<class T> struct is_array<const_array<T>> : std::true_type {}; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
17,758
C
34.027613
123
0.631208
omniverse-code/kit/include/omni/graph/core/ogn/RuntimeAttribute.h
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ================================================================================================================= // This file contains simple interface classes which wrap data in the OGN database for easier use. // // RuntimeAttribute Wrapper providing access to attributes whose type is only known at runtime // This includes attributes inside bundles and attributes with extended types // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= #ifdef THIS_IS_INCLUDED_IN_THE_DOCUMENTATION // The information bracketed here with begin/end describes the interface that is recommended for use with attributes // whose data type is decided at runtime. The documentation uses these markers to perform a literal include of this // code into the docs so that it can be the single source of truth. // Note that the interface described here is not the complete set of C++ functions available, merely the ones that make // sense for the user to access when dealing with runtime attributes. // // // begin-extended-attribute-interface-description // A runtime attribute can either be an attribute defined as one of the extended types ("union" or "any") or an // attribute that is a member of a bundle. 
As you might guess, the defining feature of such attributes is the fact // that the type of data they store is not known until runtime. And further, that type can change from one evaluation // to the next. For that reason the runtime attribute accessors have different methods of acquiring their data than // regular attributes. // // The two ways of acquiring the accessor for a runtime attribute are directly, for extended types const auto& anyType = db.inputs.anyTypeAttribute(); // and as a member, for bundled attributes const auto memberAttribute = db.inputs.myBundle().attributeByName(db.tokens.memberName); // Runtime attributes can be copied, which copies both the type and the data of the attribute (unlike regular // attributes, which would just copy the data) auto newAttribute = anyType; // There is also another method that will just copy the data, though it is up to you to ensure that the data // types of the two attributes are the same. newAttribute.copyData(anyType); // As with regular attributes you can check if their data is valid... const bool anyIsValid = anyType.isValid(); // ...get the number of elements in the array, if they are an array type... const size_t howManyAnysDoIHave = anyType.size(); // ...and drop down to the ABI to get a handle for direct ABI calls (although for runtime attributes the handle // is AttributeDataHandle/ConstAttributeDataHandle, not AttributeHandle as it is for regular attributes since // the ABI has different capabilities for them) const auto& abiHandle = anyType.abi_handle(); // They also have a method to determine if the actual type of the attribute has been resolved. 
// Until the type is resolved the attribute's data cannot be accessed
const bool isTheTypeKnown = anyType.resolved();
// For bundled attributes the name is not known until runtime either so a method to access that is provided,
// returning the hardcoded name for extended attributes
const auto& memberName = memberAttribute.name();
// And the resolved type information is also available. Checking for an unknown type is another way to determine
// if the attribute type has not yet been resolved.
const auto& anyAttributesType = anyType.type();
// Finally there are the data access methods. The primary one is a templated function through which you can access
// the attribute's data in its raw form. The value returned isn't the data itself, it is a thin wrapper around the
// data that has a few functions of its own.
//
// This is the function to call for the majority of attributes, whose memory space is fixed either to CPU or GPU.
// It returns an object that can be used to access information about the attribute's value, including its memory location.
const auto dataAsFloatObj = anyType.get<float>();
// The types allowed in the template are the set of all allowed attribute types, expressed as regular C++ types
// without the attribute roles. For example float, float[], float[3], float[][3], etc. In most cases trying to access
// the data with an unsupported type will result in a compile error (the exceptions being types that are aliases for
// a supported type, e.g. "using float3 = float[3]"). In fact, since the "NameToken" supported type is an alias for
// another supported type it must be retrieved with a special type set up for that purpose
const auto dataAsToken = anyType.get<OgnToken>();
// The wrapper has a boolean cast operator, which checks to see if the requested type matches the actual resolved
// data type of the attribute.
// This allows you to make a cascading check for types of attributes you are supporting
if (const auto dataAsFloatObj = anyType.get<float>())
{
    processAsFloat(*dataAsFloatObj);
}
else if (const auto dataAsDoubleObj = anyType.get<double>())
{
    processAsDouble(*dataAsDoubleObj);
}
// In addition to the simple boolean validity test, the wrapper returned will have a few different methods,
// depending on the template data type.
// The dereference operators return references to the actual underlying attribute data (on the CPU - if your
// attribute lives on the GPU you'll get a reference to a pointer to the underlying attribute data, which lives in
// the GPU memory space and cannot be dereferenced on the CPU). Note that the default memory location of a bundled
// attribute is whatever was specified for the bundle itself.
const auto dataAsFloatObj = anyType.get<float>();
float const& floatValueDeref = *dataAsFloatObj;
float const& floatValueFn = dataAsFloatObj();
float const* floatValuePtr = dataAsFloatObj.operator->();
// The same dereference operators work for tuple types as well
const auto dataAsFloat3Obj = anyType.get<float[3]>();
float const (&float3ValueDeref)[3] = *dataAsFloat3Obj;
// The tuple values also give you access to the tuple count and element-wise access
float x = dataAsFloat3Obj[0];
assert( dataAsFloat3Obj.tupleSize() == 3);
// Array data type wrappers dereference to the same array wrappers you get from regular attributes
const auto dataAsFloatArrayObj = anyType.get<float[]>();
for (const auto& floatValue : *dataAsFloatArrayObj) { /* ... */ }
size_t arrayElements = dataAsFloatArrayObj->size();
// For GPU attributes, which do not have the ability to dereference their array memory location, the wrapper instead
// returns a raw pointer to the underlying GPU memory location of the array.
const auto gpuFloatArrayObj = anyType.get<float[]>(); float const ***ptrToRawGpuData = *gpuFloatArrayObj; // When the node is configured to extract CUDA pointers on the CPU there is one fewer level of indirection for // arrays as the pointer returned is on the CPU. const auto gpuFloatArrayObj = anyType.get<float[]>(); float const ***ptrToGpuDataOnCpu = *gpuFloatArrayObj; float const **ptrToRawGpuData = *ptrToGpuDataOnCpu; // As with regular array attributes, before writing to elements of an output array attribute you must first resize // it to have the desired number of elements. auto outputFloatArrayObj = data.outputs.results().get<float[]>(); outputFloatArrayObj.resize( howManyDoINeed ); // For attributes whose memory space is determined at runtime, or when you want to access attribute data in a different // memory space than they were originally defined, you can force the retrieved data to be either on the CPU or GPU. const auto gpuVersionObj = anyType.getGpu<float>(); const auto cpuVersionObj = anyType.getCpu<float>(); // On rare occasions you may need to resolve the attribute's type at runtime, inside a node's compute() function. In // those cases the runtime attribute data can get out of sync so you need to notify it that a change has been made. 
AttributeObj out = db.abi_node().iNode->getAttributeByToken(db.abi_node(), outputs::anyOutput.m_token); out.iAttribute->setResolvedType(out, someNewType); anyOutput.reset(db.abi_context(), out.iAttribute->getAttributeDataHandle(out, kAccordingToContextIndex)); // end-extended-attribute-interface-description // #endif #include <omni/graph/core/ogn/TypeConversion.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/ogn/array.h> #include <omni/graph/core/ogn/string.h> #include <omni/fabric/Enums.h> using omni::fabric::PtrToPtrKind; namespace omni { namespace graph { namespace core { namespace ogn { // ============================================================================================================== // House the shared data types that will be used by all data type accessors. // Not all types are used by all classes, this just provides a single point of type definitions. template <typename CppType, bool readOnly, eMemoryType MemoryType, PtrToPtrKind GpuPtrType> struct data_type_traits : attribute_type_traits<CppType> { using parent_t = attribute_type_traits<CppType>; // Type that allows switching on memory type using isCpu_t = std::integral_constant<bool, MemoryType == kCpu>; // Template inheritance isn't smart enough to forward the types and constexprs so manually forward them here static constexpr bool isArray = parent_t::isArray; static constexpr uint8_t tupleCount = parent_t::tupleCount; static constexpr uint8_t arrayDepth = parent_t::arrayDepth; static constexpr BaseDataType baseType = parent_t::baseType; using data_t = typename parent_t::data_t; // The type of attribute handle used to call the ABI functions using handle_t = std::conditional_t<readOnly, const ConstAttributeDataHandle, AttributeDataHandle>; // The type that will be returned from individual element access (simple or tuple data) using element_t = typename std::conditional_t< readOnly, typename parent_t::element_t const, typename parent_t::element_t>; // The types used for 
storage where the constness is hardcoded into the attribute type using data_access_t = std::conditional_t<readOnly, data_t const, data_t>; using array_t = std::conditional_t< readOnly, std::conditional_t<std::is_same<const char, data_access_t>::value, const_string, const_array<data_access_t>>, std::conditional_t<std::is_same<char, data_access_t>::value, string, array<data_access_t>>>; // const, non-const, and appropriate const pointers to the Fabric data using data_ptr_t = std::conditional_t<isArray, data_t**, data_t*>; using data_ptr_const_t = std::conditional_t<isArray, data_t const**, data_t const*>; using data_ptr_access_t = std::conditional_t<readOnly, data_ptr_const_t, data_ptr_t>; // CPU array data lives in the wrappers, GPU data is raw since it cannot be dereferenced using array_data_t = std::conditional_t<MemoryType == kCpu, array_t, data_ptr_access_t>; // ============================================================================================================== // Simple test to see if the type of data in the template parameter is compatible with a specific Type() static bool matchesType(const Type& attributeType) { return attributeType.baseType == baseType && attributeType.componentCount == tupleCount && attributeType.arrayDepth == arrayDepth; } // ============================================================================================================== // Templated access to getting read-only and writable values, calling the correct ABI functions based on memory location static data_ptr_const_t readOnlyData(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle) { return _readOnlyData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>()); } static data_ptr_t writableData(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle) { return _writableData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>()); } // Calling 
the right one depending on the handle type static data_ptr_const_t data(const GraphContextObj& contextObj, const ConstAttributeDataHandle& attrHandle) { return _readOnlyData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>()); } static data_ptr_t data(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle) { return _writableData(contextObj, attrHandle, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>()); } // ============================================================================================================== // Retrieving a reference on existing data. // If data is not located at the right place (gpu/cpu), returns nullptr static void dataReference(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, ConstRawPtr& ref, size_t& size) { _dataReference(contextObj, attrHandle, ref, size, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>(), std::conditional_t<readOnly, std::true_type, std::false_type>()); } static void dataReference(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, RawPtr& ref, size_t& size) { _dataReference(contextObj, attrHandle, ref, size, std::conditional_t<MemoryType == kCpu, std::false_type, std::true_type>(), std::conditional_t<readOnly, std::true_type, std::false_type>()); } private: // These methods could not use the getDataX<> templates due to an oddity in how const is handled in composed typedefs. 
// It's slightly more efficient to build anyway, and since it goes through the ABI it's just as safe static data_ptr_const_t _readOnlyData(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, std::true_type) { data_ptr_const_t out{ nullptr }; const void** outPtr = (const void**)(&out); contextObj.iAttributeData->getDataRGpuAt(outPtr, contextObj, &attrHandle, 1, GpuPtrType); return out; } static data_ptr_const_t _readOnlyData(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, std::false_type) { data_ptr_const_t out{ nullptr }; const void** outPtr = (const void**)(&out); contextObj.iAttributeData->getDataR(outPtr, contextObj, &attrHandle, 1); return out; } static data_ptr_t _writableData(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, std::true_type) { data_ptr_t out{ nullptr }; void** outPtr = (void**)(&out); contextObj.iAttributeData->getDataWGpuAt(outPtr, contextObj, &attrHandle, 1, GpuPtrType); return out; } static data_ptr_t _writableData(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, std::false_type) { data_ptr_t out{ nullptr }; void** outPtr = (void**)(&out); contextObj.iAttributeData->getDataW(outPtr, contextObj, &attrHandle, 1); return out; } static void _dataReference(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, ConstRawPtr& ref, size_t& size, std::true_type isGpu, std::true_type isRO) { contextObj.iAttributeData->getDataReferenceRGpuAt(attrHandle, contextObj, GpuPtrType, ref, size); } static void _dataReference(const GraphContextObj& contextObj, ConstAttributeDataHandle const& attrHandle, ConstRawPtr& ref, size_t& size, std::false_type isGpu, std::true_type isRO) { contextObj.iAttributeData->getDataReferenceR(attrHandle, contextObj, ref, size); } static void _dataReference(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, RawPtr& ref, size_t& size, std::true_type isGpu, std::false_type isRO) { 
contextObj.iAttributeData->getDataReferenceWGpuAt(attrHandle, contextObj, GpuPtrType, ref, size); } static void _dataReference(const GraphContextObj& contextObj, AttributeDataHandle const& attrHandle, RawPtr& ref, size_t& size, std::false_type isGpu, std::false_type isRO) { contextObj.iAttributeData->getDataReferenceW(attrHandle, contextObj, ref, size); } }; // ============================================================================================================== // Simple wrapper to access the actual value with potential conversion on the read one template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct SimpleDataReadOnly { using data_traits = data_type_traits<CppType, true, MemoryType, GpuPtrType>; using return_type = const typename data_traits::data_t; typename data_traits::data_ptr_access_t m_value{nullptr}; Converter<typename data_traits::data_t> m_converter; Type const& m_type; explicit SimpleDataReadOnly(typename data_traits::data_access_t* value, Type const& type) : m_value{ value }, m_type{ type } {} return_type& operator*() const { return *m_converter.convertValue(m_value, m_type); } return_type* operator->() const { return m_converter.convertValue(m_value, m_type); } gsl::span<return_type> vectorized(size_t count) const { if (m_converter.willConvert(m_type)) { if (count != 1) return gsl::span<return_type>{}; return gsl::span<return_type>{ m_converter.convertValue(m_value, m_type), 1 }; } return gsl::span<return_type>{ m_value, count }; } operator bool() const { return m_value; } }; template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct SimpleDataWritable { using data_traits = data_type_traits<CppType, false, MemoryType, GpuPtrType>; using return_type = typename data_traits::data_t; typename data_traits::data_ptr_access_t m_value{ nullptr }; SimpleDataWritable(typename data_traits::data_access_t* m_value, Type const&) : 
m_value{m_value} {} return_type& operator*() { return *this->m_value; } return_type* operator->() { return this->m_value; } gsl::span<return_type> vectorized(size_t count) const { return { m_value, count }; } operator bool() const { return m_value; } }; // ============================================================================================================== // Tuple data behaves the same as simple data, with the addition of element accessors and a size function. template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct TupleDataReadOnly : SimpleDataReadOnly<CppType, MemoryType, GpuPtrType> { using data_traits = data_type_traits<CppType, true, MemoryType, GpuPtrType>; TupleDataReadOnly(typename data_traits::data_access_t* value, Type const& type) : SimpleDataReadOnly<CppType, MemoryType, GpuPtrType>{value, type} {} // GPU data is passed as a pointer to the tuple so for now there is no need for elementwise access here const typename data_traits::element_t& operator[](uint8_t index) const { static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data"); CARB_ASSERT(index < this->tupleSize()); return (**this)[index]; } uint8_t tupleSize() const { return data_traits::tupleCount; } }; template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct TupleDataWritable : SimpleDataWritable<CppType, MemoryType, GpuPtrType> { using data_traits = data_type_traits<CppType, false, MemoryType, GpuPtrType>; TupleDataWritable(typename data_traits::data_access_t* value, Type const& type) : SimpleDataWritable<CppType, MemoryType, GpuPtrType>{value, type} {} // GPU data is passed as a pointer to the tuple so for now there is no need for elementwise access here typename data_traits::element_t& operator[](uint8_t index) { static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data"); CARB_ASSERT(index < 
this->tupleSize()); return (**this)[index]; } uint8_t tupleSize() const { return data_traits::tupleCount; } }; // ============================================================================================================== //Default version, for CPU template <typename CppType, bool readOnly, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct ArrayData { using data_traits = data_type_traits<CppType, readOnly, MemoryType, GpuPtrType>; using return_type = typename data_traits::array_data_t; using handle_type = typename data_traits::handle_t; explicit ArrayData(const GraphContextObj& context, handle_type const* handle, bool isValid) { if (isValid) { m_arrayData.setContext(context); m_arrayData.setHandle(*handle); } } size_t size() const { return m_arrayData.isValid() ? m_arrayData.size() : 0; } return_type& operator*() { return m_arrayData; } return_type& operator()() { return m_arrayData; } return_type* operator->() { return &m_arrayData; } //const accessors return_type const& operator*() const { return m_arrayData; } return_type const& operator()() const { return m_arrayData; } return_type const* operator->() const { return &m_arrayData; } //Bool operator operator bool() const { return m_arrayData.isValid(); } //Invalidate void invalidate() { m_arrayData.setDirty();} GraphContextObj const* context() const { return m_arrayData.context(); } handle_type const& handle() const { return m_arrayData.handle(); } protected: return_type m_arrayData; }; // ============================================================================================================== // Special version, for GPU template <typename CppType, bool readOnly, PtrToPtrKind GpuPtrType> struct ArrayData<CppType, readOnly, kCuda, GpuPtrType> { using data_traits = data_type_traits<CppType, readOnly, kCuda, GpuPtrType>; using return_type = typename data_traits::array_data_t; using handle_type = typename data_traits::handle_t; using this_t = ArrayData<CppType, readOnly, 
kCuda, GpuPtrType>; explicit ArrayData(const GraphContextObj& context, handle_type const* handle, bool isValid) { if (isValid) { m_ctx = &context; m_hdl = handle; } else { m_ctx = nullptr; m_hdl = nullptr; } } size_t size() const { size_t count = 0; ConstAttributeDataHandle hdl = *m_hdl; m_ctx->iAttributeData->getElementCount(&count, *m_ctx, &hdl, 1); return count; } //accessors return_type deref() { using PtrType = typename std::conditional<readOnly, ConstRawPtr, RawPtr>::type; return_type dataPtr{ nullptr }; size_t size = 0; data_traits::dataReference(*m_ctx, *m_hdl, (PtrType&) dataPtr, size); return size ? dataPtr : 0; } return_type operator*() { return data_traits::data(*this->m_ctx, *this->m_hdl); } return_type operator()() { return data_traits::data(*this->m_ctx, *this->m_hdl); } //const accessors return_type const operator*() const { return const_cast<this_t*>(this)->operator*(); } return_type const operator()() const { return const_cast<this_t*>(this)->operator()(); } operator bool() const { return m_ctx && m_hdl; } void invalidate() {} GraphContextObj const* context() const { return m_ctx; } handle_type const& handle() const { return *m_hdl; } protected: GraphContextObj const* m_ctx; handle_type const* m_hdl; }; template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct ArrayDataReadOnly : ArrayData<CppType, true, MemoryType, GpuPtrType> { ArrayDataReadOnly(const GraphContextObj& context, ConstAttributeDataHandle const* handle, bool isValid) : ArrayData<CppType, true, MemoryType, GpuPtrType>{ context, handle, isValid } {} }; template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct ArrayDataWritable : ArrayData<CppType, false, MemoryType, GpuPtrType> { ArrayDataWritable(const GraphContextObj& context, AttributeDataHandle const* handle, bool isValid) : ArrayData<CppType, false, MemoryType, GpuPtrType>{context, handle, isValid} {} void 
resize(size_t newCount) { auto const& ctx = *this->context(); const IAttributeData& iData = *(ctx.iAttributeData); iData.setElementCount(ctx, this->handle(), newCount); this->invalidate(); } }; // ============================================================================================================== template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct TupleArrayDataReadOnly : ArrayDataReadOnly<CppType, MemoryType, GpuPtrType> { using data_traits = data_type_traits<CppType, true, MemoryType, GpuPtrType>; TupleArrayDataReadOnly(const GraphContextObj& context, ConstAttributeDataHandle const* handle, bool isValid) : ArrayDataReadOnly<CppType, MemoryType, GpuPtrType>{context, handle, isValid} {} // GPU data is passed as a pointer to the tuple so for now there is no need for element wise access here const typename data_traits::data_t& operator[] (uint8_t index) const { static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data"); CARB_ASSERT(index < this->tupleSize()); return this->m_arrayData[index]; } uint8_t tupleSize() const { return data_traits::tupleCount; } }; template <typename CppType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> struct TupleArrayDataWritable : ArrayDataWritable<CppType, MemoryType, GpuPtrType> { using data_traits = data_type_traits<CppType, false, MemoryType, GpuPtrType>; TupleArrayDataWritable(const GraphContextObj& context, AttributeDataHandle const* handle, bool isValid) : ArrayDataWritable<CppType, MemoryType, GpuPtrType>{context, handle, isValid} {} // GPU data is passed as a pointer to the tuple so for now there is no need for element wise access here typename data_traits::data_t& operator[](uint8_t index) { static_assert(MemoryType == kCpu, "Cannot access individual tuple elements on GPU data"); CARB_ASSERT(index < this->tupleSize()); return this->m_arrayData[index]; } uint8_t tupleSize() const { return 
data_traits::tupleCount; } };

// ======================================================================
/**
 * Class responsible for managing the interaction with an attribute whose data type is determined at runtime.
 * These attributes may or may not have a corresponding node attribute object. Those within bundles are virtual
 * and do not have a concrete attribute. Those with extended types, do.
 *
 * It wraps the attribute information in an interface with a more natural interaction than the raw ABI calls.
 */
template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
class RuntimeAttribute
{
public :
    // Make const-ness aware at compile time so that this class can be used in const and non-const contexts
    static constexpr bool readOnly = (AttributeType == ogn::kOgnInput);

    //! The handle types are not simply "X" and "const X" variations so the type has to be explicitly defined
    //! for writable (output/state) and non-writable (input) attribute types.
    using dataHandle_t = typename std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>;

private:
    //! Traits for generic access to the raw memory.
template <eMemoryType MT> using raw_data_traits_mt = data_type_traits<uint8_t, readOnly, MT, GpuPtrType>; using raw_data_traits = raw_data_traits_mt<MemoryType>; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using simpleData_t = std::conditional_t< readOnly, ogn::SimpleDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::SimpleDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using tupleData_t = typename std::conditional_t< readOnly, ogn::TupleDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::TupleDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using arrayData_t = typename std::conditional_t< readOnly, ogn::ArrayDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::ArrayDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; template <typename CppType, eMemoryType AccessorMemoryType, PtrToPtrKind AccessorGpuPtrType = PtrToPtrKind::eNotApplicable> using tupleArrayData_t = typename std::conditional_t< readOnly, ogn::TupleArrayDataReadOnly<CppType, AccessorMemoryType, AccessorGpuPtrType>, ogn::TupleArrayDataWritable<CppType, AccessorMemoryType, AccessorGpuPtrType> >; /** * helper that allows to make the proper call depending on the type of the handle */ inline void _moveHdl(const IAttributeData& iData, size_t index, ConstAttributeDataHandle& hdl) const { hdl = iData.moveToAnotherInstanceR(m_context, hdl, (int)index); } inline void _moveHdl(const IAttributeData& iData, size_t index, AttributeDataHandle& hdl) const { hdl = iData.moveToAnotherInstanceW(m_context, hdl, (int)index); } /** * helper that prefetch the data pointer for the attribute */ template 
<eMemoryType MT = MemoryType> inline void _prefetch() const { if (MT != kAny) m_cachedData = raw_data_traits_mt<MT>::data(m_context, m_handle); } GraphContextObj m_context{ 0 }; //!< Evaluation context for which this attribute is valid NameToken m_name; //!< Name by which this attribute is accessed Type m_type; //!< Type information for the actual attribute data Type m_resolvedType; //!< Type information for the attribute interface dataHandle_t m_handle; //!< Handle of the attribute data mutable typename raw_data_traits::data_access_t* m_cachedData{ nullptr }; public: /** * Default constructor will create an invalid attribute */ RuntimeAttribute() : m_type(BaseDataType::eUnknown), m_resolvedType(BaseDataType::eUnknown), m_handle(dataHandle_t::invalidValue()) { } /** * Although the destructor should always be implemented with copy constructors it has no resources to release */ ~RuntimeAttribute() = default; /** * Copy constructor, to allow these objects to be easily passed around. * Only attributes with the same accessibility and memory type should be copied. */ RuntimeAttribute(const RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& toCopy) : m_context(toCopy.m_context), m_name(toCopy.m_name), m_type(toCopy.m_type), m_resolvedType(toCopy.m_resolvedType), m_handle(toCopy.m_handle) { if (m_handle != dataHandle_t::invalidHandle()) _prefetch<>(); else m_cachedData = nullptr; } /** * Move constructor, to allow these objects to be efficiently passed around * Only attributes with the same accessibility and memory type should be copied. 
*/ RuntimeAttribute(RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>&& toCopy) : m_context(toCopy.m_context), m_name(toCopy.m_name), m_type(toCopy.m_type), m_resolvedType(toCopy.m_resolvedType), m_handle(toCopy.m_handle) { if (m_handle != dataHandle_t::invalidHandle()) _prefetch(); else m_cachedData = nullptr; } /** * Copy assignment, to match the constructor * Only attributes with the same accessibility and memory type should be copied. */ RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& operator=(const RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& toCopy) { m_context = toCopy.m_context; m_name = toCopy.m_name; m_type = toCopy.m_type; m_resolvedType = toCopy.m_resolvedType; m_handle = toCopy.m_handle; m_cachedData = toCopy.m_cachedData; return *this; } /** * Move assignment, to match the constructor * Only attributes with the same accessibility and memory type should be copied. */ RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>& operator=(RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>&& toCopy) { m_context = toCopy.m_context; m_name = toCopy.m_name; m_type = toCopy.m_type; m_resolvedType = toCopy.m_resolvedType; m_handle = toCopy.m_handle; m_cachedData = toCopy.m_cachedData; return *this; } /** * Standard constructor, extracts the attribute information if it is valid. 
* * @param[in] context Evaluaton context of this attribute * @param[in] handle Handle to the attribute * @param[in] resolvedType The type exposed to end user (conversion will happen if different from the actual real type) */ RuntimeAttribute(const GraphContextObj& context, dataHandle_t& handle, Type const& resolvedType = { BaseDataType::eUnknown }) : m_context(context), m_type(BaseDataType::eUnknown), m_resolvedType(resolvedType), m_handle(handle) { if (m_handle.isValid()) { m_name = context.iAttributeData->getName(context, handle); m_type = context.iAttributeData->getType(context, handle); if (m_resolvedType.baseType == BaseDataType::eUnknown) m_resolvedType = m_type; _prefetch(); } else { m_cachedData = nullptr; } } /** * Standard constructor, extracts the attribute information if it is valid. * * @param[in] context Evaluaton context of this attribute * @param[in] handle Handle to the attribute * @param[in] @param[in] resolvedType The type exposed to end user (conversion will happen if different from the actual real type) */ RuntimeAttribute(GraphContextObj&& context, dataHandle_t&& handle, Type const& resolvedType = { BaseDataType::eUnknown }) : m_context(context), m_type(BaseDataType::eUnknown), m_resolvedType(resolvedType), m_handle(handle) { if (m_handle.isValid()) { m_name = context.iAttributeData->getName(context, handle); m_type = context.iAttributeData->getType(context, handle); if (m_resolvedType.baseType == BaseDataType::eUnknown) m_resolvedType = m_type; _prefetch(); } else { m_cachedData = nullptr; } } /** * Copy the data from another runtime attribute into this one (only valid for non-const objects) * * @param[in] rhs Runtime attribute being copied */ template <typename SourceAttribute> void copyData(const SourceAttribute& rhs) { static_assert(! 
readOnly, "Attribute data can only be copied to writable attributes"); ConstAttributeDataHandle constSrcHandle(rhs.abi_handle()); m_context.iAttributeData->copyData(m_handle, m_context, constSrcHandle); } /** * Set the context and attribute handle for evaluation. Delayed so that the contents can be created * early with just-in-time initialization. * * @param[in] context Evaluation context to use when extracting information * @param[in] handle Handle to the attribute being wrapped * @param[in] attr The attribute object represented by this wrapper */ void reset(const GraphContextObj& context, const dataHandle_t& handle, const AttributeObj& attr) { m_handle = handle; m_context = context; if (m_handle.isValid()) { m_name = context.iAttributeData->getName(context, handle); m_type = context.iAttributeData->getType(context, handle); m_resolvedType = attr.iAttribute->getResolvedType(attr); _prefetch(); } else { m_type = Type{BaseDataType::eUnknown}; m_resolvedType = Type{ BaseDataType::eUnknown }; m_name = fabric::kUninitializedToken; m_cachedData = nullptr; } } /** * @return true if the handle and context point to valid data within the fabric */ bool isValid() const { return m_handle.isValid(); } /** * @return Name by which this attribute's data is referenced */ const NameToken& name() const { return m_name; } /** * @return Type information for this attribute's data */ const Type& type() const { return m_resolvedType.baseType != BaseDataType ::eUnknown ? m_resolvedType : m_type; } /** * @return True if the data can be accessed in a vectorized manner for this attribute */ const bool canVectorize() const { //attrib needs to exists, and to be resolved as the the same type (ie. 
no auto conversion) //we don't care about role return m_type.baseType != BaseDataType::eUnknown && m_type.baseType == m_resolvedType.baseType && m_type.componentCount == m_resolvedType.componentCount && m_type.arrayDepth == m_resolvedType.arrayDepth; } /** * @return The standardized type name for this attribute's data */ std::string typeName() const { return getOgnTypeName(type()); } /** * @return True if the attribute has a fully resolved type */ bool resolved() const { return m_type.baseType != BaseDataType::eUnknown; } /** * @return Raw attribute data handle to use for direct ABI manipulation */ dataHandle_t abi_handle() const { return m_handle; } /** * In vectorized context, make this attribute point to another instance * @param[in] offset The distance at which the target instance is located relative to the currently pointed one */ void adjustHandle(size_t offset) { const IAttributeData& iData = *m_context.iAttributeData; _moveHdl(iData, offset, m_handle); _prefetch(); } /** * @return Raw graph context assotiated to the handle to use for direct ABI manipulation */ GraphContextObj const& abi_context() const { return m_context; } /** * @return the number of elements in the array, or 1 if it is not an array type */ size_t size() const { size_t count{ 1 }; if (m_type.arrayDepth > 0) { ConstAttributeDataHandle constHandle{ m_handle }; // getElementCount requires the Const version m_context.iAttributeData->getElementCount(&count, m_context, &constHandle, 1); } return count; } // ------------------------------------------------------------------------------------------------------------ // Support for the various methods to retrieve a generic value type as get<TYPE>(). // // The details in the template and traits information is intended to make access to the data is generic as possible // given the information provided by the data types and this attribute's class members. 
// // Almost all of the time you will access what appears to be a single templated method as this: // // auto dataAccessor = thisAttribute.get<DATA_TYPE>(); // if (dataAccessor.isValid()) ... // The data is the right type, and is valid // // e.g. auto intData = intAttribute.get<int>(); // // This hardcodes the compile time information about whether the attribute was read-only, what type of data it // accepts, and the memory location, into what appears to be a single accessor (though behind the scenes it is // actually a small set of them that provide appropriate access points for the type of data). // // For special attributes who have their memory location designated as "any", the decision of memory location is // made at access time so two variations of the above method are available which explicitly get accessors on either // the CPU or GPU memory locations: // // auto dataAccessorCpu = thisAttribute.getCpu<DATA_TYPE>(); // auto dataAccessorGpu = thisAttribute.getGpu<DATA_TYPE>(); // // Note: Use only one of the two memory location accessors as accessing one type will often invalidate the other // type, potentially causing excessive slow copying of data. // template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_resolvedType) ? 
data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return simpleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_type) ? data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_resolvedType) ? data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; using data_ptr_access_t = typename data_traits::data_ptr_access_t; return tupleData_t<POD, GetAtMemoryType, GetAtGpuPtrType>( data_traits::matchesType(m_type) ? 
data_ptr_access_t(m_cachedData) : nullptr, m_type); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return arrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() const { using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } template <typename POD, eMemoryType GetAtMemoryType, PtrToPtrKind GetAtGpuPtrType = PtrToPtrKind::eNotApplicable, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType> getAt() { static_assert(! 
readOnly, "non-const get() can only be called on output attributes"); using data_traits = data_type_traits<POD, readOnly, GetAtMemoryType, GetAtGpuPtrType>; return tupleArrayData_t<POD, GetAtMemoryType, GetAtGpuPtrType>(m_context, &m_handle, data_traits::matchesType(m_type)); } // ------------------------------------------------------------------------------------------------------------ // Variations of the get<>() functions that force either CPU or GPU memory location. These are used when the // memory location of the attributes was set to "any", meaning they decide CPU or GPU at runtime, though there's // no reason they can't be used for explicit memory locations either. template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename 
std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, MemoryType, GpuPtrType> get() const { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, MemoryType, GpuPtrType> get() { static_assert(MemoryType != kAny, "Use getCpu() or getGpu() to specify where the attribute's memory lives"); return getAt<POD, MemoryType, GpuPtrType>(); } // -------------------------------------------------------------------------------------------------------------- template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, kCpu> getCpu() const { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, kCpu> getCpu() { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, kCpu> getCpu() const { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename 
std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, kCpu> getCpu() { _prefetch<kCpu>(); return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, kCpu> getCpu() const { return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, kCpu> getCpu() { return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, kCpu> getCpu() const { return getAt<POD, kCpu>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, kCpu> getCpu() { return getAt<POD, kCpu>(); } // -------------------------------------------------------------------------------------------------------------- template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> const simpleData_t<POD, kCuda, GpuPtrType> getGpu() const { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isSimpleType, int> = 0> simpleData_t<POD, kCuda, GpuPtrType> getGpu() { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> const tupleData_t<POD, kCuda, GpuPtrType> getGpu() const { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleType, int> = 0> tupleData_t<POD, kCuda, GpuPtrType> getGpu() { _prefetch<kCuda>(); return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> const arrayData_t<POD, kCuda, GpuPtrType> getGpu() const { return 
getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isArrayType, int> = 0> arrayData_t<POD, kCuda, GpuPtrType> getGpu() { return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> const tupleArrayData_t<POD, kCuda, GpuPtrType> getGpu() const { return getAt<POD, kCuda, GpuPtrType>(); } template <typename POD, typename std::enable_if_t<attribute_type_traits<POD>::isTupleArrayType, int> = 0> tupleArrayData_t<POD, kCuda, GpuPtrType> getGpu() { return getAt<POD, kCuda, GpuPtrType>(); } // -------------------------------------------------------------------------------------------------------------- // Raw data access method, which returns size and pointer but ignores type information. Its usage should be rare, // but it is useful to have it available for those times. template <eMemoryType RawMemoryType = MemoryType, PtrToPtrKind RawGpuPtrType = PtrToPtrKind::eNotApplicable> void rawData(ConstRawPtr& ptr, size_t& size) const { static_assert(RawMemoryType != kAny, "Cannot access raw data on an attribute with runtime memory location"); using data_traits = data_type_traits<uint8_t, readOnly, RawMemoryType, RawGpuPtrType>; data_traits::dataReference(m_context, m_handle, ptr, size); } template <eMemoryType RawMemoryType = MemoryType, PtrToPtrKind RawGpuPtrType = PtrToPtrKind::eNotApplicable> void rawData(RawPtr& ptr, size_t& size) { static_assert(RawMemoryType != kAny, "Cannot access raw data on an attribute with runtime memory location"); static_assert(! 
readOnly, "Cannot access writable raw data on a read-only attribute"); using data_traits = data_type_traits<uint8_t, readOnly, RawMemoryType, RawGpuPtrType>; data_traits::dataReference(m_context, m_handle, ptr, size); } }; /* Runtime Attribute type traits */ template<class T> struct is_runtime_data : std::false_type {}; template<class T, bool ReadOnly, eMemoryType MemoryType> struct is_runtime_data<ArrayData<T, ReadOnly, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<SimpleDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<SimpleDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<ArrayDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<ArrayDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleArrayDataReadOnly<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; template<class T, eMemoryType MemoryType> struct is_runtime_data<TupleArrayDataWritable<T, MemoryType, PtrToPtrKind::eNotApplicable>> : std::true_type {}; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
57,389
C
48.219554
199
0.672063
omniverse-code/kit/include/omni/graph/core/ogn/TypeTraits.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once

#include <omni/graph/core/Type.h>
#include <omni/graph/core/ogn/Types.h>
#include <omni/graph/core/TemplateUtils.h>

namespace omni
{
namespace graph
{
namespace core
{
namespace ogn
{

// Helper that provides a templated conversion from C++ simple data type to the base data type enum in core::Type.
// The unspecialized template maps unrecognized types to eUnknown.
template <typename CppType> struct attribute_base_t { static constexpr BaseDataType value = BaseDataType::eUnknown; };
template <> struct attribute_base_t<bool> { static constexpr BaseDataType value = BaseDataType::eBool; };
template <> struct attribute_base_t<double> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<float> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<int> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<int64_t> { static constexpr BaseDataType value = BaseDataType::eInt64; };
template <> struct attribute_base_t<Token> { static constexpr BaseDataType value = BaseDataType::eToken; };
template <> struct attribute_base_t<NameToken> { static constexpr BaseDataType value = BaseDataType::eToken; };
template <> struct attribute_base_t<Path> { static constexpr BaseDataType value = BaseDataType::eRelationship; };
template <> struct attribute_base_t<TargetPath> { static constexpr BaseDataType value = BaseDataType::eRelationship; };
template <> struct attribute_base_t<uint32_t> { static constexpr BaseDataType value = BaseDataType::eUInt; };
template <> struct attribute_base_t<uint64_t> { static constexpr BaseDataType value = BaseDataType::eUInt64; };
template <> struct attribute_base_t<uint8_t> { static constexpr BaseDataType value = BaseDataType::eUChar; };
template <> struct attribute_base_t<char> { static constexpr BaseDataType value = BaseDataType::eUChar; };//char is used for string, but implemented as uchar

// carb base types: tuple types map to the base type of their components
template <> struct attribute_base_t<carb::Float2> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<carb::Float3> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<carb::Float4> { static constexpr BaseDataType value = BaseDataType::eFloat; };
template <> struct attribute_base_t<carb::Double2> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<carb::Double3> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<carb::Double4> { static constexpr BaseDataType value = BaseDataType::eDouble; };
template <> struct attribute_base_t<carb::Int2> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<carb::Int3> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<carb::Int4> { static constexpr BaseDataType value = BaseDataType::eInt; };
template <> struct attribute_base_t<carb::Uint2> { static constexpr BaseDataType value = BaseDataType::eUInt; };
template <> struct attribute_base_t<carb::Uint3> { static constexpr BaseDataType value = BaseDataType::eUInt; };
template <> struct attribute_base_t<carb::Uint4> { static constexpr BaseDataType value = BaseDataType::eUInt; };

// Helper struct to convert at compile time from BaseDataType to corresponding cpp type.
// The unspecialized template maps unrecognized enum values to void.
template <BaseDataType eBaseType> struct attribute_t { using type = void; };
template <> struct attribute_t<BaseDataType::eBool> { using type = bool; };
template <> struct attribute_t<BaseDataType::eDouble> { using type = double; };
template <> struct attribute_t<BaseDataType::eFloat> { using type = float; };
template <> struct attribute_t<BaseDataType::eInt> { using type = int; };
template <> struct attribute_t<BaseDataType::eInt64> { using type = int64_t; };
template <> struct attribute_t<BaseDataType::eToken> { using type = Token; };
template <> struct attribute_t<BaseDataType::eUInt> { using type = uint32_t; };
template <> struct attribute_t<BaseDataType::eUInt64> { using type = uint64_t; };
template <> struct attribute_t<BaseDataType::eUChar> { using type = uint8_t; };

// Templated conversions defining attribute traits given the actual C++ data types they implement.
// The role does not enter in to this conversion as it is an interpretation of a data type, not a separate data type.
//
// Constants:
//     isArray        Boolean indicating if the data type includes an array of variable length
//     tupleCount     Number of tuple values in the data type (1 for a simple value)
//     baseType       BaseDataType enum matching the data's unencumbered type
// Types:
//     actual_t       Actual data type to be handled (managing types that resolve to the same POD)
//     element_t      Data type for the unencumbered value (e.g float for a float[][3])
//     data_t         Data type for a single value (e.g. float for float/float[] but float[3] for float[3]/float[][3])
//
// The comments above the definitions give examples of how the values are set for representative template types
//
// Note: The pxr GfVec/GfMatrix types are not included here for simplicity, though they can be defined in a separate
//       file for those that wish to use them by specializing the attribute_type_traits struct.
template <typename CppType>
struct attribute_type_traits
{
    // float/float[3] -> false, float[]/float[][3] -> true
    static constexpr bool isArray = !is_bounded_array<CppType>::value && std::is_array<CppType>::value;
    static constexpr uint8_t arrayDepth = isArray ? 1 : 0;

    // Flags that break apart the data type into the four main mutually-exclusive categories used as accessors
    static constexpr bool isSimpleType = !is_bounded_array<CppType>::value && !std::is_array<CppType>::value;
    static constexpr bool isTupleType = is_bounded_array<CppType>::value;
    static constexpr bool isArrayType = std::is_array<CppType>::value && !is_bounded_array<CppType>::value &&
                                        !is_bounded_array<std::remove_extent_t<CppType>>::value;
    static constexpr bool isTupleArrayType = std::is_array<CppType>::value && !is_bounded_array<CppType>::value &&
                                             is_bounded_array<std::remove_extent_t<CppType>>::value;

    // Get the actual data type this class references. This is needed due to the fact that our token implementation,
    // NameToken, is typedefed to uint64_t, making it indistinguishable from a regular uint64_t to the compiler. By
    // passing ogn::Token/ogn::Token[] instead, the ambiguity can be resolved and the actual type deduced. (This
    // wouldn't quite work as-is if it supported tuples, but as it doesn't the extra complexity can be omitted.)
    using actual_t = std::conditional_t<std::is_same<std::remove_all_extents_t<CppType>, ogn::Token>::value,
                                        std::conditional_t<isArray, NameToken[], NameToken>,
                                        CppType
                                        >;

    // float/float[3]/float[]/float[][3] -> float
    using element_t = std::remove_const_t<std::remove_all_extents_t<actual_t>>;

    // float/float[]/float[3]/float[][3] -> BaseDataType::eFloat
    static constexpr BaseDataType baseType = attribute_base_t<element_t>::value;

    // float/float[] -> float, float[3]/float[][3] -> float[3]
    using data_t = std::conditional_t<isArray, std::remove_extent_t<actual_t>, actual_t>;

    // float/float[] -> 1, float[3]/float[][3] -> 3
    static constexpr int tupleCount = std::is_array<data_t>::value ? std::extent<data_t, 0>::value : 1;
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
7,978
C
62.831999
170
0.736901
omniverse-code/kit/include/omni/graph/core/ogn/TypeConversion.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once #include <omni/graph/core/ogn/TypeTraits.h> namespace omni { namespace graph { namespace core { namespace ogn { // Utility struct to implement conversion between 2 BaseDataType // Note: simple runtime dispatch on the source type (dst type must be known at compile time) static constexpr BaseDataType kLastBaseType = BaseDataType::eToken; template <typename DST_TYPE, BaseDataType BASE_SRC_TYPE = kLastBaseType> struct BaseTypeConversion { using next = BaseTypeConversion<DST_TYPE, (BaseDataType)((uint8_t)BASE_SRC_TYPE - 1)>; static int canConvertFrom(BaseDataType srcType) { if (srcType == BASE_SRC_TYPE) return false; return next::canConvertFrom(srcType); } static bool convert(DST_TYPE* dst, void const* src, BaseDataType srcType, size_t count) { if (srcType == BASE_SRC_TYPE) return false; return next::convert(dst, src, srcType, count); } }; template <typename T> struct BaseTypeConversion<T, BaseDataType::eUnknown> { static constexpr int canConvertFrom(BaseDataType) { return 0; } static bool convert(T*, void const*, BaseDataType, size_t) { return false; } }; template<typename T, BaseDataType eDataType> struct BaseTypeConversionBase { using next = BaseTypeConversion<T, (BaseDataType)((uint8_t)eDataType - 1)>; static bool canConvertFromBase(BaseDataType srcType) { if (eDataType == srcType) return true; return false; } template<typename FUNC> static bool convertBase(T* dst, void const* srcBuffer, BaseDataType srcType, size_t count, FUNC const& func) { if (eDataType == srcType) { using srcType = typename attribute_t<eDataType>::type; 
srcType const* srcPtr = (srcType const*)srcBuffer; while (count--) func(*dst++, *srcPtr++); return true; } return next::convert(dst, srcBuffer, srcType, count); } }; #define IMPLEMENT_BASE_TYPE_CONVERSION(dstType, srcBaseType, func)\ template <> struct BaseTypeConversion<dstType, srcBaseType> : public BaseTypeConversionBase<dstType, srcBaseType> {\ static int canConvertFrom(BaseDataType srcType) {if(canConvertFromBase(srcType)) return __LINE__; return next::canConvertFrom(srcType);}\ static bool convert(dstType* dst, void const* srcBuffer, BaseDataType srcType, size_t count) {return convertBase(dst, srcBuffer, srcType, count, func);}} #define IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(SRC, DST) \ IMPLEMENT_BASE_TYPE_CONVERSION(DST, attribute_base_t<SRC>::value, [](DST& dst, SRC const& src) { dst = (DST)src; }) //! <summary> //! Implements all conversions //! Note: order is important for BOTH source type and destination type //! source type oder : template instantiation needs to happen in order of the enum since higher type ID template //! will call lower ones (so they need to be already defined) //! => ORDER IN THE BASE TYPE ENUM ORDER //! dst type order : conversion precedence will follow order of declaration //! ie. : when several conversions are possible, the one declared before the others will be used //! => ORDER BY "QUALITY" OF THE CONVERSION //! 
</summary> IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, unsigned int); // signed -> unsigned is allowed by fabric IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, int64_t); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, float); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, double); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int, bool); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, uint64_t); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, int); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, int64_t); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, float); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, double); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(unsigned int, bool); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, uint64_t); // signed -> unsigned is allowed by fabric IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, int); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, double); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, float); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(int64_t, bool); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, int64_t); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, unsigned int); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, int); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, double); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, float); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(uint64_t, bool); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(float, double); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(float, int); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(float, int64_t); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(double, float); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(double, int64_t); IMPLEMENT_BASE_TYPE_CONVERSION_AS_CAST(double, int); //!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!// //!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!// // This implements the Double dispatch solve, when none of the type are known at compile time template <BaseDataType BASE_DST_TYPE = kLastBaseType> struct 
BaseDoubleDispatchTypeConversionTest { using TYPE = typename attribute_t<BASE_DST_TYPE>::type; static int canConvert(BaseDataType from, BaseDataType to) { if (BASE_DST_TYPE == to) return BaseTypeConversion<TYPE>::canConvertFrom(from); return BaseDoubleDispatchTypeConversionTest<(BaseDataType)((uint8_t)BASE_DST_TYPE - 1)>::canConvert(from, to); } static bool convert(void* dstBuffer, BaseDataType dstType, void const* srcBuffer, BaseDataType srcType, size_t count) { if (BASE_DST_TYPE == dstType) return BaseTypeConversion<TYPE>::convert((TYPE*) dstBuffer, srcBuffer, srcType, count); return BaseDoubleDispatchTypeConversionTest<(BaseDataType)((uint8_t)BASE_DST_TYPE - 1)>::convert(dstBuffer, dstType, srcBuffer, srcType, count); } }; template <> struct BaseDoubleDispatchTypeConversionTest<BaseDataType::eUnknown> { static int canConvert(BaseDataType from, BaseDataType to) { return 0; } static bool convert(void* dst, BaseDataType dstType, void const* srcBuffer, BaseDataType srcType, size_t count) { return false; } }; //!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!// //!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!//!// //Convert at compile time a type to its corresponding BaseDataType template<typename T> constexpr BaseDataType getDataType() { using U = std::decay_t<T>; using V = std::remove_pointer_t<U>; return attribute_base_t<V>::value; } //Returns a positive value if a conversion exists between the 2 provided types, 0 else // The lower the returned value is, the prefered is the conversion static inline int isRawDataConvertible(BaseDataType from, BaseDataType to) { return BaseDoubleDispatchTypeConversionTest<>::canConvert(from, to); } // Indicate whether, for a given type pair reputed to be convertible, the actual conversion can be bypassed static inline bool isRawBinaryDataCompatible(BaseDataType t0, BaseDataType t1) { if (t0 == t1) return true; if (t0 > t1) std::swap(t0, t1); switch (t0)//always smaller { case 
omni::graph::core::BaseDataType::eUnknown: break; case omni::graph::core::BaseDataType::eBool: break; case omni::graph::core::BaseDataType::eUChar: break; case omni::graph::core::BaseDataType::eInt: break; case omni::graph::core::BaseDataType::eUInt: break; case omni::graph::core::BaseDataType::eInt64: break; case omni::graph::core::BaseDataType::eUInt64: return t1 == omni::graph::core::BaseDataType::eToken; break; case omni::graph::core::BaseDataType::eHalf: break; case omni::graph::core::BaseDataType::eFloat: break; case omni::graph::core::BaseDataType::eDouble: break; case omni::graph::core::BaseDataType::eToken: break; case omni::graph::core::BaseDataType::eRelationship: break; case omni::graph::core::BaseDataType::eAsset: break; case omni::graph::core::BaseDataType::ePrim: break; case omni::graph::core::BaseDataType::eConnection: break; case omni::graph::core::BaseDataType::eTag: break; default: break; } return false; } //Perform the actual conversion between 2 instantiated values template <typename Dst> static inline bool doConversion(Dst* dst, void const* src, BaseDataType srcType, size_t count) { static_assert(getDataType<Dst>() != BaseDataType::eUnknown, ""); CARB_ASSERT(srcType != BaseDataType::eUnknown); using UnderlyingType = typename attribute_t<getDataType<Dst>()>::type;//ex: Vec3 -> float return BaseTypeConversion<UnderlyingType>::convert((UnderlyingType*)dst, src, srcType, count); } static inline bool doConversion(void* dst, BaseDataType dstType, void const* src, BaseDataType srcType, size_t count) { CARB_ASSERT(dstType != BaseDataType::eUnknown); CARB_ASSERT(srcType != BaseDataType::eUnknown); return BaseDoubleDispatchTypeConversionTest<>::convert(dst, dstType, src, srcType, count); } // Helper to determine if the given roles are compatible for matching base type static inline bool areMatchedBaseTypesRoleCompatible(BaseDataType baseType, AttributeRole srcRole, AttributeRole destRole) { // Execution (which has base type uint) can only be connected 
to another Execution attribute if ((baseType == BaseDataType::eUInt) && ((destRole == AttributeRole::eExecution) != (srcRole == AttributeRole::eExecution))) { return false; } else if (baseType == omni::fabric::BaseDataType::eUChar) { // Path and strings are compatible bool isSrcString = (srcRole == AttributeRole::ePath || srcRole == AttributeRole::eText); bool isDstString = (destRole == AttributeRole::ePath || destRole == AttributeRole::eText); return isSrcString == isDstString; } return true; }; // Helper to return true if the given types are compatible static inline int areTypesCompatible(const Type& srcType, const Type& destType) { if (srcType == destType) return 1; // They aren't exactly the same, but check for signed/unsigned compatibility, which FC can support if (srcType.arrayDepth != destType.arrayDepth) return 0; if (srcType.componentCount != destType.componentCount) return 0; // If base types match, we are compatible with a possible exception for the role if (srcType.baseType == destType.baseType) return areMatchedBaseTypesRoleCompatible(srcType.baseType, srcType.role, destType.role) ? 1 : 0; // Arrays are not convertible // but arrays of int(64) signed <-> unsigned are through fabric if (srcType.arrayDepth) { return ((srcType.baseType == BaseDataType::eInt || srcType.baseType == BaseDataType::eUInt) && (destType.baseType == BaseDataType::eInt || destType.baseType == BaseDataType::eUInt)) || ((srcType.baseType == BaseDataType::eInt64 || srcType.baseType == BaseDataType::eUInt64) && (destType.baseType == BaseDataType::eInt64 || destType.baseType == BaseDataType::eUInt64)) ? 
1 : 0; } return isRawDataConvertible(srcType.baseType, destType.baseType); }; // Helper to return if there is any compatibility between two type groups static inline int areAnyTypesCompatible(const std::vector<Type>& srcTypes, const std::vector<Type>& destTypes) { for (const auto& src: srcTypes) { for (const auto& dst: destTypes) { if (areTypesCompatible(src, dst)) return true; } } return false; } //Small struct helper that: // - hold the converted value so a reference can be returned // - specialize to an empty pass-through if no conversion exists template <typename DataType, BaseDataType eType = getDataType<DataType>()> struct Converter { private: using Data = typename std::decay<DataType>::type; Data convertedValue{}; public: Converter(){} DataType const* convertValue(DataType const* originalData, Type const& originalDataType) const { if (willConvert(originalDataType)) { doConversion( &convertedValue, (void*)originalData, originalDataType.baseType, originalDataType.componentCount); return (DataType const*)&convertedValue; } return originalData; } bool willConvert(Type const& originalDataType) const { BaseDataType constexpr bdt = getDataType<DataType>(); return !isRawBinaryDataCompatible(originalDataType.baseType, bdt); } }; template <typename DataType> struct Converter<DataType, BaseDataType::eUnknown> { inline DataType const* convertValue(DataType const* originalData, Type const&) const { return originalData; } bool willConvert(Type const& originalDataType) const { return false; } }; } // namespace ogn } // namespace core } // namespace graph } // namespace omni
13,832
C
37.002747
157
0.683126
omniverse-code/kit/include/omni/graph/core/ogn/Bundle.h
// Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // #pragma once // ================================================================================================================= // This file contains interface classes which wrap attribute bundles in the OGN database for ease of use // // BundleContents Accessor to get at the attributes inside the bundle // BundleAttribute Access to the bundle attribute, with appropriate read/write abilities depending on port type // // WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code. // If you call them directly you may have to modify your code when they change. // ================================================================================================================= #ifdef THIS_IS_INCLUDED_IN_THE_DOCUMENTATION // The information bracketed here with begin/end describes the interface that is recommended for use with bundled // attributes. The documentation uses these markers to perform a literal include of this code into the docs so that // it can be the single source of truth. Note that the interface described here is not the complete set of C++ // functions available, merely the ones that make sense for the user to access when dealing with bundles. // begin-bundle-interface-description // A bundle can be described as an opaque collection of attributes that travel together through the graph, whose // contents and types can be introspected in order to determine how to deal with them. This section describes how // the typical node will interface with the bundle content access. 
Use of the attributes within the bundles is the // same as for the extended type attributes, described with their access methods. // // An important note regarding GPU bundles is that the bundle itself always lives on the CPU, specifying a memory // space of "GPU/CUDA" for the bundle actually means that the default location of the attributes it contains will // be on the GPU. // // The main bundle is extracted the same as any other attribute, by referencing its generated database location. // For this example the bundle will be called "color" and it will have members that could either be the set // ("r", "g", "b", "a") or the set ("c", "m", "y", "k") with the obvious implications of implied color space. // // The bundle itself has a path to which it refers; normally unnecessary to use but helpful for debugging std::cout << "The color bundle came from " << db.inputs.color.path() << std::endl; // As with other attribute types you can get an accessor to the bundle: const auto& colorBundle = db.inputs.color(); // The accessor can determine if it points to valid data const bool validColor = colorBundle.isValid(); // It can be queried for the number of attributes it holds auto bundleAttributeCount = colorBundle.size(); // It can have its contents iterated over for (const auto& bundledAttribute : colorBundle) { /* ... */ } // It can be queried for an attribute in it with a specific name auto bundledAttribute = colorBundle.attributeByName(db.tokens.red); // And on the rare occasion when it is necessary, it can access the low level IBundle interface or ABI handle of the bundle's data // to make direct ABI calls on it. (This is discouraged as it may bypass some important state updates.) const auto& bundleHandle = colorBundle.abi_bundleHandle(); // *** The rest of these methods are for output bundles only, as they change the makeup of the bundle // It can be assigned to an output bundle, which merely transfers ownership of the bundle. 
// As in all C++ it's important to make the distinction between assignment and merely obtaining a reference auto& computedColorBundle = db.outputs.computedColorBundle(); // No copy, just assignment of a reference object computedColorBundle = colorBundle; // Copy the input bundle to the output bundle // It can have its contents (i.e. attribute membership) cleared computedColorBundle.clear(); // It can insert a new bundle, without replacing its current contents (with the caveat that all attribute names // in the current and inserted bundle must be unique) computedColorBundle.insertBundle(colorBundle); // It can have a single attribute from another bundle inserted into its current list, like if you don't want // the transparency value in your output color computedColorBundle.clear(); computedColorBundle.insertAttribute(colorBundle.attributeByName(db.tokens.red)); computedColorBundle.insertAttribute(colorBundle.attributeByName(db.tokens.green)); computedColorBundle.insertAttribute(colorBundle.attributeByName(db.tokens.blue)); // It can add a brand new attribute with a specific type and name namespace og = omni::graph::core; og::Type floatType(og::BaseDataType::eFLOAT); computedColorBundle.addAttribute(db.tokens.opacity, floatType); // If you are adding an array attribute you can set its initial element count with the same call og::Type boolArrayType(og::BaseDataType::eBOOLEAN, 1, 1); computedColorBundle.addAttribute(db.tokens.bits, boolArrayType, 32); // If you want to remove an attribute from a bundle you only need its name computedColorBundle.removeAttribute(db.tokens.bits); // end-bundle-interface-description #endif #include <omni/graph/core/ogn/RuntimeAttribute.h> #include <omni/graph/core/Type.h> #include <omni/graph/core/ogn/Types.h> #include <omni/graph/core/IBundle.h> #include <omni/graph/core/ComputeGraph.h> #include <omni/graph/core/IBundleChanges.h> #include <carb/InterfaceUtils.h> namespace omni { namespace graph { namespace core { namespace ogn { // 
====================================================================== /** * Class responsible for managing the interaction with bundles of attributes. * It wraps the bundle in an interface with a more natural interaction than the raw ABI calls. * * <AttributeType> How the attribute is interpreted - input, output, or state value * <MemoryType> where the memory for the attributes in this bundle will live (CPU, GPU, or decided at runtime) * <GpuPtrType> where the pointer to array attributes in this bundle will live (CPU or GPU, for GPU data only) */ template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable> class BundleContents { //! Aliases used during overload resolution to differentiate between read-only and read-write. using roTag_t = std::true_type; using rwTag_t = std::false_type; //! The writability of a bundle will determine what kinds of operations can be performed on it static constexpr bool readOnly = (AttributeType == kOgnInput); using readOnly_t = std::conditional_t<AttributeType == kOgnInput, roTag_t, rwTag_t>; //! By defining the bundle type based on attribute type duplication of code in this class can be avoided using bundleHandle_t = std::conditional_t<readOnly, ConstBundleHandle, BundleHandle>; //! By defining the interface type based on attribute type duplication of code in this class can be avoided using bundleInterface_t = std::conditional_t<readOnly, IConstBundle2, IBundle2>; //! The handle types are not simply "X" and "const X" variations so the type has to be explicitly defined //! for writable (output/state) and non-writable (input) attribute types. using dataHandle_t = std::conditional_t<readOnly, ConstAttributeDataHandle, AttributeDataHandle>; //! 
Short form to reduce line length using runtime_t = RuntimeAttribute<AttributeType, MemoryType, GpuPtrType>; using bundleInterfacePtr = omni::core::ObjectPtr<bundleInterface_t>; bundleInterfacePtr m_bundlePtr; runtime_t m_invalid; //!< Special object representing invalid data mutable gsl::span<runtime_t> m_iterableArray; //!< Iterator wrapper // ================================================================================ // Functions supporting both read-only and writable versions, necessitated by the different function calls // and argument types used for both (i.e. you can't just do a const_cast for these). Ideally they would be // broken out into utility functions and shared everywhere. // // They are selected by calling them using a first argument of "readOnly_t()", which will use overloading to // select the proper version. (roTag_t for read-only versions, rwTag_t for writable versions) /** * Extract the interface for an attribute in the bundle with the given name. * * @param name Token representing the name of the attribute in the bundle * @return Bundle member from which attribute information can be extracted (invalid if name was not found) */ dataHandle_t extractNamedAttribute(rwTag_t, NameToken const& name) const { return m_bundlePtr->getAttributeByName(name); } dataHandle_t extractNamedAttribute(roTag_t, NameToken const& name) const { return m_bundlePtr->getConstAttributeByName(name); } /** * Get the list of attribute handles present on the bundle. 
* * @param Type representing writable (rwTag_t) or read-only (roTag_t) data * @param allHandles Pointer to array of handles that were extracted * @param count The size of the provided pointer array (in pointer count) */ void extractHandles(rwTag_t, dataHandle_t* allHandles, size_t count) const { m_bundlePtr->getAttributes(allHandles, count); } void extractHandles(roTag_t, dataHandle_t* allHandles, size_t count) const { m_bundlePtr->getConstAttributes(allHandles, count); } /** * Construct bundle interface based on provided context and bundle handle. * * @param context Evaluation context. * @param handle Bundle handle. */ bundleInterfacePtr getInterface(rwTag_t, GraphContextObj const& context, bundleHandle_t handle) const { return getBundleFactoryInterface()->getBundle(context, handle); } bundleInterfacePtr getInterface(roTag_t, GraphContextObj const& context, bundleHandle_t handle) const { return getBundleFactoryInterface()->getConstBundle(context, handle); } /** * Construct bundle interface based on provided context and bundle path. * * @param context Evaluation context. * @param path Bundle path. */ bundleInterfacePtr getInterface(rwTag_t, GraphContextObj const& context, omni::fabric::PathC path) const { auto factory = omni::core::cast<IBundleFactory2>(getBundleFactoryInterface()); return factory ? factory->getBundleFromPath(context, path) : bundleInterfacePtr{}; } bundleInterfacePtr getInterface(roTag_t, GraphContextObj const& context, omni::fabric::PathC path) const { auto factory = omni::core::cast<IBundleFactory2>(getBundleFactoryInterface()); return factory ? factory->getConstBundleFromPath(context, path) : bundleInterfacePtr{}; } /** * Get read-only or read-write handle depending on writability permissions of this interface. 
*/ bundleHandle_t getBundleHandle(rwTag_t) const { return m_bundlePtr->getHandle(); } bundleHandle_t getBundleHandle(roTag_t) const { return m_bundlePtr->getConstHandle(); } void clearAttributeCache() const { delete[] m_iterableArray.data(); m_iterableArray = gsl::span<runtime_t>{}; } void updateAttributeCache() const { // Only reallocate the bundle members if the size changed. If it didn't then the // in-place constructor will put the correct data in place. size_t newSize = attributeCount(); if (!m_iterableArray.empty() && (newSize != m_iterableArray.size())) { clearAttributeCache(); } if (m_iterableArray.empty() && (newSize > 0)) { m_iterableArray = gsl::span<runtime_t>{ new runtime_t[newSize], newSize }; } if (!m_iterableArray.empty()) { auto context = m_bundlePtr->getContext(); dataHandle_t* allHandles = reinterpret_cast<dataHandle_t*>(alloca(newSize * sizeof(dataHandle_t))); extractHandles(readOnly_t(), allHandles, newSize); for (size_t i = 0; i < newSize; ++i) { new (&m_iterableArray[i]) runtime_t(context, allHandles[i]); } } } public: // Pass through the span iterator so that this class can iterate over it transparently using iterator = typename gsl::span<runtime_t>::iterator; using reverse_iterator = typename gsl::span<runtime_t>::reverse_iterator; /** * Default constructor */ BundleContents() = default; /** * Constructor with direct initialization from context and bundle handle. */ BundleContents(GraphContextObj const& context, bundleHandle_t handle) : BundleContents() { reset(context, handle); } /** * Constructor with direct initialization from context and bundle path. 
 */
    //! Construct bundle contents by looking the bundle up at a fabric path.
    //! @param context Evaluation context to use when extracting information
    //! @param path    Fabric path at which the bundle lives
    BundleContents(GraphContextObj const& context, omni::fabric::Path path) : BundleContents()
    {
        reset(getInterface(readOnly_t{}, context, path));
    }

    /**
     * Data managed by the bundle cannot be duplicated
     */
    BundleContents(const BundleContents&) = delete;
    BundleContents& operator=(const BundleContents&) = delete;

    /**
     * Clean up any cached data
     */
    ~BundleContents()
    {
        clearAttributeCache();
    }

    /**
     * @deprecated Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!
     */
    [[deprecated("Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!")]] bundleHandle_t const
    abi_primHandle() const
    {
        return abi_bundleHandle();
    }

    /**
     * @deprecated Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!
     */
    [[deprecated("Calling abi_primHandle() is deprecated. Use abi_bundleHandle() instead!")]] bundleHandle_t
    abi_primHandle()
    {
        return abi_bundleHandle();
    }

    /**
     * Return bundle factory interface.
     */
    static IBundleFactory* getBundleFactoryInterface()
    {
        // Cached once per process; the factory interface outlives all bundle accessors.
        static omni::core::ObjectPtr<IBundleFactory> factory =
            carb::getCachedInterface<ComputeGraph>()->getBundleFactoryInterfacePtr();
        return factory.get();
    }

    /**
     * @return the raw bundle handle for use via the ABI directly (invalid handle if this wrapper is invalid)
     */
    bundleHandle_t const abi_bundleHandle() const
    {
        if (isValid())
            return getBundleHandle(readOnly_t{});
        return {};
    }

    /**
     * @return the raw bundle handle for use via the ABI directly (invalid handle if this wrapper is invalid)
     */
    bundleHandle_t abi_bundleHandle()
    {
        if (isValid())
            return getBundleHandle(readOnly_t{});
        return {};
    }

    /**
     * @return the raw bundle interface for use via the ABI directly (may be nullptr)
     */
    bundleInterface_t* abi_bundleInterface() const
    {
        return m_bundlePtr.get();
    }

    /**
     * @return true if the handle points to valid data within the fabric
     */
    bool isValid() const
    {
        return m_bundlePtr && m_bundlePtr->isValid();
    }

    /**
     * Set the bundle for evaluation. Delayed so that the contents can be created
     * early with just-in-time initialization.
     *
     * @param bundle Bundle interface this wrapper should borrow and expose
     */
    void reset(omni::core::ObjectParam<bundleInterface_t> bundle)
    {
        m_bundlePtr = omni::core::borrow(bundle.get());
        clearAttributeCache(); // any cached attribute list refers to the previously wrapped bundle
    }

    /**
     * Set the context and prim handle for evaluation. Delayed so that the contents can be created
     * early with just-in-time initialization.
     *
     * @param context Evaluation context to use when extracting information
     * @param handle Virtual prim implementing the bundle interface
     */
    void reset(GraphContextObj const& context, bundleHandle_t handle)
    {
        reset(getInterface(readOnly_t{}, context, handle));
    }

    /**
     * @deprecated Calling size() is deprecated. Use attributeCount instead!
     */
    [[deprecated("Calling size() is deprecated. Use attributeCount instead!")]] size_t size() const
    {
        return attributeCount();
    }

    /**
     * @return The number of attributes contained within the bundle if valid, 0 otherwise
     */
    size_t attributeCount() const
    {
        return isValid() ? m_bundlePtr->getAttributeCount() : 0;
    }

    /**
     * @return The number of child bundles contained within the bundle if valid, 0 otherwise
     */
    size_t childCount() const
    {
        return isValid() ? m_bundlePtr->getChildBundleCount() : 0;
    }

    /**
     * Extract the interface for an attribute in the bundle with the given name.
     *
     * @param[in] name Token representing the name of the attribute in the bundle
     * @return Bundle member from which attribute information can be extracted (invalid if name was not found)
     */
    runtime_t const attributeByName(NameToken const& name) const
    {
        // Uninitialized tokens can never name a real attribute; bail out before touching the cache.
        if ((name == omni::fabric::kUninitializedToken) or ! isValid())
        {
            return runtime_t();
        }
        updateAttributeCache();
        auto namedAttribute = extractNamedAttribute(readOnly_t(), name);
        return runtime_t(m_bundlePtr->getContext(), namedAttribute);
    }

    /**
     * Iteration interfaces that just pass through responsibility to the underlying span data.
     * Inputs call with const objects, hence the two variations of the functions.
     * Each call refreshes the attribute cache so the iterators see the current bundle contents.
     */
    iterator begin() const
    {
        updateAttributeCache();
        return m_iterableArray.begin();
    }
    iterator end() const
    {
        updateAttributeCache();
        return m_iterableArray.end();
    }
    reverse_iterator rbegin() const
    {
        updateAttributeCache();
        return m_iterableArray.rbegin();
    }
    reverse_iterator rend() const
    {
        updateAttributeCache();
        return m_iterableArray.rend();
    }
    iterator begin()
    {
        updateAttributeCache();
        return m_iterableArray.begin();
    }
    iterator end()
    {
        updateAttributeCache();
        return m_iterableArray.end();
    }
    reverse_iterator rbegin()
    {
        updateAttributeCache();
        return m_iterableArray.rbegin();
    }
    reverse_iterator rend()
    {
        updateAttributeCache();
        return m_iterableArray.rend();
    }

    /**
     * Assignment operator is only active for writable bundle contents (i.e. outputs)
     * Copies the entire input bundle onto the output.
     *
     * @param[in] toBeCopied Bundle attribute to be copied
     * @returns Reference to this bundle
     */
    template <eAttributeType AttributeTypeToCopy,
              eMemoryType MemoryTypeToCopy,
              PtrToPtrKind GpuPtrTypeToCopy = PtrToPtrKind::eNotApplicable>
    BundleContents<AttributeType, MemoryType, GpuPtrType>& operator=(
        const BundleContents<AttributeTypeToCopy, MemoryTypeToCopy, GpuPtrTypeToCopy>& toBeCopied)
    {
        static_assert(!readOnly, "Assignment is not allowed on input bundles");
        m_bundlePtr->clearContents();
        m_bundlePtr->copyBundle(toBeCopied.abi_bundleHandle());
        reset(m_bundlePtr); // re-wrap so the cached attribute list reflects the copied contents
        return *this;
    }

    /**
     * Bundle insertion is only active for writable bundle contents (i.e. outputs)
     * Adds the entire input bundle onto the output.
     *
     * @param[in] toBeInserted Bundle attribute to be inserted
     */
    template <eAttributeType AttributeTypeToInsert,
              eMemoryType MemoryTypeToInsert,
              PtrToPtrKind GpuPtrTypeToInsert = PtrToPtrKind::eNotApplicable>
    void insertBundle(const BundleContents<AttributeTypeToInsert, MemoryTypeToInsert, GpuPtrTypeToInsert>& toBeInserted)
    {
        static_assert(!readOnly, "Bundle insertion is not allowed on input bundles");
        if (! toBeInserted.isValid())
        {
            CARB_LOG_ERROR("Cannot insert an invalid bundle");
            return;
        }
        if (! isValid())
        {
            CARB_LOG_ERROR("Cannot insert into an invalid bundle");
            return;
        }
        m_bundlePtr->copyBundle(toBeInserted.abi_bundleHandle());
        reset(m_bundlePtr);
    }

    /**
     * Clear the entire bundle contents (outputs only).
     * @returns true if the underlying clear succeeded
     */
    bool clear()
    {
        static_assert(!readOnly, "Clearing of input bundles is not allowed");
        if (!isValid())
        {
            CARB_LOG_ERROR("Cannot clear an invalid bundle");
            return false;
        }
        clearAttributeCache();
        return OMNI_SUCCEEDED(m_bundlePtr->clearContents(true));
    }

    /**
     * Copy an attribute into the bundle. If no name is passed in then use the attribute's current name.
     *
     * @param[in] attributeToCopy Runtime attribute whose data is copied into this bundle
     * @param[in] newName Optional name for the copy; kUninitializedToken keeps the original name
     * @returns true if the copy produced a valid attribute handle
     */
    template <typename RuntimeAttributeType>
    bool insertAttribute(RuntimeAttributeType const& attributeToCopy,
                         NameToken newName = omni::fabric::kUninitializedToken)
    {
        static_assert(!readOnly, "Attribute insertion is not allowed on input bundles");
        if (!isValid())
        {
            CARB_LOG_ERROR("Cannot insert into an invalid bundle");
            return false;
        }
        clearAttributeCache();
        AttributeDataHandle attrib = m_bundlePtr->copyAttribute(attributeToCopy.abi_handle(), true, newName);
        return attrib.isValid();
    }

    /**
     * Create a new attribute in the bundle.
     *
     * @param[in] attributeName Name for the new attribute
     * @param[in] attributeType Base type for the attribute
     * @param[in] elementCount If an array type then this is the initial element count
     * @return Runtime attribute wrapper for the newly created attribute
     */
    runtime_t addAttribute(NameToken const& attributeName, Type const& attributeType, size_t elementCount = 0)
    {
        static_assert(!readOnly, "Attribute addition is not allowed on input bundles");
        if (! isValid())
        {
            CARB_LOG_ERROR("Cannot add to an invalid bundle");
            return runtime_t();
        }
        clearAttributeCache();
        auto attribHandle = m_bundlePtr->createAttribute(attributeName, attributeType, elementCount);
        return runtime_t(m_bundlePtr->getContext(), attribHandle);
    }

    /**
     * Add a batch of attributes to a bundle prim.
     *
     * @param[in] attributeCount Number of attributes to be added
     * @param[in] attributeNames Array of names for the new attributes
     * @param[in] attributeTypes Array of types for the new attributes
     * @return Whether addition was successful (all requested attributes were created)
     */
    bool addAttributes(size_t attributeCount, NameToken const* attributeNames, Type const* attributeTypes)
    {
        static_assert(!readOnly, "Attribute addition is not allowed on input bundles");
        if (! isValid())
        {
            CARB_LOG_ERROR("Cannot add attributes to an invalid bundle");
            return false;
        }
        clearAttributeCache();
        size_t createdCount = 0;
        auto result = m_bundlePtr->createAttributes(attributeNames, attributeTypes, attributeCount,
                                                    nullptr /*elementCount*/, nullptr /*createdAttributes*/,
                                                    &createdCount);
        if (OMNI_FAILED(result))
            return false;
        // Success only if every requested attribute was actually created.
        return attributeCount == createdCount;
    }

    /**
     * Add a batch of child bundles to this bundle.
     *
     * @param childCount Number of children to be added
     * @param childNames Array of names for the new children
     * @param childHandles Output handles of child bundles, 'nullptr' can be passed if no output is required
     * @return Whether addition was successful (all requested children were created)
     */
    bool addChildBundles(size_t childCount, NameToken const* childNames, BundleHandle* childHandles = nullptr)
    {
        static_assert(!readOnly, "Attribute addition is not allowed on input bundles");
        if (!isValid())
        {
            CARB_LOG_ERROR("Cannot add children to an invalid bundle");
            return false;
        }
        size_t createdCount = 0;
        auto result = m_bundlePtr->createChildBundles(childNames, childCount, childHandles, &createdCount);
        if (OMNI_FAILED(result))
            return false;
        return childCount == createdCount;
    }

    /**
     * Remove an existing attribute from the bundle.
     * Silently succeeds if an attribute with the given name did not exist on the bundle
     *
     * @param[in] attributeName Name of the attribute to remove
     */
    bool removeAttribute(NameToken const& attributeName)
    {
        return removeAttributes(1, &attributeName);
    }

    /**
     * Remove a batch of attributes from a bundle prim.
     *
     * @param[in] attributeCount Number of attributes to be removed
     * @param[in] attributeNames Array of names to be removed
     * @return Whether removal was successful (all named attributes were removed)
     */
    bool removeAttributes(size_t attributeCount, NameToken const* attributeNames)
    {
        static_assert(!readOnly, "Attribute removal is not allowed on input bundles");
        if (!isValid())
        {
            // NOTE(review): message says "to an invalid bundle"; "from an invalid bundle" is likely intended
            CARB_LOG_ERROR("Cannot remove attributes to an invalid bundle");
            return false;
        }
        clearAttributeCache();
        size_t removedCount = 0;
        auto result = m_bundlePtr->removeAttributesByName(attributeNames, attributeCount, &removedCount);
        if (OMNI_FAILED(result))
            return false;
        return removedCount == attributeCount;
    }
};

//! ======================================================================
//! @class BundleChanges
//! @brief This class is designed for inspecting modifications within a bundle during its lifetime.
//!
//! The BundleChanges class enables the inspection of changes in a bundle's attributes and child bundles
//! during the lifetime of the BundleChanges instance. It keeps a record of modifications that have occurred,
//! providing a suite of functionalities to inspect these changes.
//!
//! An integral feature of BundleChanges is its automatic clearing of changes upon destruction,
//! i.e., when the instance goes out of scope. This ties the lifetime of the recorded changes tightly
//! with the BundleChanges instance, ensuring the changes do not persist beyond the intended scope.
template <bool readOnly> class BundleChanges { using BundleHandle_t = std::conditional_t<readOnly, ConstBundleHandle, BundleHandle>; public: BundleChanges(omni::core::ObjectPtr<IBundleChanges> const& changes, BundleHandle_t handle, bool clearAtExit = true) : m_bundleChanges(changes), m_bundleHandle(handle), m_clearAtExit(clearAtExit) { CARB_ASSERT(m_bundleChanges); } BundleChanges(BundleChanges const&) = delete; BundleChanges(BundleChanges&&) = default; BundleChanges& operator=(BundleChanges const&) = delete; BundleChanges& operator=(BundleChanges&&) = default; ~BundleChanges() { if (m_clearAtExit) { clearChanges(); } } //! @brief Activates the change tracking system for a bundle. //! //! This method controls the change tracking system of a bundle. It's only applicable //! for read-write bundles (when readOnly template parameter is false). For read-only //! bundles, this method will cause a compilation error if called. //! //! @throws A static_assert error at compile-time if the method is called on a //! read-only bundle. void activate() noexcept { static_assert(!readOnly, "Can't activate change tracking for read-only bundle."); CARB_ASSERT(m_bundleChanges); m_bundleChanges->activateChangeTracking(m_bundleHandle); } //! @brief Deactivates the change tracking system for a bundle. //! //! This method controls the change tracking system of a bundle. It's only applicable //! for read-write bundles (when readOnly template parameter is false). For read-only //! bundles, this method will cause a compilation error if called. //! //! @throws A static_assert error at compile-time if the method is called on a //! read-only bundle. void deactivate() noexcept { static_assert(!readOnly, "Can't activate change tracking for read-only bundle."); CARB_ASSERT(m_bundleChanges); m_bundleChanges->deactivateChangeTracking(m_bundleHandle); } //! @brief Implicit conversion to bool. //! //! This operator allows an instance of BundleChanges to be automatically converted to a bool. //! 
The boolean value indicates whether the bundle has undergone any changes within its lifetime. //! It leverages the hasChanged() method to provide this information. //! //! @returns True if the bundle has changed; false otherwise. operator bool() noexcept { return hasChanged(); } //! @brief Clears the recorded changes. //! //! This method is used to manually clear the recorded changes of the bundle. omni::core::Result clearChanges() noexcept { CARB_ASSERT(m_bundleChanges); return m_bundleChanges->clearChanges(); } //! @brief Checks if the bundle has changed. //! //! This method is used to check if any changes have been made to the bundle's attributes or child bundles //! within the lifetime of the BundleChanges instance. //! //! @returns True if the bundle has changed; false otherwise. bool hasChanged() noexcept { CARB_ASSERT(m_bundleChanges); return m_bundleChanges->getChange(m_bundleHandle) != BundleChangeType::None; } //! @brief Retrieves the change status of a specific attribute. //! //! This method is used to check if a specific attribute of the bundle has been modified //! within the lifetime of the BundleChanges instance. //! //! @param attribute The specific attribute of the bundle to check for modifications. //! //! @returns True if the specified attribute has changed; false otherwise. template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType> BundleChangeType getChange(RuntimeAttribute<AttributeType, MemoryType, GpuPtrType> const& attribute) noexcept { CARB_ASSERT(m_bundleChanges); auto const handle = attribute.abi_handle(); return m_bundleChanges->getChange(handle); } //! @brief Retrieves the change status of a specific bundle. //! //! This method is used to check if a specific bundle or its contents have been modified //! within the lifetime of the BundleChanges instance. //! //! @param bundle The specific bundle to check for modifications. //! //! 
@returns A BundleChangeType value indicating the type of change (if any) that has occurred to the specified //! bundle. template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType> BundleChangeType getChange(BundleContents<AttributeType, MemoryType, GpuPtrType> const& bundle) noexcept { CARB_ASSERT(m_bundleChanges); auto const handle = bundle.abi_bundleHandle(); return m_bundleChanges->getChange(handle); } //! @brief Retrieves the change status of a specific bundle or attribute using its handle. //! //! This function is used to check if a specific bundle or attribute, identified by its handle, has been modified //! within the lifetime of the BundleChanges instance. //! //! @tparam HANDLE_TYPE The type of the handle (ConstBundleHandle or ConstAttributeDataHandle). //! @param handle The handle to the specific bundle or attribute to check for modifications. //! //! @returns A BundleChangeType value indicating the type of change (if any) that has occurred to the bundle or //! attribute associated with the specified handle. template <typename HANDLE_TYPE> BundleChangeType abi_getChange(HANDLE_TYPE const& handle) noexcept { constexpr auto isBundle = std::is_same<HANDLE_TYPE, ConstBundleHandle>::value || std::is_same<HANDLE_TYPE, BundleHandle>::value; constexpr auto isAttrib = std::is_same<HANDLE_TYPE, ConstAttributeDataHandle>::value || std::is_same<HANDLE_TYPE, AttributeDataHandle>::value; static_assert(isBundle || isAttrib, "Unsupported handle type for abi_getChange!"); CARB_ASSERT(m_bundleChanges); return m_bundleChanges->getChange(handle); } private: omni::core::ObjectPtr<IBundleChanges> m_bundleChanges; BundleHandle_t m_bundleHandle; bool m_clearAtExit; }; // ====================================================================== /** * Template class responsible for managing the interaction with bundle type input attributes. * It wraps the bundle in an interface with a more natural interaction than the raw ABI calls. 
 *
 * <AttributeType> How the attribute is interpreted - input, output, or state value
 * <MemoryType> where the memory for the attributes in this bundle will live (CPU, GPU, or decided at runtime)
 */
template <eAttributeType AttributeType, eMemoryType MemoryType, PtrToPtrKind GpuPtrType = PtrToPtrKind::eNotApplicable>
struct BundleAttribute
{
    // Friend with other instantiations of this template so operator= can reach their private bundleHandle()
    template <eAttributeType, eMemoryType, PtrToPtrKind>
    friend struct BundleAttribute;

private :
    //! Aliases used during overload resolution to differentiate between read-only and read-write.
    using roTag_t = std::true_type;
    using rwTag_t = std::false_type;

    // The writability of a bundle will determine what kinds of operations can be performed on it
    static constexpr bool readOnly = (AttributeType == kOgnInput);
    using readOnly_t = std::conditional_t<AttributeType == kOgnInput, roTag_t, rwTag_t>;
    using bundleHandle_t = std::conditional_t<readOnly, ConstBundleHandle, BundleHandle>;

    /**
     * Construct bundle interface based on provided context and bundle handle.
     * Read-write overload, selected by the rwTag_t dispatch tag.
     *
     * @param context Evaluation context.
     * @param handle Bundle handle.
     */
    auto getInterface(rwTag_t, GraphContextObj const& context, bundleHandle_t handle) const
    {
        auto iComputeGraph = carb::getCachedInterface<ComputeGraph>();
        auto factory = iComputeGraph->getBundleFactoryInterfacePtr();
        return factory->getBundle(context, handle);
    }

    /**
     * Construct bundle interface based on provided context and bundle handle.
     * Read-only overload, selected by the roTag_t dispatch tag.
     *
     * @param context Evaluation context.
     * @param handle Bundle handle.
     */
    auto getInterface(roTag_t, GraphContextObj const& context, bundleHandle_t handle) const
    {
        auto iComputeGraph = carb::getCachedInterface<ComputeGraph>();
        auto factory = iComputeGraph->getBundleFactoryInterfacePtr();
        return factory->getConstBundle(context, handle);
    }

    /**
     * Bundle attributes always live on the CPU since they are always small, containing only a single value through
     * which their contents can be referenced. The memory type is passed down though, to provide the appropriate
     * interfaces to the attributes within the bundle.
     *
     * @param[in] index In vectorized context, the instance index to access
     * @return The corresponding bundle handle (invalid handle when the vectorized data is not attached)
     */
    bundleHandle_t bundleHandle(size_t index = 0) const
    {
        // m_bundleHandleArrayPtr is a per-instance array of handle pointers; m_offset selects this
        // attribute's slot within the vectorized batch.
        bundleHandle_t* rel = m_bundleHandleArrayPtr ? m_bundleHandleArrayPtr[m_offset + index] : nullptr;
        // TODO: multiple input rel
        return rel ? rel[0] : bundleHandle_t{ bundleHandle_t::invalidValue() };
    }

    // --------------------------------------------------------------------------------------------------------------
    //! Data members

    //! Pointer to the vectorized set of data
    bundleHandle_t** m_bundleHandleArrayPtr{ nullptr };

    //! ABI OmniGraph context object
    const GraphContextObj* m_context{ nullptr };

    //! Interface to the bundle data, constructed on demand
    BundleContents<AttributeType, MemoryType, GpuPtrType> m_bundle;

    //! In vectorized context, offset at which we should read our handle (reference: tracks the owner's value)
    size_t const& m_offset;

    //! Top level bundle change tracking
    omni::core::ObjectPtr<IBundleChanges> m_bundleChanges;

public:
    // --------------------------------------------------------------------------------------------------------------
    /**
     * Set up the accessor for output attributes with Bundle data
     *
     * @param[in] offset Reference to the vectorization offset; must outlive this accessor
     */
    BundleAttribute(size_t const& offset) : m_offset(offset)
    {
    }

    /**
     * Bundle attributes always live on the CPU since they are always small, containing only a single value through
     * which their contents can be referenced. The memory type is passed down though, to provide the appropriate
     * interfaces to the attributes within the bundle.
     *
     * @param[in] index In vectorized context, the instance index to access
     * @return Reference to the raw fabric data.
     */
    BundleContents<AttributeType, MemoryType, GpuPtrType>& operator()(size_t index = 0)
    {
        CARB_ASSERT(context());
        // Re-point the shared BundleContents wrapper at the requested instance each call.
        m_bundle.reset(*context(), bundleHandle(index));
        return m_bundle;
    }

    /**
     * Set the evaluation context for the attribute to allow later access. The data isn't available at construction
     * time so this method is provided to add it in when it becomes available.
     *
     * @param[in] contextObj OmniGraph context object to which this attribute belongs
     */
    void setContext(const GraphContextObj& contextObj)
    {
        m_context = &contextObj;
        m_bundleChanges.release(); // change tracker was bound to the previous context
    }

    /**
     * Set the attribute handle for input bundles
     *
     * @param[in] handle Handle to the attribute to which the bundle belongs
     */
    void setHandle(ConstAttributeDataHandle handle)
    {
        // Fetch the vectorized array of bundle-handle pointers directly from the fabric data.
        m_context->iAttributeData->getDataR((const void**)&m_bundleHandleArrayPtr, *m_context, &handle, 1);
        m_bundleChanges.release();
    }

    /**
     * Retrieve the context object
     *
     * @return Pointer to the context set via setContext(), nullptr before that
     */
    GraphContextObj const* context() const
    {
        return m_context;
    }

    /**
     * @param[in] index In vectorized context, the instance index to access
     * @return The path to the bundle data (nullptr when the bundle interface cannot be obtained)
     */
    char const* path(size_t index = 0) const
    {
        auto bundlePtr = getInterface(readOnly_t{}, *context(), bundleHandle(index));
        if (!bundlePtr)
        {
            return nullptr;
        }
        omni::fabric::PathC path = bundlePtr->getPath();
        return carb::getCachedInterface<omni::fabric::IPath>()->getText(path);
    }

    /**
     * @param[in] index In vectorized context, the instance index to access
     * @return True if the underlying attribute data is valid for accessing
     */
    bool isValid(size_t index = 0) const
    {
        return bundleHandle(index).isValid();
    }

    /**
     * Assignment operator is only active for writable bundle contents (i.e. outputs).
     * At this level if the data is being stolen it redirects the output to point to the input, otherwise it
     * copies the entire bundle.
     *
     * @note Any accessors to this bundle (operator()) must be called after the assignment or they will be invalid
     *
     * @param[in] toBeCopied Bundle attribute from which this one will be redirected or copied
     * @returns Reference to this bundle
     */
    template <eAttributeType AttributeTypeToCopy,
              eMemoryType MemoryTypeToCopy,
              PtrToPtrKind GpuPtrTypeToCopy = PtrToPtrKind::eNotApplicable>
    BundleAttribute<AttributeType, MemoryType, GpuPtrType>& operator=(
        const BundleAttribute<AttributeTypeToCopy, MemoryTypeToCopy, GpuPtrTypeToCopy>& toBeCopied)
    {
        static_assert(!readOnly, "Assignment is not allowed on input bundles");
        if (!context() || !toBeCopied.isValid())
        {
            CARB_LOG_ERROR_ONCE("Could not assign to or from invalid bundle attribute");
            return *this;
        }
        auto bundlePtr = getInterface(readOnly_t{}, *context(), bundleHandle());
        if (bundlePtr)
        {
            bundlePtr->clearContents(true);
            bundlePtr->copyBundle(toBeCopied.bundleHandle());
        }
        return *this;
    }

    //! @brief Retrieves the `BundleChanges` object.
    //!
    //! The `changes` function returns the `BundleChanges` object associated with requested instance.
    //! It allows access to the bundle change tracking.
    //!
    //! @param instanceIndex In vectorized context, the instance index to access
    //! @param clearAtExit   Whether the returned tracker clears recorded changes on destruction
    //! @return Returns the `BundleChanges` object associated with requested instance.
    BundleChanges<readOnly> changes(size_t instanceIndex = 0, bool clearAtExit = true) noexcept
    {
        CARB_ASSERT(context());
        if (!m_bundleChanges)
        {
            // Lazily acquire the change-tracking interface; it is reset whenever the context changes.
            m_bundleChanges = carb::getCachedInterface<ComputeGraph>()->getBundleChangesInterfacePtr(*context());
        }
        return { m_bundleChanges, bundleHandle(instanceIndex), clearAtExit };
    }
};

} // namespace ogn
} // namespace core
} // namespace graph
} // namespace omni
41,258
C
38.182336
161
0.669228