// File: omniverse-code/kit/include/omni/graph/exec/unstable/SmallVector.h
// Copied from USD repository: https://github.com/PixarAnimationStudios/USD
//
// Copyright 2019 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#pragma once
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Contains parts of the small vector implementation that do not depend on
//! *all* of SmallVector's template parameters.
class SmallVectorBase
{
public:
//! Size type
using size_type = std::uint32_t;
//! Difference type
using difference_type = std::uint32_t;
//! Returns the local capacity that may be used without increasing the size
//! of the SmallVector. SmallVector<T, N> will never use more local
//! capacity than is specified by N but clients that wish to maximize local
//! occupancy in a generic way can compute N using this function.
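//!
//! For illustration, a hypothetical way to pick N generically:
//!
//! @code
//! constexpr auto n = SmallVectorBase::ComputeSerendipitousLocalCapacity<float>();
//! SmallVector<float, n> v; // uses exactly the serendipitous local capacity
//! @endcode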
template <typename U>
static constexpr size_type ComputeSerendipitousLocalCapacity()
{
return (alignof(U) <= alignof(_Data<U, 0>)) ? sizeof(_Data<U, 0>) / sizeof(U) : 0;
}
protected:
//! Invokes std::uninitialized_copy so that entries are either moved or
//! copied, depending on whether the type is move constructible or not.
template <typename Iterator>
static Iterator _UninitializedMove(Iterator first, Iterator last, Iterator dest)
{
return std::uninitialized_copy(std::make_move_iterator(first), std::make_move_iterator(last), dest);
}
//! Invokes either the move or copy constructor (via placement new),
//! depending on whether U is move constructible or not.
template <typename U>
static void _MoveConstruct(U* p, U* src)
{
new (p) U(std::move(*src));
}
#ifndef DOXYGEN_BUILD
//! The data storage, which is a union of both the local storage, as well
//! as a pointer, holding the address to the remote storage on the heap, if
//! used.
template <typename U, size_type M>
union _Data
{
public:
//! Returns raw pointer to local storage of type @c U
U* GetLocalStorage()
{
return reinterpret_cast<U*>(_local);
}
//! Returns const raw pointer to local storage of type @c U
const U* GetLocalStorage() const
{
return reinterpret_cast<const U*>(_local);
}
//! Returns raw pointer to remote storage of type @c U
U* GetRemoteStorage()
{
return _remote;
}
//! Returns const raw pointer to remote storage of type @c U
const U* GetRemoteStorage() const
{
return _remote;
}
//! Sets remote storage to @p p
void SetRemoteStorage(U* p)
{
_remote = p;
}
private:
alignas(U) char _local[sizeof(U) * M];
U* _remote;
};
//! For N == 0 the _Data class has been specialized to elide the local
//! storage completely. This way we don't have to rely on compiler-specific
//! support for 0-sized arrays.
template <typename U>
union _Data<U, 0>
{
public:
//! Specialization for 0-sized local storage. Returns nullptr.
U* GetLocalStorage()
{
// XXX: Could assert here. Introduce dependency on tf/diagnostic.h?
return nullptr;
}
//! Specialization for 0-sized local storage. Returns nullptr.
const U* GetLocalStorage() const
{
// XXX: Could assert here. Introduce dependency on tf/diagnostic.h?
return nullptr;
}
//! Returns raw pointer to remote storage of type @c U
U* GetRemoteStorage()
{
return _remote;
}
//! Returns const raw pointer to remote storage of type @c U
const U* GetRemoteStorage() const
{
return _remote;
}
//! Sets remote storage to @p p
void SetRemoteStorage(U* p)
{
_remote = p;
}
private:
U* _remote;
};
#endif // DOXYGEN_BUILD
};
//!
//! \class SmallVector
//!
//! This is a small-vector class with local storage optimization, the local
//! storage can be specified via a template parameter, and expresses the
//! number of entries the container can store locally.
//!
//! In addition to the local storage optimization, this vector is also
//! optimized for storing a smaller number of entries on the heap: It features
//! a reduced memory footprint (minimum 16 bytes) by limiting max_size() to
//! 2^32, which should still be more than enough for most use cases where a
//! small-vector is advantageous.
//!
//! SmallVector mimics the std::vector API, and can thus be easily used as a
//! drop-in replacement where appropriate. Note, however, that not all the
//! methods on std::vector are implemented here, and that SmallVector may
//! have methods in addition to those that you would find on std::vector.
//!
//! Note that a SmallVector that has grown beyond its local storage will
//! NOT move its entries back into the local storage once it shrinks back to N.
//!
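//! A short illustrative example:
//!
//! @code
//! SmallVector<int, 4> v{ 1, 2, 3 };
//! v.push_back(4); // still fits in the local storage
//! v.push_back(5); // exceeds N == 4, so entries move to heap (remote) storage
//! @endcode
//!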
template <typename T, std::size_t N>
class SmallVector : public SmallVectorBase
{
public:
//! @{
//! Relevant type definitions
using value_type = T;
//! Relevant type definitions
using reference = T&;
//! Relevant type definitions
using const_reference = const T&;
//! @}
//! @{
//! Iterator Support
using iterator = T*;
//! Iterator Support
using const_iterator = const T*;
//! Iterator Support
using reverse_iterator = std::reverse_iterator<iterator>;
//! Iterator Support
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
//! @}
//! Default constructor.
//!
SmallVector() : _size(0), _capacity(N)
{
}
//! Construct a vector holding \p n value-initialized elements.
//!
explicit SmallVector(size_type n) : _capacity(N)
{
_InitStorage(n);
value_type* d = data();
for (size_type i = 0; i < n; ++i)
{
new (d + i) value_type();
}
}
//! Construct a vector holding \p n copies of \p v.
//!
SmallVector(size_type n, const value_type& v) : _capacity(N)
{
_InitStorage(n);
std::uninitialized_fill_n(data(), n, v);
}
//! Enum to disambiguate constructors
enum DefaultInitTag
{
DefaultInit
};
//! Construct a vector holding \p n default-initialized elements.
//!
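//! For trivial types this skips zero-initialization, e.g. (illustrative):
//!
//! @code
//! SmallVector<int, 8> buf(1024, SmallVector<int, 8>::DefaultInit); // values are indeterminate
//! @endcode
//!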
SmallVector(size_type n, DefaultInitTag) : _capacity(N)
{
_InitStorage(n);
value_type* d = data();
for (size_type i = 0; i < n; ++i)
{
new (d + i) value_type;
}
}
//! Copy constructor.
//!
SmallVector(const SmallVector& rhs) : _capacity(N)
{
_InitStorage(rhs.size());
std::uninitialized_copy(rhs.begin(), rhs.end(), begin());
}
//! Move constructor.
//!
SmallVector(SmallVector&& rhs) : _size(0), _capacity(N)
{
// If rhs can not be stored locally, take rhs's remote storage and
// reset rhs to empty.
if (rhs.size() > N)
{
_data.SetRemoteStorage(rhs._data.GetRemoteStorage());
std::swap(_capacity, rhs._capacity);
}
// If rhs is stored locally, it's faster to simply move the entries
// into this vector's storage, destruct the entries at rhs, and swap
// sizes. Note that capacities will be the same in this case, so no
// need to swap those.
else
{
_UninitializedMove(rhs.begin(), rhs.end(), begin());
rhs._Destruct();
}
std::swap(_size, rhs._size);
}
//! Construct a new vector from initializer list
SmallVector(std::initializer_list<T> values) : SmallVector(values.begin(), values.end())
{
}
//! Compile-time check to enable a method only when a forward iterator is available
template <typename _ForwardIterator>
using _EnableIfForwardIterator =
typename std::enable_if<std::is_convertible<typename std::iterator_traits<_ForwardIterator>::iterator_category,
std::forward_iterator_tag>::value>::type;
//! Creates a new vector containing copies of the data between
//! \p first and \p last.
template <typename ForwardIterator, typename = _EnableIfForwardIterator<ForwardIterator>>
SmallVector(ForwardIterator first, ForwardIterator last) : _capacity(N)
{
_InitStorage(static_cast<difference_type>(std::distance(first, last)));
std::uninitialized_copy(first, last, begin());
}
//! Destructor.
//!
~SmallVector()
{
_Destruct();
_FreeStorage();
}
//! Assignment operator.
//!
SmallVector& operator=(const SmallVector& rhs)
{
if (this != &rhs)
{
assign(rhs.begin(), rhs.end());
}
return *this;
}
//! Move assignment operator.
//!
SmallVector& operator=(SmallVector&& rhs)
{
if (this != &rhs)
{
swap(rhs);
}
return *this;
}
//! Replace existing contents with the contents of \p ilist.
//!
SmallVector& operator=(std::initializer_list<T> ilist)
{
assign(ilist.begin(), ilist.end());
return *this;
}
//! Swap two vector instances.
//!
void swap(SmallVector& rhs)
{
// Both this vector and rhs are stored locally.
if (_IsLocal() && rhs._IsLocal())
{
SmallVector* smaller = size() < rhs.size() ? this : &rhs;
SmallVector* larger = size() < rhs.size() ? &rhs : this;
// Swap all the entries up to the size of the smaller vector.
std::swap_ranges(smaller->begin(), smaller->end(), larger->begin());
// Move the tail end of the entries, and destruct them at the
// source vector.
for (size_type i = smaller->size(); i < larger->size(); ++i)
{
_MoveConstruct(smaller->data() + i, &(*larger)[i]);
(*larger)[i].~value_type();
}
// Swap sizes. Capacities are already equal in this case.
std::swap(smaller->_size, larger->_size);
}
// Both this vector and rhs are stored remotely. Simply swap the
// pointers, as well as size and capacity.
else if (!_IsLocal() && !rhs._IsLocal())
{
value_type* tmp = _data.GetRemoteStorage();
_data.SetRemoteStorage(rhs._data.GetRemoteStorage());
rhs._data.SetRemoteStorage(tmp);
std::swap(_size, rhs._size);
std::swap(_capacity, rhs._capacity);
}
// Either this vector or rhs is stored remotely, whereas the other
// one is stored locally.
else
{
SmallVector* remote = _IsLocal() ? &rhs : this;
SmallVector* local = _IsLocal() ? this : &rhs;
// Get a pointer to the remote storage. We'll be overwriting the
// pointer value below, so gotta retain it first.
value_type* remoteStorage = remote->_GetStorage();
// Move all the entries from the vector with the local storage, to
// the other vector's local storage. This will overwrite the pointer
// to the other vectors remote storage. Note that we will have to
// also destruct the elements at the source's local storage. The
// source will become the one with the remote storage, so those
// entries will be essentially freed.
for (size_type i = 0; i < local->size(); ++i)
{
_MoveConstruct(remote->_data.GetLocalStorage() + i, &(*local)[i]);
(*local)[i].~value_type();
}
// Swap the remote storage into the vector which previously had the
// local storage. It's been properly cleaned up now.
local->_data.SetRemoteStorage(remoteStorage);
// Swap sizes and capacities. Easy peasy.
std::swap(remote->_size, local->_size);
std::swap(remote->_capacity, local->_capacity);
}
}
//! Insert an rvalue-reference entry at the given iterator position.
//!
iterator insert(const_iterator it, value_type&& v)
{
return _Insert(it, std::move(v));
}
//! Insert an entry at the given iterator.
//!
iterator insert(const_iterator it, const value_type& v)
{
return _Insert(it, v);
}
//! Erase an entry at the given iterator.
//!
iterator erase(const_iterator it)
{
return erase(it, it + 1);
}
//! Erase entries between [ \p first, \p last ) from the vector.
//!
iterator erase(const_iterator it, const_iterator last)
{
value_type* p = const_cast<value_type*>(&*it);
value_type* q = const_cast<value_type*>(&*last);
// If we're not removing anything, bail out.
if (p == q)
{
return iterator(p);
}
const difference_type num = static_cast<difference_type>(std::distance(p, q));
// Move the entries starting at last down a few slots, to start at it.
value_type* e = data() + size();
std::move(q, e, p);
// Destruct all the freed up slots at the end of the vector.
for (value_type* i = (e - num); i != e; ++i)
{
i->~value_type();
}
// Bump down the size.
_size -= num;
// Return an iterator to the next entry.
return iterator(p);
}
//! Reserve storage for \p newCapacity entries.
//!
void reserve(size_type newCapacity)
{
// Only reserve storage if the new capacity would grow past the local
// storage, or the currently allocated storage. We'll grow to
// accommodate exactly newCapacity entries.
if (newCapacity > capacity())
{
_GrowStorage(newCapacity);
}
}
//! Resize the vector to \p newSize and insert copies of \p v.
//!
void resize(size_type newSize, const value_type& v = value_type())
{
// If the new size is smaller than the current size, let go of some
// entries at the tail.
if (newSize < size())
{
erase(const_iterator(data() + newSize), const_iterator(data() + size()));
}
// Otherwise, lets grow and fill: Reserve some storage, fill the tail
// end with copies of v, and update the new size.
else if (newSize > size())
{
reserve(newSize);
std::uninitialized_fill(data() + size(), data() + newSize, v);
_size = newSize;
}
}
//! Clear the entries in the vector. Does not let go of the underpinning
//! storage.
//!
void clear()
{
_Destruct();
_size = 0;
}
//! Clears any previously held entries, and copies entries between
//! [ \p first, \p last ) to this vector.
//!
template <typename ForwardIterator, typename = _EnableIfForwardIterator<ForwardIterator>>
void assign(ForwardIterator first, ForwardIterator last)
{
clear();
const difference_type newSize = static_cast<difference_type>(std::distance(first, last));
reserve(newSize);
std::uninitialized_copy(first, last, begin());
_size = newSize;
}
//! Replace existing contents with the contents of \p ilist.
//!
void assign(std::initializer_list<T> ilist)
{
assign(ilist.begin(), ilist.end());
}
//! Emplace an entry at the back of the vector.
//!
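//! Illustrative example (assumes \c <utility> is included for std::pair):
//!
//! @code
//! SmallVector<std::pair<int, int>, 2> v;
//! v.emplace_back(1, 2); // constructs the pair in place, no temporary
//! @endcode
//!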
template <typename... Args>
void emplace_back(Args&&... args)
{
if (size() == capacity())
{
_GrowStorage(_NextCapacity());
}
new (data() + size()) value_type(std::forward<Args>(args)...);
_size += 1;
}
//! Copy an entry to the back of the vector.
//!
void push_back(const value_type& v)
{
emplace_back(v);
}
//! Move an entry to the back of the vector.
//!
void push_back(value_type&& v)
{
emplace_back(std::move(v));
}
//! Copy the range denoted by [\p first, \p last) into this vector
//! before \p pos.
//!
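//! Illustrative example:
//!
//! @code
//! SmallVector<int, 4> v{ 1, 4 };
//! const int more[] = { 2, 3 };
//! v.insert(v.begin() + 1, std::begin(more), std::end(more)); // v == { 1, 2, 3, 4 }
//! @endcode
//!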
template <typename ForwardIterator>
void insert(iterator pos, ForwardIterator first, ForwardIterator last)
{
static_assert(std::is_convertible<typename std::iterator_traits<ForwardIterator>::iterator_category,
std::forward_iterator_tag>::value,
"Input Iterators not supported.");
// Check for the insert-at-end special case as the very first thing so
// that we give the compiler the best possible opportunity to
// eliminate the general case code.
const bool insertAtEnd = pos == end();
const difference_type numNewElems = (difference_type)std::distance(first, last);
const size_type neededCapacity = size() + numNewElems;
const size_type nextCapacity = std::max(_NextCapacity(), neededCapacity);
// Insertions at the end would be handled correctly by the code below
// without this special case. However, insert(end(), f, l) is an
// extremely common operation so we provide this fast path both to
// avoid unneeded work and to make it easier for the compiler to
// eliminate dead code when pos == end().
if (insertAtEnd)
{
// The reallocation here is not a simple reserve. We want to grow
// the storage only when there are too many new elements but the
// desired size is based on the growth factor.
if (neededCapacity > capacity())
{
_GrowStorage(nextCapacity);
}
std::uninitialized_copy(first, last, end());
_size += numNewElems;
return;
}
if (neededCapacity > capacity())
{
// Because we need to realloc, we can do the insertion by copying
// each range, [begin(), pos), [first, last), [pos, end()), into
// the new storage.
const size_type posI = (size_type)std::distance(begin(), pos);
value_type* newStorage = _Allocate(nextCapacity);
iterator newPrefixBegin = iterator(newStorage);
iterator newPos = newPrefixBegin + posI;
iterator newSuffixBegin = newPos + numNewElems;
_UninitializedMove(begin(), pos, newPrefixBegin);
std::uninitialized_copy(first, last, newPos);
_UninitializedMove(pos, end(), newSuffixBegin);
// Destroy old data and set up this new buffer.
_Destruct();
_FreeStorage();
_data.SetRemoteStorage(newStorage);
_capacity = nextCapacity;
}
else
{
// Insert in-place requires handling four ranges.
//
// For both the range-to-move [pos, end()) and the range-to-insert
// [first, last), there are two subranges: the subrange to copy
// and the subrange to uninitialized_copy. Note that only three of
// these ranges may be non-empty: either there is a non-empty
// prefix of [pos, end()) that needs to be copied over existing
// elements or there is a non-empty suffix of [first, last) that
// needs to be placed in uninitialized storage.
const difference_type numMoveElems = (difference_type)std::distance(pos, end());
const difference_type numUninitMoves = (difference_type)std::min(numNewElems, numMoveElems);
const difference_type numInitMoves = numMoveElems - numUninitMoves;
const difference_type numUninitNews = numNewElems - numUninitMoves;
const difference_type numInitNews = numNewElems - numUninitNews;
// Move our existing elements out of the way of new elements.
iterator umSrc = pos + numInitMoves;
iterator umDst = end() + numUninitNews;
_UninitializedMove(umSrc, end(), umDst);
std::copy_backward(pos, umSrc, umDst);
// Copy new elements into place.
for (difference_type i = 0; i < numInitNews; ++i, ++first, ++pos)
{
*pos = *first;
}
std::uninitialized_copy(first, last, end());
}
_size += numNewElems;
}
//! Insert elements from \p ilist starting at position \p pos.
//!
void insert(iterator pos, std::initializer_list<T> ilist)
{
insert(pos, ilist.begin(), ilist.end());
}
//! Remove the entry at the back of the vector.
//!
void pop_back()
{
back().~value_type();
_size -= 1;
}
//! Returns the current size of the vector.
//!
size_type size() const
{
return _size;
}
//! Returns the maximum size of this vector.
//!
static constexpr size_type max_size()
{
return std::numeric_limits<size_type>::max();
}
//! Returns \c true if this vector is empty.
//!
bool empty() const
{
return size() == 0;
}
//! Returns the current capacity of this vector. Note that if the returned
//! value is <= N, it does NOT mean the storage is local. A vector that has
//! previously grown beyond its local storage will not move entries back to
//! the local storage once it shrinks to N.
//!
size_type capacity() const
{
return _capacity;
}
//! Returns the local storage capacity. The vector uses its local storage
//! if capacity() <= internal_capacity().
//! This method mimics the boost::container::small_vector interface.
//!
static constexpr size_type internal_capacity()
{
return N;
}
//! Returns an iterator to the beginning of the vector.
//! @{
iterator begin()
{
return iterator(_GetStorage());
}
//! Returns an iterator to the beginning of the vector.
const_iterator begin() const
{
return const_iterator(_GetStorage());
}
//! Returns an iterator to the beginning of the vector.
const_iterator cbegin() const
{
return begin();
}
//! @}
//! Returns an iterator to the end of the vector.
//! @{
iterator end()
{
return iterator(_GetStorage() + size());
}
//! Returns an iterator to the end of the vector.
const_iterator end() const
{
return const_iterator(_GetStorage() + size());
}
//! Returns an iterator to the end of the vector.
const_iterator cend() const
{
return end();
}
//! @}
//! Returns a reverse iterator to the beginning of the vector.
//! @{
reverse_iterator rbegin()
{
return reverse_iterator(end());
}
//! Returns a reverse iterator to the beginning of the vector.
const_reverse_iterator rbegin() const
{
return const_reverse_iterator(end());
}
//! Returns a reverse iterator to the beginning of the vector.
const_reverse_iterator crbegin() const
{
return rbegin();
}
//! @}
//! @{
//! Returns a reverse iterator to the end of the vector.
reverse_iterator rend()
{
return reverse_iterator(begin());
}
//! Returns a reverse iterator to the end of the vector.
const_reverse_iterator rend() const
{
return const_reverse_iterator(begin());
}
//! Returns a reverse iterator to the end of the vector.
const_reverse_iterator crend() const
{
return rend();
}
//! @}
//! Returns the first element in the vector.
//!
reference front()
{
return *begin();
}
//! Returns the first element in the vector.
//!
const_reference front() const
{
return *begin();
}
//! Returns the last element in the vector.
//!
reference back()
{
return *(data() + size() - 1);
}
//! Returns the last element in the vector.
//!
const_reference back() const
{
return *(data() + size() - 1);
}
//! Access the specified element.
//!
reference operator[](size_type i)
{
return *(data() + i);
}
//! Access the specified element.
//!
const_reference operator[](size_type i) const
{
return *(data() + i);
}
//! Direct access to the underlying array.
//!
value_type* data()
{
return _GetStorage();
}
//! Direct access to the underlying array.
//!
const value_type* data() const
{
return _GetStorage();
}
//! Lexicographically compares the elements in the vectors for equality.
//!
bool operator==(const SmallVector& rhs) const
{
return size() == rhs.size() && std::equal(begin(), end(), rhs.begin());
}
//! Lexicographically compares the elements in the vectors for inequality.
//!
bool operator!=(const SmallVector& rhs) const
{
return !operator==(rhs);
}
private:
//! Returns true if the local storage is used.
bool _IsLocal() const
{
return _capacity <= N;
}
//! Return a pointer to the storage, which is either local or remote
//! depending on the current capacity.
value_type* _GetStorage()
{
return _IsLocal() ? _data.GetLocalStorage() : _data.GetRemoteStorage();
}
//! Return a const pointer to the storage, which is either local or remote
//! depending on the current capacity.
const value_type* _GetStorage() const
{
return _IsLocal() ? _data.GetLocalStorage() : _data.GetRemoteStorage();
}
//! Free the remotely allocated storage.
void _FreeStorage()
{
if (!_IsLocal())
{
free(_data.GetRemoteStorage());
}
}
//! Destructs all the elements stored in this vector.
void _Destruct()
{
value_type* b = data();
value_type* e = b + size();
for (value_type* p = b; p != e; ++p)
{
p->~value_type();
}
}
//! Allocate a buffer on the heap.
static value_type* _Allocate(size_type size)
{
return static_cast<value_type*>(malloc(sizeof(value_type) * size));
}
//! Initialize the vector with new storage, updating the capacity and size.
void _InitStorage(size_type size)
{
if (size > capacity())
{
_data.SetRemoteStorage(_Allocate(size));
_capacity = size;
}
_size = size;
}
//! Grow the storage to be able to accommodate newCapacity entries. This
//! always allocates remote storage.
void _GrowStorage(const size_type newCapacity)
{
value_type* newStorage = _Allocate(newCapacity);
_UninitializedMove(begin(), end(), iterator(newStorage));
_Destruct();
_FreeStorage();
_data.SetRemoteStorage(newStorage);
_capacity = newCapacity;
}
//! Returns the next capacity to use for vector growth. The growth factor
//! here is 1.5. A constant 1 is added so that we do not have to special
//! case initial capacities of 0 and 1.
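//! Starting from an empty vector, successive growth steps therefore yield
//! capacities 1, 2, 4, 7, 11, 17, ...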
size_type _NextCapacity() const
{
const size_type cap = capacity();
return cap + (cap / 2) + 1;
}
//! Insert the value v at iterator it. We use this method that takes a
//! universal reference to de-duplicate the logic required for the insert
//! overloads, one taking an rvalue reference, and the other one taking a
//! const reference. This way, we can take the optimal code path
//! (move, or copy without making redundant copies) based on whether v is
//! an rvalue reference or a const reference.
template <typename U>
iterator _Insert(const_iterator it, U&& v)
{
value_type* newEntry;
// If the iterator points to the end, simply push back.
if (it == end())
{
push_back(std::forward<U>(v));
return end() - 1;
}
// Grow the remote storage, if we need to. This invalidates iterators,
// so special care must be taken in order to return a new, valid
// iterator.
else if (size() == capacity())
{
const size_type newCapacity = _NextCapacity();
value_type* newStorage = _Allocate(newCapacity);
value_type* i = const_cast<value_type*>(&*it);
value_type* curData = data();
newEntry = _UninitializedMove(curData, i, newStorage);
new (newEntry) value_type(std::forward<U>(v));
_UninitializedMove(i, curData + size(), newEntry + 1);
_Destruct();
_FreeStorage();
_data.SetRemoteStorage(newStorage);
_capacity = newCapacity;
}
// Our current capacity is big enough to allow us to simply shift
// elements up one slot and insert v at it.
else
{
// Move all the elements after it up by one slot.
newEntry = const_cast<value_type*>(&*it);
value_type* last = const_cast<value_type*>(&back());
new (data() + size()) value_type(std::move(*last));
std::move_backward(newEntry, last, last + 1);
// Move v into the slot at the supplied iterator position.
newEntry->~value_type();
new (newEntry) value_type(std::forward<U>(v));
}
// Bump size and return an iterator to the newly inserted entry.
++_size;
return iterator(newEntry);
}
//! The vector storage, which is a union of the local storage and a pointer
//! to the heap memory, if allocated.
_Data<value_type, N> _data;
//! The current size of the vector, i.e. how many entries it contains.
size_type _size;
//! The current capacity of the vector, i.e. how big the currently allocated
//! storage space is.
size_type _capacity;
};
//! Swap function for @ref SmallVector
template <typename T, std::size_t N>
void swap(SmallVector<T, N>& a, SmallVector<T, N>& b)
{
a.swap(b);
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// File: omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilder.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Graph builder is the only class that has the ability to modify topology of a graph.
//!
//! Topological edits of the graph are only allowed during graph transformation and should never
//! be performed during execution of the graph. Construction of the builder will automatically drop
//! all the connections between nodes.
//!
//! Methods on this class mutating a graph topology are not thread-safe (unless documented otherwise)
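//!
//! A hypothetical usage sketch (the builder and node pointers are assumed to
//! have been obtained elsewhere):
//!
//! @code
//! void rewire(omni::graph::exec::unstable::IGraphBuilder* builder,
//!             omni::graph::exec::unstable::INode* upstream,
//!             omni::graph::exec::unstable::INode* downstream)
//! {
//!     builder->disconnect(upstream, downstream); // may throw
//!     builder->connect(downstream, upstream);    // reverse the edge; may throw
//! }
//! @endcode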
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>
: public omni::graph::exec::unstable::IGraphBuilder_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilder")
//! Return owner of all graphs this builder touches
//!
//! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraph* getGraph() noexcept;
//! Returns the topology this builder can modify.
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::ITopology* getTopology() noexcept;
//! Returns the context in which this builder works.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderContext will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraphBuilderContext* getContext() noexcept;
//! Returns @ref omni::graph::exec::unstable::INodeGraphDef this builder can modify.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;
//! Connect two given nodes.
//!
//! It is an error if the two nodes are not in the same topology.
//!
//! Neither given node should be @c nullptr.
//!
//! Neither @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called
//! during the connection process.
//!
//! May throw.
void connect(omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode);
//! Disconnect two given nodes.
//!
//! It is an error if the two nodes are not in the same topology.
//!
//! Neither given node should be @c nullptr.
//!
//! Neither @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called
//! during the disconnection process.
//!
//! May throw.
void disconnect(omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode);
//! Remove a node from topology.
//!
//! The given node must not be @c nullptr.
//!
//! May throw.
void remove(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);
//! Sets the definition for the given node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef().
//!
//! This method is NOT thread safe.
void setNodeDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept;
//! Sets the definition for the given node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef().
//!
//! This method is NOT thread safe.
void setNodeGraphDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept;
//! Unsets given node's definition.
//!
//! If the definition is already @c nullptr, this method does nothing.
//!
//! This method is NOT thread safe.
void clearDef(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) noexcept;
//! Replace well formed cluster of nodes with a single node and the given definition.
//!
//! All nodes must exist in the same and current topology, otherwise the entire operation is aborted.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! This method is NOT thread safe.
void replacePartition(const omni::graph::exec::unstable::NodePartition& partition,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition);
//! Create a new node in current node graph def.
//!
//! The given node name must not be @c nullptr.
//!
//! The given node def can be @c nullptr.
//!
//! Node creation can return @c nullptr when current node graph def doesn't allow node construction outside
//! of the pass that created it.
//!
//! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
omni::core::ObjectPtr<omni::graph::exec::unstable::INode> createNode(
const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def);
//! Access created nodes by this builder.
//!
//! Span is no longer valid when topology of the graph changes. You need to query it again.
//!
//! In case a node once created gets removed by another pass, returned list will continue to have it.
//! It is safe to do, because we do not delete underlying nodes until the next graph population.
//! Checking if node is valid in current topology allows to filter out these cases.
//!
//! The pointers in the span are non owning, i.e. @ref omni::graph::exec::unstable::INode will not have
//! @ref omni::core::IObject::acquire() called on it.
omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getCreatedNodes() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::IGraph* omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::getGraph() noexcept
{
return getGraph_abi();
}
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getTopology() noexcept
{
return getTopology_abi();
}
inline omni::graph::exec::unstable::IGraphBuilderContext* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getContext() noexcept
{
return getContext_abi();
}
inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getNodeGraphDef() noexcept
{
return getNodeGraphDef_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::connect(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode)
{
OMNI_THROW_IF_ARG_NULL(upstreamNode);
OMNI_THROW_IF_ARG_NULL(downstreamNode);
OMNI_THROW_IF_FAILED(connect_abi(upstreamNode.get(), downstreamNode.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::disconnect(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> upstreamNode,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> downstreamNode)
{
OMNI_THROW_IF_ARG_NULL(upstreamNode);
OMNI_THROW_IF_ARG_NULL(downstreamNode);
OMNI_THROW_IF_FAILED(disconnect_abi(upstreamNode.get(), downstreamNode.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::remove(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
OMNI_THROW_IF_ARG_NULL(node);
OMNI_THROW_IF_FAILED(remove_abi(node.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::setNodeDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept
{
setNodeDef_abi(node.get(), nodeDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::setNodeGraphDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept
{
setNodeGraphDef_abi(node.get(), nodeGraphDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::clearDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node) noexcept
{
clearDef_abi(node.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>::replacePartition(
const omni::graph::exec::unstable::NodePartition& partition,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> definition)
{
OMNI_THROW_IF_ARG_NULL(definition);
replacePartition_abi(&partition, definition.get());
}
inline omni::core::ObjectPtr<omni::graph::exec::unstable::INode> omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::createNode(const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def)
{
OMNI_THROW_IF_ARG_NULL(name);
auto return_ = omni::core::steal(createNode_abi(name, def.get()));
return return_;
}
inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilder_abi>::getCreatedNodes() noexcept
{
return getCreatedNodes_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
// File: omniverse-code/kit/include/omni/graph/exec/unstable/GraphBuilder.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file GraphBuilder.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilder.
#pragma once
#include <carb/Format.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/IGraphBuilderContext.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeFactory.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/INodeGraphDefDebug.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IGraphBuilder
template <typename... Bases>
class GraphBuilderT : public Implements<Bases...>
{
public:
//! Construct graph builder for a root @ref INodeGraphDef.
//!
//! Construction of a graph builder has a side effect on underlying @c topology causing its invalidation.
//!
//! May throw.
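//!
//! A hypothetical call site (@c myContext is an illustrative
//! @ref IGraphBuilderContext obtained elsewhere):
//!
//! @code
//! auto builder = GraphBuilder::create(myContext); // invalidates the topology
//! @endcode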
static omni::core::ObjectPtr<GraphBuilderT> create(omni::core::ObjectParam<IGraphBuilderContext> context)
{
OMNI_THROW_IF_ARG_NULL(context);
OMNI_GRAPH_EXEC_ASSERT(
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(context->getGraph()->getNodeGraphDef()) ||
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(context->getGraph()->getNodeGraphDef())->isExecuting());
auto builder = omni::core::steal(new GraphBuilderT(context.get(), context->getGraph()->getNodeGraphDef()));
auto topology = builder->getTopology();
topology->invalidate();
builder->_modifiedTopology(topology);
return builder;
}
//! Construct graph builder for a given @ref INodeGraphDef.
//!
//! Construction of a graph builder has a side effect on underlying @c topology causing its invalidation.
//!
//! May throw.
static omni::core::ObjectPtr<GraphBuilderT> create(omni::core::ObjectParam<IGraphBuilderContext> context,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef)
{
OMNI_THROW_IF_ARG_NULL(context);
OMNI_THROW_IF_ARG_NULL(nodeGraphDef);
OMNI_GRAPH_EXEC_ASSERT(!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get()) ||
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get())->isExecuting());
auto builder = omni::core::steal(new GraphBuilderT(context.get(), nodeGraphDef.get()));
auto topology = builder->getTopology();
topology->invalidate();
builder->_modifiedTopology(topology);
return builder;
}
//! Construct graph builder for a given @ref INodeGraphDef without causing topology invalidation.
//!
//! This builder is used by the pass pipeline when operations to the graph will alter existing topology.
//!
//! May throw.
static omni::core::ObjectPtr<GraphBuilderT> createForPass(omni::core::ObjectParam<IGraphBuilderContext> context,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef)
{
OMNI_THROW_IF_ARG_NULL(context);
OMNI_THROW_IF_ARG_NULL(nodeGraphDef);
OMNI_GRAPH_EXEC_ASSERT(!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get()) ||
!omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef.get())->isExecuting());
auto builder = omni::core::steal(new GraphBuilderT(context.get(), nodeGraphDef.get()));
// Detect when the node graph was constructed outside of the pass pipeline. Tag these defs as
// created during the current construction stamp.
//
// Currently this usage pattern only appears in tests.
auto topology = nodeGraphDef->getTopology();
if (!topology->getConstructionStamp().isValid())
{
builder->_modifiedTopology(topology);
}
return builder;
}
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getGraph_abi
IGraph* getGraph_abi() noexcept override
{
return m_context->getGraph();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getTopology_abi
ITopology* getTopology_abi() noexcept override
{
return m_nodeGraphDef->getTopology();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getContext_abi
IGraphBuilderContext* getContext_abi() noexcept override
{
return m_context;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getNodeGraphDef_abi
INodeGraphDef* getNodeGraphDef_abi() noexcept override
{
return m_nodeGraphDef;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::connect_abi
omni::core::Result connect_abi(INode* upstreamNode, INode* downstreamNode) noexcept override
{
try
{
_modifiedTopology(upstreamNode->getTopology());
IGraphBuilderNode* upstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(upstreamNode);
IGraphBuilderNode* downstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(downstreamNode);
if (_connect(upstream, downstream))
{
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail);
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::disconnect_abi
omni::core::Result disconnect_abi(INode* upstreamNode, INode* downstreamNode) noexcept override
{
try
{
_modifiedTopology(upstreamNode->getTopology());
IGraphBuilderNode* upstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(upstreamNode);
IGraphBuilderNode* downstream = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(downstreamNode);
if (_disconnect(upstream, downstream))
{
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail);
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::remove_abi
omni::core::Result remove_abi(INode* node) noexcept override
{
_modifiedTopology(node->getTopology());
try
{
IGraphBuilderNode* nodeToRemove = exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node);
nodeToRemove->validateOrResetTopology();
// Nodes don't have an edge back to the root. Check if the removed node is a child of the root.
auto graphRoot = nodeToRemove->getRoot();
graphRoot->_removeChild(nodeToRemove); // Silently fails if node is not a root child.
// Cache these pointers to avoid virtual method overhead.
auto children = nodeToRemove->getChildren();
auto parents = nodeToRemove->getParents();
// Disconnect all parents from the node to be removed.
for (auto parent : parents)
{
parent->_removeChild(nodeToRemove);
}
// Disconnect all children from the node to be removed.
for (auto child : children)
{
child->_removeParent(nodeToRemove);
}
// Invalidate all remaining connections of the node to be removed.
nodeToRemove->_invalidateConnections();
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef_abi
void setNodeDef_abi(INode* node, INodeDef* nodeDef) noexcept override
{
_modifiedTopology(node->getTopology());
exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_setNodeDef(nodeDef);
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef_abi
void setNodeGraphDef_abi(INode* node, INodeGraphDef* nodeGraphDef) noexcept override
{
_modifiedTopology(node->getTopology());
exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_setNodeGraphDef(nodeGraphDef);
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::clearDef_abi
void clearDef_abi(INode* node) noexcept override
{
_modifiedTopology(node->getTopology());
exec::unstable::cast<exec::unstable::IGraphBuilderNode>(node)->_clearDef();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::replacePartition_abi
void replacePartition_abi(const NodePartition* partition, IDef* definition) noexcept override
{
if (partition->size() == 0)
return;
// validate the partition
INode* rootNode = partition->front()->getRoot();
for (auto nodeInPartition : *partition)
{
if (!nodeInPartition->isValidTopology() || nodeInPartition->getRoot() != rootNode)
{
return;
}
}
// mutate the graph
_commitPartition(m_nodeGraphDef, partition, definition);
_modifiedTopology(m_nodeGraphDef->getTopology());
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::createNode_abi
INode* createNode_abi(const char* name, IDef* def) noexcept override
{
if (auto factory = m_nodeGraphDef->getNodeFactory())
{
auto newNode = factory->createNode(name, def);
m_createdNodes.push_back(newNode.get());
return newNode.detach();
}
else
{
return nullptr;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilder::getCreatedNodes_abi
Span<INode* const> getCreatedNodes_abi() noexcept override
{
return m_createdNodes.size() ? Span<INode* const>{ m_createdNodes.begin(), m_createdNodes.size() } :
Span<INode* const>{ nullptr, 0 };
}
//! Constructor
GraphBuilderT(IGraphBuilderContext* context, INodeGraphDef* nodeGraphDef)
: m_context{ context }, m_nodeGraphDef{ nodeGraphDef }
{
m_context->getGraph()->_setInBuild(true);
}
~GraphBuilderT()
{
m_context->getGraph()->_setInBuild(false);
}
private:
//! This builder modified the topology of a graph. Currently it is possible that the modified topology does not
//! belong to the NodeGraphDef this builder refers to.
//!
void _modifiedTopology(ITopology* modifiedTopology)
{
modifiedTopology->_setConstructionInSync(m_context->getConstructionStamp());
}
bool _connect(IGraphBuilderNode* upstream, IGraphBuilderNode* downstream)
{
if (upstream->getTopology() == downstream->getTopology())
{
upstream->validateOrResetTopology();
downstream->validateOrResetTopology();
if (!upstream->hasChild(downstream))
{
upstream->_addChild(downstream);
if (!upstream->isRoot())
{
downstream->_addParent(upstream);
}
}
return true;
}
return false;
}
bool _disconnect(IGraphBuilderNode* upstream, IGraphBuilderNode* downstream)
{
if (upstream->getTopology() == downstream->getTopology())
{
upstream->validateOrResetTopology();
downstream->validateOrResetTopology();
if (upstream->hasChild(downstream))
{
upstream->_removeChild(downstream);
if (!upstream->isRoot())
{
downstream->_removeParent(upstream);
}
}
return true;
}
return false;
}
//! Make changes to the topology with already validated partition and definition.
void _commitPartition(INodeGraphDef* nodeGraphDef, const NodePartition* partition, IDef* definition) noexcept
{
OMNI_GRAPH_EXEC_ASSERT(nodeGraphDef && definition && (partition->size() > 0));
OMNI_GRAPH_EXEC_ASSERT(nodeGraphDef->getNodeFactory().get());
// we affect the topology, but shouldn't require any memory operation
std::vector<INode*> parents, children;
// optimization, let's assume each node has one parent and one child from outside of the partition
parents.reserve(partition->size() * 2);
children.reserve(partition->size() * 2);
// we want cost to be linear and for that we are going to avoid searches in the partition
// we achieve this by collecting all parents/children (some will be in the partition),
// then invalidating the partition and cleaning up the immediate upstream and downstream
for (auto nodeInPartition : *partition)
{
for (auto parent : nodeInPartition->getParents())
{
parents.push_back(parent);
}
for (auto child : nodeInPartition->getChildren())
{
children.push_back(child);
}
// make the node invalid without invalidating the entire topology
exec::unstable::cast<IGraphBuilderNode>(nodeInPartition)->_invalidateConnections();
}
// generate replacement node
std::string nodeName = carb::fmt::format("Partition_{}", partition->front()->getName().getString().c_str());
auto newNode = createNode_abi(nodeName.c_str(), definition);
auto newBuilderNode = exec::unstable::cast<IGraphBuilderNode>(newNode);
// in one pass: cleanup the topology and reconnect to the new node
auto rootBuilderNode = exec::unstable::cast<IGraphBuilderNode>(partition->front()->getRoot());
rootBuilderNode->_removeInvalidChildren();
for (auto parent : parents)
{
if (parent->isValidTopology())
{
auto parentBuilderNode = exec::unstable::cast<IGraphBuilderNode>(parent);
parentBuilderNode->_removeInvalidChildren();
this->_connect(parentBuilderNode, newBuilderNode);
}
}
for (auto child : children)
{
if (child->isValidTopology())
{
auto childBuilderNode = exec::unstable::cast<IGraphBuilderNode>(child);
childBuilderNode->_removeInvalidParents();
this->_connect(newBuilderNode, childBuilderNode);
}
}
// Need to make sure we are connected to the root (indirectly, or directly if this is an entry node)
if (newNode->getParents().size() == 0)
{
this->_connect(rootBuilderNode, newBuilderNode);
}
}
IGraphBuilderContext* m_context{ nullptr }; //!< All graph builders operate within a context. We store a pointer
//!< to it.
INodeGraphDef* m_nodeGraphDef{ nullptr }; //!< Graph topology this builder can modify. This is not yet enforced in
//!< code.
//! Most of the time we won't need any space. The size of 2 was chosen arbitrarily.
using NodeArray = SmallVector<INode*, 2>;
NodeArray m_createdNodes; //!< Collect nodes created dynamically to allow pass pipeline discover them.
};
//! Core GraphBuilder implementation for @ref omni::graph::exec::unstable::IGraphBuilder
using GraphBuilder = GraphBuilderT<IGraphBuilder>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// File: omniverse-code/kit/include/omni/graph/exec/unstable/IPassPipeline.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Runs registered passes.
//!
//! The role of pass pipeline is to populate and prepare the execution graph. The base implementation runs passes based
//! on the type and registration order. Most applications will define their own pass pipeline to control how the
//! execution graph is generated.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
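//!
//! A hypothetical driver loop (the pipeline, stamp, and builder objects are
//! illustrative assumptions):
//!
//! @code
//! if (pipeline->needsConstruction())
//!     pipeline->construct();
//! if (pipeline->needsExecute(globalTopologyStamp))
//!     pipeline->execute(builderContext, nodeGraphDef); // may throw
//! @endcode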
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>
: public omni::graph::exec::unstable::IPassPipeline_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassPipeline")
//! Test if pipeline needs to rebuild (mostly for its acceleration structures).
bool needsConstruction() noexcept;
//! Build the pipeline (mostly for its acceleration structures).
void construct();
//! Test if pipeline needs to run (after topology changes in the graph).
bool needsExecute(const omni::graph::exec::unstable::Stamp& globalTopology) noexcept;
//! Execute the graph transformations pipeline
void execute(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderContext> builderContext,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::needsConstruction() noexcept
{
return needsConstruction_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::construct()
{
OMNI_THROW_IF_FAILED(construct_abi());
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::needsExecute(
const omni::graph::exec::unstable::Stamp& globalTopology) noexcept
{
return needsExecute_abi(globalTopology);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>::execute(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderContext> builderContext,
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef)
{
OMNI_THROW_IF_ARG_NULL(builderContext);
OMNI_THROW_IF_ARG_NULL(nodeGraphDef);
OMNI_THROW_IF_FAILED(execute_abi(builderContext.get(), nodeGraphDef.get()));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
// File: omniverse-code/kit/include/omni/graph/exec/unstable/SchedulingInfo.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file SchedulingInfo.h
//!
//! @brief Defines omni::graph::exec::unstable::SchedulingInfo.
#pragma once
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Constraints to be fulfilled by the scheduler when dispatching a task.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
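//!
//! A minimal sketch of a dispatcher honoring these constraints; the dispatch
//! helpers below are hypothetical placeholders, not part of this API:
//!
//! @code
//! switch (info)
//! {
//!     case SchedulingInfo::eSerial:          dispatchSerialized(task); break;
//!     case SchedulingInfo::eParallel:        dispatchConcurrent(task); break;
//!     case SchedulingInfo::eIsolate:         drainAllThenRun(task);    break;
//!     case SchedulingInfo::eSchedulerBypass: task();                   break;
//! }
//! @endcode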
enum class SchedulingInfo
{
eSerial, //!< Execution of a task should be serialized globally. No other serial task should be running.
eParallel, //!< Execution of a task can be done safely in parallel. Parallel tasks can run together with serial.
eIsolate, //!< Execution of a task has to be done in isolation. No other tasks can run concurrently.
eSchedulerBypass //!< Execution of a task should bypass the scheduler. Either to avoid overhead for lightweight
//!< tasks or to serialize within a thread generating the work.
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// File: omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundTask.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Class representing a background task.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBackgroundTask_abi>
: public omni::graph::exec::unstable::IBackgroundTask_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundTask")
//! Returns a @c std::future like object used to check if the background task has completed.
//!
//! An error is returned if this method is called more than once.
//!
//! This method is not thread safe.
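//!
//! Hypothetical usage (@c task is an illustrative, valid pointer):
//!
//! @code
//! auto result = task->getBackgroundResult(); // throws if called a second time
//! @endcode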
omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> getBackgroundResult();
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> omni::core::Generated<
omni::graph::exec::unstable::IBackgroundTask_abi>::getBackgroundResult()
{
omni::core::ObjectPtr<omni::graph::exec::unstable::IBackgroundResult> out;
OMNI_THROW_IF_FAILED(getBackgroundResult_abi(out.put()));
return out;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 1,874 | C | 30.77966 | 99 | 0.730523 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionCurrentThread.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IExecutionCurrentThread.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IExecutionCurrentThread.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionTask;
class IExecutionContext;
class IExecutionCurrentThread_abi;
class IExecutionCurrentThread;
class IExecutionStateInfo;
class IExecutor;
class IGraph;
//! Encapsulates the execution state for the current thread allowing callers to determine quantities like the @ref
//! omni::graph::exec::unstable::ExecutionTask currently executing on the thread.
//!
//! Because methods in this interface return thread local data, all methods in this interface are thread safe.
//!
//! This interface is usually accessed as a singleton via one of the following helper methods:
//!
//! - @ref omni::graph::exec::unstable::getCurrentTask()
//!
//! - @ref omni::graph::exec::unstable::getCurrentExecutor()
//!
//! This interface contains methods for graph and task execution. Users should not call these methods directly. See
//! the methods' docs below for the correct way to perform execution.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
class IExecutionCurrentThread_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutionCurrentThread")>
{
protected:
//! Executes the given @ref omni::graph::exec::unstable::Graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::IExecutionContext::execute().
//!
//! From an ABI point-of-view, the purpose of this method is to handle the special case of the top-level @ref
//! omni::graph::exec::unstable::INodeGraphDef being contained by @ref omni::graph::exec::unstable::IGraph rather
//! than pointed to by a node in another @ref omni::graph::exec::unstable::INodeGraphDef. Meaningful values are set
//! for the threads current task and executor (see @ref omni::graph::exec::unstable::getCurrentTask() and @ref
//! omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
virtual Status executeGraph_abi(OMNI_ATTR("not_null, throw_if_null") IGraph* graph,
OMNI_ATTR("not_null, throw_if_null") IExecutionContext* context) noexcept = 0;
//! Executes and sets the thread's "current" task to the given task.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::ExecutionTask::execute().
//!
//! This method executes the definition of the node pointed to by the given task. Importantly, this method sets
//! thread local data to track the currently running task and executor (see @ref
//! omni::graph::exec::unstable::getCurrentTask() and @ref omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
virtual Status execute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* task,
IExecutor* executor,
OMNI_ATTR("in, out, not_null, throw_if_null") Status* taskStatus) noexcept = 0;
//! Access the task currently executing on the current thread.
//!
//! Useful when needing to access execution context state without having to pass it to every function.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentTask().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
virtual ExecutionTask* getCurrentTask_abi() noexcept = 0;
//! Access the executor currently executing on the current thread.
//!
//! Useful when needing to spawn extra work within the scope of the graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentExecutor().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
virtual OMNI_ATTR("no_acquire") IExecutor* getCurrentExecutor_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref IExecutionCurrentThread.
using ExecutionCurrentThreadPtr = omni::core::ObjectPtr<IExecutionCurrentThread>;
//! Access current thread's execution state.
//!
//! The returned pointer is a singleton managed by *omni.graph.exec*, and does *not* have @ref
//! omni::core::IObject::acquire() called on it before being returned. The caller should *not* call @ref
//! omni::core::IObject::release() on the returned raw pointer.
//!
//! @thread_safety This method is thread safe.
inline IExecutionCurrentThread* getCurrentThread() noexcept;
//! Access task currently executed on a calling thread.
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
inline ExecutionTask* getCurrentTask() noexcept;
//! Access executor currently used on a calling thread.
//!
//! Useful when needing to spawn extra work within the scope of the graph.
//!
//! The returned @ref IExecutor does *not* have @ref omni::core::IObject::acquire() called before being returned.
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
inline IExecutor* getCurrentExecutor() noexcept;
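// A minimal usage sketch: code running as part of a node's computation can inspect the
// thread-local execution state without it being passed in explicitly:
//
//     if (ExecutionTask* task = getCurrentTask())
//     {
//         IExecutionContext* context = task->getContext(); // state for this execution
//     }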
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutionCurrentThread.gen.h>
//! @copydoc omni::graph::exec::unstable::IExecutionCurrentThread_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutionCurrentThread
: public omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>
{
};
// additional headers needed for API implementation
#include <omni/core/ITypeFactory.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/IExecutor.h>
#include <omni/graph/exec/unstable/IGraph.h>
inline omni::graph::exec::unstable::IExecutionCurrentThread* omni::graph::exec::unstable::getCurrentThread() noexcept
{
// createType() always calls acquire() and returns an ObjectPtr to make sure release() is called. we don't want to
// hold a ref here to avoid static destruction issues. here we allow the returned ObjectPtr to destruct (after
// calling get()) to release our ref. we know the DLL in which the singleton was created is maintaining a ref and
// will keep the singleton alive for the lifetime of the DLL.
static auto sSingleton = omni::core::createType<IExecutionCurrentThread>().get();
return sSingleton;
}
inline omni::graph::exec::unstable::ExecutionTask* omni::graph::exec::unstable::getCurrentTask() noexcept
{
return getCurrentThread()->getCurrentTask();
}
inline omni::graph::exec::unstable::IExecutor* omni::graph::exec::unstable::getCurrentExecutor() noexcept
{
return getCurrentThread()->getCurrentExecutor();
}
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutionCurrentThread.gen.h>
| 7,916 | C | 41.794594 | 120 | 0.721071 |
omniverse-code/kit/include/omni/graph/exec/unstable/INodeFactory.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Factory interface for creating @ref omni::graph::exec::unstable::INode objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! See @ref omni::graph::exec::unstable::createNodeFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeFactory_abi>
: public omni::graph::exec::unstable::INodeFactory_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeFactory")
//! Creates and returns a new node within a topology this factory came from.
//!
    //! It is legal to pass @c nullptr as the definition, or either an @ref omni::graph::exec::unstable::INodeDef
    //! or an @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
omni::core::ObjectPtr<omni::graph::exec::unstable::INode> createNode(
const char* name, omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::core::ObjectPtr<omni::graph::exec::unstable::INode> omni::core::Generated<
omni::graph::exec::unstable::INodeFactory_abi>::createNode(const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def)
{
OMNI_THROW_IF_ARG_NULL(name);
omni::core::ObjectPtr<omni::graph::exec::unstable::INode> out;
OMNI_THROW_IF_FAILED(createNode_abi(name, def.get(), out.put()));
return out;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 2,523 | C | 36.117647 | 126 | 0.696393 |
omniverse-code/kit/include/omni/graph/exec/unstable/IInvalidationForwarder.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Interface wrapping a function (possibly with storage) to forward topology invalidation notices.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi>
: public omni::graph::exec::unstable::IInvalidationForwarder_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IInvalidationForwarder")
//! Invokes the wrapped function.
//!
//! The given topology must not be @c nullptr.
void invoke(omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void omni::core::Generated<omni::graph::exec::unstable::IInvalidationForwarder_abi>::invoke(
omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology)
{
OMNI_THROW_IF_ARG_NULL(topology);
invoke_abi(topology.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 1,731 | C | 29.928571 | 99 | 0.737724 |
omniverse-code/kit/include/omni/graph/exec/unstable/Graph.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Graph.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraph.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/ExecutorFactory.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/NodeGraphDef.h>
#include <memory>
#include <string>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IGraph
template <typename... Bases>
class GraphT : public Implements<Bases...>
{
public:
//! Construct a graph with default executor attached to an empty node graph.
//!
//! May throw.
static omni::core::ObjectPtr<GraphT> create(const char* name)
{
OMNI_THROW_IF_ARG_NULL(name);
return omni::core::steal(new GraphT(name));
}
//! Construct a graph with a given executor and an empty node graph.
//!
//! May throw.
static omni::core::ObjectPtr<GraphT> create(const ExecutorFactory& executorFactory, const char* name)
{
OMNI_THROW_IF_ARG_NULL(name);
return omni::core::steal(new GraphT(executorFactory, name));
}
//! Construct a graph with the given node graph.
//!
//! The signature of @p nodeGraphDefFactory must be equivalent to `NodeGraphDefPtr(IGraph*)`.
//!
//! May throw.
template <typename Fn>
static omni::core::ObjectPtr<GraphT> create(const char* name, Fn&& nodeGraphDefFactory)
{
OMNI_THROW_IF_ARG_NULL(name);
return omni::core::steal(new GraphT(name, std::forward<Fn>(nodeGraphDefFactory)));
}
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::getNodeGraphDef_abi
INodeGraphDef* getNodeGraphDef_abi() noexcept override
{
return m_nodeGraphDef.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::getName_abi
const ConstName* getName_abi() noexcept override
{
return &m_name;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp_abi
Stamp* getGlobalTopologyStamp_abi() noexcept override
{
return &m_globalTopologyStamp;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::inBuild_abi
virtual bool inBuild_abi() noexcept override
{
return (m_inBuild > 0);
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraph::_setInBuild_abi
virtual void _setInBuild_abi(bool inBuild) noexcept override
{
if (inBuild)
{
++m_inBuild;
}
else
{
--m_inBuild;
OMNI_GRAPH_EXEC_ASSERT(m_inBuild > -1);
}
}
//! Constructor
//!
//! Construct with a default top level graph definition
GraphT(const char* name) : m_name(name)
{
m_globalTopologyStamp.next();
m_nodeGraphDef = NodeGraphDef::create(this, "NODE-ROOT"); // may throw
}
//! Constructor
//!
//! Construct with a custom executor for a top level graph definition
GraphT(const ExecutorFactory& executorFactory, const char* name) : m_name(name)
{
m_globalTopologyStamp.next();
m_nodeGraphDef = NodeGraphDef::create(this, executorFactory, "NODE-ROOT"); // may throw
}
//! Constructor
//!
//! Construct with a custom top level graph factory
template <typename Fn>
GraphT(const char* name, Fn&& nodeGraphDefFactory) : m_name(name)
{
m_globalTopologyStamp.next();
m_nodeGraphDef = nodeGraphDefFactory(this); // may throw
}
private:
    Stamp m_globalTopologyStamp; //!< Global graph topology. Incremented every time any nested topology changes
omni::core::ObjectPtr<INodeGraphDef> m_nodeGraphDef; //!< Top level node graph definition
ConstName m_name; //!< Name of the execution graph
//! How many builders are active. Atomic since multiple builders may be running in parallel.
std::atomic<int> m_inBuild{ 0 };
};
//! Core Graph implementation for @ref omni::graph::exec::unstable::IGraph
using Graph = GraphT<IGraph>;
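// A minimal construction sketch ("myGraph" is an arbitrary name): create a graph with the
// default executor and an empty top-level node graph, then access that definition (assuming
// the generated getNodeGraphDef() wrapper over getNodeGraphDef_abi() above):
//
//     auto graph = omni::graph::exec::unstable::Graph::create("myGraph");
//     INodeGraphDef* topLevel = graph->getNodeGraphDef();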
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,640 | C | 29.94 | 113 | 0.667457 |
omniverse-code/kit/include/omni/graph/exec/unstable/NodeGraphDef.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file NodeGraphDef.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::NodeGraphDef
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/Executor.h>
#include <omni/graph/exec/unstable/ExecutorFactory.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/INodeGraphDefDebug.h>
#include <omni/graph/exec/unstable/Topology.h>
#include <atomic>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::INodeGraphDef
template <typename... Bases>
class NodeGraphDefT : public Implements<Bases...>
{
public:
//! Construct graph node definition with default executor
//!
//! @param owner Execution graph having this graph as part of the global topology
    //! @param definitionName Definition name is considered a token that transformation passes can register against
//!
//! May throw.
static omni::core::ObjectPtr<NodeGraphDefT> create(omni::core::ObjectParam<IGraph> owner, const char* definitionName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(definitionName);
return omni::core::steal(new NodeGraphDefT(owner, definitionName));
}
//! Construct graph node definition with a given executor factory
//!
//! @param owner Execution graph having this graph as part of the global topology
//! @param executorFactory Factory returning executor for this graph
    //! @param definitionName Definition name is considered a token that transformation passes can register against
//!
//! May throw.
static omni::core::ObjectPtr<NodeGraphDefT> create(omni::core::ObjectParam<IGraph> owner,
const ExecutorFactory& executorFactory,
const char* definitionName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(definitionName);
return omni::core::steal(new NodeGraphDefT(owner, executorFactory, definitionName));
}
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeGraphDef
//!
//! Execution is delegated to @ref omni::graph::exec::unstable::IExecutor. The lifetime of an executor is only for a
//! single execution and any state that needs to persist longer than a single execution must be written with @ref
//! omni::graph::exec::unstable::IExecutionContext::setNodeData_abi()
Status execute_abi(ExecutionTask* info) noexcept override
{
// ef-docs nodegraphdef-execute-begin
omni::core::ObjectPtr<IExecutor> executor;
if (m_executorFactory)
{
executor = m_executorFactory(m_topology, *info);
}
else
{
executor = ExecutorFallback::create(m_topology, *info);
}
return executor->execute(); // execute the node specified by info->getNode()
// ef-docs nodegraphdef-execute-end
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeGraphDef
SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override
{
return SchedulingInfo::eSerial;
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::getName_abi for @ref NodeGraphDef
const ConstName* getName_abi() noexcept override
{
return &m_name;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::getTopology_abi
ITopology* getTopology_abi() noexcept override
{
return m_topology.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::initializeState_abi
omni::core::Result initializeState_abi(ExecutionTask* rootTask) noexcept override
{
return omni::core::kResultSuccess;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::preExecute_abi
Status preExecute_abi(ExecutionTask* info) noexcept override
{
return Status::eSuccess;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::postExecute_abi
Status postExecute_abi(ExecutionTask* info) noexcept override
{
return Status::eSuccess;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDef::getNodeFactory_abi
INodeFactory* getNodeFactory_abi() noexcept override
{
return nullptr;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::getExecutionCount_abi
uint64_t getExecutionCount_abi() noexcept override
{
return m_executionCount;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::incrementExecutionCount_abi
void incrementExecutionCount_abi() noexcept override
{
++m_executionCount;
}
//! Core implementation of @ref omni::graph::exec::unstable::INodeGraphDefDebug::decrementExecutionCount_abi
void decrementExecutionCount_abi() noexcept override
{
--m_executionCount;
}
//! Constructor with a default executor
NodeGraphDefT(omni::core::ObjectParam<IGraph> owner, const char* definitionName) // may throw
: m_topology{ Topology::create(definitionName) }, m_name{ definitionName }
{
_addInvalidationForwarder(owner);
}
//! Constructor with a custom executor
NodeGraphDefT(omni::core::ObjectParam<IGraph> owner,
ExecutorFactory executorFactory,
const char* definitionName) // may throw
: m_topology{ Topology::create(definitionName) },
m_executorFactory(std::move(executorFactory)),
m_name{ definitionName }
{
_addInvalidationForwarder(owner);
}
private:
    //! Private method that enables forwarding of topology invalidation to the execution graph.
    //! An invalidated global topology allows the pass pipeline to execute and discover invalidated definitions.
void _addInvalidationForwarder(omni::core::ObjectParam<IGraph> owner) // may throw
{
m_topology->addInvalidationForwarder(reinterpret_cast<InvalidationForwarderId>(owner.get()),
[global = owner->getGlobalTopologyStamp()](ITopology*) -> void
{ global->next(); });
}
omni::core::ObjectPtr<ITopology> m_topology; //!< Graphs topology
ExecutorFactory m_executorFactory; //!< Executor factory (if empty, default executor will be used)
std::atomic<std::size_t> m_executionCount{ 0 }; //!< Debugging counter to detect illegal executions.
ConstName m_name; //!< Definition name
};
//! Core NodeGraphDef implementation for @ref omni::graph::exec::unstable::INodeGraphDef
using NodeGraphDef = NodeGraphDefT<INodeGraphDef, INodeGraphDefDebug>;
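// A minimal construction sketch ('graph' is a hypothetical IGraph acquired elsewhere):
//
//     auto def = omni::graph::exec::unstable::NodeGraphDef::create(graph, "myDefinition");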
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 7,478 | C | 37.953125 | 121 | 0.684006 |
omniverse-code/kit/include/omni/graph/exec/unstable/INodeFactory.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INodeFactory.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeFactory.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class IDef;
class INode;
class INodeFactory;
class INodeFactory_abi;
//! Factory interface for creating @ref omni::graph::exec::unstable::INode objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! See @ref omni::graph::exec::unstable::createNodeFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
class INodeFactory_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeFactory")>
{
protected:
//! Creates and returns a new node within a topology this factory came from.
//!
    //! It is legal to pass @c nullptr as the definition, or either an @ref omni::graph::exec::unstable::INodeDef
    //! or an @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
virtual OMNI_ATTR("throw_result") omni::core::Result
createNode_abi(OMNI_ATTR("in, not_null, throw_if_null, c_str") const char* name,
IDef* def,
OMNI_ATTR("not_null, throw_if_null, out, *return") INode** out) noexcept = 0;
};
//! Smart pointer managing an instance of @ref INodeFactory.
using NodeFactoryPtr = omni::core::ObjectPtr<INodeFactory>;
//! Generates an @ref INodeFactory from an invocable object such as a function pointer, functor, etc.
//!
//! The given function should have the signature `omni::core::ObjectPtr<INode>(char*, IDef*)`.
template <typename Fn>
NodeFactoryPtr createNodeFactory(Fn&& fn);
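// A minimal usage sketch ('MyNode' is a hypothetical INode implementation): wrap a lambda
// matching the documented signature in a factory object:
//
//     auto factory = createNodeFactory(
//         [](const char* name, IDef* def) -> NodePtr { return MyNode::create(name, def); });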
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeFactory.gen.h>
//! @copydoc omni::graph::exec::unstable::INodeFactory_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeFactory
: public omni::core::Generated<omni::graph::exec::unstable::INodeFactory_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>
#ifndef DOXYGEN_BUILD
template <typename Fn>
omni::graph::exec::unstable::NodeFactoryPtr omni::graph::exec::unstable::createNodeFactory(Fn&& fn)
{
class FactoryImpl : public Implements<INodeFactory>
{
public:
FactoryImpl(Fn&& fn) : m_fn(std::move(fn))
{
}
protected:
omni::core::Result createNode_abi(const char* name, IDef* def, INode** out) noexcept override
{
try
{
NodePtr newNode = m_fn(name, def); // may throw
*out = newNode.detach();
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
Fn m_fn;
};
return omni::core::steal(new FactoryImpl(std::forward<Fn>(fn)));
}
#endif // DOXYGEN_BUILD
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeFactory.gen.h>
| 3,941 | C | 31.578512 | 124 | 0.694748 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResultWriter.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Functor interface used to write the result of a background task.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi>
: public omni::graph::exec::unstable::IBackgroundResultWriter_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundResultWriter")
//! Write the result.
omni::graph::exec::unstable::Status write(omni::graph::exec::unstable::ExecutionTask& info) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi>::write(
omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
return write_abi(&info);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 1,627 | C | 29.716981 | 130 | 0.741856 |
omniverse-code/kit/include/omni/graph/exec/unstable/BackgroundTask.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file BackgroundTask.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::BackgroundTask.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IBackgroundTask.h>
#include <future>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IBackgroundTask
class BackgroundTask : public Implements<IBackgroundTask>
{
public:
//! Creates a new @ref BackgroundTask.
//!
//! May throw
static omni::core::ObjectPtr<BackgroundTask> create()
{
return omni::core::steal(new BackgroundTask);
}
protected:
//! Allows access to result of an async operation.
using Future = std::future<omni::core::ObjectPtr<IBackgroundResultWriter>>;
//! Allows setting the result of an async operation.
using Promise = std::promise<omni::core::ObjectPtr<IBackgroundResultWriter>>;
//! @copydoc IBackgroundTask_abi::getBackgroundResult_abi
omni::core::Result getBackgroundResult_abi(IBackgroundResult** out) noexcept override
{
class Result : public Implements<IBackgroundResult>
{
public:
Result(Future&& future) : m_future(std::move(future))
{
}
protected:
omni::core::Result isReady_abi(bool* out) noexcept override
{
try
{
*out = (m_future.wait_for(std::chrono::milliseconds(0)) == std::future_status::ready); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
omni::core::Result cancel_abi(bool blocking) noexcept override
{
try
{
if (blocking)
{
m_future.wait(); // may throw
}
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
omni::core::Result write_abi(ExecutionTask* info, Status* out) noexcept override
{
try
{
*out = Status::eUnknown;
*out = m_future.get()->write(*info); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
omni::core::Result waitFor_abi(uint64_t nanoseconds, BackgroundResultStatus* out) noexcept override
{
try
{
auto result = m_future.wait_for(std::chrono::nanoseconds(nanoseconds)); // may throw
switch (result)
{
                        case std::future_status::deferred: // deferred cannot occur for a promise-backed future; treat as ready
case std::future_status::ready:
*out = BackgroundResultStatus::eReady;
break;
case std::future_status::timeout:
*out = BackgroundResultStatus::eTimeout;
break;
default:
throw std::logic_error("unknown future state");
}
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
private:
Future m_future;
};
try
{
*out = new Result(m_promise.get_future());
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! @copydoc IBackgroundTask_abi::setResultWriter_abi
omni::core::Result setResultWriter_abi(IBackgroundResultWriter* writer) noexcept override
{
try
{
m_promise.set_value(omni::core::borrow(writer));
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
private:
Promise m_promise;
};
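// A minimal usage sketch: the consumer queries the result object once, while a worker publishes
// the result writer when the async work completes ('writer' is a hypothetical
// IBackgroundResultWriter, and setResultWriter() is the assumed public wrapper over
// setResultWriter_abi() above):
//
//     auto task = BackgroundTask::create();
//     auto result = task->getBackgroundResult();
//     // ... on the worker thread, when finished:
//     task->setResultWriter(writer.get());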
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,566 | C | 30.280822 | 119 | 0.552781 |
omniverse-code/kit/include/omni/graph/exec/unstable/Types.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Types.h
//!
//! @brief Defines typedefs used by interfaces.
//!
//! Because interface bindings are inlined and sometimes coupled, you sometimes need to break out typedefs into their
//! own file so that you can get the include order correct in interface .h files.
#pragma once
#include <omni/graph/exec/unstable/EnumBitops.h>
#include <cstdint>
#include <limits>
namespace omni
{
namespace graph
{
//! Omniverse Execution Framework (EF)
//!
//! The Execution Framework has no dependencies on OmniGraph and is designed to be front-end agnostic. It could very
//! much live in its own namespace, but we decided to make it part of the @ref omni::graph namespace. There is no
//! runtime without an authoring front-end, and we consider the OmniGraph everyone knows to be the front-end to
//! runtime execution. EF then sits at the back end, orchestrating execution of computation defined by one or many
//! front-ends.
//!
//! OmniGraph is becoming an umbrella for authoring front-end and execution backend.
namespace exec
{
//! Unstable features currently in development. Do not depend on any API or ABI in this namespace, as it will change
//! without notice.
namespace unstable
{
//! Each node in an @ref ITopology is given a unique index (via @ref ITopology::acquireNodeIndex()).
using NodeIndexInTopology = uint64_t;
//! Type which store a unique identifier for a node or definition.
using NameHash = uint64_t;
//! Hash of each node's topology index in a path.
using ExecutionPathHash = uint64_t;
//! Key for a piece of data attached to a node.
using NodeDataKey = uint64_t;
//! Pass priority used by @ref IPassPipeline to resolve conflicts between passes.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
using PassPriority = uint32_t;
//! Constant to denote an @ref INode has not been assigned an index in an @ref ITopology.
constexpr const uint64_t kInvalidNodeIndexInTopology = std::numeric_limits<uint64_t>::max();
static_assert(std::numeric_limits<uint64_t>::max() == 0xFFFFFFFFFFFFFFFF, "unexpected uin64_t max value");
//! Grouping type for different passes.
//!
//! Graph transformation pass is registered with a given type and type can't be changed after.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
//!
//! @note We are not yet using all these states...expect changes.
//!
//! @ingroup groupOmniGraphExecPassRegistration
enum class PassType
{
ePopulate, //!< open-up graph types
ePartitioning, //!< change granularity of executions (including executors)
eGlobal, //!< pass is running over entire graph. no other passes can run now
eTypeInference, //!< resolve types
eOverrideExecution, //!< override compute methods, executors, etc
eScheduling, //!< memory access, pipeline stages, etc
eCount //!< total number of known pass types
};
//! Current execution status of pass pipeline a @ref omni::graph::exec::unstable::IPassPipeline.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
enum class PassPipelineStatus : uint32_t
{
eNone = 0, //!< Pipeline is not executing.
eExecuting = 1 << 0, //!< Pipeline is running
eTopologyChangesAllowed = 1 << 1, //!< Pipeline is allowing mutating changes to topology
};
//! Enable bitwise operations on PassPipelineStatus state.
template <>
struct EnumBitops<PassPipelineStatus> : EnumBitops<>::allow_bitops
{
};
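// A minimal sketch: with the EnumBitops specialization above, status flags compose via the
// operators defined in EnumBitops.h:
//
//     PassPipelineStatus s = PassPipelineStatus::eExecuting | PassPipelineStatus::eTopologyChangesAllowed;
//     bool executing = to_bool(s & PassPipelineStatus::eExecuting);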
//! Result of waiting for the result of a @ref omni::graph::exec::unstable::IBackgroundResult.
enum class BackgroundResultStatus
{
eReady, //!< The result is ready.
    eTimeout, //!< The result did not become ready in the specified wait time.
};
//! Type specific function for deleting context specific execution data associated with a node.
//!
//! The function is expected to know the type given as the first arg and handle the deletion of the type in an
//! appropriate manner. Usually, this means casting the `void*` pointer to the proper type and calling `delete`.
using NodeDataDeleterFn = void(void*);
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,544 | C | 37.516949 | 117 | 0.746039 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBase.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for all @ref omni::graph::exec objects.
//!
//! Defines an interface for casting between objects without calling @ref omni::core::IObject::acquire().
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBase_abi> : public omni::graph::exec::unstable::IBase_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBase")
//! Casts this object to the type described the the given id.
//!
//! Returns @c nullptr if the cast was not successful.
//!
//! Unlike @ref omni::core::IObject::cast(), this casting method does not call @ref omni::core::IObject::acquire().
//!
//! @thread_safety This method is thread safe.
void* castWithoutAcquire(omni::core::TypeId id) noexcept;
//! Returns the number of different instances (this included) referencing the current object.
//!
//! @thread_safety This method is thread safe.
uint32_t getUseCount() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void* omni::core::Generated<omni::graph::exec::unstable::IBase_abi>::castWithoutAcquire(omni::core::TypeId id) noexcept
{
return castWithoutAcquire_abi(id);
}
inline uint32_t omni::core::Generated<omni::graph::exec::unstable::IBase_abi>::getUseCount() noexcept
{
return getUseCount_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 2,190 | C | 30.753623 | 126 | 0.715982 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPassFactory.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Factory interface for creating @ref omni::graph::exec::unstable::IPass objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::IPassRegistry.
//!
//! See @ref omni::graph::exec::unstable::createPassFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassFactory_abi>
: public omni::graph::exec::unstable::IPassFactory_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassFactory")
//! Creates and returns a pass.
//!
//! The returned @ref omni::graph::exec::unstable::IPass will have @ref omni::core::IObject::acquire() called on it.
omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> createPass(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder);
};
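// A minimal usage sketch ('MyPass' is a hypothetical IPass implementation; the lambda
// signature is assumed by analogy with createNodeFactory(), and 'builder' is a hypothetical
// IGraphBuilder acquired elsewhere):
//
//     auto factory = createPassFactory([](IGraphBuilder* b) { return MyPass::create(b); });
//     auto pass = factory->createPass(builder);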
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> omni::core::
Generated<omni::graph::exec::unstable::IPassFactory_abi>::createPass(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder)
{
OMNI_THROW_IF_ARG_NULL(builder);
omni::core::ObjectPtr<omni::graph::exec::unstable::IPass> out;
OMNI_THROW_IF_FAILED(createPass_abi(builder.get(), out.put()));
return out;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 2,425 | C | 34.15942 | 120 | 0.722887 |
omniverse-code/kit/include/omni/graph/exec/unstable/ITopology.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! The Topology of a graph is stored in this class.
//!
//! @ref omni::graph::exec::unstable::ITopology is a helper interface used to quickly invalidate the topology, quickly
//! determine if the topology has been invalidated, assign each node in the topology a unique index (suitable for access
//! in contiguous memory), and provide access to the root node.
//!
//! Topologies play a large role in graph invalidation. See @rstref{Graph Invalidation <ef_graph_invalidation>} for
//! details.
//!
//! To better understand how this object relates to other objects in the Execution Framework, see
//! @rstref{Graph Concepts <ef_graph_concepts>}.
//!
//! See @ref omni::graph::exec::unstable::Topology for a concrete implementation of this interface.
template <>
class omni::core::Generated<omni::graph::exec::unstable::ITopology_abi> : public omni::graph::exec::unstable::ITopology_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::ITopology")
    //! Returns how many nodes are alive in this topology. Some of the counted nodes may not be connected, and thus
    //! not discoverable from the root node.
//!
//! @thread_safety This method is thread safe.
uint64_t getNodeCount() noexcept;
//! Returns the topology's root node that allows reaching all of the valid nodes in the topology.
//!
//! The returned @ref omni::graph::exec::unstable::INode will *not* have @ref omni::core::IObject::acquire() called
//! before being returned.
//!
//! The returned pointer will remain valid for the lifetime of this object.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::INode* getRoot() noexcept;
//! Returns the topology stamp. This stamp is updated each time the topology is invalidated.
//!
//! See omni::graph::exec::unstable::ITopology::invalidate() to invalidate the topology (and thereby update this
//! Stamp).
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::Stamp getStamp() noexcept;
//! Invalidate topology. All edges of the graph will be dropped (lazily), nodes remain valid and can be used to
//! build new topology.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during
//! invalidation.
//!
//! It is not recommended to call this method during graph execution. Rather, defer invalidation until after
//! execution.
//!
//! @thread_safety This method is thread safe.
void invalidate() noexcept;
//! Returns a unique index for a node in this topology.
//!
//! Users should not call this method. Only the constructors of implementations of @ref
//! omni::graph::exec::unstable::INode should call this method.
//!
//! Returns an error if an index could not be acquired.
//!
//! See @ref omni::graph::exec::unstable::ITopology::releaseNodeIndex().
//!
//! @thread_safety This method is not thread safe.
omni::graph::exec::unstable::NodeIndexInTopology acquireNodeIndex();
    //! Release the unique index of a node in this topology. Shouldn't be used by anything other than a node's
    //! destructor.
//!
//! See @ref omni::graph::exec::unstable::ITopology::acquireNodeIndex().
//!
//! @thread_safety This method is not thread safe.
void releaseNodeIndex(omni::graph::exec::unstable::NodeIndexInTopology index) noexcept;
//! Add a callback to forward invalidation to other entities.
//!
//! At a minimum, the top-level @ref omni::graph::exec::unstable::IGraph will register a invalidation callback with
//! all topologies created within a pass pipeline. This allows tracking invalidation and triggering minimal graph
//! rebuild.
//!
//! In the future, override passes can generate new graphs and still track authoring invalidation by registering to
//! the original graph topologies invalidation.
//!
//! The given @ref omni::graph::exec::unstable::IInvalidationForwarder will be stored and have @ref
//! omni::core::IObject::acquire() called.
//!
//! If @p owner has a current forwarder, it will be replaced with the given forwarder.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details about how this method is used during
//! invalidation.
//!
//! See @ref omni::graph::exec::unstable::ITopology::removeInvalidationForwarder().
//!
//! @thread_safety This method is not thread safe.
omni::core::Result addInvalidationForwarder(
omni::graph::exec::unstable::InvalidationForwarderId owner,
omni::core::ObjectParam<omni::graph::exec::unstable::IInvalidationForwarder> callback);
//! Remove invalidation forwarding for a given owner.
//!
//! If the given owner is not known, this method does nothing.
//!
//! See @ref omni::graph::exec::unstable::ITopology::addInvalidationForwarder().
//!
//! @thread_safety This method is not thread safe.
void removeInvalidationForwarder(omni::graph::exec::unstable::InvalidationForwarderId owner) noexcept;
//! Get construction version this topology is synchronized with.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::SyncStamp getConstructionStamp() noexcept;
//! Private method only for IGraphBuilder, used to tag construction version.
//!
//! @thread_safety Calling this method concurrently is not recommended.
void _setConstructionInSync(const omni::graph::exec::unstable::Stamp& toSync) noexcept;
};
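// A minimal usage sketch, mirroring the pattern used in NodeGraphDef.h ('topology' is a
// hypothetical ITopology pointer, 'owner' a hypothetical key object; the lambda-accepting
// overload is assumed from its use in NodeGraphDef.h):
//
//     topology->addInvalidationForwarder(reinterpret_cast<InvalidationForwarderId>(owner),
//                                        [](ITopology*) { /* drop caches tied to the old topology */ });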
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getNodeCount() noexcept
{
return getNodeCount_abi();
}
inline omni::graph::exec::unstable::INode* omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getRoot() noexcept
{
return getRoot_abi();
}
inline omni::graph::exec::unstable::Stamp omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::getStamp() noexcept
{
return getStamp_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::invalidate() noexcept
{
invalidate_abi();
}
inline omni::graph::exec::unstable::NodeIndexInTopology omni::core::Generated<
omni::graph::exec::unstable::ITopology_abi>::acquireNodeIndex()
{
omni::graph::exec::unstable::NodeIndexInTopology out;
OMNI_THROW_IF_FAILED(acquireNodeIndex_abi(&out));
return out;
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::releaseNodeIndex(
omni::graph::exec::unstable::NodeIndexInTopology index) noexcept
{
releaseNodeIndex_abi(index);
}
inline omni::core::Result omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::addInvalidationForwarder(
omni::graph::exec::unstable::InvalidationForwarderId owner,
omni::core::ObjectParam<omni::graph::exec::unstable::IInvalidationForwarder> callback)
{
OMNI_THROW_IF_ARG_NULL(callback);
auto return_ = addInvalidationForwarder_abi(owner, callback.get());
return return_;
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::removeInvalidationForwarder(
omni::graph::exec::unstable::InvalidationForwarderId owner) noexcept
{
removeInvalidationForwarder_abi(owner);
}
inline omni::graph::exec::unstable::SyncStamp omni::core::Generated<
omni::graph::exec::unstable::ITopology_abi>::getConstructionStamp() noexcept
{
return getConstructionStamp_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::ITopology_abi>::_setConstructionInSync(
const omni::graph::exec::unstable::Stamp& toSync) noexcept
{
_setConstructionInSync_abi(toSync);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 8,640 | C | 39.378504 | 128 | 0.709838 |
omniverse-code/kit/include/omni/graph/exec/unstable/EnumBitops.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file EnumBitops.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::EnumBitops.
#pragma once
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Enable bitwise operations on enum classes. The templates below save on writing boilerplate code to allow this.
template <class T = void>
struct EnumBitops
{
};
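// A minimal opt-in sketch ('MyFlags' is a hypothetical enum): specializing EnumBitops enables
// the bitwise operators defined below for that enum class, exactly as Types.h does for
// PassPipelineStatus:
//
//     enum class MyFlags : uint32_t { eNone = 0, eA = 1 << 0, eB = 1 << 1 };
//     template <>
//     struct EnumBitops<MyFlags> : EnumBitops<>::allow_bitops {};
//
//     MyFlags f = MyFlags::eA | MyFlags::eB; // now well-formed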
#ifndef DOXYGEN_BUILD
template <>
struct EnumBitops<void>
{
struct _allow_bitops
{
static constexpr bool allow_bitops = true;
};
using allow_bitops = _allow_bitops;
template <class T, class R = T>
using t = typename std::enable_if<std::is_enum<T>::value && EnumBitops<T>::allow_bitops, R>::type;
template <class T>
using u = typename std::underlying_type<T>::type;
};
template <class T>
constexpr EnumBitops<>::t<T> operator~(T a)
{
return static_cast<T>(~static_cast<EnumBitops<>::u<T>>(a));
}
template <class T>
constexpr EnumBitops<>::t<T> operator|(T a, T b)
{
return static_cast<T>(static_cast<EnumBitops<>::u<T>>(a) | static_cast<EnumBitops<>::u<T>>(b));
}
template <class T>
constexpr EnumBitops<>::t<T> operator&(T a, T b)
{
return static_cast<T>(static_cast<EnumBitops<>::u<T>>(a) & static_cast<EnumBitops<>::u<T>>(b));
}
template <class T>
constexpr EnumBitops<>::t<T> operator^(T a, T b)
{
return static_cast<T>(static_cast<EnumBitops<>::u<T>>(a) ^ static_cast<EnumBitops<>::u<T>>(b));
}
template <class T>
constexpr EnumBitops<>::t<T, T&> operator|=(T& a, T b)
{
a = a | b;
return a;
}
template <class T>
constexpr EnumBitops<>::t<T, T&> operator&=(T& a, T b)
{
a = a & b;
return a;
}
template <class T>
constexpr EnumBitops<>::t<T, T&> operator^=(T& a, T b)
{
a = a ^ b;
return a;
}
template <class T, typename = EnumBitops<>::t<T>>
constexpr bool to_bool(T a)
{
return static_cast<EnumBitops<>::u<T>>(a) != 0;
}
#endif // DOXYGEN_BUILD
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 2,425 | C | 23.26 | 105 | 0.669278 |
omniverse-code/kit/include/omni/graph/exec/unstable/IApplyOnEachFunction.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IApplyOnEachFunction.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IApplyOnEachFunction.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IApplyOnEachFunction_abi;
class IApplyOnEachFunction;
class ExecutionPath;
//! Interface wrapping a function (possibly with storage) to apply on all instantiations of a given definition.
class IApplyOnEachFunction_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IApplyOnEachFunction")>
{
protected:
//! Invokes the wrapped function.
virtual void invoke_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IApplyOnEachFunction.
using ApplyOnEachFunctionPtr = omni::core::ObjectPtr<IApplyOnEachFunction>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IApplyOnEachFunction.gen.h>
//! @copydoc omni::graph::exec::unstable::IApplyOnEachFunction_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IApplyOnEachFunction
: public omni::core::Generated<omni::graph::exec::unstable::IApplyOnEachFunction_abi>
{
};
#include <omni/graph/exec/unstable/ExecutionPath.h>
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IApplyOnEachFunction.gen.h>
| 2,006 | C | 30.857142 | 114 | 0.759721 |
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionTask.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutionTask.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ExecutionTask.
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/Status.h>
#include <limits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionPath;
class IExecutionContext;
class IExecutionCurrentThread;
class IExecutor;
class INode;
//! A task tag can be used by other entities (such as @ref Executor) to group tasks together.
using ExecutionTaskTag = uint64_t;
//! Represents work item generated by an @ref IExecutor and passed to a scheduler for dispatch.
//!
//! ExecutionTask is a utility class that describes a task to be potentially executed on behalf of a @ref INode in a
//! given @ref IExecutionContext.
//!
//! @rst
//!
//! .. image:: /../docs/ef-execution-path-point-k.svg
//! :align: center
//!
//! @endrst
//!
//! @ref ExecutionTask stores four key pieces of information:
//!
//! - *A pointer to the Node to be executed*. The pointed to @ref INode contains a pointer to either an @ref INodeDef or
//! @ref NodeGraphDef which contains the computation definition. See @ref ExecutionTask::getNode().
//!
//! - *The unique path to the node*. In addition to the @ref INode to be executed, an @ref ExecutionPath to the node's
//! upstream (i.e. containing) node is stored. Combined, these two pieces of information form a unique id for the
//! node.
//!
//! Above, if an @ref ExecutionTask is describing the *k* node pointed to by the yellow arrow, @ref
//! ExecutionTask::getNode() would point to *k* and @ref ExecutionTask::getUpstreamPath() would return */f/p*. Note,
//! the @ref ExecutionTask::getUpstreamPath() *does not* store */f/p/k*, just */f/p*. This is a micro-optimization
//! that allows the same |ExecutionPath| to be reused while visiting nodes within the same
//! @ref INodeGraphDef.
//!
//! - *A pointer to the current execution's* @ref IExecutionContext. Execution always happen in a given context. It's
//! this context, @ref IExecutionContext, that stores the state of the execution. Multiple entities can be executing a
//! given @rstref{execution graph <ef_execution_graph>}, each execution using its own @ref IExecutionContext. In order
//! to understand which of these potentially many executions a task is a part of, @ref ExecutionTask stores a reference
//! to the execution's @ref IExecutionContext. This @ref ExecutionTask::getContext() combined with @ref
//! ExecutionTask::getUpstreamPath() and @ref ExecutionTask::getNode() can be used to access the per-execution state
//! for the node (see @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() and @ref OMNI_GRAPH_EXEC_SET_NODE_DATA()).
//!
//! - *A "tag" to identify a task when multiple tasks are associated with a node.* If an @ref INode generates many
//! tasks during execution, @ref ExecutionTask::getTag() can be used to uniquely identify each of the node's tasks.
//! The semantic meaning of @ref ExecutionTask::getTag() is @ref IExecutor dependent and can be used for purposes
//! other than unique identification.
//!
//! This struct is ABI-safe.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
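//! A minimal sketch of reading task information from inside executing code, via the supported accessor @ref
//! omni::graph::exec::unstable::getCurrentTask(). The compute entry point and the @ref Status values shown are
//! illustrative, not part of this header:
//!
//! @code{.cpp}
//!   using namespace omni::graph::exec::unstable;
//!
//!   Status myCompute() // hypothetical function invoked while a node executes
//!   {
//!       ExecutionTask* task = getCurrentTask(); // may be nullptr outside of execution
//!       if (!task)
//!           return Status::eFailure;
//!
//!       INode* node = task->getNode();                   // node being executed
//!       IExecutionContext* context = task->getContext(); // per-execution state lives here
//!       (void)node;
//!       (void)context;
//!       return Status::eSuccess;
//!   }
//! @endcode
//!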
// @note Node definition needs to stay unchanged while there are executions to complete. In the future we should fully
// decouple description of compute from compute symbol library. This would allow mutating the authoring side
// without the need to synchronize with execution.
class ExecutionTask
{
public:
enum : ExecutionTaskTag
{
kEmptyTag = std::numeric_limits<ExecutionTaskTag>::max() //!< Special value to represent an empty tag.
};
static_assert(std::numeric_limits<ExecutionTaskTag>::max() == 0xFFFFFFFFFFFFFFFF,
"unexpected ExecutionTaskTag max value");
//! Constructor for execution task
//!
//! @param context Context in which execution task is created. Task can only access state from this context.
//! @ref omni::core::IObject::acquire() is not called on this context. It is up to the calling
//! code to ensure the context remains valid for the lifetime of the ExecutionTask.
//!
//! @param node Node holding the execution definition. @ref omni::core::IObject::acquire() is not called on
//! this node. It is up to the calling code to ensure the node remains valid for the lifetime of
//! the ExecutionTask.
//!
//! @param upPath Execution path to the graph owning the node. Node can be executed multiple times with different
//! paths when graph definition is shared.
//!
//! @param tag Used to identify dynamically generated work items that node can compute.
ExecutionTask(IExecutionContext* context, INode* node, const ExecutionPath& upPath, ExecutionTaskTag tag = kEmptyTag) noexcept
: m_context(context), m_node(node), m_upstreamPath(&upPath), m_tag(tag)
{
OMNI_GRAPH_EXEC_ASSERT(context);
OMNI_GRAPH_EXEC_ASSERT(node);
static_assert(std::is_standard_layout<ExecutionTask>::value, "ExecutionTask is expected to be abi safe");
static_assert(offsetof(ExecutionTask, m_context) == 0, "unexpected context offset");
static_assert(offsetof(ExecutionTask, m_node) == 8, "unexpected node offset");
static_assert(offsetof(ExecutionTask, m_upstreamPath) == 16, "unexpected upstream path offset");
static_assert(offsetof(ExecutionTask, m_tag) == 24, "unexpected tag offset");
static_assert(offsetof(ExecutionTask, m_userIndex) == 32, "unexpected user index offset");
static_assert(offsetof(ExecutionTask, m_status) == 40, "unexpected status offset");
static_assert(48 == sizeof(ExecutionTask), "ExecutionTask is an unexpected size");
}
//! Return context for this task.
//!
//! The returned @ref IExecutionContext will *not* have @ref omni::core::IObject::acquire() called before being
//! returned.
IExecutionContext* getContext() const noexcept
{
return m_context;
}
//! Return node for this task.
//!
//! The returned @ref INode will *not* have @ref omni::core::IObject::acquire() called before being returned.
INode* getNode() const noexcept
{
return m_node;
}
//! Return execution path to graph owning the node.
const ExecutionPath& getUpstreamPath() const noexcept
{
return *m_upstreamPath;
}
//! Return tag.
ExecutionTaskTag getTag() const noexcept
{
return m_tag;
}
//! Check if this task has a valid tag set. A valid tag means that the node generates more than one task.
bool hasValidTag() const noexcept
{
return (m_tag != kEmptyTag);
}
//! Return execution status for this task
Status getExecutionStatus() const noexcept
{
return m_status;
}
//! Execute the task. Will be called by the scheduler when task is dispatched for execution.
inline Status execute(omni::core::ObjectParam<IExecutor> executor) noexcept;
//! This index will never be used by the framework, but is a way to pass something into
//! user code via a generated task. Mutating this value is allowed as long as it is done
//! via the only legal way to access a task, i.e. @ref getCurrentTask().
//!
//! Setter for the user index.
void setUserIndex(uint64_t userIndex) noexcept
{
m_userIndex = userIndex;
}
//! This index will never be used by the framework, but is a way to pass something into
//! user code via a generated task. Mutating this value is allowed as long as it is done
//! via the only legal way to access a task, i.e. @ref getCurrentTask().
//!
//! Getter for the user index.
uint64_t getUserIndex() const noexcept
{
return m_userIndex;
}
//! Sets the status of the task.
//!
//! This is an internal method and should not be called by users.
void setExecutionStatus(Status status) noexcept
{
m_status = status;
}
private:
//! Context in which this task was created. This context needs to live as long as there are still executions to
//! complete.
IExecutionContext* m_context;
//! Node holding the execution definition.
INode* m_node;
//! Execution path to the graph owning the node.
const ExecutionPath* m_upstreamPath;
//! Used to identify dynamically generated work items that node can compute.
ExecutionTaskTag m_tag;
//! User index to help with passing data into user code.
uint64_t m_userIndex{ 0 };
//! Execution status.
Status m_status{ Status::eUnknown };
//! Reserved padding space.
uint32_t m_reserved;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#include <omni/graph/exec/unstable/IExecutionCurrentThread.h>
//! Execute the task. Will be called by the scheduler when task is dispatched for execution.
inline omni::graph::exec::unstable::Status omni::graph::exec::unstable::ExecutionTask::execute(
omni::core::ObjectParam<IExecutor> executor) noexcept
{
if (Status::eUnknown != m_status)
{
return m_status;
}
return getCurrentThread()->execute(*this, executor, &m_status);
}
| 9,905 | C | 39.765432 | 130 | 0.691974 |
omniverse-code/kit/include/omni/graph/exec/unstable/IGraph.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGraph.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraph.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IExecutionContext;
class IGraph_abi;
class IGraph;
class INode;
class INodeGraphDef;
class ITopology;
//! Top-level container for storing the Execution Framework's graph of graphs.
//!
//! @ref omni::graph::exec::unstable::IGraph is the top-level container used to store the graph of graphs. This
//! top-level container is referred to as the <i>execution graph</i>.
//!
//! @ref omni::graph::exec::unstable::IGraph's responsibilities include:
//!
//! - Tracking if the graph is currently being constructed. See @ref omni::graph::exec::unstable::IGraph::inBuild().
//!
//! - Tracking gross changes to the topologies of graphs within the execution graph. This is done with the <i>global
//! topology stamp</i> (see @ref omni::graph::exec::unstable::IGraph::getGlobalTopologyStamp()). Each time a topology
//! is invalidated, the global topology stamp is incremented. Consumers of the execution graph can use this stamp to
//! detect changes in the graph. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! - Owning and providing access to the top level graph definition (see @ref
//! omni::graph::exec::unstable::IGraph::getNodeGraphDef()). The root node of the top-level graph definition is the
//! root of execution graph. @ref omni::graph::exec::unstable::IGraph is the only container, other than @ref
//! omni::graph::exec::unstable::INode, that attaches to definitions.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for more information on how @ref omni::graph::exec::unstable::IGraph
//! fits into the Execution Framework.
//!
//! See @ref omni::graph::exec::unstable::Graph for a concrete implementation of this interface.
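//!
//! A minimal change-detection sketch (assuming @c graph is valid and @c m_lastSeenTopology is a @c SyncStamp
//! cached by the consumer):
//!
//! @code{.cpp}
//!   void rebuildIfNeeded(omni::graph::exec::unstable::IGraph* graph)
//!   {
//!       // makeSync() returns true when the cached stamp was out of date and had to be updated
//!       if (m_lastSeenTopology.makeSync(*graph->getGlobalTopologyStamp()))
//!       {
//!           // topology changed since the last look; run the transformation pipeline here
//!       }
//!   }
//! @endcode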
class IGraph_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IGraph")>
{
protected:
//! Access the top-level node graph definition.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0;
//! Name set on the graph during construction.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0;
//! Return global topology of the graph. Useful when detecting that graph transformation pipeline needs to run.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} to understand how this stamp is used to detect changes
//! in the graph.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph. It is up to the caller to mutate the stamp in a thread safe manner.
virtual Stamp* getGlobalTopologyStamp_abi() noexcept = 0;
//! Return @c true if a @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
//!
//! @thread_safety This method is thread safe.
virtual bool inBuild_abi() noexcept = 0;
//! Mark that an @ref omni::graph::exec::unstable::IGraphBuilder is currently building a part of this graph.
//!
//! Each builder should call @c _setInBuild(true) followed by @c _setInBuild(false) once building is complete. Since
//! multiple builders can be active at a time, it is safe for this method to be called multiple times.
//!
//! This method should only be called by @ref omni::graph::exec::unstable::IGraphBuilder.
//!
//! @thread_safety This method is thread safe.
virtual void _setInBuild_abi(bool inBuild) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGraph.
using GraphPtr = omni::core::ObjectPtr<IGraph>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraph.gen.h>
//! @copydoc omni::graph::exec::unstable::IGraph_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraph : public omni::core::Generated<omni::graph::exec::unstable::IGraph_abi>
{
public:
//! Access topology of the graph.
//!
//! The returned @ref ITopology does *not* have @ref omni::core::IObject::acquire() called before being returned.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
inline ITopology* getTopology() noexcept;
//! Access root of the graph.
//!
//! The returned @ref INode does *not* have @ref omni::core::IObject::acquire() called before being returned.
//!
//! @thread_safety This method is thread safe. The returned pointer will be valid for the lifetime of this @ref
//! omni::graph::exec::unstable::IGraph.
inline INode* getRoot() noexcept;
};
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
inline omni::graph::exec::unstable::ITopology* omni::graph::exec::unstable::IGraph::getTopology() noexcept
{
return getNodeGraphDef()->getTopology();
}
inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::IGraph::getRoot() noexcept
{
return getNodeGraphDef()->getRoot();
}
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraph.gen.h>
| 6,646 | C | 41.883871 | 120 | 0.715769 |
omniverse-code/kit/include/omni/graph/exec/unstable/SmallStack.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <carb/Memory.h>
#include <omni/core/Assert.h>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
namespace detail
{
//! ABI-safe stack with inline memory to avoid heap allocation.
//!
//! Reserved memory within the stack will be used until it is exceeded, at which point heap memory will be used.
//!
//! It is assumed the items stored are the size of a `uint64_t`.
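//!
//! A minimal usage sketch:
//!
//! @code{.cpp}
//!   detail::SmallStack<uint64_t> stack;
//!   stack.push(1); // stays in the inline storage until its capacity is exceeded
//!   stack.push(2);
//!   uint64_t top = stack.top(); // 2
//!   stack.pop();                // stack now holds only 1
//!   (void)top;
//! @endcode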
template <typename T = uint64_t>
class SmallStack
{
public:
//! Type of the item in the stack.
using ItemType = T;
//! Constructor.
SmallStack() noexcept
{
static_assert(8 == sizeof(ItemType), "unexpected item size");
static_assert(std::is_trivially_destructible<ItemType>::value, "items stored must be trivially destructible");
static_assert(offsetof(SmallStack, m_external.data) == 0, "unexpected external data offset");
static_assert(offsetof(SmallStack, m_external.count) == 8, "unexpected external count offset");
static_assert(offsetof(SmallStack, m_external.maxCount) == 12, "unexpected external maxCount offset");
static_assert(offsetof(SmallStack, m_internal.data) == 0, "unexpected data offset");
static_assert(offsetof(SmallStack, m_internal.count) == 56, "unexpected count offset");
static_assert(offsetof(SmallStack, m_internal.isInternal) == 60, "unexpected internal flag offset");
m_internal.count = 0;
m_internal.isInternal = 1;
}
//! Constructor with a single item.
SmallStack(ItemType item) noexcept
{
m_internal.count = 0;
m_internal.isInternal = 1;
push(item); // may throw, but won't in this case.
}
//! Copy constructor.
//!
//! May throw.
SmallStack(const SmallStack& other) // may throw
{
m_internal.isInternal = 1;
_copy(other);
}
//! Construct from a range.
//!
//! @p end must be equal or greater than @p begin.
//!
//! May throw.
SmallStack(ItemType* begin, ItemType* end) // may throw
{
OMNI_ASSERT(end >= begin);
m_internal.isInternal = 1;
uint32_t count = static_cast<uint32_t>(end - begin);
_copy(begin, count, count);
}
//! Copies the contents of the given stack and pushes the given item.
//!
//! May throw.
SmallStack(const SmallStack& other, ItemType item) // may throw
{
uint32_t otherCount = other.count();
uint32_t count = otherCount + 1;
ItemType* p;
if (count > kMaxInternalDataItemCount)
{
p = _allocate(count);
m_internal.isInternal = 0;
m_external.data = p;
m_external.count = count;
m_external.maxCount = count;
}
else
{
m_internal.isInternal = 1;
p = m_internal.data;
m_internal.count = count;
}
std::memcpy(p, other.begin(), sizeof(ItemType) * otherCount);
p[otherCount] = item;
}
//! Move constructor.
SmallStack(SmallStack&& other) noexcept
{
m_internal.isInternal = 1;
_move(std::move(other));
}
//! Destructor
~SmallStack() noexcept
{
_free();
}
//! Assignment operator.
//!
//! May throw.
SmallStack& operator=(const SmallStack& other) // may throw
{
if (this != &other)
{
_copy(other);
}
return *this;
}
//! Assignment operator.
SmallStack& operator=(SmallStack&& other) noexcept
{
if (this != &other)
{
_move(std::move(other));
}
return *this;
}
//! Compares two stacks, returning either a negative number, positive number, or zero.
//!
//! Works similarly to @c std::memcmp.
//!
//! Returns a negative value if this stack is less than @p other.
//!
//! Returns a positive value if this stack is greater than @p other.
//!
//! Returns zero if the stacks are equal.
//!
//! The returned negative or positive values are not guaranteed to be exactly -1 or 1.
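//!
//! For example (a sketch; the comparison is byte-wise over the stored items, not numeric):
//!
//! @code{.cpp}
//!   SmallStack<> a(1), b(2);
//!   bool same = (a.compare(a) == 0); // equal stacks compare to zero
//!   bool diff = (a.compare(b) != 0); // differing contents give a non-zero result
//! @endcode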
int compare(const SmallStack& other) const noexcept
{
int thisCount = count();
int otherCount = other.count();
if (thisCount == otherCount)
{
return std::memcmp(begin(), other.begin(), sizeof(ItemType) * otherCount);
}
else
{
return (thisCount - otherCount);
}
}
//! Return @c true if the stack is empty.
inline bool empty() const noexcept
{
return (0 == count());
}
//! Returns the top of the stack.
//!
//! Reading the top of an empty stack is undefined behavior.
inline ItemType top() const noexcept
{
if (_isInternal())
{
OMNI_ASSERT(0 != m_internal.count);
return m_internal.data[m_internal.count - 1];
}
else
{
OMNI_ASSERT(0 != m_external.count);
return m_external.data[m_external.count - 1];
}
}
//! Push the given item to the top of the stack.
//!
//! May throw.
inline void push(ItemType elem) // may throw
{
if (_isInternal())
{
if (m_internal.count == kMaxInternalDataItemCount)
{
// we've run out of internal space
_allocExternalAndCopyInternal();
m_external.data[m_external.count++] = elem;
}
else
{
m_internal.data[m_internal.count++] = elem;
}
}
else
{
if (m_external.count == m_external.maxCount)
{
_grow();
}
m_external.data[m_external.count++] = elem;
}
}
//! Removes the top of the stack.
//!
//! Popping an empty stack is undefined behavior.
inline void pop() noexcept
{
if (_isInternal())
{
OMNI_ASSERT(m_internal.count > 0);
m_internal.count--;
}
else
{
OMNI_ASSERT(m_external.count > 0);
m_external.count--;
}
}
//! Returns the number of items in the stack.
inline uint32_t count() const noexcept
{
if (_isInternal())
{
return m_internal.count;
}
else
{
return m_external.count;
}
}
//! Returns the number of items in the stack.
inline uint32_t size() const noexcept
{
return count();
}
//! Returns a pointer to the oldest item in the stack.
//!
//! If the stack is empty, the returned pointer should not be read or written, though it can be compared to @ref end().
inline const ItemType* begin() const noexcept
{
if (_isInternal())
{
return m_internal.data;
}
else
{
return m_external.data;
}
}
//! Returns a pointer to one past the top of the stack.
//!
//! If the stack is empty, the returned pointer should not be read or written, though it can be compared to @ref
//! begin().
inline const ItemType* end() const noexcept
{
if (_isInternal())
{
return m_internal.data + m_internal.count;
}
else
{
return m_external.data + m_external.count;
}
}
//! Returns a pointer to the oldest item in the stack.
//!
//! Results are undefined if the stack is empty.
inline const ItemType* data() const noexcept
{
return begin();
}
private:
inline bool _isInternal() const noexcept
{
return m_internal.isInternal;
}
inline uint32_t _maxCount() const noexcept
{
if (_isInternal())
{
return kMaxInternalDataItemCount;
}
else
{
return m_external.maxCount;
}
}
inline void _free() noexcept
{
if (!_isInternal())
{
carb::deallocate(m_external.data);
m_internal.count = 0;
m_internal.isInternal = 1; // revert to (empty) internal storage so the freed pointer can never be deallocated twice
}
}
// assumes _free() has already been called (when needed)
inline void _copy(const SmallStack& other)
{
_copy(const_cast<ItemType*>(other.begin()), other.count(), other._maxCount());
}
// assumes _free() has already been called (when needed)
inline void _copy(ItemType* data, uint32_t count, uint32_t maxCount)
{
if (_maxCount() < count)
{
// not enough storage for the copy. we'll have to allocate more.
OMNI_ASSERT(maxCount >= count);
_free();
m_external.data = reinterpret_cast<ItemType*>(carb::allocate(sizeof(ItemType) * maxCount));
if (!m_external.data)
{
throw std::bad_alloc();
}
std::memcpy(m_external.data, data, sizeof(ItemType) * count);
m_external.count = count;
m_external.maxCount = maxCount;
m_internal.isInternal = 0;
}
else
{
// data fits in our storage. simply copy it.
if (_isInternal())
{
std::memcpy(m_internal.data, data, sizeof(ItemType) * count);
m_internal.count = count;
}
else
{
std::memcpy(m_external.data, data, sizeof(ItemType) * count);
m_external.count = count;
}
}
}
// assumes _free() has already been called (when needed)
inline void _move(SmallStack&& other) noexcept
{
if (other._isInternal())
{
// since other is using its internal storage, we have to copy the data
_copy(other);
other.m_internal.count = 0;
}
else
{
// other is using external storage
_free();
m_internal.isInternal = 0;
m_external.data = other.m_external.data;
m_external.count = other.m_external.count;
m_external.maxCount = other.m_external.maxCount;
other.m_internal.count = 0;
other.m_internal.isInternal = 1;
}
}
inline ItemType* _allocate(uint32_t maxCount)
{
auto data = reinterpret_cast<ItemType*>(carb::allocate(sizeof(ItemType) * maxCount));
if (!data)
{
throw std::bad_alloc();
}
return data;
}
inline void _allocExternalAndCopyInternal()
{
OMNI_ASSERT(_isInternal());
constexpr uint32_t newMaxCount = kMaxInternalDataItemCount * 2;
ItemType* data = _allocate(newMaxCount);
std::memcpy(data, m_internal.data, sizeof(ItemType) * kMaxInternalDataItemCount); // copy only the stored items
m_external.data = data;
m_external.count = kMaxInternalDataItemCount;
m_external.maxCount = newMaxCount;
m_internal.isInternal = 0;
}
inline void _grow()
{
OMNI_ASSERT(!_isInternal());
OMNI_ASSERT(m_external.maxCount > 0);
m_external.maxCount *= 2;
ItemType* data = _allocate(m_external.maxCount);
std::memcpy(data, m_external.data, sizeof(ItemType) * m_external.count);
carb::deallocate(m_external.data);
m_external.data = data;
}
constexpr static uint32_t kMaxInternalDataItemCount = 7;
struct ExternalData
{
ItemType* data;
uint32_t count;
uint32_t maxCount;
};
static_assert(sizeof(ExternalData) == 16, "ExternalData is unexpected size");
struct InternalData
{
ItemType data[kMaxInternalDataItemCount];
uint32_t count;
uint32_t isInternal;
};
static_assert(sizeof(InternalData) == 64, "InternalData is unexpected size");
private:
union
{
ExternalData m_external;
InternalData m_internal;
};
};
static_assert(sizeof(SmallStack<uint64_t>) == 64, "SmallStack has unexpected size");
static_assert(std::is_standard_layout<SmallStack<uint64_t>>::value, "SmallStack is not ABI-safe");
} // namespace detail
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 12,761 | C | 25.980972 | 119 | 0.561633 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutor.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Executes the node in a graph definition.
//!
//! The purpose of an executor is to generate work for the nodes in a graph definition. @ref
//! omni::graph::exec::unstable::IExecutor is a minimal interface that defines enough methods to accomplish just that.
//!
//! However, @ref omni::graph::exec::unstable::IExecutor's minimal nature is not what most users want when customizing
//! execution for their graph definitions. Rather, they want something useful. @ref
//! omni::graph::exec::unstable::Executor is a useful implementation of @ref omni::graph::exec::unstable::IExecutor
//! designed for graph definition authors to extend. See
//! @ref omni::graph::exec::unstable::Executor's documentation to better understand the purpose, duties, and
//! capabilities of an executor.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See @rstref{Creating an Executor <ef_executor_creation>} for a guide on creating a customized executor for your
//! graph definition.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi> : public omni::graph::exec::unstable::IExecutor_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutor")
//! Main execute method. Returns the status of the execution.
omni::graph::exec::unstable::Status execute() noexcept;
//! Request for scheduling of additional work after the given task has executed but before it has completed.
//!
//! @param task The current task
omni::graph::exec::unstable::Status continueExecute(omni::graph::exec::unstable::ExecutionTask& task) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>::execute() noexcept
{
return execute_abi();
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>::continueExecute(
omni::graph::exec::unstable::ExecutionTask& task) noexcept
{
return continueExecute_abi(&task);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 3,011 | C | 37.615384 | 128 | 0.736964 |
omniverse-code/kit/include/omni/graph/exec/unstable/Module.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Module.h
//!
//! @brief Helpers for writing modules/plugins based on @ref omni::graph::exec.
#pragma once
#include <omni/graph/exec/unstable/PassRegistry.h>
//! Helper macro to ensure EF features are enabled in the current module/plugin.
//!
//! This macro should be called from either @c carbOnPluginStartup or @c onStarted.
//!
//! If your module/plugin registers EF nodes or passes, you must call this macro.
//!
//! For Kit-based extensions, rather than calling this macro, call OMNI_KIT_EXEC_CORE_ON_MODULE_STARTED(), which will
//! call this macro on your behalf.
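//!
//! For example, a Carbonite plugin might invoke it from its startup entry point (a sketch; the module name is
//! illustrative):
//!
//! @code{.cpp}
//!   CARB_EXPORT void carbOnPluginStartup()
//!   {
//!       OMNI_GRAPH_EXEC_ON_MODULE_STARTED("omni.example.plugin");
//!   }
//! @endcode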
#define OMNI_GRAPH_EXEC_ON_MODULE_STARTED(moduleName_) \
try \
{ \
omni::graph::exec::unstable::registerModulePasses(); \
} \
catch (std::exception & e) \
{ \
CARB_LOG_ERROR("failed to register %s's passes: %s", moduleName_, e.what()); \
}
//! Helper macro to ensure EF features are safely disabled when the current module/plugin unloads.
//!
//! This macro should be called from either @c carbOnPluginShutdown or @c onUnload.
//!
//! If your module/plugin registers EF nodes or passes, you must call this macro.
//!
//! For Kit-based extensions, rather than calling this macro, call OMNI_KIT_EXEC_CORE_ON_MODULE_UNLOAD(), which will
//! call this macro on your behalf.
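//!
//! For example (a sketch):
//!
//! @code{.cpp}
//!   CARB_EXPORT void carbOnPluginShutdown()
//!   {
//!       OMNI_GRAPH_EXEC_ON_MODULE_UNLOAD();
//!   }
//! @endcode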
#define OMNI_GRAPH_EXEC_ON_MODULE_UNLOAD() \
do \
{ \
omni::graph::exec::unstable::deregisterModulePasses(); \
} while (0)
| 2,927 | C | 59.999999 | 120 | 0.457123 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionCurrentThread.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Encapsulates the execution state for the current thread allowing callers to determine quantities like the @ref
//! omni::graph::exec::unstable::ExecutionTask currently executing on the thread.
//!
//! Because methods in this interface return thread local data, all methods in this interface are thread safe.
//!
//! This interface is usually accessed as a singleton via one of the following helper methods:
//!
//! - @ref omni::graph::exec::unstable::getCurrentTask()
//!
//! - @ref omni::graph::exec::unstable::getCurrentExecutor()
//!
//! This interface contains methods for graph and task execution. Users should not call these methods directly. See
//! the methods' docs below for the correct way to perform execution.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>
: public omni::graph::exec::unstable::IExecutionCurrentThread_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionCurrentThread")
//! Executes the given @ref omni::graph::exec::unstable::Graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::IExecutionContext::execute().
//!
//! From an ABI point-of-view, the purpose of this method is to handle the special case of the top-level @ref
//! omni::graph::exec::unstable::INodeGraphDef being contained by @ref omni::graph::exec::unstable::IGraph rather
//! than pointed to by a node in another @ref omni::graph::exec::unstable::INodeGraphDef. Meaningful values are set
//! for the threads current task and executor (see @ref omni::graph::exec::unstable::getCurrentTask() and @ref
//! omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::Status executeGraph(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraph> graph,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutionContext> context);
//! Executes and sets the thread's "current" task to the given task.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::ExecutionTask::execute().
//!
//! This method executes the definition of the node pointed to by the given task. Importantly, this method sets
//! thread local data to track the currently running task and executor (see @ref
//! omni::graph::exec::unstable::getCurrentTask() and @ref omni::graph::exec::unstable::getCurrentExecutor()).
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::Status execute(omni::graph::exec::unstable::ExecutionTask& task,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutor> executor,
omni::graph::exec::unstable::Status* taskStatus);
//! Access the task currently executing on the current thread.
//!
//! Useful when needing to access execution context state without having to pass it to every function.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentTask().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::ExecutionTask* getCurrentTask() noexcept;
//! Access the executor currently executing on the current thread.
//!
//! Useful when needing to spawn extra work within the scope of the graph.
//!
//! Do not call this function directly, rather, call @ref omni::graph::exec::unstable::getCurrentExecutor().
//!
//! May return @c nullptr.
//!
//! @thread_safety This method is thread safe.
omni::graph::exec::unstable::IExecutor* getCurrentExecutor() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>::executeGraph(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraph> graph,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutionContext> context)
{
OMNI_THROW_IF_ARG_NULL(graph);
OMNI_THROW_IF_ARG_NULL(context);
auto return_ = executeGraph_abi(graph.get(), context.get());
return return_;
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionCurrentThread_abi>::execute(
omni::graph::exec::unstable::ExecutionTask& task,
omni::core::ObjectParam<omni::graph::exec::unstable::IExecutor> executor,
omni::graph::exec::unstable::Status* taskStatus)
{
OMNI_THROW_IF_ARG_NULL(taskStatus);
auto return_ = execute_abi(&task, executor.get(), taskStatus);
return return_;
}
inline omni::graph::exec::unstable::ExecutionTask* omni::core::Generated<
omni::graph::exec::unstable::IExecutionCurrentThread_abi>::getCurrentTask() noexcept
{
return getCurrentTask_abi();
}
inline omni::graph::exec::unstable::IExecutor* omni::core::Generated<
omni::graph::exec::unstable::IExecutionCurrentThread_abi>::getCurrentExecutor() noexcept
{
return getCurrentExecutor_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 6,214 | C | 43.078014 | 137 | 0.702124 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPassTypeRegistry.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPassTypeRegistry.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassTypeRegistry.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/ElementAt.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Types.h>
#include <cstring>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IPassFactory;
class IPassTypeRegistry;
class IPassTypeRegistry_abi;
//! ABI-safe struct to hold registered @ref omni::graph::exec::unstable::IPassFactory objects.
struct PassTypeRegistryEntry
{
//! The name of the pass type.
const char* name;
//! Factory interface for creating an instance of the pass.
//!
//! This struct does not acquire this pointer.
//!
//! This pointer is never @c nullptr.
IPassFactory* factory;
//! Some passes (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a graph.
//! This field is used to specify the name of the node/definitions the pass wishes to affect.
//!
//! The meaning of this field is pass type dependent. Many passes ignore this field.
//!
//! This pointer is never @c nullptr.
const ConstName* nameToMatch;
//! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When
//! multiple passes wish to affect an entity, this priority value can be used to resolve the conflict. The meaning
//! of the priority value is pass type specific. Many passes ignore this value.
PassPriority priority;
//! Reserved padding space.
uint32_t reserved;
};
static_assert(std::is_standard_layout<PassTypeRegistryEntry>::value, "PassTypeRegistryEntry is expected to be abi safe");
static_assert(offsetof(PassTypeRegistryEntry, name) == 0, "unexpected name offset");
static_assert(offsetof(PassTypeRegistryEntry, factory) == 8, "unexpected factory offset");
static_assert(offsetof(PassTypeRegistryEntry, nameToMatch) == 16, "unexpected nameToMatch offset");
static_assert(offsetof(PassTypeRegistryEntry, priority) == 24, "unexpected priority offset");
static_assert(32 == sizeof(PassTypeRegistryEntry), "PassTypeRegistryEntry is an unexpected size");
//! @ref omni::graph::exec::unstable::IPassFactory registry for a particular @ref omni::graph::exec::unstable::PassType.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPassTypeRegistry_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IPassTypeRegistry")>
{
protected:
//! Returns the number of registered passes.
virtual uint64_t getPassCount_abi() noexcept = 0;
//! Returns the pass at the given index.
//!
//! If the index is greater than the count, an error is returned.
//!
//! The returned @ref omni::graph::exec::unstable::PassTypeRegistryEntry is valid as long as this pass type registry
//! is not mutated (e.g. a pass is added or removed from the registry).
virtual OMNI_ATTR("throw_result") omni::core::Result
getPassAt_abi(uint64_t index,
OMNI_ATTR("out, not_null, throw_if_null") PassTypeRegistryEntry* outEntry) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IPassTypeRegistry.
using PassTypeRegistryPtr = omni::core::ObjectPtr<IPassTypeRegistry>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPassTypeRegistry.gen.h>
//! @copydoc omni::graph::exec::unstable::IPassTypeRegistry_abi
//!
//! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPassTypeRegistry
: public omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>
{
public:
//! Implementation detail to access registry ABI
struct GetPass
{
//! Access element at a given index
static void getAt(IPassTypeRegistry* owner, uint64_t index, PassTypeRegistryEntry* out)
{
owner->getPassAt(index, out);
}
//! Returns element count
static uint64_t getCount(IPassTypeRegistry* owner)
{
return owner->getPassCount();
}
};
//! Implementation detail that wraps index-based node access with iterators.
using Passes = detail::ElementAt<IPassTypeRegistry, PassTypeRegistryEntry, GetPass>;
//! Returns an object that allows the list of passes to be iterated over (i.e. using range-based for loops).
//!
//! The returned iterator is valid as long as this pass type registry is not mutated (e.g. a pass is added or
//! removed from the registry).
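//!
//! A minimal iteration sketch (assuming @c registry points to a valid @ref IPassTypeRegistry and carb logging
//! is available):
//!
//! @code{.cpp}
//!   for (const PassTypeRegistryEntry& entry : registry->getPasses())
//!   {
//!       CARB_LOG_INFO("pass '%s' (priority %u)", entry.name, static_cast<unsigned>(entry.priority));
//!   }
//! @endcode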
Passes getPasses() noexcept
{
return Passes(this);
}
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IPassFactory.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPassTypeRegistry.gen.h>
| 5,759 | C | 37.4 | 121 | 0.718875 |
omniverse-code/kit/include/omni/graph/exec/unstable/INode.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Represents work in a graph. Nodes point to a shared execution definition to state the actual work.
//!
//! @ref omni::graph::exec::unstable::INode is the main structural component used to build a graph's topology. @ref
//! omni::graph::exec::unstable::INode stores edges to *parents* (i.e. predecessors) and *children* (i.e. successors).
//! These edges set an ordering between nodes. See @ref omni::graph::exec::unstable::INode::getParents() and @ref
//! omni::graph::exec::unstable::INode::getChildren() respectively.
//!
//! A node represents work to be performed. The description of the work to be performed is stored in a *definition*
//! (i.e. @ref omni::graph::exec::unstable::IDef). Each node wishing to perform work points to a definition (see @ref
//! omni::graph::exec::unstable::INode::getDef()).
//!
//! The definition to which a node points can be one of two types. The first type, @ref
//! omni::graph::exec::unstable::INodeDef, defines work opaquely (i.e. EF is unable to view the work definition and
//! potentially optimize it). The second type, @ref omni::graph::exec::unstable::INodeGraphDef, defines work with a
//! graph. This last representation is the most powerful, as it allows for both *extensibility* and *composability* in
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//! :align: center
//!
//! @endrst
//!
//! Above, we see that nodes point to graph definitions, which contain other nodes that point to other graph
//! definitions. This structure of graphs pointing to other graphs is where EF gets its *graph of graphs* name.
//!
//! Not all nodes will point to a definition. For example, the @rstref{root node <ef_root_node>} in each graph
//! definition will not point to a definition.
//!
//! A node is always part of a graph definition and the graph definition's executor is responsible for orchestrating and
//! generating work to the scheduler.
//!
//! Nodes within a graph definition are assigned a unique index, between zero and the number of nodes in the
//! definition. This index is often used as a lookup into transient arrays used to store state during graph traversals.
//! See @ref omni::graph::exec::unstable::INode::getIndexInTopology().
//!
//! Nodes have a notion of validity. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! @ref omni::graph::exec::unstable::INode does not contain methods for either settings the node's definition or
//! connecting nodes to each other. This functionality is reserved for @ref omni::graph::exec::unstable::IGraphBuilder.
//! See @rstref{Graph Construction <ef_pass_concepts>} for details.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for a guide on how this object relates to other objects in the
//! Execution Framework.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Users may wish to implement this interface to store meaningful authoring level data in EF. For example, OmniGraph
//! uses an implementation of this node to store graph instancing information. See @ref
//! omni::graph::exec::unstable::Node for a concrete implementation of this interface suitable for sub-classing.
template <>
class omni::core::Generated<omni::graph::exec::unstable::INode_abi> : public omni::graph::exec::unstable::INode_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INode")
//! Access topology owning this node
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
omni::graph::exec::unstable::ITopology* getTopology() noexcept;
//! Access node's unique identifier name.
const omni::graph::exec::unstable::ConstName& getName() noexcept;
//! Access the node's unique index within the owning topology. The index will always be smaller than the topology size.
omni::graph::exec::unstable::NodeIndexInTopology getIndexInTopology() noexcept;
//! Access parents.
omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getParents() noexcept;
//! Access children.
omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> getChildren() noexcept;
//! Return number of parents that cause cycles within the graph during traversal over this node.
uint32_t getCycleParentCount() noexcept;
//! Check if topology/connectivity of nodes is valid within current topology version.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
bool isValidTopology() noexcept;
//! Make topology valid for current topology version. Drop all the connections if topology changed.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
void validateOrResetTopology() noexcept;
//! Access base node definition (can be empty).
//!
//! When you wish to determine if the attached definition is either opaque or a graph, consider calling @ref
//! omni::graph::exec::unstable::INode::getNodeDef() or @ref omni::graph::exec::unstable::INode::getNodeGraphDef()
//! rather than this method.
//!
//! The returned @ref omni::graph::exec::unstable::IDef will *not* have @ref omni::core::IObject::acquire() called
//! before being returned.
omni::graph::exec::unstable::IDef* getDef() noexcept;
//! Access node definition (can be empty).
//!
//! If the returned pointer is @c nullptr, either the definition does not implement @ref
//! omni::graph::exec::unstable::INodeDef or there is no definition attached to the node.
//!
//! The returned @ref omni::graph::exec::unstable::INodeDef will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
//!
//! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
//! omni::graph::exec::unstable::INode::getNodeGraphDef().
omni::graph::exec::unstable::INodeDef* getNodeDef() noexcept;
//! Access node's graph definition (can be empty)
//!
//! The returned graph definition pointer is the graph definition which defines the work this node represents. The
//! returned pointer **is not** the graph definition that contains this node.
//!
//! If the returned pointer is @c nullptr, either the definition does not implement @ref
//! omni::graph::exec::unstable::INodeGraphDef or there is no definition attached to the node.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
//!
//! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
//! omni::graph::exec::unstable::INode::getNodeDef().
omni::graph::exec::unstable::INodeGraphDef* getNodeGraphDef() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getTopology() noexcept
{
return getTopology_abi();
}
inline const omni::graph::exec::unstable::ConstName& omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getName() noexcept
{
return *(getName_abi());
}
inline omni::graph::exec::unstable::NodeIndexInTopology omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getIndexInTopology() noexcept
{
return getIndexInTopology_abi();
}
inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getParents() noexcept
{
return getParents_abi();
}
inline omni::graph::exec::unstable::Span<omni::graph::exec::unstable::INode* const> omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getChildren() noexcept
{
return getChildren_abi();
}
inline uint32_t omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getCycleParentCount() noexcept
{
return getCycleParentCount_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::INode_abi>::isValidTopology() noexcept
{
return isValidTopology_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::INode_abi>::validateOrResetTopology() noexcept
{
validateOrResetTopology_abi();
}
inline omni::graph::exec::unstable::IDef* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getDef() noexcept
{
return getDef_abi();
}
inline omni::graph::exec::unstable::INodeDef* omni::core::Generated<omni::graph::exec::unstable::INode_abi>::getNodeDef() noexcept
{
return getNodeDef_abi();
}
inline omni::graph::exec::unstable::INodeGraphDef* omni::core::Generated<
omni::graph::exec::unstable::INode_abi>::getNodeGraphDef() noexcept
{
return getNodeGraphDef_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 9,758 | C | 43.359091 | 134 | 0.713158 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPass.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPass.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IPass;
class IPass_abi;
//! @defgroup groupOmniGraphExecPasses Passes
//!
//! @brief Interfaces, classes, and helpers related to graph transformation passes.
//!
//! Passes are user definable objects that populate, transform, and optimize the execution graph.
//!
//! Passes are registered using one of the @ref groupOmniGraphExecPassRegistration helpers.
//!
//! Passes are executed during graph construction via a @ref omni::graph::exec::unstable::PassPipeline.
//! Base class for graph transformation passes.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPass_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPass")>
{
};
//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IPass.
using PassPtr = omni::core::ObjectPtr<IPass>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPass.gen.h>
//! @copydoc omni::graph::exec::unstable::IPass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPass : public omni::core::Generated<omni::graph::exec::unstable::IPass_abi>
{
};
// additional headers needed for API implementation
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPass.gen.h>
| 2,247 | C | 29.79452 | 117 | 0.757899 |
omniverse-code/kit/include/omni/graph/exec/unstable/Node.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Node.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Node.
#pragma once
#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilderNode.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/ITopology.h>
#include <omni/graph/exec/unstable/SmallVector.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::INode
template <typename... Bases>
class NodeT : public Implements<Bases...>
{
public:
//! Constructor of a node with an empty definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> topology, const char* idName)
{
OMNI_THROW_IF_ARG_NULL(topology);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(topology.get(), idName));
}
//! Constructor of a node with an empty definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner, const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner->getTopology(), idName));
}
//! Constructor of a node with a node graph definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner->getTopology(), nodeGraphDef.get(), idName));
}
//! Constructor of a node with a node graph definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> owner,
omni::core::ObjectParam<INodeGraphDef> nodeGraphDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner.get(), nodeGraphDef.get(), idName));
}
//! Constructor of a node with an opaque node definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<IGraph> owner,
omni::core::ObjectParam<INodeDef> nodeDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(owner);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(owner->getTopology(), nodeDef.get(), idName));
}
//! Constructor of a node with an opaque node definition.
//!
//! May throw.
static omni::core::ObjectPtr<NodeT> create(omni::core::ObjectParam<ITopology> topology,
omni::core::ObjectParam<INodeDef> nodeDef,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(topology);
OMNI_THROW_IF_ARG_NULL(idName);
return omni::core::steal(new NodeT(topology.get(), nodeDef.get(), idName));
}
//! Constructor of a node with a base definition (can be @c nullptr, an @ref INodeDef, or an @ref INodeGraphDef).
//!
//! May throw.
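//!
//! A minimal sketch (assuming @c topology and @c def are valid, and that @c Node is a concrete alias for a
//! @c NodeT instantiation):
//!
//! @code{.cpp}
//!   auto node = Node::createForDef(topology, def, "myNode");
//!   if (!node)
//!   {
//!       // def implements neither INodeDef nor INodeGraphDef
//!   }
//! @endcode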
static omni::core::ObjectPtr<NodeT> createForDef(omni::core::ObjectParam<ITopology> topology,
omni::core::ObjectParam<IDef> def,
const char* idName)
{
OMNI_THROW_IF_ARG_NULL(topology);
OMNI_THROW_IF_ARG_NULL(idName);
if (!def)
return omni::core::steal(new NodeT(topology.get(), idName));
else if (auto* nodeDef = omni::graph::exec::unstable::cast<INodeDef>(def))
return omni::core::steal(new NodeT(topology.get(), nodeDef, idName));
else if (auto* nodeGraphDef = omni::graph::exec::unstable::cast<INodeGraphDef>(def))
return omni::core::steal(new NodeT(topology.get(), nodeGraphDef, idName));
else
return nullptr;
}
//! Destructor
virtual ~NodeT()
{
// in case we decide to implement move constructor
if (m_indexInTopology != kInvalidNodeIndexInTopology)
{
m_topology->releaseNodeIndex(m_indexInTopology);
if (isValidTopology_abi())
{
m_topology->invalidate();
}
}
}
// disambiguate between INode and IGraphBuilderNode
using INode::getChildren;
using INode::getParents;
using INode::getTopology;
protected:
//! Core implementation of @ref omni::graph::exec::unstable::INode::getTopology_abi
ITopology* getTopology_abi() noexcept override
{
return m_topology;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getName_abi
const ConstName* getName_abi() noexcept override
{
return &m_name;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getIndexInTopology_abi
NodeIndexInTopology getIndexInTopology_abi() noexcept override
{
return m_indexInTopology;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getParents_abi
Span<INode* const> getParents_abi() noexcept override
{
return isValidTopology_abi() ? Span<INode* const>{ m_parents.begin(), m_parents.size() } :
Span<INode* const>{ nullptr, 0 };
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getChildren_abi
Span<INode* const> getChildren_abi() noexcept override
{
return isValidTopology_abi() ? Span<INode* const>{ m_children.begin(), m_children.size() } :
Span<INode* const>{ nullptr, 0 };
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getCycleParentCount_abi
uint32_t getCycleParentCount_abi() noexcept override
{
return isValidTopology_abi() ? m_cycleParentCount : 0;
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::isValidTopology_abi
//!
//! @note This method is called in the destructor and therefore must be marked as final
bool isValidTopology_abi() noexcept final override
{
return m_topologyStamp.inSync(m_topology->getStamp());
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::validateOrResetTopology_abi
virtual void validateOrResetTopology_abi() noexcept
{
if (m_topologyStamp.makeSync(m_topology->getStamp()))
{
// topology changed, let's clear the old one
m_parents.clear();
m_children.clear();
m_cycleParentCount = 0;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getDef_abi
IDef* getDef_abi() noexcept override
{
if (m_nodeDef.get())
{
return m_nodeDef.get();
}
else
{
return m_nodeGraphDef.get();
}
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getNodeDef_abi
INodeDef* getNodeDef_abi() noexcept override
{
return m_nodeDef.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::INode::getNodeGraphDef_abi
INodeGraphDef* getNodeGraphDef_abi() noexcept override
{
return m_nodeGraphDef.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_addParent_abi
omni::core::Result _addParent_abi(IGraphBuilderNode* parent) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, parent);
m_parents.push_back(asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeParent_abi
omni::core::Result _removeParent_abi(IGraphBuilderNode* parent) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, parent);
_eraseRemove(m_parents, asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_addChild_abi
omni::core::Result _addChild_abi(IGraphBuilderNode* child) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, child);
m_children.push_back(asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeChild_abi
omni::core::Result _removeChild_abi(IGraphBuilderNode* child) noexcept override
{
try
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asNode, INode, child);
_eraseRemove(m_children, asNode);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeInvalidParents_abi
void _removeInvalidParents_abi() noexcept override
{
if (isValidTopology_abi())
{
m_parents.erase(
std::remove_if(m_parents.begin(), m_parents.end(), [](INode* n) { return !n->isValidTopology(); }),
m_parents.end());
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_removeInvalidChildren_abi
void _removeInvalidChildren_abi() noexcept override
{
if (isValidTopology_abi())
{
m_children.erase(
std::remove_if(m_children.begin(), m_children.end(), [](INode* n) { return !n->isValidTopology(); }),
m_children.end());
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_invalidateConnections_abi
//!
//! @warning This only removes connections on a single node. The topology has bi-directional connections
//! for every node with the exception of the connection with the root node.
void _invalidateConnections_abi() noexcept override
{
m_topologyStamp.invalidate();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::setCycleParentCount_abi
void setCycleParentCount_abi(uint32_t count) noexcept override
{
m_cycleParentCount = count;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef_abi
void _setNodeDef_abi(INodeDef* nodeDef) noexcept override
{
m_nodeDef.borrow(nodeDef);
m_nodeGraphDef.release();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef_abi
void _setNodeGraphDef_abi(INodeGraphDef* nodeGraphDef) noexcept override
{
m_nodeGraphDef.borrow(nodeGraphDef);
m_nodeDef.release();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::_clearDef_abi
void _clearDef_abi() noexcept override
{
m_nodeDef.release();
m_nodeGraphDef.release();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentAt_abi
omni::core::Result getParentAt_abi(uint64_t index, IGraphBuilderNode** out) noexcept override
{
*out = nullptr;
if (!isValidTopology_abi() || index >= m_parents.size())
{
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultInvalidIndex);
}
else
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(
asGraphBuilderNode, IGraphBuilderNode, m_parents[static_cast<uint32_t>(index)]);
*out = asGraphBuilderNode; // explicitly does not acquire
return omni::core::kResultSuccess;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount_abi
uint64_t getParentCount_abi() noexcept override
{
return isValidTopology_abi() ? m_parents.size() : 0;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildAt_abi
omni::core::Result getChildAt_abi(uint64_t index, IGraphBuilderNode** out) noexcept override
{
*out = nullptr;
if (!isValidTopology_abi() || index >= m_children.size())
{
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultInvalidIndex);
}
else
{
OMNI_GRAPH_EXEC_CAST_OR_RETURN(
asGraphBuilderNode, IGraphBuilderNode, m_children[static_cast<uint32_t>(index)]);
*out = asGraphBuilderNode; // explicitly does not acquire
return omni::core::kResultSuccess;
}
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount_abi
uint64_t getChildCount_abi() noexcept override
{
return isValidTopology_abi() ? m_children.size() : 0;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::hasChild_abi
bool hasChild_abi(IGraphBuilderNode* node) noexcept override
{
if (!isValidTopology_abi())
return false;
auto asNode = omni::graph::exec::unstable::cast<INode>(node);
if (!asNode)
{
return false;
}
return std::find(m_children.begin(), m_children.end(), asNode) != m_children.end();
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::isRoot_abi
bool isRoot_abi() noexcept override
{
return (m_topology->getRoot() == static_cast<INode*>(this));
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderNode::getRoot_abi
omni::core::Result getRoot_abi(IGraphBuilderNode** out) noexcept override
{
*out = nullptr;
OMNI_GRAPH_EXEC_CAST_OR_RETURN(asGraphBuilderNode, IGraphBuilderNode, m_topology->getRoot());
*out = asGraphBuilderNode; // explicitly does not acquire
return omni::core::kResultSuccess;
}
//! Constructor
NodeT(ITopology* topology, const char* idName) // may throw
: m_topology{ topology }, m_indexInTopology{ m_topology->acquireNodeIndex() }, m_name{ idName }
{
}
//! Constructor
NodeT(ITopology* topology,
INodeGraphDef* nodeGraphDef,
const char* idName) // may throw
: m_topology{ topology },
m_indexInTopology{ m_topology->acquireNodeIndex() },
m_nodeGraphDef{ nodeGraphDef, omni::core::kBorrow },
m_name{ idName }
{
}
//! Constructor
NodeT(ITopology* topology,
INodeDef* nodeDef,
const char* idName) // may throw
: m_topology{ topology },
m_indexInTopology{ m_topology->acquireNodeIndex() },
m_nodeDef{ nodeDef, omni::core::kBorrow },
m_name{ idName }
{
}
private:
//! Container for connections.
//!
//! Using @ref omni::graph::exec::unstable::SmallVector with local storage space for two nodes.
    //! The local storage size was hand-picked based on the observation that most graph nodes
    //! have very few downstream nodes.
using NodeArray = SmallVector<INode*, 2>;
    //! Helper implementing the erase-remove idiom to remove a node from the container
template <typename T>
void _eraseRemove(T& v, INode* n) // may throw
{
v.erase(std::remove(v.begin(), v.end(), n), v.end());
    }
ITopology* m_topology; //!< Topology owning this node
//! Acquired local index
NodeIndexInTopology m_indexInTopology{ kInvalidNodeIndexInTopology };
NodeArray m_parents; //!< Edges to parents
NodeArray m_children; //!< Edges to children
uint32_t m_cycleParentCount{ 0 }; //!< Cycling parents (used by the graph traversal)
SyncStamp m_topologyStamp; //!< Validity check for edges
omni::core::ObjectPtr<INodeDef> m_nodeDef; //!< Node definition
omni::core::ObjectPtr<INodeGraphDef> m_nodeGraphDef; //!< Node graph definition
ConstName m_name; //!< Identifier name
};
//! Core Node implementation for @ref omni::graph::exec::unstable::INode
using Node = NodeT<INode, IGraphBuilderNode>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 17,448 | C | 34.756147 | 117 | 0.621848 |
omniverse-code/kit/include/omni/graph/exec/unstable/Status.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Status.h
//!
//! @brief Defines omni::graph::exec::unstable::Status.
#pragma once
#include <omni/graph/exec/unstable/EnumBitops.h>
#include <cstdint>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Return status of all executions.
enum class Status : uint32_t
{
eUnknown = 0, //!< Status is undetermined yet
eSuccess = 1 << 0, //!< Execution was successful
eSkip = 1 << 1, //!< Execution was skipped
eDeferred = 1 << 2, //!< Execution was deferred to start and/or complete outside of current execution frame
eFailure = 1 << 3, //!< Execution failed
eInProgress = 1 << 4 //!< Execution is in progress
};
//! Enable bitwise operations on return state.
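//!
//! A minimal sketch of combining and testing flags (illustrative only; not part of the original docs):
//!
//! @code
//!   Status s = Status::eSuccess | Status::eDeferred;
//!   if ((s & Status::eDeferred) == Status::eDeferred)
//!   {
//!       // completion will happen outside of the current execution frame
//!   }
//! @endcode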
template <>
struct EnumBitops<Status> : EnumBitops<>::allow_bitops
{
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 1,316 | C | 25.87755 | 111 | 0.716565 |
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderNode.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGraphBuilderNode.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderNode.
#pragma once
#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/ElementAt.h>
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilderNode;
class IGraphBuilderNode_abi;
class INode;
class INodeDef;
class INodeGraphDef;
class ITopology;
//! Describes a node @ref omni::graph::exec::unstable::IGraphBuilder can manipulate.
//!
//! Only @ref omni::graph::exec::unstable::IGraphBuilder should use @ref omni::graph::exec::unstable::IGraphBuilderNode.
//! One way to think about this interface is that it is a private interface used by
//! @ref omni::graph::exec::unstable::IGraphBuilder to connect instances of @ref omni::graph::exec::unstable::INode.
class IGraphBuilderNode_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IGraphBuilderNode")>
{
protected:
//! Adds the given node as a parent (i.e. upstream) of this node.
//!
//! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
//! node persists while in use by this interface.
//!
//! @p parent must not be @c nullptr.
//!
//! It is undefined behavior to add a parent multiple times to a node.
//!
//! This method is not thread safe.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
_addParent_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* parent) noexcept = 0;
//! Removes the given node as a parent.
//!
    //! If the given node is not a parent, this method returns success.
//!
//! This method is not thread safe.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
_removeParent_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* parent) noexcept = 0;
//! Adds the given node as a child (i.e. downstream) of this node.
//!
//! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
//! node persists while in use by this interface.
//!
//! @p child must not be @c nullptr.
//!
//! It is undefined behavior to add a child multiple times to a node.
//!
//! This method is not thread safe.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
_addChild_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* child) noexcept = 0;
//! Removes the given node as a child.
//!
    //! If the given node is not a child, this method returns success.
//!
//! This method is not thread safe.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
_removeChild_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderNode* child) noexcept = 0;
    //! Remove from the container parent nodes that no longer exist in the current topology, i.e. are invalid.
//!
//! @ref omni::core::IObject::release() is not called on the invalid nodes.
//!
//! This method is not thread safe.
virtual void _removeInvalidParents_abi() noexcept = 0;
    //! Remove from the container child nodes that no longer exist in the current topology, i.e. are invalid.
//!
//! @ref omni::core::IObject::release() is not called on the invalid nodes.
//!
//! This method is not thread safe.
virtual void _removeInvalidChildren_abi() noexcept = 0;
//! Invalidate all children and parents connections by invalidating the topology this node is sync with.
//!
//! This method is thread safe.
virtual void _invalidateConnections_abi() noexcept = 0;
//! Sets the number of parents who are a part of cycle.
//!
//! This method is not thread safe.
virtual void setCycleParentCount_abi(uint32_t count) noexcept = 0;
//! Sets the definition for this node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef().
//!
//! This method is not thread safe.
virtual void _setNodeDef_abi(INodeDef* nodeDef) noexcept = 0;
//! Sets the definition for this node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef().
//!
//! This method is not thread safe.
virtual void _setNodeGraphDef_abi(INodeGraphDef* nodeGraphDef) noexcept = 0;
//! Unsets this node's definition.
//!
//! If the definition is already @c nullptr, this method does nothing.
//!
//! This method is not thread safe.
virtual void _clearDef_abi() noexcept = 0;
//! Access the topology owning this node.
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
//!
//! This method is not thread safe.
virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;
    //! Make the topology valid for the current topology version. Drop all connections if the topology changed.
//!
//! This method is not thread safe.
virtual void validateOrResetTopology_abi() noexcept = 0;
//! Access parent at the given index.
//!
    //! If the given index is greater than or equal to the parent count, an error is returned.
//!
//! This method is not thread safe.
//!
//! May throw due to internal casting.
//!
//! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount().
//!
//! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getParents()
//! for a modern C++ wrapper to this method.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("throw_result") omni::core::Result
getParentAt_abi(uint64_t index,
OMNI_ATTR("not_null, throw_if_null, out, *no_acquire, *return")
IGraphBuilderNode** out) noexcept = 0;
//! Returns the number of parents.
//!
//! This method is not thread safe.
virtual uint64_t getParentCount_abi() noexcept = 0;
//! Access child at the given index.
//!
    //! If the given index is greater than or equal to the child count, an error is returned.
//!
//! This method is not thread safe.
//!
//! May throw due to internal casting.
//!
//! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount().
//!
//! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildren()
//! for a modern C++ wrapper to this method.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("throw_result") omni::core::Result
getChildAt_abi(uint64_t index,
OMNI_ATTR("not_null, throw_if_null, out, *no_acquire, *return")
IGraphBuilderNode** out) noexcept = 0;
//! Returns the number of children.
//!
//! This method is not thread safe.
virtual uint64_t getChildCount_abi() noexcept = 0;
//! Returns @c true if the given node is an immediate child of this node.
//!
//! @p node may be @c nullptr.
//!
//! This method is not thread safe.
virtual bool hasChild_abi(IGraphBuilderNode* node) noexcept = 0;
//! Returns @c true if this node is the root of the topology.
//!
//! This method is not thread safe.
virtual bool isRoot_abi() noexcept = 0;
//! Returns the root node of the topology of which this node is a part.
//!
//! This method is not thread safe.
//!
//! May throw due to internal casting.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual omni::core::Result getRoot_abi(OMNI_ATTR("not_null, throw_if_null, out, *no_acquire")
IGraphBuilderNode** out) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGraphBuilderNode.
using GraphBuilderNodePtr = omni::core::ObjectPtr<IGraphBuilderNode>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraphBuilderNode.gen.h>
//! @copydoc omni::graph::exec::unstable::IGraphBuilderNode_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraphBuilderNode
: public omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>
{
public:
//! Implementation detail to access parent ABI.
struct GetParent
{
//! Access element at a given index
static void getAt(IGraphBuilderNode* owner, uint64_t index, IGraphBuilderNode** out)
{
*out = owner->getParentAt(index);
}
//! Returns element count
static uint64_t getCount(IGraphBuilderNode* owner)
{
return owner->getParentCount();
}
};
//! Implementation detail to access children ABI.
struct GetChild
{
//! Access element at a given index
static void getAt(IGraphBuilderNode* owner, uint64_t index, IGraphBuilderNode** out)
{
*out = owner->getChildAt(index);
}
//! Returns element count
static uint64_t getCount(IGraphBuilderNode* owner)
{
return owner->getChildCount();
}
};
    //! Implementation detail that wraps index-based node access with iterators.
using Parents = detail::ElementAt<IGraphBuilderNode, IGraphBuilderNode*, GetParent>;
    //! Implementation detail that wraps index-based node access with iterators.
using Children = detail::ElementAt<IGraphBuilderNode, IGraphBuilderNode*, GetChild>;
//! Returns an object that allows the list of parents to be iterated over (i.e. using range-based for loops).
Parents getParents() noexcept
{
return Parents(this);
}
//! Returns an object that allows the list of children to be iterated over (i.e. using range-based for loops).
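    //!
    //! A minimal iteration sketch (illustrative; @c builderNode is an assumed valid pointer):
    //!
    //! @code
    //!   for (IGraphBuilderNode* child : builderNode->getChildren())
    //!   {
    //!       // inspect or rewire each downstream node
    //!   }
    //! @endcode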
Children getChildren() noexcept
{
return Children(this);
}
//! Returns the root node of the topology of which this node is a part.
//!
//! May throw.
inline IGraphBuilderNode* getRoot();
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/ITopology.h>
inline omni::graph::exec::unstable::IGraphBuilderNode* omni::graph::exec::unstable::IGraphBuilderNode::getRoot()
{
IGraphBuilderNode* out;
OMNI_THROW_IF_FAILED(getRoot_abi(&out));
return out;
}
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraphBuilderNode.gen.h>
| 12,254 | C | 36.024169 | 120 | 0.656602 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionStateInfo.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IExecutionStateInfo.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IExecutionStateInfo.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Types.h>
#include <memory>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IBackgroundResult;
class IExecutionStateInfo;
class IExecutionStateInfo_abi;
//! State associated with a given execution task
//!
//! @note We separated execution state from the execution graph to allow concurrent and/or nested execution
class IExecutionStateInfo_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutionStateInfo")>
{
protected:
    //! Store a "future" result for this state. The actual computation runs asynchronously outside of the
    //! execution frame.
//!
//! @return \c true if execution state accepts "future" results.
virtual bool storeBackgroundResult_abi(OMNI_ATTR("not_null, throw_if_null") IBackgroundResult* result) noexcept = 0;
//! Query used by some executors to determine if computation of a node is necessary
virtual bool needsCompute_abi(Stamp execVersion) noexcept = 0;
//! Set to request computation
virtual void requestCompute_abi() noexcept = 0;
//! Reset request to compute after computation was performed
virtual void setComputed_abi() noexcept = 0;
//! Get current/last exec version set for this node during execution
virtual SyncStamp getExecutionStamp_abi() noexcept = 0;
//! Set current exec version for this node. Returns true if version wasn't in sync.
virtual bool setExecutionStamp_abi(Stamp execVersion) noexcept = 0;
//! Returns a value from a node's key/value datastore.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is returned in @p outTypeId.
//!
//! @p outPtr will be updated with a pointer to the actual data.
//!
    //! @p outItemSize stores the size of each item in the returned array.
//!
//! @p outItemCount contains the number of items returned (i.e. the number
//! of items @p outPtr points to). For an array, this will be greater than
//! 1.
//!
//! If the key is not found, @p outPtr is set to @c nullptr and @p
//! outItemCount is set to 0.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
virtual OMNI_ATTR("throw_result") omni::core::Result
getNodeData_abi(NodeDataKey key,
OMNI_ATTR("out, not_null, throw_if_null") omni::core::TypeId* outTypeId,
OMNI_ATTR("out, not_null, throw_if_null, *out, *in") void** outPtr,
OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemSize,
OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemCount) noexcept = 0;
//! Sets a value in a node's key/value datastore.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is set with @p typeId.
//!
//! @p data points to an array of data items.
//!
//! @p itemSize is the size of each item in the given array.
//!
//! @p itemCount contains the number of items pointed to by @p data. For an
//! array, this will be greater than 1.
//!
//! @p deleter is a function used to delete @p data when either a new value
//! is set at the key or the context is invalidated. If @p deleter is @c
//! nullptr, it is up to the calling code to manage the lifetime of the @p
//! data.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
virtual OMNI_ATTR("throw_result") omni::core::Result
setNodeData_abi(NodeDataKey key,
omni::core::TypeId typeId,
OMNI_ATTR("in, out, not_null, throw_if_null") void* data,
uint64_t itemSize,
uint64_t itemCount,
OMNI_ATTR("in, out") NodeDataDeleterFn* deleter) noexcept = 0;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutionStateInfo.gen.h>
//! @copydoc omni::graph::exec::unstable::IExecutionStateInfo_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutionStateInfo
: public omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>
{
public:
//! Returns a pointer to a value stored in the node's key/value datastore.
//!
//! If there is no value stored at the given @p key an empty span is
//! returned.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! If the type @c T does not match the type of the store data, an exception
//! is thrown.
//!
//! An exception is thrown on all other errors.
//!
//! Prefer using @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() instead of this method, which will populate the type id for
//! you.
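    //!
    //! A minimal sketch (illustrative; @c MyState and @c key are hypothetical and assumed to match an
    //! earlier @c setNodeData() call):
    //!
    //! @code
    //!   auto span = OMNI_GRAPH_EXEC_GET_NODE_DATA_AS(stateInfo, MyState, key);
    //!   if (span.data())
    //!   {
    //!       // use *span.data() here
    //!   }
    //! @endcode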
template <typename T>
inline Span<T> getNodeDataAs(omni::core::TypeId desiredType, NodeDataKey key);
//! Stores a value in the node's key/value datastore.
//!
//! If a value is already stored at the given @p key it will be replaced.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all errors.
//!
//! Prefer using @ref OMNI_GRAPH_EXEC_SET_NODE_DATA() instead of this method, which will populate the type id for
//! you.
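    //!
    //! A minimal sketch (illustrative; @c MyState is a hypothetical type and @c key an assumed valid
    //! @ref NodeDataKey):
    //!
    //! @code
    //!   auto state = std::make_unique<MyState>();
    //!   OMNI_GRAPH_EXEC_SET_NODE_DATA(stateInfo, MyState, key, std::move(state));
    //! @endcode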
template <typename SpecifiedT, typename DataT>
inline void setNodeData(omni::core::TypeId itemType, NodeDataKey key, std::unique_ptr<DataT> data);
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IBackgroundResult.h>
#ifndef DOXYGEN_BUILD // templates and doxygen are not friends (remove this line to see why)
template <typename T>
inline omni::graph::exec::unstable::Span<T> omni::graph::exec::unstable::IExecutionStateInfo::getNodeDataAs(
omni::core::TypeId desiredType, NodeDataKey key)
{
omni::core::TypeId outType;
void* outPtr;
uint64_t outItemSize, outItemCount;
OMNI_THROW_IF_FAILED(getNodeData_abi(key, &outType, &outPtr, &outItemSize, &outItemCount));
if (outPtr)
{
if (outType != desiredType)
{
throw omni::core::ResultError(omni::core::kResultInvalidDataType);
}
if (outItemSize != sizeof(T))
{
throw omni::core::ResultError(omni::core::kResultInvalidDataSize);
}
}
return Span<T>{ reinterpret_cast<T*>(outPtr), outItemCount };
}
template <typename SpecifiedT, typename DataT>
inline void omni::graph::exec::unstable::IExecutionStateInfo::setNodeData(omni::core::TypeId desiredType,
NodeDataKey key,
std::unique_ptr<DataT> data)
{
static_assert(std::is_same<SpecifiedT, DataT>::value, "given TypeId does not match the data type");
static_assert(!std::is_array<DataT>::value, "setting arrays as node data via unique_ptr not yet implemented");
OMNI_THROW_IF_FAILED(setNodeData_abi(key, desiredType, data.get(), sizeof(DataT), 1,
[](void* p)
{
typename std::unique_ptr<DataT>::deleter_type deleter;
deleter(reinterpret_cast<DataT*>(p));
}));
data.release(); // now safe to release ownership
}
#endif // DOXYGEN_BUILD
//! Calls either @ref omni::graph::exec::unstable::IExecutionContext::getNodeDataAs() or @ref
//! omni::graph::exec::unstable::IExecutionStateInfo::getNodeDataAs() (dependent on the type of the first argument).
//!
//! The purpose of this macro is to generate an appropriate @ref omni::core::TypeId at compile time from the data
//! item's type. The user can do this manually, but this macro is much less error prone.
//!
//! @code
//! auto data = OMNI_GRAPH_EXEC_GET_NODE_DATA_AS( task->getContext(), GraphContextCacheOverride,
//! task->getUpstreamPath(), nullptr, tokens::kInstanceContext).data();
//! @endcode
//!
//! The macro itself is variadic and can map to multiple overloads of the @c getNodeDataAs() method in the
//! interface given as the first argument.
//!
//! With newer compilers (GCC >= 8), this macro can be replaced with templated methods (without breaking the ABI).
#define OMNI_GRAPH_EXEC_GET_NODE_DATA_AS(context_, type_, ...) \
context_->getNodeDataAs<type_>(CARB_HASH_STRING(CARB_STRINGIFY(type_)), __VA_ARGS__)
// the ugly macro above is used to hash the type of the data at compile time.
//
// it's possible to get the type of data at compile time by inspecting the function name (e.g. __FUNCSIG__ and
// __PRETTY_FUNCTION__). however __PRETTY_FUNCTION__ was not a constexpr until GCC 8. omniverse currently uses GCC 7
// so we're left with this hack.
//
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66639
//! Calls either @ref omni::graph::exec::unstable::IExecutionContext::setNodeData() or @ref
//! omni::graph::exec::unstable::IExecutionStateInfo::setNodeData() (dependent on the type of the first argument).
//!
//! The purpose of this macro is to generate an appropriate @ref omni::core::TypeId at compile time from the data
//! item's type. The user can do this manually, but this macro is much less error prone.
//!
//! @code
//! OMNI_GRAPH_EXEC_SET_NODE_DATA(stateInfo, GraphContextCacheOverride, tokens::kInstanceContext,
//! std::move(contextOverridePtr));
//! @endcode
//!
//! The macro itself is variadic and can map to multiple overloads of the @c setNodeData() method in the
//! interface given as the first argument.
//!
//! With newer compilers (GCC >= 8), this macro can be replaced with templated methods (without breaking the ABI).
#define OMNI_GRAPH_EXEC_SET_NODE_DATA(context_, type_, ...) \
context_->setNodeData<type_>(CARB_HASH_STRING(CARB_STRINGIFY(type_)), __VA_ARGS__)
// the ugly macro above is used to hash the type of the data at compile time.
//
// it's possible to get the type of data at compile time by inspecting the function name (e.g. __FUNCSIG__ and
// __PRETTY_FUNCTION__). however __PRETTY_FUNCTION__ was not a constexpr until GCC 8. omniverse currently uses GCC 7
// so we're left with this hack.
//
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66639
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutionStateInfo.gen.h>
| 11,745 | C | 42.503704 | 121 | 0.655513 |
omniverse-code/kit/include/omni/graph/exec/unstable/Traversal.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Traversal.h
//!
//! @brief Defines graph traversal algorithms.
//! See @rstref{Traversing a Graph <ef_graph_traversal_guide>} and
//! @rstref{Graph Traversal In-Depth <ef_graph_traversal_advanced>}
//! for more information how to utilize graph traversals.
#pragma once
#include <concurrentqueue/include_concurrentqueue.h>
#include <omni/graph/exec/unstable/AtomicBackoff.h>
#include <omni/graph/exec/unstable/EnumBitops.h>
#include <atomic>
#include <functional>
#include <memory>
#include <queue>
#include <type_traits>
#include <vector>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Implementation details.
namespace detail
{
//! Information returned to traversal callback indicating visit order
enum class VisitOrder
{
eUnknown = 0, //!< Visit order is not specified
eFirst = 1 << 0, //!< This is the first visit to a node during the traversal
eNext = 1 << 1, //!< This is a next visit to a node, i.e. not a first one and not the last one
eLast = 1 << 2, //!< This is the last visit to a node during the traversal
    eCycle = 1 << 3 //!< In case of cycles, once the traversal has entered a node for its last regular visit, any
                    //!< further visits are reported with this order
};
} // namespace detail
//! Enable bitwise operation on VisitOrder
template <>
struct EnumBitops<detail::VisitOrder> : EnumBitops<>::allow_bitops
{
};
namespace detail
{
//! Traversal information stored per node
struct NodeData
{
std::atomic<std::size_t> visitCount{ 0 }; //!< How many times this node was visited
//! Copy constructor
//!
    //! The compiler will not generate a copy constructor (nor an assignment operator) by default due to the
    //! atomic member. We add one explicitly here because our usage pattern guarantees that no copy construction
    //! happens during concurrent execution.
NodeData(const NodeData& src) : visitCount(src.visitCount.load())
{
}
//! Assignment operator
//!
    //! The compiler will not generate an assignment operator (nor a copy constructor) by default due to the
    //! atomic member. We add one explicitly here because our usage pattern guarantees that no assignment
    //! happens during concurrent execution.
NodeData& operator=(const NodeData& rhs)
{
visitCount.store(rhs.visitCount.load());
return *this;
}
    // because the copy operations are declared above, the default constructor is no longer generated
    // implicitly and must be declared explicitly
//! Default constructor
NodeData()
{
}
};
// ef-docs visit-first-begin
//! Traversal strategy that enters the node when it is first discovered
struct VisitFirst
{
    //! Call to traverse the graph with a strategy that visits a node only when it is first discovered
template <typename Node, typename NodeData>
static VisitOrder tryVisit(Node* node, NodeData& nodeData)
{
auto lastVisit = nodeData.visitCount++; // read+increment only once. other threads can be doing the same.
return (lastVisit == 0) ? VisitOrder::eFirst : VisitOrder::eUnknown;
}
};
// ef-docs visit-first-end
// ef-docs visit-last-begin
//! Traversal strategy that enters the node once its entire upstream has already been visited, i.e. at the last
//! opportunity to enter the node.
//!
//! In case of cycles, this algorithm relies on knowing the number of parents that cause cycles.
struct VisitLast
{
    //! Call to traverse the graph with a strategy that visits a node only when no more visits are possible
template <typename Node, typename NodeData>
static VisitOrder tryVisit(Node& node, NodeData& nodeData)
{
auto requiredCount = node->getParents().size() - node->getCycleParentCount();
auto currentVisit = ++nodeData.visitCount; // increment+read only once. other threads can be doing the same.
if (requiredCount == 0 && currentVisit == 1)
{
return VisitOrder::eLast;
}
else if (currentVisit == requiredCount)
{
return VisitOrder::eLast;
}
return VisitOrder::eUnknown;
}
};
// ef-docs visit-last-end
// ef-docs visit-all-begin
//! Traversal strategy that allows discovering all the edges in the graph. Traversal continuation is controlled by user
//! code.
struct VisitAll
{
//! Call to traverse the graph with a strategy to visit all edges of the graph
template <typename Node, typename NodeData>
static VisitOrder tryVisit(Node& node, NodeData& nodeData)
{
auto parentCount = node->getParents().size();
auto requiredCount = parentCount - node->getCycleParentCount();
auto currentVisit = ++nodeData.visitCount; // increment+read only once. other threads can be doing the same.
if (requiredCount == 0 && currentVisit == 1)
{
return (VisitOrder::eFirst | VisitOrder::eLast);
}
VisitOrder ret = VisitOrder::eUnknown;
if (currentVisit > requiredCount)
{
ret = VisitOrder::eCycle;
}
else if (currentVisit == requiredCount)
{
ret = (currentVisit == 1) ? (VisitOrder::eFirst | VisitOrder::eLast) : VisitOrder::eLast;
}
else if (currentVisit == 1)
{
ret = VisitOrder::eFirst;
}
else
{
ret = VisitOrder::eNext;
}
return ret;
}
};
// ef-docs visit-all-end
#ifndef DOXYGEN_BUILD
struct FlowDFS
{
};
struct FlowBFS
{
};
struct SerialQueue
{
};
struct ConcurrentQueue
{
};
template <typename GraphNode, typename Flow, typename Queue, bool ConstNode, typename Enable = void>
struct TraversalBase
{
};
template <typename GraphNode, typename Flow, typename Queue, bool ConstNode>
struct TraversalBase<GraphNode, Flow, Queue, ConstNode, std::enable_if_t<std::is_same<Flow, FlowDFS>::value>>
{
void incrementInfo()
{
}
void decrementInfo()
{
}
};
//! Base traversal class used for BFS traversal order when a thread-safe queue is not required
template <typename GraphNode, typename Flow, typename Queue, bool ConstNode>
struct TraversalBase<GraphNode,
Flow,
Queue,
ConstNode,
std::enable_if_t<std::is_same<Flow, FlowBFS>::value && std::is_same<Queue, SerialQueue>::value>>
{
using Node = typename std::conditional_t<ConstNode, const GraphNode, GraphNode>;
using NodeQueue = std::queue<Node*>;
NodeQueue m_queue;
void push(Node* node)
{
        m_queue.push(node);
}
bool tryPop(Node*& node)
{
if (m_queue.empty())
{
return false;
}
node = m_queue.front();
m_queue.pop();
return true;
}
void incrementInfo()
{
}
void decrementInfo()
{
}
bool hasInfo() const
{
return false;
}
};
//! Base traversal class used for BFS traversal order when a thread-safe queue IS required
template <typename GraphNode, typename Flow, typename Queue, bool ConstNode>
struct TraversalBase<GraphNode,
Flow,
Queue,
ConstNode,
std::enable_if_t<std::is_same<Flow, FlowBFS>::value && std::is_same<Queue, ConcurrentQueue>::value>>
{
using Node = typename std::conditional_t<ConstNode, const GraphNode, GraphNode>;
using NodeQueue = moodycamel::ConcurrentQueue<Node*>;
NodeQueue m_queue;
std::atomic<std::size_t> m_infoCount{ 0 };
TraversalBase() noexcept
{
}
TraversalBase(TraversalBase&& src) noexcept : m_queue(std::move(src.m_queue)), m_infoCount(src.m_infoCount.load())
{
}
TraversalBase(TraversalBase& src) = delete;
void push(Node* node)
{
m_queue.enqueue(node);
}
bool tryPop(Node*& node)
{
return m_queue.try_dequeue(node);
}
void incrementInfo()
{
m_infoCount++;
}
void decrementInfo()
{
m_infoCount--;
}
bool hasInfo() const
{
return m_infoCount.load() > 0;
}
};
struct NoUserData
{
};
template <typename GraphNode, typename NodeUserData, typename Enable = void>
struct UserDataBase
{
explicit UserDataBase(std::size_t size) noexcept
{
}
UserDataBase(UserDataBase&& src) noexcept
{
}
};
//! User defined data class to be available for each traversed node
template <typename GraphNode, typename NodeUserData>
struct UserDataBase<GraphNode, NodeUserData, std::enable_if_t<!std::is_same<NodeUserData, NoUserData>::value>>
{
static_assert(std::is_trivially_copyable<NodeUserData>::value, "User data needs to be trivially copyable");
explicit UserDataBase(std::size_t size) noexcept : m_userData(size)
{
}
UserDataBase(UserDataBase&& src) noexcept : m_userData(std::move(src.m_userData))
{
}
NodeUserData& userData(GraphNode* node)
{
return m_userData[node->getIndexInTopology()];
}
using NodeUserDataArray = std::vector<NodeUserData>;
NodeUserDataArray m_userData;
};
using QueueType = ConcurrentQueue; // or SerialQueue (but serial queue will fail with some multithreaded unit tests)
//! Traversal class
//!
//! @tparam GraphNode Node typename
//! @tparam Graph Graph typename
//! @tparam Strategy Traversal visit strategy (first, last, all) typename
//! @tparam Flow Visit flow (DFS or BFS) typename
//! @tparam NodeUserData Custom user data typename allocated for each node
//! @tparam ConstNode Is this a const traversal
template <typename GraphNode, typename Graph, typename Strategy, typename Flow, typename NodeUserData, bool ConstNode>
class Traversal : private TraversalBase<GraphNode, Flow, QueueType, ConstNode>,
private UserDataBase<GraphNode, NodeUserData>
{
public:
using Node = typename std::conditional_t<ConstNode, const GraphNode, GraphNode>;
using Base = TraversalBase<GraphNode, Flow, QueueType, ConstNode>;
using BaseUserData = UserDataBase<GraphNode, NodeUserData>;
struct Info
{
Traversal& traversal;
VisitOrder order;
Info(Traversal& t, VisitOrder o) noexcept : traversal(t), order(o)
{
traversal.Base::incrementInfo();
}
Info(const Info& src) noexcept : traversal(src.traversal), order(src.order)
{
traversal.Base::incrementInfo();
}
Info(Info&& src) noexcept : traversal(src.traversal), order(src.order)
{
traversal.Base::incrementInfo();
}
~Info()
{
traversal.Base::decrementInfo();
}
Info& operator=(const Info& rhs) = delete;
Info& operator=(Info&& rhs) = delete;
bool isFirstVisit() const
{
return (order & VisitOrder::eFirst) == VisitOrder::eFirst;
}
bool isLastVisit() const
{
return (order & VisitOrder::eLast) == VisitOrder::eLast;
}
void continueVisit(GraphNode* node)
{
return traversal.continueVisit(node);
}
NodeUserData& userData(GraphNode* node)
{
return traversal.BaseUserData::userData(node);
}
};
using CallbackType = void(Info, GraphNode*, GraphNode*);
using CallbackFn = std::function<CallbackType>;
using NodeDataArray = std::vector<NodeData>;
explicit Traversal(Graph* g, CallbackFn call) noexcept
: BaseUserData(g->getNodeCount()), m_callback(call), m_data(g->getNodeCount())
{
}
Traversal() = delete;
Traversal(const Traversal& src) = delete;
Traversal(Traversal&& src) = delete;
Traversal& operator=(const Traversal& rhs) = delete;
Traversal& operator=(Traversal&& rhs) = delete;
void continueVisit(GraphNode* prev)
{
continueImpl(Flow(), prev);
}
void startVisit(GraphNode* node)
{
startImpl(Flow(), node);
}
void markVisited(GraphNode* node)
{
nodeData(node).visitCount = node->getParents().size();
}
private:
// see if our traversal policy allows us to visit this node
void tryVisit(GraphNode* prev, GraphNode* current)
{
VisitOrder visitOrder = Strategy::tryVisit(current, nodeData(current));
if (visitOrder > VisitOrder::eUnknown)
{
m_callback({ *this, visitOrder }, prev, current);
}
}
void tryContinue(GraphNode* current)
{
for (auto child : current->getChildren())
{
tryVisit(current, child);
}
}
void continueImpl(FlowDFS, Node* prev)
{
tryContinue(prev);
}
void continueImpl(FlowBFS, Node* prev)
{
Base::push(prev);
}
void startImpl(FlowDFS, Node* node)
{
markVisited(node);
tryContinue(node);
}
void startImpl(FlowBFS, Node* node)
{
markVisited(node);
Base::push(node);
AtomicBackoff backoff;
Node* stackNode = nullptr;
while (true)
{
if (Base::tryPop(stackNode))
{
tryContinue(stackNode);
backoff.reset();
continue;
}
if (Base::hasInfo())
{
backoff.pause();
}
else
{
break;
}
}
}
NodeData& nodeData(GraphNode* node)
{
return m_data[node->getIndexInTopology()];
}
CallbackFn m_callback;
NodeDataArray m_data;
};
//! Main traversal template for DFS algorithms
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
void run_traversal_dfs(typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
typename Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode>::CallbackFn call)
{
Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode> traversal(node->getTopology(), call);
traversal.startVisit(node);
}
//! Main traversal template for BFS algorithms
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
void run_traversal_bfs(typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
typename Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode>::CallbackFn call)
{
Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode> traversal(node->getTopology(), call);
traversal.startVisit(node);
}
//! Main traversal template for DFS algorithms. Traversal is allocated on the heap and returned.
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
auto alloc_and_run_traversal_dfs(
typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
typename Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode>::CallbackFn call)
{
using TraversalType = Traversal<GraphNode, Graph, Strategy, FlowDFS, NodeUserData, ConstNode>;
std::unique_ptr<TraversalType> traversal = std::make_unique<TraversalType>(node->getTopology(), call);
traversal->startVisit(node);
return traversal;
}
//! Main traversal template for BFS algorithms. Traversal is allocated on the heap and returned.
template <typename GraphNode, typename Graph, typename Strategy, typename NodeUserData, bool ConstNode>
auto alloc_and_run_traversal_bfs(
typename std::conditional_t<ConstNode, const GraphNode, GraphNode>* node,
typename Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode>::CallbackFn call)
{
using TraversalType = Traversal<GraphNode, Graph, Strategy, FlowBFS, NodeUserData, ConstNode>;
std::unique_ptr<TraversalType> traversal = std::make_unique<TraversalType>(node->getTopology(), call);
traversal->startVisit(node);
return traversal;
}
#endif // DOXYGEN_BUILD
} // namespace detail
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/ITopology.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::detail::VisitFirst
using VisitFirst = detail::VisitFirst;
//! @copydoc omni::graph::exec::unstable::detail::VisitLast
using VisitLast = detail::VisitLast;
//! @copydoc omni::graph::exec::unstable::detail::VisitAll
using VisitAll = detail::VisitAll;
using detail::FlowBFS;
using detail::FlowDFS;
using detail::NoUserData;
using detail::Traversal;
using detail::VisitOrder;
// ef-docs traversal-methods-begin
//! Depth-first-search traversal
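//!
//! A minimal usage sketch (illustrative; @c root is an assumed valid @c INode*), mirroring the pattern used by
//! @ref omni::graph::exec::unstable::writeFlattenedAsGraphviz:
//!
//! @code
//!   traversal_dfs<VisitFirst>(root,
//!                             [](auto info, INode* prev, INode* curr)
//!                             {
//!                                 // handle the first discovery of curr here...
//!                                 info.continueVisit(curr); // ...then keep walking downstream
//!                             });
//! @endcode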
template <typename Strategy, typename NodeUserData = NoUserData>
void traversal_dfs(INode* node,
typename Traversal<INode, ITopology, Strategy, FlowDFS, NodeUserData, false>::CallbackFn call)
{
detail::run_traversal_dfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}
//! Breadth-first-search traversal
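//!
//! A minimal sketch with the @c VisitLast strategy (illustrative; @c root is an assumed valid @c INode*); each
//! node is entered only once all of its non-cycle parents have been visited:
//!
//! @code
//!   traversal_bfs<VisitLast>(root,
//!                            [](auto info, INode* prev, INode* curr)
//!                            {
//!                                // the entire upstream of curr has been processed at this point
//!                                info.continueVisit(curr);
//!                            });
//! @endcode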
template <typename Strategy, typename NodeUserData = NoUserData>
void traversal_bfs(INode* node,
typename Traversal<INode, ITopology, Strategy, FlowBFS, NodeUserData, false>::CallbackFn call)
{
detail::run_traversal_bfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}
//! Depth-first-search traversal. The traversal object is allocated on the heap and returned to extend its
//! lifetime to the end of all concurrent tasks.
template <typename Strategy, typename NodeUserData = NoUserData>
auto concurrent_traversal_dfs(INode* node,
typename Traversal<INode, ITopology, Strategy, FlowDFS, NodeUserData, false>::CallbackFn call)
{
return detail::alloc_and_run_traversal_dfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}
//! Breadth-first-search traversal. The traversal object is allocated on the heap and returned to extend its
//! lifetime to the end of all concurrent tasks.
template <typename Strategy, typename NodeUserData = NoUserData>
auto concurrent_traversal_bfs(INode* node,
typename Traversal<INode, ITopology, Strategy, FlowBFS, NodeUserData, false>::CallbackFn call)
{
return detail::alloc_and_run_traversal_bfs<INode, ITopology, Strategy, NodeUserData, false>(node, call);
}
// ef-docs traversal-methods-end
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 18,912 | C | 29.802932 | 124 | 0.66323 |
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDefDebug.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INodeGraphDefDebug.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeGraphDefDebug.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class INodeGraphDefDebug;
class INodeGraphDefDebug_abi;
//! Interface containing debugging methods for @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! Implementation of this interface is optional.
class INodeGraphDefDebug_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.INodeGraphDefDebug")>
{
protected:
//! Returns the current execution count. A value of 0 means the graph is not executing.
virtual uint64_t getExecutionCount_abi() noexcept = 0;
//! Increments the execution count.
virtual void incrementExecutionCount_abi() noexcept = 0;
    //! Decrements the execution count. It is undefined behavior to call decrement more times than increment.
virtual void decrementExecutionCount_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref INodeGraphDefDebug.
using NodeGraphDefDebugPtr = omni::core::ObjectPtr<INodeGraphDefDebug>;
class ScopedExecutionDebug;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeGraphDefDebug.gen.h>
//! @copydoc omni::graph::exec::unstable::INodeGraphDefDebug_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeGraphDefDebug
: public omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>
{
public:
//! Returns @c true if the graph's execution count is greater than 0.
inline bool isExecuting() noexcept
{
return (getExecutionCount() > 0);
}
};
//! Scoped object used to mark that a given @ref INodeGraphDef is currently executing.
//!
//! Since @ref INodeGraphDef objects can be shared across nodes, it is safe to create multiple instances of this object
//! with the same @ref INodeGraphDef.
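//!
//! A minimal sketch (illustrative; @c nodeGraphDef is an assumed valid pointer):
//!
//! @code
//!   {
//!       ScopedExecutionDebug inFlight{ nodeGraphDef }; // increments the execution count
//!       // ... execute the graph ...
//!   } // destructor decrements the execution count
//! @endcode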
class omni::graph::exec::unstable::ScopedExecutionDebug
{
public:
//! Marks the given @ref INodeGraphDef as executing.
ScopedExecutionDebug(omni::core::ObjectParam<IBase> nodeGraphDef)
: m_nodeGraphDef(omni::graph::exec::unstable::cast<INodeGraphDefDebug>(nodeGraphDef))
{
if (m_nodeGraphDef)
{
m_nodeGraphDef->incrementExecutionCount();
}
}
//! Decrements the given @ref INodeGraphDef's execution tracker.
~ScopedExecutionDebug()
{
if (m_nodeGraphDef)
{
m_nodeGraphDef->decrementExecutionCount();
}
}
private:
INodeGraphDefDebug* m_nodeGraphDef;
};
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeGraphDefDebug.gen.h>
| 3,468 | C | 31.12037 | 119 | 0.72376 |
omniverse-code/kit/include/omni/graph/exec/unstable/GraphUtils.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file GraphUtils.h
//!
//! @brief Defines utilities for graph visualization.
#pragma once
#include <carb/Format.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/Traversal.h>
#include <ostream>
#include <string>
#include <unordered_map>
#include <vector>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Debugging utility to write out the graph topology in Graphviz format
//!
//! @param inGraph Graph to dump
//! @param out Output stream to receive the produced Graphviz text output
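//!
//! A minimal sketch (illustrative; @c graph is an assumed valid @ref IGraph pointer):
//!
//! @code
//!   std::ostringstream stream; // requires <sstream>
//!   writeFlattenedAsGraphviz(graph, stream);
//!   // render stream.str() with e.g. `dot -Tsvg` from the Graphviz toolchain
//! @endcode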
inline void writeFlattenedAsGraphviz(omni::core::ObjectParam<IGraph> inGraph, std::ostream& out);
namespace detail
{
#ifndef DOXYGEN_BUILD
constexpr const char* colorScheme()
{
return "paired10";
}
constexpr unsigned colorSchemeSize()
{
return 10;
}
struct GraphState
{
std::unordered_map<size_t, unsigned> colorMapping;
unsigned nextColorIndex{ 0 };
unsigned getColor(size_t hash)
{
/*static const std::vector<const char*> gColors = { "black","aqua","aquamarine","bisque",
"blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue",
"crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkgrey","darkkhaki",
"darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue",
"darkslategray","darkslategrey","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dimgrey","dodgerblue",
"firebrick","floralwhite","forestgreen","fuchsia","gainsboro","gold","goldenrod","gray","grey","green","greenyellow",
"hotpink","indianred","indigo","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral",
"lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightgrey","lightpink","lightsalmon","lightseagreen",
"lightskyblue","lightslategray","lightslategrey","lightsteelblue","lightyellow","limegreen","linen","magenta",
"maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue",
"mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mistyrose","moccasin","navy","oldlace","olive",
"olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip",
"peachpuff","peru","pink","plum","powderblue","purple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown",
"seagreen","sienna","silver","skyblue","slateblue","slategray","slategrey","springgreen","steelblue","tan","teal",
"thistle","tomato","turquoise","violet","wheat","whitesmoke","yellow","yellowgreen" };*/
auto foundIt = colorMapping.find(hash);
if (foundIt == colorMapping.end())
{
auto& color = colorMapping[hash];
color = nextColorIndex;
nextColorIndex = (nextColorIndex + 1) % colorSchemeSize();
return color;
}
else
{
return foundIt->second;
}
}
};
constexpr const char* singleIndent()
{
return " ";
}
inline std::string makeQuoted(const std::string& s)
{
return "\"" + s + "\"";
}
inline std::string makeId(const std::string& path, INode* node)
{
return path + "/" + node->getName().toString();
}
inline std::string makeClusterId(const std::string& id)
{
return "Cluster//" + id;
}
inline void writeNodeProperties(INode* node, std::ostream& out)
{
if (node->isRoot())
out << "["
<< "label=\"\" "
<< "shape=point"
<< "]";
else
out << "["
<< "label=" << makeQuoted(node->getName().toString()) << "]";
}
inline void writeNode(const std::string& indent, const std::string& path, INode* node, std::ostream& out)
{
out << indent << makeQuoted(makeId(path, node)) << " ";
writeNodeProperties(node, out);
out << ";" << std::endl;
}
inline void writeConnection(const std::string& indent, const std::string& path, INode* nodeA, INode* nodeB, std::ostream& out)
{
auto graphA = nodeA->getNodeGraphDef();
auto graphB = nodeB->getNodeGraphDef();
std::string pathA = makeId(path, nodeA);
std::string pathB = makeId(path, nodeB);
std::string nodeAId = graphA ? makeId(pathA, graphA->getRoot()) : pathA;
std::string nodeBId = graphB ? makeId(pathB, graphB->getRoot()) : pathB;
out << indent;
out << makeQuoted(nodeAId);
out << " -> ";
out << makeQuoted(nodeBId);
if (graphA || graphB)
{
out << "[";
if (graphA)
out << "ltail=" << makeQuoted(makeClusterId(pathA));
if (graphB)
out << "lhead=" << makeQuoted(makeClusterId(pathB));
out << "]";
}
out << ";" << std::endl;
}
inline void writeSubgraphProperties(
const std::string& indent, INode* node, INodeGraphDef* graph, GraphState& state, std::ostream& out)
{
const auto& nodeGraphDefName = graph->getName();
std::string nodeGraphDefNameLabel =
carb::fmt::format("{}({})", nodeGraphDefName.getString().c_str(), nodeGraphDefName.getHash());
    // this is a nested NodeGraphDef
if (node)
{
const auto& nodeName = node->getName();
std::string nodeNameLabel = carb::fmt::format("{}({})", nodeName.getString().c_str(), node->getIndexInTopology());
out << indent << "label = " << makeQuoted(nodeNameLabel + " | " + nodeGraphDefNameLabel) << std::endl;
}
    // this is the top-level NodeGraphDef
else
{
out << indent << "label = " << makeQuoted("EXECUTION GRAPH | " + nodeGraphDefNameLabel) << std::endl;
}
auto color = state.getColor(graph->getName().getHash());
out << indent << "color = " << color << std::endl;
out << indent << "node [color = " << color << "]" << std::endl;
out << indent << "edge [color = " << color << "]" << std::endl;
}
inline void writeGraph(const std::string& indent,
const std::string& path,
INode* node,
INodeGraphDef* graph,
GraphState& state,
std::ostream& out)
{
out << indent << "subgraph " << makeQuoted(makeClusterId(path)) << " {" << std::endl;
std::string thisIndent = indent + singleIndent();
writeSubgraphProperties(thisIndent, node, graph, state, out);
// for readability, we first write nodes...
writeNode(thisIndent, path, graph->getRoot(), out);
traversal_dfs<VisitFirst>(graph->getRoot(),
[&out, &thisIndent, &path, &state](auto info, INode* prev, INode* curr)
{
auto nodeGraph = curr->getNodeGraphDef();
if (nodeGraph)
writeGraph(thisIndent, makeId(path, curr), curr, nodeGraph, state, out);
else
writeNode(thisIndent, path, curr, out);
info.continueVisit(curr);
});
// ... and then we write connections
traversal_dfs<VisitAll>(graph->getRoot(),
[&out, &thisIndent, &path](auto info, INode* prev, INode* curr)
{
writeConnection(thisIndent, path, prev, curr, out);
if (info.isFirstVisit()) // visit all edges, continue traversal on the first one
info.continueVisit(curr);
});
out << indent << "}" << std::endl;
}
inline void writeGraphProperties(const std::string& indent, std::ostream& out)
{
out << indent << "compound=true" << std::endl;
out << indent << "colorscheme=" << colorScheme() << std::endl;
out << indent << "node [shape=circle style=filled fontcolor=white color=black colorscheme=" << colorScheme() << "]"
<< std::endl;
out << indent << "edge [colorscheme=" << colorScheme() << "]" << std::endl;
out << indent << "rankdir=LR" << std::endl;
out << indent << "style=rounded" << std::endl;
}
#endif // DOXYGEN_BUILD
} // namespace detail
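//! Writes the given graph, flattened into a single diagram, as Graphviz DOT to the output stream.
//!
//! A minimal usage sketch (hedged; assumes @c graph is a valid @ref IGraph pointer and that
//! writing to a local file is acceptable):
//!
//! @code
//! std::ofstream file("execution_graph.dot"); // hypothetical output path
//! omni::graph::exec::unstable::writeFlattenedAsGraphviz(graph, file);
//! // Render offline with Graphviz, e.g.: dot -Tsvg execution_graph.dot -o execution_graph.svg
//! @endcode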
inline void writeFlattenedAsGraphviz(omni::core::ObjectParam<IGraph> inGraph, std::ostream& out)
{
using namespace detail;
out << "digraph ExecutionGraph {" << std::endl;
{
writeGraphProperties(singleIndent(), out);
GraphState state;
writeGraph(singleIndent(), "", nullptr, inGraph->getNodeGraphDef(), state, out);
}
out << "}" << std::endl;
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// File: omniverse-code/kit/include/omni/graph/exec/unstable/IPassTypeRegistry.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! @ref omni::graph::exec::unstable::IPassFactory registry for a particular @ref omni::graph::exec::unstable::PassType.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
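//!
//! A minimal iteration sketch (hedged; assumes @c registry points to an implementation of this
//! interface and that @ref omni::graph::exec::unstable::PassTypeRegistryEntry is
//! default-constructible):
//!
//! @code
//! omni::graph::exec::unstable::PassTypeRegistryEntry entry;
//! for (uint64_t i = 0, n = registry->getPassCount(); i < n; ++i)
//! {
//!     registry->getPassAt(i, &entry); // throws on an out-of-range index
//! }
//! @endcode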
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>
: public omni::graph::exec::unstable::IPassTypeRegistry_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassTypeRegistry")
//! Returns the number of registered passes.
uint64_t getPassCount() noexcept;
//! Returns the pass at the given index.
//!
//! If the index is greater than the count, an error is returned.
//!
//! The returned @ref omni::graph::exec::unstable::PassTypeRegistryEntry is valid as long as this pass type registry
//! is not mutated (e.g. a pass is added or removed from the registry).
void getPassAt(uint64_t index, omni::graph::exec::unstable::PassTypeRegistryEntry* outEntry);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>::getPassCount() noexcept
{
return getPassCount_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IPassTypeRegistry_abi>::getPassAt(
uint64_t index, omni::graph::exec::unstable::PassTypeRegistryEntry* outEntry)
{
OMNI_THROW_IF_ARG_NULL(outEntry);
OMNI_THROW_IF_FAILED(getPassAt_abi(index, outEntry));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
static_assert(std::is_standard_layout<omni::graph::exec::unstable::PassTypeRegistryEntry>::value,
"omni::graph::exec::unstable::PassTypeRegistryEntry must be standard layout to be used in ONI ABI");

// File: omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilder.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGraphBuilder.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilder.
#pragma once
#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/NodePartition.h>
#include <omni/graph/exec/unstable/Span.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraph;
class IGraphBuilder_abi;
class IGraphBuilder;
class IGraphBuilderContext;
class IGraphBuilderNode;
class INode;
class IDef;
class INodeDef;
class INodeGraphDef;
class ITopology;
//! Graph builder is the only class that has the ability to modify the topology of a graph.
//!
//! Topological edits of the graph are only allowed during graph transformation and should never
//! be performed during execution of the graph. Construction of the builder will automatically drop
//! all the connections between nodes.
//!
//! Methods on this class mutating a graph topology are not thread-safe (unless documented otherwise)
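//!
//! A minimal usage sketch (hedged; assumes @c builder is an @ref IGraphBuilder handed to a pass
//! and @c producer / @c consumer are @ref INode pointers in the builder's topology):
//!
//! @code
//! builder->connect(producer, consumer);    // producer becomes an upstream parent of consumer
//! builder->disconnect(producer, consumer); // edges can be removed the same way
//! @endcode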
class IGraphBuilder_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IGraphBuilder")>
{
protected:
//! Return owner of all graphs this builder touches
//!
//! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("no_acquire") IGraph* getGraph_abi() noexcept = 0;
//! Returns the topology this builder can modify.
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;
//! Returns the context in which this builder works.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderContext will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("no_acquire") IGraphBuilderContext* getContext_abi() noexcept = 0;
//! Returns @ref omni::graph::exec::unstable::INodeGraphDef this builder can modify.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0;
//! Connect two given nodes.
//!
//! It is an error if the two nodes are not in the same topology.
//!
//! Neither given node should be @c nullptr.
//!
    //! Neither @ref omni::graph::exec::unstable::INode has @ref omni::core::IObject::acquire() called
    //! on it during the connection process.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
connect_abi(OMNI_ATTR("not_null, throw_if_null") INode* upstreamNode,
OMNI_ATTR("not_null, throw_if_null") INode* downstreamNode) noexcept = 0;
//! Disconnect two given nodes.
//!
//! It is an error if the two nodes are not in the same topology.
//!
//! Neither given node should be @c nullptr.
//!
    //! Neither @ref omni::graph::exec::unstable::INode has @ref omni::core::IObject::acquire() called
    //! on it during the disconnection process.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
disconnect_abi(OMNI_ATTR("not_null, throw_if_null") INode* upstreamNode,
OMNI_ATTR("not_null, throw_if_null") INode* downstreamNode) noexcept = 0;
//! Remove a node from topology.
//!
//! The given node must not be @c nullptr.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
remove_abi(OMNI_ATTR("not_null, throw_if_null") INode* node) noexcept = 0;
    //! Sets the definition for the given node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeGraphDef().
//!
//! This method is NOT thread safe.
virtual void setNodeDef_abi(OMNI_ATTR("not_null") INode* node, INodeDef* nodeDef) noexcept = 0;
    //! Sets the definition for the given node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilder::setNodeDef().
//!
//! This method is NOT thread safe.
virtual void setNodeGraphDef_abi(OMNI_ATTR("not_null") INode* node, INodeGraphDef* nodeGraphDef) noexcept = 0;
//! Unsets given node's definition.
//!
//! If the definition is already @c nullptr, this method does nothing.
//!
//! This method is NOT thread safe.
virtual void clearDef_abi(OMNI_ATTR("not_null") INode* node) noexcept = 0;
//! Replace well formed cluster of nodes with a single node and the given definition.
//!
    //! All nodes must exist in the same (and current) topology; otherwise the entire operation is aborted.
//!
//! @ref omni::core::IObject::acquire() is called on the given definition pointer.
//!
//! This method is NOT thread safe.
virtual void replacePartition_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const NodePartition* partition,
OMNI_ATTR("not_null, throw_if_null") IDef* definition) noexcept = 0;
//! Create a new node in current node graph def.
//!
//! The given node name must not be @c nullptr.
//!
//! The given node def can be @c nullptr.
//!
    //! Node creation can return @c nullptr when the current node graph def doesn't allow node construction
    //! outside of the pass that created it.
//!
//! The returned @ref omni::graph::exec::unstable::INode will have @ref omni::core::IObject::acquire() called on it.
virtual INode* createNode_abi(OMNI_ATTR("in, not_null, throw_if_null, c_str") const char* name,
IDef* def) noexcept = 0;
    //! Access the nodes created by this builder.
    //!
    //! The span is no longer valid when the topology of the graph changes. You need to query it again.
    //!
    //! If a node created by this builder is later removed by another pass, the returned list will still contain it.
    //! This is safe because underlying nodes are not deleted until the next graph population. Checking whether a
    //! node is valid in the current topology allows these cases to be filtered out.
//!
//! The pointers in the span are non owning, i.e. @ref omni::graph::exec::unstable::INode will not have
//! @ref omni::core::IObject::acquire() called on it.
virtual Span<INode* const> getCreatedNodes_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGraphBuilder.
using GraphBuilderPtr = omni::core::ObjectPtr<IGraphBuilder>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraphBuilder.gen.h>
//! @copydoc omni::graph::exec::unstable::IGraphBuilder_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraphBuilder
: public omni::core::Generated<omni::graph::exec::unstable::IGraphBuilder_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilderNode.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraphBuilder.gen.h>

// File: omniverse-code/kit/include/omni/graph/exec/unstable/IPassPipeline.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPassPipeline.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassPipeline.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilderContext;
class IPassPipeline;
class IPassPipeline_abi;
class INodeGraphDef;
//! Runs registered passes.
//!
//! The role of the pass pipeline is to populate and prepare the execution graph. The base implementation runs passes
//! based on pass type and registration order. Most applications will define their own pass pipeline to control how
//! the execution graph is generated.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
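//!
//! A minimal driving sketch (hedged; assumes @c pipeline, @c builderContext, @c nodeGraphDef and
//! @c topologyStamp are provided by the surrounding graph construction code):
//!
//! @code
//! if (pipeline->needsConstruction())
//!     pipeline->construct(); // throws on failure
//! if (pipeline->needsExecute(topologyStamp))
//!     pipeline->execute(builderContext, nodeGraphDef); // runs the registered passes
//! @endcode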
class IPassPipeline_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassPipeline")>
{
protected:
//! Test if pipeline needs to rebuild (mostly for its acceleration structures).
virtual bool needsConstruction_abi() noexcept = 0;
//! Build the pipeline (mostly for its acceleration structures).
virtual OMNI_ATTR("throw_result") omni::core::Result construct_abi() noexcept = 0;
//! Test if pipeline needs to run (after topology changes in the graph).
virtual bool needsExecute_abi(Stamp globalTopology) noexcept = 0;
    //! Execute the graph transformation pipeline.
virtual OMNI_ATTR("throw_result") omni::core::Result
execute_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilderContext* builderContext,
OMNI_ATTR("not_null, throw_if_null") INodeGraphDef* nodeGraphDef) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IPassPipeline.
using PassPipelinePtr = omni::core::ObjectPtr<IPassPipeline>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPassPipeline.gen.h>
//! @copydoc omni::graph::exec::unstable::IPassPipeline_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPassPipeline
: public omni::core::Generated<omni::graph::exec::unstable::IPassPipeline_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/IGraphBuilderContext.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPassPipeline.gen.h>

// File: omniverse-code/kit/include/omni/graph/exec/unstable/Executor.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Executor.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Executor.
#pragma once
#include <carb/cpp/TypeTraits.h>
#include <omni/graph/exec/unstable/ExecutionPath.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/IExecutor.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/ITopology.h>
#include <omni/graph/exec/unstable/ScheduleFunction.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <atomic>
#include <memory>
#include <queue>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Basic scheduler which executes tasks on the calling thread and accumulates the result
struct SerialScheduler
{
//! Constructor
SerialScheduler(IExecutionContext* context)
{
}
    //! The serial scheduler emplaces tasks on a serial queue and dispatches them one by one after entering the processing loop
template <typename Fn>
Status schedule(Fn&& task, SchedulingInfo)
{
m_tasks.emplace(
[task = captureScheduleFunction(task), this]() mutable
{
Status stat = invokeScheduleFunction(task);
this->m_tasksStatus |= stat;
});
return Status::eSuccess;
}
//! Enter processing of tasks and return accumulated status
Status getStatus()
{
while (!m_tasks.empty())
{
auto& task = m_tasks.front();
task();
m_tasks.pop();
}
return m_tasksStatus;
}
private:
//! Collecting status from all tasks executed by this instance
Status m_tasksStatus{ Status::eUnknown };
    //! Collect serial tasks in a queue to avoid hitting a potential stack size limit with the recursive
    //! pattern used previously.
std::queue<std::function<void()>> m_tasks;
};
//! Data available for executor on every node when traversing the graph.
//!
//! This data does NOT persist from execution to execution. It is written and read by executor during task generation.
//!
//! @note Can be customized via one of executor template parameters.
struct ExecutionNodeData
{
std::atomic<std::uint32_t> visitCount{ 0 }; //!< Number of traversal visit to the node.
std::atomic<bool> hasComputedUpstream{ false }; //!< Propagated value during traversal - has upstream computed.
std::atomic<bool> hasDeferredUpstream{ false }; //!< Propagated value during traversal - has deferred upstream
//!< computation.
//! Copy constructor
    //! @note We need to explicitly define the copy constructor since std::atomic members are not copyable.
    //! We don't copy during concurrent execution, so this is safe.
ExecutionNodeData(const ExecutionNodeData& src)
: visitCount(src.visitCount.load()),
hasComputedUpstream(src.hasComputedUpstream.load()),
hasDeferredUpstream(src.hasDeferredUpstream.load())
{
}
//! Assignment operator
    //! @note We need to explicitly define the assignment operator since std::atomic members are not copyable.
    //! We don't assign during concurrent execution, so this is safe.
ExecutionNodeData& operator=(const ExecutionNodeData& rhs)
{
visitCount.store(rhs.visitCount.load());
hasComputedUpstream.store(rhs.hasComputedUpstream.load());
hasDeferredUpstream.store(rhs.hasDeferredUpstream.load());
return *this;
}
//! Default constructor
    //! @note The compiler will not implicitly generate a default constructor because we defined a copy constructor.
ExecutionNodeData()
{
}
};
// ef-docs execution-visit-begin
//! Graph traversal visit strategy.
//!
//! Will generate a new task when all upstream nodes have been executed.
struct ExecutionVisit
{
//! Called when the traversal wants to visit a node. This method determines what to do with the node (e.g. schedule
//! it, defer it, etc).
template <typename ExecutorInfo>
static Status tryVisit(ExecutorInfo info) noexcept
{
auto& nodeData = info.getNodeData();
if (info.currentTask.getExecutionStatus() == Status::eDeferred)
nodeData.hasDeferredUpstream = true; // we only set to true...doesn't matter which thread does it first
std::size_t requiredCount = info.nextNode->getParents().size() - info.nextNode->getCycleParentCount();
if ((requiredCount == 0) || (++nodeData.visitCount == requiredCount))
{
if (!nodeData.hasDeferredUpstream)
{
// spawning a task within executor doesn't change the upstream path. just reference the same one.
ExecutionTask newTask(info.getContext(), info.nextNode, info.getUpstreamPath());
return info.schedule(std::move(newTask));
}
else
return Status::eDeferred;
}
return Status::eUnknown;
}
};
// ef-docs execution-visit-end
//! Graph traversal visit strategy with dirty cache check.
//!
//! This strategy will generate a new task when all upstream nodes have been visited and:
//!
//! - The node's state requests compute (i.e. the node has been marked as dirty)
//!
//! or:
//!
//! - An upstream node computed
//!
//! If neither of the conditions are true, but all parent nodes have been visited, execution continues (via @ref
//! omni::graph::exec::unstable::IExecutor::continueExecute()).
// ef-docs execution-visit-cache-begin
struct ExecutionVisitWithCacheCheck
{
//! Called when the traversal wants to visit a node. This method determines what to do with the node (e.g. schedule
//! it, defer it, etc).
template <typename ExecutorInfo>
static Status tryVisit(ExecutorInfo info) noexcept
{
auto& nodeData = info.getNodeData();
auto triggeringTaskStatus = info.currentTask.getExecutionStatus();
if (triggeringTaskStatus == Status::eSuccess)
nodeData.hasComputedUpstream = true; // we only set to true...doesn't matter which thread does it first
else if (triggeringTaskStatus == Status::eDeferred)
nodeData.hasDeferredUpstream = true; // we only set to true...doesn't matter which thread does it first
std::size_t requiredCount = info.nextNode->getParents().size() - info.nextNode->getCycleParentCount();
if ((requiredCount == 0) || (++nodeData.visitCount == requiredCount))
{
if (nodeData.hasDeferredUpstream)
return Status::eDeferred;
else
{
// spawning a task within executor doesn't change the upstream path. just reference the same one.
ExecutionTask newTask(info.getContext(), info.nextNode, info.getUpstreamPath());
if (nodeData.hasComputedUpstream ||
info.getContext()->getStateInfo(newTask)->needsCompute(info.getExecutionStamp()))
return info.schedule(std::move(newTask));
else // continue downstream...there may be something dirty. Bypass scheduler to avoid unnecessary
// overhead
return info.continueExecute(newTask);
}
}
return Status::eUnknown;
}
};
// ef-docs execution-visit-cache-end
//! Algorithm to determine how task should be scheduled
struct DefaultSchedulingStrategy
{
//! Returns the SchedulingInfo (e.g. serial, main thread, etc.) for the given task.
static SchedulingInfo getSchedulingInfo(const ExecutionTask& task)
{
INode* node = task.getNode();
if (node->getNodeDef())
return node->getNodeDef()->getSchedulingInfo(task);
else if (node->getNodeGraphDef())
return node->getNodeGraphDef()->getSchedulingInfo(task);
else
return SchedulingInfo::eSchedulerBypass; // bypass the scheduler since there is nothing to compute
}
};
//! Easily configurable @ref omni::graph::exec::unstable::IExecutor implementation providing necessary tools for most
//! common executor types.
//!
//! The @ref omni::graph::exec::unstable::Executor class traverses parts of the
//! @rstref{execution graph <ef_execution_graph>}, generating tasks for each node *visited*. One of the core concepts of
//! EF is that *each* @ref omni::graph::exec::unstable::INodeGraphDef *specifies the* @ref
//! omni::graph::exec::unstable::IExecutor *that should be used to execute the subgraph it defines*. This allows each
//! @ref omni::graph::exec::unstable::INodeGraphDef to control a host of strategies for how its subgraph is executed.
//! Some of the strategies are as follows:
//!
//! - *If a node should be scheduled*. For example, the executor may decide to prune parts of the graph based on the
//! result of a previous execution (i.e. conditional execution). An executor may detect part of the graph does not
//! need to be computed because a previous execution's results are still valid (i.e. caching). An executor may also
//! employ strategies such as executing a node once all of its parent have completed or executing the node as soon as
//! any of the parents have executed.
//!
//! - *How nodes are scheduled*. When an executor visits a node, the executor may choose to execute the computational
//! logic in the node's definition immediately. Alternatively, it can delegate the execution to a *scheduler*. Working
//! with the scheduler, an executor is able to provide execution strategies such as:
//!
//! - Defer execution of the node to a later time.
//!
//! - Execute the node in parallel with other nodes in the graph.
//!
//! - Ensure the node is the only node executing at the moment (e.g. "isolated" tasks).
//!
//! - Execute the node on a specified thread (e.g. the thread that started executing the graph).
//!
//! - *Where nodes are scheduled*. An executor can work with a resource scheduler to determine *where* to execute a
//! node. This includes deciding the best GPU on a multi-GPU system to execute a GPU node. Likewise, executors can
//! consult data center aware schedulers to schedule nodes on remote machines.
//!
//! - *The amount of work to be scheduled*. When visiting a node, an executor can create any number of tasks to
//! accomplish the node's computation. These tasks are able to dynamically create additional work/tasks that the
//! executor is able to track.
//!
//! Executors and schedulers work together to produce, schedule, and execute tasks on behalf of the node. Executors
//! determine which nodes should be visited and generate appropriate work (i.e. tasks). Said differently, executor
//! objects "interpret" the graph based on the behavioral logic encapsulated in the executor. Schedulers collect tasks,
//! possibly concurrently from many executor objects, and map the tasks to hardware resources for execution.
//!
//! This @ref omni::graph::exec::unstable::Executor template contains several parameters that allow the user to control
//! the strategies above.
//!
//! <h1><b>Node Type to be Traversed</b></h1>
//!
//! The @p ExecNode parameter defines the interface used to communicate with nodes. This will usually be @ref
//! omni::graph::exec::unstable::INode or a subclass thereof.
//!
//! <h1><b>Work Generation Strategy</b></h1>
//!
//! @p ExecStrategy defines when/if/what work should be generated when visiting a node. EF provides several
//! implementations of this strategy:
//!
//! - @ref omni::graph::exec::unstable::ExecutionVisit - Generates work after all parent nodes have been executed.
//!
//! - @ref omni::graph::exec::unstable::ExecutionVisitWithCacheCheck - Generates work after all parents have been
//! visited *and* either a parent has successfully executed or the node has been explicitly marked for execution (i.e.
//! dirty).
//!
//! Users are able to define their own execution strategies. For example OmniGraph defines custom work generation
//! strategies for its various graph types (e.g. pull, push, etc).
//!
//! <h1><b>Transient Execution Data</b></h1>
//!
//! Executing a graph definition may require transient data to implement the executor's work generation strategy. For
//! example, when executing parents in parallel, transient data is needed to atomically count the number of parents that
//! have executed to avoid a node incorrectly executing multiple times. @p ExecNodeData is a `struct` the user can
//! define to store this transient data.
//!
//! Each node in the graph definition is assigned an @p ExecNodeData. This transient data type is usually tied to @p
//! ExecStrategy but can also be utilized by the other parameters in this template.
//!
//! EF provides the @ref omni::graph::exec::unstable::ExecutionNodeData struct to work with EF's built-in execution
//! strategies.
//!
//! <h1><b>Scheduler</b></h1>
//!
//! The executor's job is to traverse a graph definition, generating appropriate work as nodes are visited. That work is
//! given to a scheduler, whose job it is to dispatch the work. The benefit of a scheduler is that it can have a
//! holistic view of the system, across multiple running executors, and efficiently dispatch the work to proper hardware
//! resources.
//!
//! The @p Scheduler parameter defines the scheduler to be used.
//!
//! EF defines the @ref omni::graph::exec::unstable::SerialScheduler which executes task serially. In practice, more
//! advanced schedulers are available. For example, *omni.kit.exec.core* defines the @c ParallelSpawner scheduler
//! (based on [Intel's Thread Building Blocks](https://github.com/oneapi-src/oneTBB)) which is able to run tasks in
//! parallel.
//!
//! <h1><b>Scheduling Strategy</b></h1>
//!
//! The @p SchedulingStrategy provides a @ref omni::graph::exec::unstable::SchedulingInfo for each generated task. @ref
//! omni::graph::exec::unstable::SchedulingInfo is an enum that outlines scheduling constraints for a task (e.g. must be
//! run serially, must run on the thread that started graph execution, etc).
//!
//! EF's @ref omni::graph::exec::unstable::DefaultSchedulingStrategy calls the definitions's @ref
//! omni::graph::exec::unstable::IDef::getSchedulingInfo() to get the definitions's preferred strategy. However, users
//! may choose to override the definitions's preferred strategy with a custom @c SchedulingStrategy. For example,
//! forcing all definitions to run serially to ease in debugging the execution graph.
//!
//! <h1><b>Virtual Methods</b></h1>
//!
//! In addition to the template parameters, users may choose to override one of @ref
//! omni::graph::exec::unstable::Executor's virtual methods. These methods are:
//!
//! - @ref omni::graph::exec::unstable::IExecutor::execute_abi(): This method begins execution of the node provided in
//! the constructor. Since this node is usually a root node, this method simply calls @ref
//! omni::graph::exec::unstable::IExecutor::continueExecute_abi() to execute nodes beyond the root node and calls the
//! scheduler's @c getStatus() method which is a blocking call that waits for outstanding work to finish.
//!
//! - @ref omni::graph::exec::unstable::IExecutor::continueExecute_abi(): This method is called after each node
//! executes. It's job is to continue executing the nodes downstream of the executed node. By default, this method
//! uses the work generation strategy (i.e. @c ExecStrategy) on each of the node's children.
//!
//! <h1><b>Miscellaneous</b></h1>
//!
//! The lifetime of an executor is short. They exist only when executing their owning graph definition. All transient
//! data stored in @p ExecNodeData is valid only during this lifetime.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See @rstref{Creating an Executor <ef_executor_creation>} for a guide on creating a customized executor for your
//! graph definition.
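//!
//! A minimal customization sketch (hedged; it simply recombines the built-in strategies described
//! above, mirroring how @ref omni::graph::exec::unstable::ExecutorFallback is defined):
//!
//! @code
//! using MyExecutor = omni::graph::exec::unstable::Executor<
//!     omni::graph::exec::unstable::INode,                        // node interface to traverse
//!     omni::graph::exec::unstable::ExecutionVisitWithCacheCheck, // work generation strategy
//!     omni::graph::exec::unstable::ExecutionNodeData,            // per-node transient data
//!     omni::graph::exec::unstable::SerialScheduler,              // task scheduler
//!     omni::graph::exec::unstable::DefaultSchedulingStrategy>;   // scheduling constraints
//!
//! // Typically instantiated via a node graph def's executor factory:
//! // MyExecutor::create(topology, thisTask)->execute();
//! @endcode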
template <typename ExecNode,
typename ExecStrategy,
typename ExecNodeData,
typename Scheduler,
typename SchedulingStrategy,
typename ExecutorInterface = IExecutor>
class Executor : public Implements<ExecutorInterface>
{
using Node = const ExecNode;
using NodeData = ExecNodeData;
using NodeDataArray = std::vector<NodeData>;
using ThisExecutor = Executor<ExecNode, ExecStrategy, ExecNodeData, Scheduler, SchedulingStrategy, ExecutorInterface>;
using ThisExecutorPtr = omni::core::ObjectPtr<ThisExecutor>;
//! Helper utility to check scheduler dispatch strategy. By default schedulers are not deferred.
template <typename S, typename Enabled = void>
struct is_deferred
{
static constexpr bool value = false;
};
//! Template utility to check scheduler dispatch strategy. Schedulers with scheduleDeferred method are deferred.
//! Deferred tasks will need to hold a shared pointer to the executor to extend its lifetime past current execution.
//! This is handled automatically thanks to this helper.
template <typename S>
struct is_deferred<
S,
std::enable_if_t<std::is_same<Status,
decltype(std::declval<S>().scheduleDeferred(
std::declval<IScheduleFunction*>(), std::declval<SchedulingInfo>()))>::value>>
{
static constexpr bool value = true;
};
public:
//! Structure passed to the traversal algorithm collecting all necessary data for easy access.
struct Info
{
private:
Executor* m_executor;
public:
        const ExecutionTask& currentTask; //!< The task currently being processed.
INode* nextNode; //!< The node to be visited next.
//! Constructor.
Info(Executor* executor, const ExecutionTask& task, INode* next) noexcept
: m_executor(executor), currentTask(task), nextNode(next)
{
}
//! Returns the @ref Executor defined data for the node.
NodeData& getNodeData()
{
return m_executor->getNodeData(nextNode);
}
//! Returns a reference to the owning @ref Executor.
ThisExecutor* getExecutor()
{
return m_executor;
}
//! Returns the current context (i.e. @ref IExecutionContext) in which the @ref Executor is executing.
IExecutionContext* getContext()
{
return currentTask.getContext();
}
//! Returns the current context's execution stamp/version (i.e. @ref IExecutionContext::getExecutionStamp()).
Stamp getExecutionStamp()
{
return getContext()->getExecutionStamp();
}
//! Returns the upstream path of the node that is currently being processed.
const ExecutionPath& getUpstreamPath() const
{
return currentTask.getUpstreamPath();
}
//! Schedules the given task.
Status schedule(ExecutionTask&& newTask)
{
return m_executor->scheduleInternal(std::move(newTask));
}
//! Returns the given task's SchedulingInfo.
SchedulingInfo getSchedulingInfo(const ExecutionTask& task) const
{
return m_executor->getSchedulingInfo(task);
}
//! Returns the @ref Executor's scheduler.
Scheduler& getScheduler()
{
return m_executor->m_scheduler;
}
        //! Tells the @ref Executor to process the given task/node's children. This allows it to
        //! generate additional work after the given task has executed.
Status continueExecute(ExecutionTask& currentTask)
{
return m_executor->continueExecute_abi(¤tTask);
}
};
//! Scheduling constraint to use when dispatching given task.
SchedulingInfo getSchedulingInfo(const ExecutionTask& task) const
{
return SchedulingStrategy::getSchedulingInfo(task);
}
//! Access custom data associated with each node.
NodeData& getNodeData(INode* node)
{
return m_nodeData[node->getIndexInTopology()];
}
//! Execution path to node instantiating graph def associated with this executor.
const ExecutionPath& getPath() const
{
return m_path;
}
//! Execution context.
IExecutionContext* getContext() const
{
return m_task.getContext();
}
//! Factory method for this executor
static ThisExecutorPtr create(omni::core::ObjectParam<ITopology> toExecute, const ExecutionTask& thisTask)
{
return omni::core::steal(new ThisExecutor(toExecute.get(), thisTask));
}
protected:
//! Default constructor is removed
Executor() = delete;
//! Constructor used by factory method.
//!
//! @param toExecute Graph topology used to generate the work.
//! @param currentTask Task causing this execution. Used to generate execution path.
explicit Executor(ITopology* toExecute, const ExecutionTask& currentTask) noexcept
: m_path((currentTask.getNode() != toExecute->getRoot()) ?
ExecutionPath(currentTask.getUpstreamPath(), currentTask.getNode()) :
currentTask.getUpstreamPath()),
m_task(currentTask.getContext(), toExecute->getRoot(), m_path),
m_nodeData(toExecute->getNodeCount()),
m_scheduler(currentTask.getContext())
{
}
// ef-docs executor-execute-begin
//! Main execution method. Called once by each node instantiating same graph definition.
Status execute_abi() noexcept override
{
(void)continueExecute_abi(&m_task);
        // give the scheduler a chance to complete the execution of potentially parallel work which should complete
        // within the current execution. All background tasks will continue past this point.
        // The scheduler is responsible for collecting the execution status for everything that this executor generated.
return m_scheduler.getStatus() | m_schedulerBypass;
}
// ef-docs executor-execute-end
// ef-docs executor-continue-execute-begin
//! Implementation of the base class method to generate additional work after the given task has executed but
//! before it has completed.
Status continueExecute_abi(ExecutionTask* currentTask) noexcept override
{
if (currentTask->getNode()->getChildren().empty())
{
return Status::eSuccess | currentTask->getExecutionStatus();
}
Status ret = Status::eUnknown;
for (auto child : currentTask->getNode()->getChildren())
{
ret |= ExecStrategy::tryVisit(Info(this, *currentTask, child));
}
return ret | currentTask->getExecutionStatus();
}
// ef-docs executor-continue-execute-end
//! Implementation of base class schedule method available for work generation outside of traversal loop.
Status schedule_abi(IScheduleFunction* fn, SchedulingInfo schedInfo) noexcept override
{
return scheduleExternal(fn, schedInfo);
}
//! Scheduling spawner of a task generated by traversal implementation
template <typename S = Scheduler>
Status scheduleInternal(ExecutionTask&& newTask, typename std::enable_if_t<!is_deferred<S>::value>* = nullptr)
{
// ef-docs executor-schedule-internal-begin
Status ret = Status::eUnknown;
SchedulingInfo schedInfo = getSchedulingInfo(newTask);
if (schedInfo != SchedulingInfo::eSchedulerBypass)
{
            // this task will finish before we exit the executor...just capture a raw pointer to avoid refcounting cost
ret = m_scheduler.schedule([executor = this, task = std::move(newTask)]() mutable -> Status
{ return task.execute(executor); },
schedInfo);
}
else // bypass the scheduler...no need for extra scheduling overhead
{
m_schedulerBypass |= newTask.execute(this);
}
return ret;
// ef-docs executor-schedule-internal-end
}
//! Deferred scheduling spawner of a task generated by traversal implementation
template <typename S = Scheduler>
Status scheduleInternal(ExecutionTask&& newTask, typename std::enable_if_t<is_deferred<S>::value>* = nullptr)
{
// ef-docs executor-schedule-deferred-begin
SchedulingInfo schedInfo = getSchedulingInfo(newTask);
        // for deferred tasks, we capture the executor as an owning ObjectPtr (extra cost, but keeps the object alive)
Status ret = m_scheduler.scheduleDeferred(
[executor = omni::core::borrow(this), task = std::move(newTask)]() mutable -> Status
{ return task.execute(executor); },
schedInfo);
return ret;
// ef-docs executor-schedule-deferred-end
}
//! Scheduling spawner of a task generated by currently running task
template <typename S = Scheduler>
Status scheduleExternal(IScheduleFunction* fn,
SchedulingInfo schedInfo,
typename std::enable_if_t<!is_deferred<S>::value>* = nullptr)
{
if (schedInfo != SchedulingInfo::eSchedulerBypass)
{
return m_scheduler.schedule(fn, schedInfo);
}
else // bypass the scheduler...no need for extra scheduling overhead
{
return fn->invoke();
}
}
//! Deferred scheduling spawner of a task generated by currently running task
template <typename S = Scheduler>
Status scheduleExternal(IScheduleFunction* fn,
SchedulingInfo schedInfo,
typename std::enable_if_t<is_deferred<S>::value>* = nullptr)
{
Status ret = m_scheduler.scheduleDeferred(fn, schedInfo);
return ret;
}
ExecutionPath m_path; //!< Execution path helping discover state associated with current instance of the graph.
ExecutionTask m_task; //!< Task starting the execution
NodeDataArray m_nodeData; //!< Storage for per node custom data
Scheduler m_scheduler; //!< An interface for spawning tasks for dispatch by scheduler and waiting for completion.
Status m_schedulerBypass{ Status::eUnknown }; //!< Execution status for tasks bypassing scheduler.
};
//! Default executor used by all node graph definitions that don't explicitly pass the executor factory method.
using ExecutorFallback = Executor<INode, ExecutionVisit, ExecutionNodeData, SerialScheduler, DefaultSchedulingStrategy>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

// File: omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResult.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Class representing a result of asynchronous computation.
//!
//! Create via @ref omni::graph::exec::unstable::IBackgroundTask::getBackgroundResult().
//!
//! Call @ref omni::graph::exec::unstable::IBackgroundResult::isReady() or @ref
//! omni::graph::exec::unstable::IBackgroundResult::waitFor() to make sure the result is ready. Once the result is
//! ready, call @ref omni::graph::exec::unstable::IBackgroundResult::write() to make the result visible.
//!
//! Operates much like `std::future`.
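//!
//! A minimal polling sketch (hedged; assumes @c result was obtained from
//! @ref omni::graph::exec::unstable::IBackgroundTask::getBackgroundResult() and @c task is the
//! current @ref omni::graph::exec::unstable::ExecutionTask):
//!
//! @code
//! using namespace omni::graph::exec::unstable;
//! if (result->waitFor(1'000'000) == BackgroundResultStatus::eReady) // wait up to 1ms
//! {
//!     Status status = result->write(task); // publish the computed value
//! }
//! @endcode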
template <>
class omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>
: public omni::graph::exec::unstable::IBackgroundResult_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IBackgroundResult")
//! Check if background computation has a result available for consumption.
//!
//! @return @c true when it is safe to call omni::graph::exec::unstable::IBackgroundResult::write(), @c false
//! otherwise.
//!
//! Once @ref omni::graph::exec::unstable::IBackgroundResult::write() has been called, this method will return an
//! error.
//!
//! This method is not thread safe.
//!
//! May throw.
bool isReady();
//! Request background processing cancellation
//!
//! @param blocking If @c true, this call won't exit until background processing is completed.
//!
//! This method is not thread safe.
//!
//! May throw.
void cancel(bool blocking);
//! Write the result.
//!
//! This method is not thread safe.
//!
//! An error is returned if this method is called more than once.
//!
//! May throw.
omni::graph::exec::unstable::Status write(omni::graph::exec::unstable::ExecutionTask& info);
//! Waits for the specified time for the result to become ready.
//!
//! If the result becomes ready in the specified time (or is already ready) @ref
//! omni::graph::exec::unstable::BackgroundResultStatus::eReady is returned. Otherwise, @ref
//! omni::graph::exec::unstable::BackgroundResultStatus::eTimeout is returned.
//!
//! This method is not thread safe.
//!
//! Returns an error if the result has already been consumed.
//!
//! May throw.
omni::graph::exec::unstable::BackgroundResultStatus waitFor(uint64_t nanoseconds);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>::isReady()
{
bool ready;
OMNI_THROW_IF_FAILED(isReady_abi(&ready));
return ready;
}
inline void omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>::cancel(bool blocking)
{
OMNI_THROW_IF_FAILED(cancel_abi(blocking));
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>::write(
omni::graph::exec::unstable::ExecutionTask& info)
{
omni::graph::exec::unstable::Status out;
OMNI_THROW_IF_FAILED(write_abi(&info, &out));
return out;
}
inline omni::graph::exec::unstable::BackgroundResultStatus omni::core::Generated<
omni::graph::exec::unstable::IBackgroundResult_abi>::waitFor(uint64_t nanoseconds)
{
omni::graph::exec::unstable::BackgroundResultStatus out;
OMNI_THROW_IF_FAILED(waitFor_abi(nanoseconds, &out));
return out;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL

// File: omniverse-code/kit/include/omni/graph/exec/unstable/IApplyOnEachFunction.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Interface wrapping a function (possibly with storage) to apply on all instantiations of a given definition.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IApplyOnEachFunction_abi>
: public omni::graph::exec::unstable::IApplyOnEachFunction_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IApplyOnEachFunction")
//! Invokes the wrapped function.
void invoke(const omni::graph::exec::unstable::ExecutionPath& path) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void omni::core::Generated<omni::graph::exec::unstable::IApplyOnEachFunction_abi>::invoke(
const omni::graph::exec::unstable::ExecutionPath& path) noexcept
{
invoke_abi(&path);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL

// File: omniverse-code/kit/include/omni/graph/exec/unstable/IExecutor.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IExecutor.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IExecutor.
#pragma once
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IExecutor_abi;
class IExecutor;
class IScheduleFunction;
//! Executes the node in a graph definition.
//!
//! The purpose of an executor is to generate work for the nodes in a graph definition. @ref
//! omni::graph::exec::unstable::IExecutor is a minimal interface that defines enough methods to accomplish just that.
//!
//! However, @ref omni::graph::exec::unstable::IExecutor's minimal nature is not what most users want when customizing
//! execution for their graph definitions. Rather, they want something useful. @ref
//! omni::graph::exec::unstable::Executor is a useful implementation of @ref omni::graph::exec::unstable::IExecutor
//! designed for graph definition authors to extend. See
//! @ref omni::graph::exec::unstable::Executor's documentation to better understand the purpose, duties, and
//! capabilities of an executor.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See @rstref{Creating an Executor <ef_executor_creation>} for a guide on creating a customized executor for your
//! graph definition.
class IExecutor_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutor")>
{
protected:
//! Main execute method. Returning status of the execution.
virtual Status execute_abi() noexcept = 0;
//! Request for scheduling of additional work generated during execution of a task.
//!
//! @param fn Function to call once the work is dispatched.
//! @param schedInfo Scheduling constraints to use when dispatching this work.
virtual OMNI_ATTR("no_api") Status schedule_abi(IScheduleFunction* fn, SchedulingInfo schedInfo) noexcept = 0;
// TODO does a memory allocation, return result?
//! Request for scheduling of additional work after the given task has executed but before it has completed.
//!
//! @param task The current task
virtual Status continueExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref")
ExecutionTask* task) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IExecutor.
using ExectorPtr = omni::core::ObjectPtr<IExecutor>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutor.gen.h>
//! @copydoc omni::graph::exec::unstable::IExecutor_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutor : public omni::core::Generated<omni::graph::exec::unstable::IExecutor_abi>
{
public:
//! Schedules the supplied function.
//!
//! The given function must have the signature of `Status(void)`.
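    //!
    //! A minimal usage sketch (hedged; assumes the @c eSchedulerBypass constraint is acceptable
    //! for the extra work, in which case the function is invoked without scheduler overhead):
    //!
    //! @code
    //! executor->schedule([]() { return Status::eSuccess; }, SchedulingInfo::eSchedulerBypass);
    //! @endcode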
template <typename Fn>
inline Status schedule(Fn&& fn, SchedulingInfo schedInfo); // may throw
};
#include <omni/graph/exec/unstable/IScheduleFunction.h>
#ifndef DOXYGEN_BUILD // templates confuse doxygen :(
template <typename Fn>
inline omni::graph::exec::unstable::Status omni::graph::exec::unstable::IExecutor::schedule(Fn&& fn,
SchedulingInfo schedInfo)
{
class ScheduleFunction : public Implements<IScheduleFunction>
{
public:
static omni::core::ObjectPtr<ScheduleFunction> create(Fn&& fn)
{
return omni::core::steal(new ScheduleFunction(std::forward<Fn>(fn)));
}
protected:
Status invoke_abi() noexcept override
{
return m_fn();
}
ScheduleFunction(Fn&& fn) : m_fn(std::move(fn))
{
}
private:
Fn m_fn;
};
return schedule_abi(ScheduleFunction::create(std::forward<Fn>(fn)).get(), schedInfo);
}
#endif // DOXYGEN_BUILD
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutor.gen.h>

// File: omniverse-code/kit/include/omni/graph/exec/unstable/PartitioningUtils.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file PartitioningUtils.h
//!
//! @brief Defines classes and algorithms to use with @ref omni::graph::exec::unstable::IPartitionPass.
#pragma once
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/ITopology.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Traversal.h>
#include <cstdint>
#include <type_traits>
#include <vector>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Specialized container to enable partitioning algorithm via disjoint set like data structure
//!
//! Modifications to typically known algorithm (https://en.wikipedia.org/wiki/Disjoint-set_data_structure)
//! - index set "0" is a special one and matching the root of the graph, i.e. root node will always have index 0
//! - only "selected" nodes are inserted into sets and only these nodes are merged to form groups of nodes based
//! on partitioning algorithm
//!
//! This object is NOT ABI-safe.
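//!
//! A minimal usage sketch (hedged; assumes @c topology is the current @ref ITopology and
//! @c a / @c b are two selected @ref INode pointers):
//!
//! @code
//! PartitionSet sets(topology->getNodeCount());
//! std::vector<INode*> selected{ a, b };
//! sets.makeSelectedSets(selected);
//! if (sets.isMarked(a) && sets.isMarked(b))
//!     sets.merge(a, b); // a and b now belong to the same partition set
//! @endcode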
class PartitionSet
{
//! Deleted default constructor
PartitionSet() = delete;
//! Reserved index for root nodes. We leverage that to distinguish between selected and not selected nodes.
enum : uint64_t
{
kReservedRootIndex = 0
};
public:
//! Construct a set for a static topology of a given @p topologyNodeCount nodes.
PartitionSet(std::size_t topologyNodeCount) noexcept : m_parent(topologyNodeCount), m_rank(topologyNodeCount)
{
}
//! Initialize selected nodes
template <typename V>
void makeSelectedSets(V& selected)
{
for (INode* node : selected)
{
auto index = node->getIndexInTopology();
m_parent[index] = index;
}
}
    //! Returns true if the given node is selected for partitioning and has a set allocated.
bool isMarked(INode* node) const
{
auto index = node->getIndexInTopology();
return m_parent[index] != kReservedRootIndex;
}
//! Find the set this node belongs to. Forwards the call to underlying implementation.
uint64_t find(INode* node)
{
return find(node->getIndexInTopology());
}
//! Merge two sets. Forwards the call to underlying implementation.
void merge(INode* nodeA, INode* nodeB)
{
merge(nodeA->getIndexInTopology(), nodeB->getIndexInTopology());
}
//! Find set that this index belongs to.
//!
    //! The search has a side effect: it flattens the set links directly to the last link in the chain
    //! (path compression). This makes the next find on the same element faster.
uint64_t find(uint64_t index)
{
if (m_parent[index] != index)
{
m_parent[index] = find(m_parent[index]);
}
return m_parent[index];
}
//! Merge two sets.
//!
//! Implementation uses rank to prioritize merging into sets that received more merges.
//! This improves the search time.
void merge(uint64_t a, uint64_t b)
{
auto aSet = find(a);
auto bSet = find(b);
if (aSet == bSet)
return;
// Shouldn't happen. Make sure we only merge marked nodes
OMNI_GRAPH_EXEC_ASSERT(a != kReservedRootIndex && b != kReservedRootIndex);
if (m_rank[aSet] < m_rank[bSet])
{
m_parent[aSet] = bSet;
}
else if (m_rank[aSet] > m_rank[bSet])
{
m_parent[bSet] = aSet;
}
else
{
m_parent[bSet] = aSet;
m_rank[aSet] += 1;
}
}
std::vector<uint64_t> m_parent; //!< We have as many elements as nodes in the topology. Each element represents
//!< a unique set (if it points to its own index), or a link to another set (if
//!< merged)
    std::vector<uint64_t> m_rank; //!< Rank per element used to prevent the tree from growing too high, which optimizes searches
};
//! Algorithm to group selected nodes into valid partitions based on node ordering.
//!
//! Partition is only valid when there is no path that leaves and comes back to the same group.
//! Such partition would introduce cycles in the graph.
//!
//! The quick algorithm uses a single traversal over the entire graph to determine a unique partition index for each
//! group of nodes. During traversal, the partition index is derived from the node the traversal comes from:
//! - the partition index is incremented by 1 if the edge crosses between selected and unselected nodes
//! - the partition index is assigned to the currently visited node only if it is higher than the one currently set
//!
//! The traversal algorithm visits all the edges in the graph and continues traversal on the last visit to a node.
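//!
//! A minimal usage sketch (hedged; assumes @c topology and @c selected come from an
//! @ref IPartitionPass implementation and that committing a valid group is done elsewhere,
//! e.g. via @ref IGraphBuilder::replacePartition()):
//!
//! @code
//! quickPartitioning(topology, selected,
//!                   [](INode** nodes, ptrdiff_t count)
//!                   {
//!                       // inspect nodes[0..count) and, if the group forms a valid partition, commit it
//!                   });
//! @endcode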
template <typename VerifyAndCreateFn>
void quickPartitioning(ITopology* topology, Span<INode*> selectedNodes, VerifyAndCreateFn&& verifyAndCommitPartitionFn)
{
if (selectedNodes.size() == 0)
return;
std::vector<INode*> nodes(selectedNodes.begin(), selectedNodes.end());
PartitionSet partitions(topology->getNodeCount());
partitions.makeSelectedSets(nodes);
struct QuickPartitioningNodeData
{
uint64_t partition{ 0 };
bool marked{ false };
};
traversal_dfs<VisitAll, QuickPartitioningNodeData>(
topology->getRoot(),
[&partitions](auto info, INode* prev, INode* curr)
{
auto& currUserData = info.userData(curr);
if (info.isFirstVisit())
{
currUserData.marked = partitions.isMarked(curr);
}
auto& prevUserData = info.userData(prev);
uint64_t edgePartition = prevUserData.partition;
if (currUserData.marked != prevUserData.marked)
{
edgePartition += 1;
}
if (currUserData.partition < edgePartition)
{
currUserData.partition = edgePartition;
}
if (info.isLastVisit())
{
if (currUserData.marked)
{
for (INode* parent : curr->getParents())
{
auto& parentUserData = info.userData(parent);
if (parentUserData.partition == currUserData.partition)
{
partitions.merge(parent, curr);
}
}
}
info.continueVisit(curr);
}
});
// sort vector by partitions
std::sort(nodes.begin(), nodes.end(),
[&partitions](INode* a, INode* b) { return partitions.find(a) < partitions.find(b); });
// verify and create partitions in the graph
auto partitionSetBegin = nodes.begin();
auto partitionSet = partitions.find(*partitionSetBegin);
auto partitionSetNext = partitionSetBegin;
do
{
partitionSetNext++;
if (partitionSetNext == nodes.end())
{
verifyAndCommitPartitionFn(&(*partitionSetBegin), partitionSetNext - partitionSetBegin);
}
else if (partitions.find(*partitionSetNext) != partitionSet)
{
verifyAndCommitPartitionFn(&(*partitionSetBegin), partitionSetNext - partitionSetBegin);
// switch to next partition
partitionSetBegin = partitionSetNext;
partitionSet = partitions.find(*partitionSetNext);
}
} while (partitionSetNext != nodes.end());
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/NodeDefLambda.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file NodeDefLambda.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::NodeDefLambda.
#pragma once
#include <omni/graph/exec/unstable/NodeDef.h>
#include <functional>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Wrapper of a lambda function into opaque node definition
//!
//! This class is great for quick prototyping and for nodes that won't be statically analyzed. It is recommended to
//! convert such nodes into real classes to avoid paying the price of a std::function call on top of an already
//! virtual method call, and to provide type information.
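//!
//! A minimal usage sketch (the definition name and lambda body are illustrative; the exact Status
//! and SchedulingInfo values to use are assumptions):
//!
//! @code
//! auto def = NodeDefLambda::create("myPrototypeNode",
//!                                  [](ExecutionTask& task) -> Status
//!                                  {
//!                                      // do the work for this task
//!                                      return Status::eSuccess;
//!                                  },
//!                                  SchedulingInfo::eSerial);
//! @endcode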
// ef-docs node-def-lambda-begin
class NodeDefLambda : public NodeDef
{
public:
//! Templated constructor for wrapper class
//!
//! @tparam Fn Function type taking execution task and returning status. Compiler should detect it for you.
    //! @param definitionName The definition name is a token that transformation passes can register against
//! @param fn Execute function body. Signature should be `Status(ExecutionTask&)`.
//! @param schedInfo Fixed at runtime scheduling constraint
//!
//! May throw.
template <typename Fn>
static omni::core::ObjectPtr<NodeDefLambda> create(const char* definitionName, Fn&& fn, SchedulingInfo schedInfo)
{
OMNI_THROW_IF_ARG_NULL(definitionName);
return omni::core::steal(new NodeDefLambda(definitionName, std::forward<Fn>(fn), schedInfo));
}
protected:
//! Templated and protected constructor for wrapper class.
//!
//! Use factory method to construct objects of this class.
template <typename Fn>
NodeDefLambda(const char* definitionName, Fn&& fn, SchedulingInfo schedInfo)
: NodeDef(definitionName), m_fn(std::move(fn)), m_schedulingInfo(schedInfo)
{
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeDefLambda
//!
//! Calling captured lambda
Status execute_abi(ExecutionTask* info) noexcept override
{
return m_fn(*info);
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeDefLambda
//!
//! Returns scheduling information provided to the constructor
SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override
{
return m_schedulingInfo;
}
private:
std::function<Status(ExecutionTask&)> m_fn; //!< Execute function body
SchedulingInfo m_schedulingInfo; //!< Scheduling constraint
};
// ef-docs node-def-lambda-end
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionContext.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Stores and provides access to the execution state of the graph.
//!
//! The @rstref{execution graph <ef_execution_graph>} is only a description of what needs to be executed. The actual
//! graph state is stored separately in an instance of this object.
//!
//! The execution context allows computing the same graph description within multiple contexts. It also enables the
//! ability to perform this computation concurrently. Some example use cases of this class:
//!
//! - Computing the state of a graph at a time different than the current time (e.g. asynchronous caching, fake
//! dynamics)
//!
//! - Computing the state of a graph with inputs different from the current input state (e.g. double solve)
//!
//! All execution begins with a call to one of the execute methods on this interface. @ref
//! omni::graph::exec::unstable::IExecutionContext::execute() is used to execute the entire execution graph while @ref
//! omni::graph::exec::unstable::IExecutionContext::executeNode() can be used to execute only a part of the graph.
//!
//! Part of this interface defines a key/value store. The *key* in this store is an @ref
//! omni::graph::exec::unstable::ExecutionPath. The *value* is an implementation of @ref
//! omni::graph::exec::unstable::IExecutionStateInfo, which in addition to storing computation state, can also store
//! user defined data. The computation state and user data can be access with one of the `getStateInfo` /
//! `setStateInfo` methods though if you wish to access the user data using one of the `getNodeData` / `setNodeData` is
//! slightly faster.
//!
//! Another feature of @ref omni::graph::exec::unstable::IExecutionContext is the ability to quickly search for nodes
//! using a particular definition and apply a function on them. Definitions can be searched by name or by pointer (see
//! @ref omni::graph::exec::unstable::IExecutionContext::applyOnEach()). These methods are used extensively during
//! @rstref{Graph Construction <ef_pass_concepts>}.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Since multiple threads can concurrently traverse a graph, implementors of methods within this class should expect
//! that multiple threads will be accessing this object in parallel.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>
: public omni::graph::exec::unstable::IExecutionContext_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionContext")
//! Current execution version. Incremented with each execution of the context.
//!
//! @thread_safety See thread safety information in interface description.
omni::graph::exec::unstable::Stamp getExecutionStamp() noexcept;
//! Returns `true` if context is currently executing
//!
//! See thread safety information in interface description.
bool inExecute() noexcept;
//! Returns `true` if the current thread is the one who started this context's execution.
//!
//! Note, do not assume that the thread that started the context's execution is the "main" thread.
//!
//! @thread_safety See thread safety information in interface description.
bool isExecutingThread() noexcept;
//! Main execution method. Executes the entire execution graph.
//!
//! @thread_safety See thread safety information in interface description.
omni::graph::exec::unstable::Status execute() noexcept;
//! On-demand execution method. Executes the given node.
//!
//! @thread_safety See thread safety information in interface description.
omni::graph::exec::unstable::Status executeNode(const omni::graph::exec::unstable::ExecutionPath& upstreamPath,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);
//! Context initialization. Responsible to propagate initialization to graphs.
//!
//! @thread_safety See thread safety information in interface description.
void initialize();
//! Access state for a given execution path.
//!
//! If the given node is not @c nullptr, a copy of the given path with the node appended will be used as the lookup
//! key.
//!
//! @thread_safety See thread safety information in interface description.
//!
//! @warning This method should be used for read only access by downstream nodes, example accessing graph state
//! when executing downstream nodes. Extra care needs to be taken if this state has to be mutated
//! concurrently.
omni::graph::exec::unstable::IExecutionStateInfo* getStateInfo(
const omni::graph::exec::unstable::ExecutionPath& path,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);
//! Returns a value from a node's key/value datastore.
//!
//! The node from which to grab data is identified by the given path and
//! node. The @p node may be @c nullptr.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is returned in @p outTypeId.
//!
//! @p outPtr will be updated with a pointer to the actual data.
//!
//! @p outItemSize store the size of each item in the returned array.
//!
//! @p outItemCount contains the number of items returned (i.e. the number
//! of items @p outPtr points to). For an array, this will be greater than
//! 1.
//!
//! If the key is not found, @p outPtr is set to @c nullptr and @p
//! outItemCount is set to 0.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
void getNodeData(const omni::graph::exec::unstable::ExecutionPath& path,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId* outTypeId,
void** outPtr,
uint64_t* outItemSize,
uint64_t* outItemCount);
//! Sets a value in a node's key/value datastore.
//!
//! The node in which to set the data is identified by the given path and
//! node. The @p node may be @c nullptr.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is set with @p typeId.
//!
//! @p data points to an array of data items.
//!
//! @p itemSize is the size of each item in the given array.
//!
//! @p itemCount contains the number of items pointed to by @p data. For an
//! array, this will be greater than 1.
//!
//! @p deleter is a function used to delete @p data when either a new value
//! is set at the key or the context is invalidated. If @p deleter is @c
//! nullptr, it is up to the calling code to manage the lifetime of the @p
//! data.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
void setNodeData(const omni::graph::exec::unstable::ExecutionPath& path,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId typeId,
void* data,
uint64_t itemSize,
uint64_t itemCount,
omni::graph::exec::unstable::NodeDataDeleterFn* deleter);
//! Discover all execution paths leading to given definition and invoke given function with each of them.
//!
//! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in
//! execution graph topology will cause invalidation of the cache.
//!
//! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as
//! from within the provided @p callback.
//!
//! This method must not be called during graph construction.
//!
//! @p def definition to look for
//! @p callback wrapped function into a callback to execute with each path to given definition
void applyOnEachDef(omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def,
omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept;
//! Discover all execution paths leading to definition with the given name and invoke the given function with each
//! of them
//!
//! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in
//! execution graph topology will cause invalidation of the cache.
//!
//! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as
//! from within the provided @p callback.
//!
//! This method must not be called during graph construction.
//!
//! @p name definition to look for
//! @p callback wrapped function into a callback to execute with each path to given definition
void applyOnEachDefWithName(const omni::graph::exec::unstable::ConstName* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Stamp omni::core::Generated<
omni::graph::exec::unstable::IExecutionContext_abi>::getExecutionStamp() noexcept
{
return getExecutionStamp_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::inExecute() noexcept
{
return inExecute_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::isExecutingThread() noexcept
{
return isExecutingThread_abi();
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::execute() noexcept
{
return execute_abi();
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::executeNode(
const omni::graph::exec::unstable::ExecutionPath& upstreamPath,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
OMNI_THROW_IF_ARG_NULL(node);
auto return_ = executeNode_abi(&upstreamPath, node.get());
return return_;
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::initialize()
{
OMNI_THROW_IF_FAILED(initialize_abi());
}
inline omni::graph::exec::unstable::IExecutionStateInfo* omni::core::
Generated<omni::graph::exec::unstable::IExecutionContext_abi>::getStateInfo(
const omni::graph::exec::unstable::ExecutionPath& path,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
omni::graph::exec::unstable::IExecutionStateInfo* out;
OMNI_THROW_IF_FAILED(getStateInfo_abi(&path, node.get(), &out));
return out;
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::getNodeData(
const omni::graph::exec::unstable::ExecutionPath& path,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId* outTypeId,
void** outPtr,
uint64_t* outItemSize,
uint64_t* outItemCount)
{
OMNI_THROW_IF_ARG_NULL(outTypeId);
OMNI_THROW_IF_ARG_NULL(outPtr);
OMNI_THROW_IF_ARG_NULL(outItemSize);
OMNI_THROW_IF_ARG_NULL(outItemCount);
OMNI_THROW_IF_FAILED(getNodeData_abi(&path, node.get(), key, outTypeId, outPtr, outItemSize, outItemCount));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::setNodeData(
const omni::graph::exec::unstable::ExecutionPath& path,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node,
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId typeId,
void* data,
uint64_t itemSize,
uint64_t itemCount,
omni::graph::exec::unstable::NodeDataDeleterFn* deleter)
{
OMNI_THROW_IF_ARG_NULL(data);
OMNI_THROW_IF_FAILED(setNodeData_abi(&path, node.get(), key, typeId, data, itemSize, itemCount, deleter));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::applyOnEachDef(
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def,
omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept
{
applyOnEachDef_abi(def.get(), callback.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>::applyOnEachDefWithName(
const omni::graph::exec::unstable::ConstName* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IApplyOnEachFunction> callback) noexcept
{
applyOnEachDefWithName_abi(name, callback.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IPassFactory.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPassFactory.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassFactory.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class IPass;
class IPassFactory;
class IPassFactory_abi;
//! Factory interface for creating @ref omni::graph::exec::unstable::IPass objects.
//!
//! Usually used in conjunction with @ref omni::graph::exec::unstable::IPassRegistry.
//!
//! See @ref omni::graph::exec::unstable::createPassFactory() to generate one of these objects from an invocable object
//! (e.g. @c std::function).
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPassFactory_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassFactory")>
{
protected:
//! Creates and returns a pass.
//!
//! The returned @ref omni::graph::exec::unstable::IPass will have @ref omni::core::IObject::acquire() called on it.
virtual OMNI_ATTR("throw_result") omni::core::Result
createPass_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilder* builder,
OMNI_ATTR("not_null, throw_if_null, out, *return") IPass** out) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IPassFactory.
using PassFactoryPtr = omni::core::ObjectPtr<IPassFactory>;
//! Generates an @ref IPassFactory from an invocable object such as a function pointer, functor, etc.
//!
//! The given function should have the signature `IPass*(IGraphBuilder*)`.
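//!
//! A minimal usage sketch (MyPass is a hypothetical @ref IPass implementation exposing a create
//! factory method):
//!
//! @code
//! auto factory = createPassFactory([](IGraphBuilder* builder) -> PassPtr
//!                                  { return MyPass::create(builder); });
//! @endcode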
template <typename Fn>
PassFactoryPtr createPassFactory(Fn&& fn);
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPassFactory.gen.h>
//! @copydoc omni::graph::exec::unstable::IPassFactory_abi
//!
//! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPassFactory
: public omni::core::Generated<omni::graph::exec::unstable::IPassFactory_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/IPass.h>
#ifndef DOXYGEN_BUILD
template <typename Fn>
omni::graph::exec::unstable::PassFactoryPtr omni::graph::exec::unstable::createPassFactory(Fn&& fn)
{
class FactoryImpl : public Implements<IPassFactory>
{
public:
FactoryImpl(Fn&& fn) : m_fn(std::move(fn))
{
}
protected:
omni::core::Result createPass_abi(IGraphBuilder* builder, IPass** out) noexcept override
{
try
{
PassPtr pass = m_fn(builder); // may throw
*out = pass.detach();
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
Fn m_fn;
};
return omni::core::steal(new FactoryImpl(std::forward<Fn>(fn)));
}
#endif // DOXYGEN_BUILD
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPassFactory.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/INodeDef.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Opaque node definition.
//!
//! Nodes are opaque because the execution framework has no knowledge of what the execution method will do and does not
//! orchestrate generation and dispatch of the tasks.
//!
//! Node definitions can be shared across multiple nodes and graphs. The implementation should leverage execution task
//! to operate within proper task state.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See also @ref omni::graph::exec::unstable::ExecutionTask, @ref omni::graph::exec::unstable::ExecutionPath.
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeDef_abi> : public omni::graph::exec::unstable::INodeDef_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeDef")
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/INodeDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INodeDef.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeDef.
#pragma once
#include <omni/graph/exec/unstable/IDef.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class INodeDef_abi;
class INodeDef;
//! Opaque node definition.
//!
//! Nodes are opaque because the execution framework has no knowledge of what the execution method will do and does not
//! orchestrate generation and dispatch of the tasks.
//!
//! Node definitions can be shared across multiple nodes and graphs. The implementation should leverage execution task
//! to operate within proper task state.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! See also @ref omni::graph::exec::unstable::ExecutionTask, @ref omni::graph::exec::unstable::ExecutionPath.
class INodeDef_abi : public omni::core::Inherits<IDef, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeDef")>
{
};
//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::INodeDef.
using NodeDefPtr = omni::core::ObjectPtr<INodeDef>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeDef.gen.h>
//! @copydoc omni::graph::exec::unstable::INodeDef_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeDef : public omni::core::Generated<omni::graph::exec::unstable::INodeDef_abi>
{
};
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeDef.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionContext.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IExecutionContext.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IExecutionContext.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Status.h>
#include <omni/graph/exec/unstable/Types.h>
#include <memory>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class ExecutionPath;
class ExecutionTask;
class IApplyOnEachFunction;
class IDef;
class IExecutionContext_abi;
class IExecutionContext;
class IExecutionStateInfo;
class INode;
//! Stores and provides access to the execution state of the graph.
//!
//! The @rstref{execution graph <ef_execution_graph>} is only a description of what needs to be executed. The actual
//! graph state is stored separately in an instance of this object.
//!
//! The execution context allows computing the same graph description within multiple contexts. It also enables the
//! ability to perform this computation concurrently. Some example use cases of this class:
//!
//! - Computing the state of a graph at a time different than the current time (e.g. asynchronous caching, fake
//! dynamics)
//!
//! - Computing the state of a graph with inputs different from the current input state (e.g. double solve)
//!
//! All execution begins with a call to one of the execute methods on this interface. @ref
//! omni::graph::exec::unstable::IExecutionContext::execute() is used to execute the entire execution graph while @ref
//! omni::graph::exec::unstable::IExecutionContext::executeNode() can be used to execute only a part of the graph.
//!
//! Part of this interface defines a key/value store. The *key* in this store is an @ref
//! omni::graph::exec::unstable::ExecutionPath. The *value* is an implementation of @ref
//! omni::graph::exec::unstable::IExecutionStateInfo, which in addition to storing computation state, can also store
//! user defined data. The computation state and user data can be access with one of the `getStateInfo` /
//! `setStateInfo` methods though if you wish to access the user data using one of the `getNodeData` / `setNodeData` is
//! slightly faster.
//!
//! Another feature of @ref omni::graph::exec::unstable::IExecutionContext is the ability to quickly search for nodes
//! using a particular definition and apply a function on them. Definitions can be searched by name or by pointer (see
//! @ref omni::graph::exec::unstable::IExecutionContext::applyOnEach()). These methods are used extensively during
//! @rstref{Graph Construction <ef_pass_concepts>}.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Since multiple threads can concurrently traverse a graph, implementors of methods within this class should expect
//! that multiple threads will be accessing this object in parallel.
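//!
//! A minimal sketch of driving a full graph evaluation (where the context instance comes from is
//! application specific):
//!
//! @code
//! Status runGraph(IExecutionContext* context)
//! {
//!     context->initialize();     // propagate initialization to graphs (may throw)
//!     return context->execute(); // evaluate the entire execution graph
//! }
//! @endcode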
class IExecutionContext_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IExecutionContext")>
{
protected:
//! Current execution version. Incremented with each execution of the context.
//!
//! @thread_safety See thread safety information in interface description.
virtual Stamp getExecutionStamp_abi() noexcept = 0;
//! Returns `true` if context is currently executing
//!
//! See thread safety information in interface description.
virtual bool inExecute_abi() noexcept = 0;
//! Returns `true` if the current thread is the one who started this context's execution.
//!
//! Note, do not assume that the thread that started the context's execution is the "main" thread.
//!
//! @thread_safety See thread safety information in interface description.
virtual bool isExecutingThread_abi() noexcept = 0;
//! Main execution method. Executes the entire execution graph.
//!
//! @thread_safety See thread safety information in interface description.
virtual Status execute_abi() noexcept = 0;
//! On-demand execution method. Executes the given node.
//!
//! @thread_safety See thread safety information in interface description.
virtual Status executeNode_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* upstreamPath,
OMNI_ATTR("not_null, throw_if_null") INode* node) noexcept = 0;
//! Context initialization. Responsible to propagate initialization to graphs.
//!
//! @thread_safety See thread safety information in interface description.
virtual OMNI_ATTR("throw_result") omni::core::Result initialize_abi() noexcept = 0;
//! Access state for a given execution path.
//!
//! If the given node is not @c nullptr, a copy of the given path with the node appended will be used as the lookup
//! key.
//!
//! @thread_safety See thread safety information in interface description.
//!
//! @warning This method should be used for read only access by downstream nodes, example accessing graph state
//! when executing downstream nodes. Extra care needs to be taken if this state has to be mutated
//! concurrently.
virtual OMNI_ATTR("throw_result") omni::core::Result
getStateInfo_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path,
INode* node,
OMNI_ATTR("out, *return, not_null, throw_if_null, *no_acquire")
IExecutionStateInfo** out) noexcept = 0;
//! Returns a value from a node's key/value datastore.
//!
//! The node from which to grab data is identified by the given path and
//! node. The @p node may be @c nullptr.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is returned in @p outTypeId.
//!
//! @p outPtr will be updated with a pointer to the actual data.
//!
//! @p outItemSize store the size of each item in the returned array.
//!
//! @p outItemCount contains the number of items returned (i.e. the number
//! of items @p outPtr points to). For an array, this will be greater than
//! 1.
//!
//! If the key is not found, @p outPtr is set to @c nullptr and @p
//! outItemCount is set to 0.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
virtual OMNI_ATTR("throw_result") omni::core::Result
getNodeData_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path,
INode* node,
NodeDataKey key,
OMNI_ATTR("out, not_null, throw_if_null") omni::core::TypeId* outTypeId,
OMNI_ATTR("out, not_null, throw_if_null, *out, *in") void** outPtr,
OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemSize,
OMNI_ATTR("out, not_null, throw_if_null") uint64_t* outItemCount) noexcept = 0;
//! Sets a value in a node's key/value datastore.
//!
//! The node in which to set the data is identified by the given path and
//! node. The @p node may be @c nullptr.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is set with @p typeId.
//!
//! @p data points to an array of data items.
//!
//! @p itemSize is the size of each item in the given array.
//!
//! @p itemCount contains the number of items pointed to by @p data. For an
//! array, this will be greater than 1.
//!
//! @p deleter is a function used to delete @p data when either a new value
//! is set at the key or the context is invalidated. If @p deleter is @c
//! nullptr, it is up to the calling code to manage the lifetime of the @p
//! data.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
virtual OMNI_ATTR("throw_result") omni::core::Result
setNodeData_abi(OMNI_ATTR("in, not_null, throw_if_null, ref") const ExecutionPath* path,
INode* node,
NodeDataKey key,
omni::core::TypeId typeId,
OMNI_ATTR("in, out, not_null, throw_if_null") void* data,
uint64_t itemSize,
uint64_t itemCount,
OMNI_ATTR("in, out") NodeDataDeleterFn* deleter) noexcept = 0;
//! Discover all execution paths leading to given definition and invoke given function with each of them.
//!
//! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in
//! execution graph topology will cause invalidation of the cache.
//!
//! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as
//! from within the provided @p callback.
//!
//! This method must not be called during graph construction.
//!
//! @p def definition to look for
//! @p callback wrapped function into a callback to execute with each path to given definition
virtual void applyOnEachDef_abi(OMNI_ATTR("not_null") IDef* def,
OMNI_ATTR("not_null") IApplyOnEachFunction* callback) noexcept = 0;
//! Discover all execution paths leading to definition with the given name and invoke the given function with each
//! of them
//!
//! Implementation of this interface will cache results to remove the traversal cost for subsequent call. Change in
//! execution graph topology will cause invalidation of the cache.
//!
//! @thread_safety This method is thread-safe and thread-efficient and can be called during execution, as well as
//! from within the provided @p callback.
//!
//! This method must not be called during graph construction.
//!
//! @p name definition to look for
//! @p callback wrapped function into a callback to execute with each path to given definition
virtual void applyOnEachDefWithName_abi(OMNI_ATTR("in, not_null") const ConstName* name,
OMNI_ATTR("not_null") IApplyOnEachFunction* callback) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IExecutionContext.
using ExecutionContextPtr = omni::core::ObjectPtr<IExecutionContext>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IExecutionContext.gen.h>
//! @copydoc omni::graph::exec::unstable::IExecutionContext_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IExecutionContext
: public omni::core::Generated<omni::graph::exec::unstable::IExecutionContext_abi>
{
public:
//! @copydoc omni::graph::exec::unstable::IExecutionContext_abi::executeNode_abi
Status execute(const ExecutionPath& path, omni::core::ObjectParam<INode> node)
{
return executeNode(path, node);
}
using Generated<IExecutionContext_abi>::execute;
//! Retrieves the state info for the given task.
//!
//! The task's upstream path and node are hashed for the lookup.
//!
//! The returned @ref omni::graph::exec::unstable::IExecutionStateInfo will
//! not have @ref omni::core::IObject::acquire() called on it.
inline IExecutionStateInfo* getStateInfo(const ExecutionTask& task);
using Generated<IExecutionContext_abi>::getStateInfo;
//! Retrieves the state info at the given execution path.
//!
//! The returned @ref omni::graph::exec::unstable::IExecutionStateInfo will
//! not have @ref omni::core::IObject::acquire() called on it.
inline IExecutionStateInfo* getStateInfo(const ExecutionPath& path);
//! Access state for a given execution path.
//!
//! See thread safety information in interface description.
template <typename T>
inline T* getStateInfoAs(const ExecutionTask& info);
//! Access state for a given execution path.
//!
//! See thread safety information in interface description.
template <typename T>
inline T* getStateInfoAs(const ExecutionPath& path);
//! Access state for a given execution path.
//!
//! See thread safety information in interface description.
template <typename T>
inline T* getStateInfoAs(const ExecutionPath& path, omni::core::ObjectParam<INode> node);
//! Returns a pointer to a value stored in a node's key/value datastore.
//!
//! The node whose key/value datastore should be used is identified by combining the given path and node. @p node
//! may be @c nullptr.
//!
//! If there is no value stored at the given @p key an empty span is returned.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! If the type @c T does not match the type of the store data, an exception is thrown.
//!
//! An exception is thrown on all other errors.
//!
//! Prefer using @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() instead of this method, which will populate the type id for
//! you.
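    //!
    //! A minimal usage sketch (MyState, myStateTypeId, and kMyKey are hypothetical):
    //!
    //! @code
    //! Span<MyState> data = context->getNodeDataAs<MyState>(myStateTypeId, path, node, kMyKey);
    //! // the span is empty if nothing is stored at kMyKey
    //! @endcode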
template <typename T>
inline Span<T> getNodeDataAs(omni::core::TypeId desiredType,
const ExecutionPath& path,
omni::core::ObjectParam<INode> node,
NodeDataKey key);
//! Returns a pointer to a value stored in a node's key/value datastore.
//!
//! The node whose key/value datastore should be used is identified by combining the given path and node in the
//! given task.
//!
//! If there is no value stored at the given @p key an empty span is returned.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! If the type @c T does not match the type of the store data, an exception is thrown.
//!
//! An exception is thrown on all other errors.
//!
//! Prefer using @ref OMNI_GRAPH_EXEC_GET_NODE_DATA_AS() instead of this method, which will populate the type id for
//! you.
template <typename T>
inline Span<T> getNodeDataAs(omni::core::TypeId desiredType, const ExecutionTask& path, NodeDataKey key);
//! Stores a value in a node's key/value datastore.
//!
//! The node whose key/value datastore should be used is identified by combining the given path and node. @p node
//! may be @c nullptr.
//!
//! If a value is already stored at the given @p key it will be replaced.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all errors.
//!
//! Prefer using @ref OMNI_GRAPH_EXEC_SET_NODE_DATA() instead of this method, which will populate the type id for
//! you.
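    //!
    //! A minimal usage sketch (MyState, myStateTypeId, and kMyKey are hypothetical; ownership of the
    //! value transfers to the datastore):
    //!
    //! @code
    //! context->setNodeData<MyState>(myStateTypeId, path, node, kMyKey, std::make_unique<MyState>());
    //! @endcode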
template <typename SpecifiedT, typename DataT>
inline void setNodeData(omni::core::TypeId itemType,
const ExecutionPath& path,
omni::core::ObjectParam<INode> node,
NodeDataKey key,
std::unique_ptr<DataT> data);
//! Stores a value in a node's key/value datastore.
//!
//! The node whose key/value datastore should be used is identified by combining the given path and node in the
//! given task.
//!
//! If a value is already stored at the given @p key it will be replaced.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all errors.
//!
//! Prefer using @ref OMNI_GRAPH_EXEC_SET_NODE_DATA() instead of this method, which will populate the type id for
//! you.
template <typename SpecifiedT, typename DataT>
inline void setNodeData(omni::core::TypeId itemType,
const ExecutionTask& path,
NodeDataKey key,
std::unique_ptr<DataT> data);
//! Discover all execution paths leading to given definition and invoke given function with each of them
//!
//! This inline implementation wraps lambda into IApplyOnEachFunction
//!
//! The supplied function should have the signature of `void(const ExecutionPath&)`.
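    //!
    //! A minimal usage sketch (myDef is a hypothetical definition pointer):
    //!
    //! @code
    //! context->applyOnEach(myDef,
    //!                      [](const ExecutionPath& path)
    //!                      {
    //!                          // inspect or invalidate the state stored at this path
    //!                      });
    //! @endcode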
template <typename FN>
inline void applyOnEach(omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def, FN&& callback);
//! Discover all execution paths leading to definition with the given name and invoke the given function with each
//! of them
//!
//! This inline implementation wraps lambda into IApplyOnEachFunction
//!
//! The supplied function should have the signature of `void(const ExecutionPath&)`.
template <typename FN>
inline void applyOnEach(const ConstName& name, FN&& callback);
};
#include <omni/graph/exec/unstable/ExecutionPath.h>
#include <omni/graph/exec/unstable/ExecutionTask.h>
#include <omni/graph/exec/unstable/IApplyOnEachFunction.h>
#include <omni/graph/exec/unstable/IDef.h>
#include <omni/graph/exec/unstable/IExecutionStateInfo.h>
#include <omni/graph/exec/unstable/INode.h>
#ifndef DOXYGEN_BUILD // templates are hard to understand, even for doxygen...
inline omni::graph::exec::unstable::IExecutionStateInfo* omni::graph::exec::unstable::IExecutionContext::getStateInfo(
const ExecutionTask& info)
{
IExecutionStateInfo* out;
OMNI_THROW_IF_FAILED(getStateInfo_abi(&(info.getUpstreamPath()), info.getNode(), &out));
return out;
}
inline omni::graph::exec::unstable::IExecutionStateInfo* omni::graph::exec::unstable::IExecutionContext::getStateInfo(
const ExecutionPath& path)
{
IExecutionStateInfo* out;
OMNI_THROW_IF_FAILED(getStateInfo_abi(&path, nullptr, &out));
return out;
}
template <typename T>
inline T* omni::graph::exec::unstable::IExecutionContext::getStateInfoAs(const ExecutionTask& info)
{
IExecutionStateInfo* out;
OMNI_THROW_IF_FAILED(getStateInfo_abi(&(info.getUpstreamPath()), info.getNode(), &out));
return omni::graph::exec::unstable::cast<T>(out);
}
template <typename T>
inline T* omni::graph::exec::unstable::IExecutionContext::getStateInfoAs(const ExecutionPath& path)
{
IExecutionStateInfo* out;
OMNI_THROW_IF_FAILED(getStateInfo_abi(&path, nullptr, &out));
return omni::graph::exec::unstable::cast<T>(out);
}
template <typename T>
inline T* omni::graph::exec::unstable::IExecutionContext::getStateInfoAs(const ExecutionPath& path,
omni::core::ObjectParam<INode> node)
{
IExecutionStateInfo* out;
OMNI_THROW_IF_FAILED(getStateInfo_abi(&path, node.get(), &out));
return omni::graph::exec::unstable::cast<T>(out);
}
template <typename T>
inline omni::graph::exec::unstable::Span<T> omni::graph::exec::unstable::IExecutionContext::getNodeDataAs(
omni::core::TypeId desiredType, const ExecutionPath& path, omni::core::ObjectParam<INode> node, NodeDataKey key)
{
omni::core::TypeId outType;
void* outPtr;
uint64_t outItemSize, outItemCount;
OMNI_THROW_IF_FAILED(getNodeData_abi(&path, node.get(), key, &outType, &outPtr, &outItemSize, &outItemCount));
if (outPtr)
{
if (outType != desiredType)
{
throw omni::core::ResultError(omni::core::kResultInvalidDataType);
}
if (outItemSize != sizeof(T))
{
throw omni::core::ResultError(omni::core::kResultInvalidDataSize);
}
}
return Span<T>{ reinterpret_cast<T*>(outPtr), outItemCount };
}
template <typename T>
inline omni::graph::exec::unstable::Span<T> omni::graph::exec::unstable::IExecutionContext::getNodeDataAs(
omni::core::TypeId desiredType, const ExecutionTask& info, NodeDataKey key)
{
return getNodeDataAs<T>(desiredType, info.getUpstreamPath(), info.getNode(), key);
}
template <typename SpecifiedT, typename DataT>
inline void omni::graph::exec::unstable::IExecutionContext::setNodeData(omni::core::TypeId itemType,
const ExecutionPath& path,
omni::core::ObjectParam<INode> node,
NodeDataKey key,
std::unique_ptr<DataT> data)
{
static_assert(std::is_same<SpecifiedT, DataT>::value, "given TypeId does not match the data type");
static_assert(!std::is_array<DataT>::value, "setting arrays as node data via unique_ptr not yet implemented");
OMNI_THROW_IF_FAILED(setNodeData_abi(&path, node.get(), key, itemType, data.get(), sizeof(DataT), 1,
[](void* p)
{
typename std::unique_ptr<DataT>::deleter_type deleter;
deleter(reinterpret_cast<DataT*>(p));
}));
data.release(); // now safe to release ownership
}
template <typename SpecifiedT, typename DataT>
inline void omni::graph::exec::unstable::IExecutionContext::setNodeData(omni::core::TypeId itemType,
const ExecutionTask& task,
NodeDataKey key,
std::unique_ptr<DataT> data)
{
return setNodeData<SpecifiedT>(itemType, task.getUpstreamPath(), task.getNode(), key, std::move(data));
}
template <typename FN>
inline void omni::graph::exec::unstable::IExecutionContext::applyOnEach(
omni::core::ObjectParam<omni::graph::exec::unstable::IDef> def, FN&& callback)
{
class Forwarder : public Implements<IApplyOnEachFunction>
{
public:
Forwarder(FN&& fn) : m_fn(std::move(fn))
{
}
protected:
void invoke_abi(const ExecutionPath* path) noexcept override
{
m_fn(*path);
}
FN m_fn;
};
applyOnEachDef_abi(def.get(), omni::core::steal(new Forwarder(std::forward<FN>(callback))).get());
}
template <typename FN>
inline void omni::graph::exec::unstable::IExecutionContext::applyOnEach(const ConstName& name, FN&& callback)
{
class Forwarder : public Implements<IApplyOnEachFunction>
{
public:
Forwarder(FN&& fn) : m_fn(std::move(fn))
{
}
protected:
void invoke_abi(const ExecutionPath* path) noexcept override
{
m_fn(*path);
}
FN m_fn;
};
applyOnEachDefWithName_abi(&name, omni::core::steal(new Forwarder(std::forward<FN>(callback))).get());
}
#endif // DOXYGEN_BUILD
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IExecutionContext.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResultWriter.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IBackgroundResultWriter.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IBackgroundResultWriter.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionTask;
class IBackgroundResultWriter;
class IBackgroundResultWriter_abi;
//! Functor interface used to write the result of a background task.
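//!
//! A minimal implementation sketch (MyWriter is hypothetical; the exact Status value to return is
//! an assumption):
//!
//! @code
//! class MyWriter : public Implements<IBackgroundResultWriter>
//! {
//! protected:
//!     Status write_abi(ExecutionTask* info) noexcept override
//!     {
//!         // copy the background computation's result into the task's state
//!         return Status::eSuccess;
//!     }
//! };
//! @endcode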
class IBackgroundResultWriter_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IBackgroundResultWriter")>
{
protected:
//! Write the result.
virtual Status write_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IBackgroundResultWriter.
using BackgroundResultWriterPtr = omni::core::ObjectPtr<IBackgroundResultWriter>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IBackgroundResultWriter.gen.h>
//! @copydoc omni::graph::exec::unstable::IBackgroundResultWriter_abi
class omni::graph::exec::unstable::IBackgroundResultWriter
: public omni::core::Generated<omni::graph::exec::unstable::IBackgroundResultWriter_abi>
{
};
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IBackgroundResultWriter.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/PassPipeline.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file PassPipeline.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::PassPipeline.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGlobalPass.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/IPartitionPass.h>
#include <omni/graph/exec/unstable/IPassPipeline.h>
#include <omni/graph/exec/unstable/IPassRegistry.h>
#include <omni/graph/exec/unstable/IPassTypeRegistry.h>
#include <omni/graph/exec/unstable/IPopulatePass.h>
#include <omni/graph/exec/unstable/Traversal.h>
#include <omni/graph/exec/unstable/Types.h>
#include <memory>
#include <unordered_map>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IPassPipeline
template <typename GraphBuilderT, typename... Bases>
class PassPipelineT : public Implements<Bases...>
{
public:
//! Creates and returns a @ref IPassPipeline.
//!
//! May throw.
static omni::core::ObjectPtr<PassPipelineT> create()
{
return omni::core::steal(new PassPipelineT);
}
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::needsConstruction_abi
bool needsConstruction_abi() noexcept override
{
return !m_registryCache.inSync(getPassRegistry()->getStamp());
}
//! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::construct_abi
omni::core::Result construct_abi() noexcept override
{
try
{
auto registry = getPassRegistry();
if (m_registryCache.makeSync(registry->getStamp()))
{
m_populatePasses.clear();
auto populatePasses = registry->getPassTypeRegistry(PassType::ePopulate);
OMNI_GRAPH_EXEC_ASSERT(populatePasses);
if (populatePasses)
{
for (auto& record : populatePasses->getPasses())
{
m_populatePasses.emplace(record.nameToMatch->getHash(), record); // may throw
}
}
m_partitionPasses.clear();
auto partitionPasses = registry->getPassTypeRegistry(PassType::ePartitioning);
OMNI_GRAPH_EXEC_ASSERT(partitionPasses);
if (partitionPasses)
{
for (auto& record : partitionPasses->getPasses())
{
_insert_sorted(m_partitionPasses, record,
[](const PassTypeRegistryEntry& a, const PassTypeRegistryEntry& b) -> bool
{ return a.priority > b.priority; }); // may throw
}
}
}
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::needsExecute_abi
bool needsExecute_abi(Stamp globalTopology) noexcept override
{
return !m_globalTopology.inSync(globalTopology);
}
//! Core implementation of @ref omni::graph::exec::unstable::IPassPipeline::execute_abi
//!
//! Acceleration structure is generated on changes to @ref omni::graph::exec::unstable::IPassRegistry
//! to organize passes for this pipeline.
//!
    //! @note Passes are executed single-threaded in the core implementation because we have no access to a rich
    //! threading library in OV. A parallel version is available in omni.kit.exec.core.
omni::core::Result execute_abi(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef) noexcept override
{
try
{
auto globalTopologyStamp = builderContext->getGraph()->getGlobalTopologyStamp();
if (m_globalTopology.inSync(*globalTopologyStamp))
{
return omni::core::kResultSuccess; // already in sync, nothing to do
}
if (this->needsConstruction())
{
this->construct(); // may throw
}
_runPopulatePass(builderContext, nodeGraphDef);
_runGlobalPass(builderContext, nodeGraphDef);
m_globalTopology.sync(*globalTopologyStamp);
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION();
}
//! Execute populate passes
//!
    //! The algorithm traverses the graph serially in DFS order. For each visited node, it gives the
    //! registered population passes a chance to populate the definition of the discovered
    //! @ref omni::graph::exec::unstable::INode or @ref omni::graph::exec::unstable::INodeGraphDef.
    //!
    //! To preserve instancing within a single NodeGraphDef, the algorithm keeps track of discovered
    //! NodeGraphDefs and populates/continues traversal of only the first visited node that instantiates each one.
    //! Later, when the internal state goes out of scope, all instances are updated to point to the same definition.
    //!
    //! This algorithm is written to mirror the multithreaded version from omni.kit.exec.core, and
    //! the goal is to replace it with the multithreaded version once OM-70769 is closed.
void _runPopulatePass(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
{
class PassState
{
struct Record
{
INode* orgNode{ nullptr };
// This definition might have been released if it is only single instance
// and it was changed during graph transformation. That's ok, since we will
// only use the address of the pointer, and only if other instances exist
// (if so, they will own a reference to this pointer making it still valid)
INodeGraphDef* orgNodeGraphDef{ nullptr };
// Definitions are referenced by nodes in the graph, but can also be held
// by the authoring side for fast access. We allocate a small local buffer
// to avoid the dynamic allocation cost when reserving space upfront for all
// instances based on use count.
SmallVector<INode*, 2> accumulatedInstances;
static_assert(
sizeof(accumulatedInstances) == 24,
"Expecting sizeof(SmallVector<INode*, 2>) to be 24 bytes and match sizeof an empty std::vector");
Record(INode* instance, INodeGraphDef* nodeGraphDef) : orgNode(instance), orgNodeGraphDef(nodeGraphDef)
{
accumulatedInstances.reserve(useCount(nodeGraphDef));
}
void addInstance(INode* instance)
{
accumulatedInstances.emplace_back(instance);
}
void processInstances()
{
if (accumulatedInstances.size() == 0)
return;
if (auto newNodeDef = orgNode->getNodeDef())
{
for (auto node : accumulatedInstances)
{
exec::unstable::cast<IGraphBuilderNode>(node)->_setNodeDef(newNodeDef);
}
}
else
{
auto newNodeGraphDef = orgNode->getNodeGraphDef();
if (newNodeGraphDef != orgNodeGraphDef)
{
for (auto node : accumulatedInstances)
{
exec::unstable::cast<IGraphBuilderNode>(node)->_setNodeGraphDef(newNodeGraphDef);
}
}
}
}
};
public:
PassState(PassPipelineT* owner, IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
: m_passPipeline(owner)
{
m_builder = GraphBuilder::createForPass(builderContext, nodeGraphDef);
}
~PassState()
{
// Update instances
for (auto& pair : m_registry)
{
pair.second.processInstances();
}
// If this NodeGraphDef changed in this population pass, run the partitioning pass on it
auto* nodeGraphDef = m_builder->getNodeGraphDef();
auto* builderContext = m_builder->getContext();
auto constructionStamp = nodeGraphDef->getTopology()->getConstructionStamp();
if (constructionStamp.inSync(builderContext->getConstructionStamp()))
m_passPipeline->_runPartitionPass(builderContext, nodeGraphDef);
}
IGraphBuilder* getBuilder()
{
return m_builder.get();
}
//! Store a record of a node graph def when multiple instances reference it.
//!
//! Returns true if a new record was generated for this NodeGraphDef.
//! It also returns true when nodeGraphDef has only a single instance referencing it. For performance
//! reasons we don't store such a record in the container, which reduces the cost of processing instances.
bool addRecord(INode* instance, INodeGraphDef* nodeGraphDef)
{
auto foundIt = m_registry.find(nodeGraphDef);
if (foundIt == m_registry.end())
{
    m_registry.emplace(std::piecewise_construct, std::forward_as_tuple(nodeGraphDef),
                       std::forward_as_tuple(instance, nodeGraphDef));
    return true;
}
else
{
    foundIt->second.addInstance(instance);
    return false;
}
}
private:
PassPipelineT* m_passPipeline;
GraphBuilderPtr m_builder;
using Container = std::unordered_map<INodeGraphDef*, Record>;
Container m_registry;
};
auto traversalFnImp = [this, builderContext](INodeGraphDef* nodeGraphDef, auto& fn) -> void
{
PassState nodeGraphDefPassState(this, builderContext, nodeGraphDef);
traversal_dfs<VisitFirst>(
nodeGraphDef->getTopology()->getRoot(),
[this, &nodeGraphDefPassState, &fn](auto info, INode* prev, INode* curr)
{
bool processNodeGraphDef = true;
auto builder = nodeGraphDefPassState.getBuilder();
auto foundIt = this->m_populatePasses.find(curr->getName().getHash());
if (foundIt != this->m_populatePasses.end())
{
auto newPass = foundIt->second.factory->createPass(builder);
auto newPopulatePass = exec::unstable::cast<IPopulatePass>(newPass);
newPopulatePass->run(builder, curr);
}
else if (auto currNodeGraphDef = curr->getNodeGraphDef())
{
auto foundIt = this->m_populatePasses.find(currNodeGraphDef->getName().getHash());
if (foundIt != this->m_populatePasses.end())
{
processNodeGraphDef = nodeGraphDefPassState.addRecord(curr, currNodeGraphDef);
if (processNodeGraphDef)
{
auto newPass = foundIt->second.factory->createPass(builder);
auto newPopulatePass = exec::unstable::cast<IPopulatePass>(newPass);
newPopulatePass->run(builder, curr);
}
}
}
if (processNodeGraphDef)
{
                          // re-acquire the node graph def (because a pass might have
                          // expanded it) and traverse inside
if (auto nodeGraphDef = curr->getNodeGraphDef())
{
fn(nodeGraphDef, fn);
}
}
info.continueVisit(curr);
});
        // nodeGraphDefPassState is going out of scope and will trigger additional work.
        // The code is structured this way to ease a future multithreaded execution
        // where the partitioning pass should run when the last reference to the state is removed.
};
traversalFnImp(nodeGraphDef, traversalFnImp);
}
//! Execute partition passes
void _runPartitionPass(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
{
        // Partition passes require NodeGraphDefs to provide a node factory. Skip ones that don't
        // implement it.
if (!nodeGraphDef->getNodeFactory())
{
return;
}
auto builder{ GraphBuilderT::createForPass(builderContext, nodeGraphDef) };
INode* root = nodeGraphDef->getTopology()->getRoot();
std::vector<PartitionPassPtr> passInstances;
passInstances.reserve(m_partitionPasses.size());
// Initialize partitioning passes
for (auto& record : m_partitionPasses)
{
auto newPass = record.factory->createPass(builder);
auto newPartitionPass = exec::unstable::cast<IPartitionPass>(newPass);
if (newPartitionPass->initialize(nodeGraphDef->getTopology()))
{
passInstances.emplace_back(newPartitionPass, omni::core::kBorrow);
}
}
// No need to do the traversal if nothing initialized for this node graph def
if (passInstances.size() == 0)
return;
// Select nodes for partitioning
traversal_dfs<VisitFirst>(root,
[this, &passInstances](auto info, INode* prev, INode* curr)
{
for (auto& pass : passInstances)
{
pass->run(curr);
}
info.continueVisit(curr);
});
// commit changes to the definition
for (auto& pass : passInstances)
{
pass->commit(builder);
}
}
//! Execute global passes
void _runGlobalPass(IGraphBuilderContext* builderContext, INodeGraphDef* nodeGraphDef)
{
auto builder{ GraphBuilderT::createForPass(builderContext, nodeGraphDef) };
auto registry = getPassRegistry();
auto globalPasses = registry->getPassTypeRegistry(PassType::eGlobal);
for (auto& record : globalPasses->getPasses())
{
auto newPass = record.factory->createPass(builder); // may throw
auto newGlobalPass = exec::unstable::cast<IGlobalPass>(newPass);
newGlobalPass->run(builder);
}
}
//! Helper function to insert an item into a vector in sorted order.
template <typename T, typename Compare>
typename std::vector<T>::iterator _insert_sorted(std::vector<T>& vec, T const& item, Compare comp)
{
return vec.insert(std::upper_bound(vec.begin(), vec.end(), item, comp), item);
}
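    // A hedged illustration of _insert_sorted (the int vector is hypothetical;
    // in this pipeline the helper keeps partition passes ordered from highest
    // priority to lowest):
    //
    //   std::vector<int> v{ 9, 5, 1 };
    //   _insert_sorted(v, 7, std::greater<int>()); // v becomes { 9, 7, 5, 1 }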
//! Type of acceleration structure holding population passes
using PopulatePassCache = std::unordered_map<NameHash, PassTypeRegistryEntry>;
    SyncStamp m_registryCache; //!< Synchronized with the pass registry version; allows us to detect changes
    SyncStamp m_globalTopology; //!< Synchronized with the execution graph topology; allows us to detect changes
                                //!< that require executing the pipeline
PopulatePassCache m_populatePasses; //!< Acceleration structure for this pipeline to speed up the searches
std::vector<PassTypeRegistryEntry> m_partitionPasses; //!< Acceleration structure to keep passes ordered for this
//!< pipeline. We order from the highest priority to the
//!< lowest.
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#include <omni/graph/exec/unstable/GraphBuilder.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Core PassPipeline implementation for @ref omni::graph::exec::unstable::IPassPipeline
using PassPipeline = PassPipelineT<GraphBuilder, IPassPipeline>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 17,498 | C | 40.664286 | 119 | 0.574809 |
omniverse-code/kit/include/omni/graph/exec/unstable/IScheduleFunction.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IScheduleFunction.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IScheduleFunction.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IScheduleFunction_abi;
class IScheduleFunction;
template <typename T>
class ScheduleFunction;
//! Interface wrapping a function (possibly with storage). Used to wrap a task when passing generated work to the
//! scheduler.
class IScheduleFunction_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IScheduleFunction")>
{
protected:
//! Main execute method. Returning status of the execution.
virtual Status invoke_abi() noexcept = 0;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IScheduleFunction.gen.h>
//! @copydoc omni::graph::exec::unstable::IScheduleFunction_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IScheduleFunction
: public omni::core::Generated<omni::graph::exec::unstable::IScheduleFunction_abi>
{
};
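// A minimal, hedged usage sketch: invoking wrapped work handed to a scheduler.
// `fn` is assumed to be an IScheduleFunction* provided by a scheduler
// integration, and the Status::eSuccess enumerator is assumed from Status.h:
//
//   omni::graph::exec::unstable::Status status = fn->invoke();
//   if (status != omni::graph::exec::unstable::Status::eSuccess)
//   { /* propagate failure/deferral back to the scheduler */ }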
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IScheduleFunction.gen.h>
| 1,891 | C | 30.016393 | 117 | 0.744051 |
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDef.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Graph definition. Defines work to be done as a graph.
//!
//! Nodes within a graph represent work to be done. The actual work to be performed is described in a
//! @rstref{definition <ef_definition>}. Each node wanting to perform work points to a definition.
//!
//! This interface is a subclass of the work definition interface (i.e. @ref omni::graph::exec::unstable::IDef) and
//! extends @ref omni::graph::exec::unstable::IDef with methods to describe work as a graph.
//!
//! Visually:
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//! :align: center
//!
//! @endrst
//!
//! Above, you can see the two types of definitions: opaque definitions (described by @ref
//! omni::graph::exec::unstable::INodeDef) and graph definitions (described by this interface).
//!
//! Nodes within a graph definition can point to other graph definitions. This composability is where EF gets its *graph
//! of graphs* moniker.
//!
//! Multiple nodes in the execution graph can point to the same instance of a graph definition. This saves both space
//! and graph construction time. However, since each graph definition can be shared, its pointer value cannot be used
//! to uniquely identify its location in the graph. To solve this, when traversing/executing a graph definition, an
//! @ref omni::graph::exec::unstable::ExecutionPath is passed (usually via @ref
//! omni::graph::exec::unstable::ExecutionTask::getUpstreamPath()).
//!
//! When defining new graph types, it is common to create a new implementation of this interface. See @ref
//! omni::graph::exec::unstable::NodeGraphDef for an implementation of this interface that can be easily inherited from.
//! See @rstref{Definition Creation <ef_definition_creation>} for a guide on creating your own graph definition.
//!
//! How a graph definition's nodes are traversed during execution is defined by the definition's @ref
//! omni::graph::exec::unstable::IExecutor. See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth
//! guide on how executors and graph definitions work together during execution.
//!
//! See also @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::IExecutor, and @ref
//! omni::graph::exec::unstable::ExecutionTask.
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>
: public omni::graph::exec::unstable::INodeGraphDef_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeGraphDef")
//! Return this graph's topology object.
//!
//! Each @ref omni::graph::exec::unstable::INodeGraphDef owns a @ref omni::graph::exec::unstable::ITopology.
//!
//! The returned @ref omni::graph::exec::unstable::ITopology. will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
omni::graph::exec::unstable::ITopology* getTopology() noexcept;
//! Initialize the state of the graph.
//!
//! It is up to the implementation of the graph type to decide whether this call needs to be propagated over all
//! nodes within the graph or a single shared state is owned by the graph.
//!
//! @param rootTask State will be initialized for every instance of this graph. Root task will provide a path to
//! allow discovery of the state. Must not be @c nullptr.
void initializeState(omni::graph::exec::unstable::ExecutionTask& rootTask);
//! Pre-execution call can be used to setup the graph state prior to execution or skip entirely the execution.
//!
//! The given task must not be @c nullptr.
omni::graph::exec::unstable::Status preExecute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;
//! Post-execution call can be used to finalize the execution, e.g. transfer computation results to consumers.
//!
//! The given task must not be @c nullptr.
omni::graph::exec::unstable::Status postExecute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;
//! Acquire factory object allowing for allocating new node instances for this node graph def.
//!
    //! Provided factory may be empty when the graph def doesn't allow allocating new nodes outside of the pass that
    //! constructed the definition in the first place.
//!
    //! Accessing the node factory is thread-safe, but mutating the graph's topology is not. This includes node creation.
omni::core::ObjectPtr<omni::graph::exec::unstable::INodeFactory> getNodeFactory() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<
omni::graph::exec::unstable::INodeGraphDef_abi>::getTopology() noexcept
{
return getTopology_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>::initializeState(
omni::graph::exec::unstable::ExecutionTask& rootTask)
{
OMNI_THROW_IF_FAILED(initializeState_abi(&rootTask));
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>::preExecute(
omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
return preExecute_abi(&info);
}
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>::postExecute(
omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
return postExecute_abi(&info);
}
inline omni::core::ObjectPtr<omni::graph::exec::unstable::INodeFactory> omni::core::Generated<
omni::graph::exec::unstable::INodeGraphDef_abi>::getNodeFactory() noexcept
{
return omni::core::steal(getNodeFactory_abi());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 6,503 | C | 43.244898 | 126 | 0.720129 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundResult.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IBackgroundResult.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IBackgroundResult.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Status.h>
#include <omni/graph/exec/unstable/Types.h>
#include <chrono>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class ExecutionTask;
class IBackgroundResult;
class IBackgroundResult_abi;
//! Class representing a result of asynchronous computation.
//!
//! Create via @ref omni::graph::exec::unstable::IBackgroundTask::getBackgroundResult().
//!
//! Call @ref omni::graph::exec::unstable::IBackgroundResult::isReady() or @ref
//! omni::graph::exec::unstable::IBackgroundResult::waitFor() to make sure the result is ready. Once the result is
//! ready, call @ref omni::graph::exec::unstable::IBackgroundResult::write() to make the result visible.
//!
//! Operates much like `std::future`.
class IBackgroundResult_abi : public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IBackgroundResult")>
{
protected:
//! Check if background computation has a result available for consumption.
//!
//! @return @c true when it is safe to call omni::graph::exec::unstable::IBackgroundResult::write(), @c false
//! otherwise.
//!
//! Once @ref omni::graph::exec::unstable::IBackgroundResult::write() has been called, this method will return an
//! error.
//!
//! This method is not thread safe.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
isReady_abi(OMNI_ATTR("not_null, throw_if_null, out, *return") bool* ready) noexcept = 0;
//! Request background processing cancellation
//!
//! @param blocking If @c true, this call won't exit until background processing is completed.
//!
//! This method is not thread safe.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result cancel_abi(bool blocking) noexcept = 0;
//! Write the result.
//!
//! This method is not thread safe.
//!
//! An error is returned if this method is called more than once.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
write_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info,
OMNI_ATTR("out, not_null, throw_if_null, *return") Status* out) noexcept = 0;
//! Waits for the specified time for the result to become ready.
//!
//! If the result becomes ready in the specified time (or is already ready) @ref
//! omni::graph::exec::unstable::BackgroundResultStatus::eReady is returned. Otherwise, @ref
//! omni::graph::exec::unstable::BackgroundResultStatus::eTimeout is returned.
//!
//! This method is not thread safe.
//!
//! Returns an error if the result has already been consumed.
//!
//! May throw.
virtual OMNI_ATTR("throw_result") omni::core::Result
waitFor_abi(uint64_t nanoseconds,
OMNI_ATTR("out, not_null, throw_if_null, *return") BackgroundResultStatus* out) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IBackgroundResult.
using BackgroundResultPtr = omni::core::ObjectPtr<IBackgroundResult>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IBackgroundResult.gen.h>
//! @copydoc omni::graph::exec::unstable::IBackgroundResult_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IBackgroundResult
: public omni::core::Generated<omni::graph::exec::unstable::IBackgroundResult_abi>
{
public:
    //! Waits for the specified time for the result to become ready.
//!
//! See @ref IBackgroundResult_abi::waitFor_abi().
template <typename Rep, typename Period>
BackgroundResultStatus waitFor(std::chrono::duration<Rep, Period> duration)
{
return waitFor(std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count());
}
using omni::core::Generated<IBackgroundResult_abi>::waitFor;
};
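// A hedged usage sketch (illustrative; `result` is assumed to come from
// IBackgroundTask::getBackgroundResult() and `task` is the in-flight
// ExecutionTask):
//
//   using namespace omni::graph::exec::unstable;
//   if (result->waitFor(std::chrono::milliseconds(2)) == BackgroundResultStatus::eReady)
//   {
//       Status status = result->write(task); // consume and publish the result
//   }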
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IBackgroundResult.gen.h>
| 4,877 | C | 36.236641 | 117 | 0.692434 |
omniverse-code/kit/include/omni/graph/exec/unstable/Topology.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Topology.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Topology.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/CompactUniqueIndex.h>
#include <omni/graph/exec/unstable/ITopology.h>
#include <omni/graph/exec/unstable/Node.h>
#include <memory>
#include <unordered_map>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::ITopology
class Topology : public Implements<ITopology>
{
public:
//! Creates a new topology.
//!
//! May throw.
static omni::core::ObjectPtr<Topology> create(const char* rootDebugName)
{
OMNI_THROW_IF_ARG_NULL(rootDebugName);
return omni::core::steal(new Topology(rootDebugName));
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::getNodeCount_abi
uint64_t getNodeCount_abi() noexcept override
{
return m_nodeIndexes.size();
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::getRoot_abi
INode* getRoot_abi() noexcept override
{
return m_root.get();
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::getStamp_abi
Stamp getStamp_abi() noexcept override
{
return m_version;
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::invalidate_abi
void invalidate_abi() noexcept override
{
if (isValid())
{
m_version.next();
_forwardInvalidation();
}
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::acquireNodeIndex_abi
omni::core::Result acquireNodeIndex_abi(NodeIndexInTopology* out) noexcept override
{
try
{
*out = m_nodeIndexes.acquireUniqueIndex(); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::releaseNodeIndex_abi
void releaseNodeIndex_abi(NodeIndexInTopology index) noexcept override
{
m_nodeIndexes.releaseUniqueIndex(index);
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::addInvalidationForwarder_abi
virtual omni::core::Result addInvalidationForwarder_abi(InvalidationForwarderId owner,
IInvalidationForwarder* callback) noexcept override
{
try
{
m_invalidationForwarders.emplace( // may throw
std::piecewise_construct, std::forward_as_tuple(owner),
std::forward_as_tuple(callback, omni::core::kBorrow));
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! Core implementation of @ref omni::graph::exec::unstable::ITopology::removeInvalidationForwarder_abi
void removeInvalidationForwarder_abi(InvalidationForwarderId owner) noexcept override
{
auto foundIt = m_invalidationForwarders.find(owner);
if (foundIt != m_invalidationForwarders.end())
{
m_invalidationForwarders.erase(foundIt);
}
}
    //! Core implementation of @ref omni::graph::exec::unstable::ITopology::getConstructionStamp_abi
SyncStamp getConstructionStamp_abi() noexcept override
{
return m_constructionStamp;
}
    //! Core implementation of @ref omni::graph::exec::unstable::ITopology::_setConstructionInSync_abi
void _setConstructionInSync_abi(Stamp toSync) noexcept override
{
m_constructionStamp.sync(toSync);
}
//! Constructor
explicit Topology(const char* rootDebugName) : m_root{ Node::create(this, rootDebugName) }
{
static Stamp sTopologyVersion;
sTopologyVersion.next();
m_version = sTopologyVersion;
}
//! Destructor
virtual ~Topology()
{
        // prevent the root node's ~Node() from trying to invalidate the topology
m_version.next();
}
private:
//! Invoke invalidation forwarders
void _forwardInvalidation()
{
for (auto& pair : m_invalidationForwarders)
{
pair.second->invoke(this);
}
}
CompactUniqueIndex m_nodeIndexes; //!< Compact registry of nodes unique indexes
omni::core::ObjectPtr<INode> m_root; //!< Root node allowing to discover all nodes within the current topology
Stamp m_version; //!< Topology version used by nodes to detect if they belong to current topology version
SyncStamp m_constructionStamp; //!< Synchronized with @ref omni::graph::exec::unstable::IGraphBuilderContext.
//!< Allows detecting in which construction pass this topology was altered.
//! Array of functions to call on topology invalidation
std::unordered_map<InvalidationForwarderId, omni::core::ObjectPtr<IInvalidationForwarder>> m_invalidationForwarders;
};
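// A hedged usage sketch (illustrative only; the forwarder id value is made up,
// and ITopology's convenience wrapper accepting an invocable is assumed here):
//
//   auto topology = omni::graph::exec::unstable::Topology::create("root");
//   topology->addInvalidationForwarder(
//       InvalidationForwarderId(1), [](ITopology* t) { /* drop caches */ });
//   topology->invalidate(); // bumps the stamp and runs forwarders once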
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 5,563 | C | 32.721212 | 120 | 0.669782 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBase.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IBase.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IBase.
#pragma once
#include <omni/core/IObject.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>
#include <omni/graph/exec/unstable/Types.h>
//! @defgroup groupOmniGraphExecInterfaces API Interfaces
//!
//! @brief Convenience interfaces backed by a stable ABI.
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IBase;
class IBase_abi;
class ExecutionTask;
//! Base class for all @ref omni::graph::exec objects.
//!
//! Defines an interface for casting between objects without calling @ref omni::core::IObject::acquire().
class IBase_abi : public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.exec.unstable.IBase")>
{
protected:
    //! Casts this object to the type described by the given id.
//!
//! Returns @c nullptr if the cast was not successful.
//!
//! Unlike @ref omni::core::IObject::cast(), this casting method does not call @ref omni::core::IObject::acquire().
//!
//! @thread_safety This method is thread safe.
virtual void* castWithoutAcquire_abi(omni::core::TypeId id) noexcept = 0;
//! Returns the number of different instances (this included) referencing the current object.
//!
//! @thread_safety This method is thread safe.
virtual uint32_t getUseCount_abi() noexcept = 0;
};
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IBase.gen.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IBase_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class IBase : public omni::core::Generated<omni::graph::exec::unstable::IBase_abi>
{
};
//! Casts the given pointer to the given interface (e.g. T).
//!
//! `nullptr` is accepted.
//!
//! Unlike @ref omni::core::cast(), this function does not call @ref omni::core::IObject::acquire() on the returned
//! pointer.
//!
//! @returns A valid pointer is returned if the given pointer implements the given interface. Otherwise, `nullptr` is
//! returned.
template <typename T, typename U>
inline T* cast(U* ptr) noexcept
{
static_assert(std::is_base_of<IBase, T>::value, "cast can only be used with classes that derive from IBase");
if (ptr)
{
return reinterpret_cast<T*>(ptr->castWithoutAcquire(T::kTypeId));
}
else
{
return nullptr;
}
}
//! Casts the given pointer to the given interface (e.g. T).
//!
//! `nullptr` is accepted.
//!
//! Unlike @ref omni::core::cast(), this function does not call @ref omni::core::IObject::acquire() on the returned
//! pointer.
//!
//! @returns A valid pointer is returned if the given pointer implements the given interface. Otherwise, `nullptr` is
//! returned.
template <typename T, typename U>
inline T* cast(omni::core::ObjectParam<U> ptr) noexcept
{
static_assert(std::is_base_of<IBase, T>::value, "cast can only be used with classes that derive from IBase");
if (ptr)
{
return reinterpret_cast<T*>(ptr->castWithoutAcquire(T::kTypeId));
}
else
{
return nullptr;
}
}
//! Casts the given pointer to the given interface (e.g. T).
//!
//! `nullptr` is accepted.
//!
//! Unlike @ref omni::core::cast(), this function does not call @ref omni::core::IObject::acquire() on the returned
//! pointer.
//!
//! @returns A valid pointer is returned if the given pointer implements the given interface. Otherwise, `nullptr` is
//! returned.
template <typename T, typename U>
inline T* cast(omni::core::ObjectPtr<U> ptr) noexcept
{
static_assert(std::is_base_of<IBase, T>::value, "cast can only be used with classes that derive from IBase");
if (ptr)
{
return reinterpret_cast<T*>(ptr->castWithoutAcquire(T::kTypeId));
}
else
{
return nullptr;
}
}
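// A hedged example of cross-casting without bumping the reference count
// (`def` stands in for any IDef* obtained elsewhere; INodeGraphDef is the
// target interface):
//
//   auto* graphDef = omni::graph::exec::unstable::cast<INodeGraphDef>(def);
//   if (graphDef)
//   { /* def is a graph definition; no matching release() is needed */ }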
#ifndef DOXYGEN_BUILD
namespace details
{
template <typename T>
inline void* castWithoutAcquire(T* obj, omni::core::TypeId id) noexcept; // forward declaration
} // namespace details
#endif
//! Helper template for implementing the @ref castWithoutAcquire function for one or more interfaces.
template <typename T, typename... Rest>
struct ImplementsCastWithoutAcquire : public T, public Rest...
{
public:
//! See @ref omni::core::IObject::cast.
inline void* cast(omni::core::TypeId id) noexcept
{
// note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
// has zero-overhead.
return static_cast<T*>(this)->cast(id);
}
//! See @ref omni::graph::exec::unstable::IBase_abi::castWithoutAcquire_abi.
inline void* castWithoutAcquire(omni::core::TypeId id) noexcept
{
// note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
// has zero-overhead.
return static_cast<T*>(this)->castWithoutAcquire(id);
}
private:
    // given a type id, castImpl() checks if the type id matches T's type id. if not, T's parent class type id is
    // checked. if T's parent class type id does not match, the grandparent class's type id is checked. this continues
    // until IObject's type id is checked.
//
// if no type id in T's inheritance chain match, the next interface in Rest is checked.
//
// it's expected the compiler can optimize away the recursion
template <typename U, typename... Args>
inline void* castImpl(omni::core::TypeId id) noexcept
{
// omni::core::detail::cast will march down the inheritance chain
void* obj = omni::core::detail::cast<U>(this, id);
if (nullptr == obj)
{
            // check the next class (inheritance chain) provided in the inheritance list
return castImpl<Args...>(id);
}
return obj;
}
    // given a type id, castWithoutAcquireImpl() checks if the type id matches T's type id. if not, T's parent class
    // type id is checked. if T's parent class type id does not match, the grandparent class's type id is checked. this
    // continues until IObject's type id is checked.
//
// if no type id in T's inheritance chain match, the next interface in Rest is checked.
//
// it's expected the compiler can optimize away the recursion
template <typename U, typename... Args>
inline void* castWithoutAcquireImpl(omni::core::TypeId id) noexcept
{
// details::castWithoutAcquire will march down the inheritance chain
void* obj = details::castWithoutAcquire<U>(this, id);
if (nullptr == obj)
{
            // check the next class (inheritance chain) provided in the inheritance list
return castWithoutAcquireImpl<Args...>(id);
}
return obj;
}
// this terminates walking across the types in the variadic template
template <int = 0>
inline void* castImpl(omni::core::TypeId) noexcept
{
return nullptr;
}
// this terminates walking across the types in the variadic template
template <int = 0>
inline void* castWithoutAcquireImpl(omni::core::TypeId) noexcept
{
return nullptr;
}
protected:
virtual ~ImplementsCastWithoutAcquire() noexcept = default;
//! @copydoc omni::core::IObject_abi::cast_abi
void* cast_abi(omni::core::TypeId id) noexcept override
{
return castImpl<T, Rest...>(id);
}
//! @copydoc omni::graph::exec::unstable::IBase_abi::castWithoutAcquire_abi
void* castWithoutAcquire_abi(omni::core::TypeId id) noexcept override
{
return castWithoutAcquireImpl<T, Rest...>(id);
}
};
//! Helper template for implementing one or more interfaces.
//!
//! Similar functionality as @ref omni::core::Implements but adds support for @ref ImplementsCastWithoutAcquire.
template <typename T, typename... Rest>
struct Implements : public ImplementsCastWithoutAcquire<T, Rest...>
{
public:
//! See @ref omni::core::IObject::acquire.
inline void acquire() noexcept
{
// note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
// has zero-overhead.
static_cast<T*>(this)->acquire();
}
//! See @ref omni::core::IObject::release.
inline void release() noexcept
{
// note: this implementation is needed to disambiguate which `cast` to call when using multiple inheritance. it
// has zero-overhead.
static_cast<T*>(this)->release();
}
protected:
std::atomic<uint32_t> m_refCount{ 1 }; //!< Reference count.
virtual ~Implements() noexcept = default;
//! @copydoc omni::core::IObject_abi::acquire_abi()
void acquire_abi() noexcept override
{
m_refCount.fetch_add(1, std::memory_order_relaxed);
}
//! @copydoc omni::core::IObject_abi::release_abi()
void release_abi() noexcept override
{
if (0 == m_refCount.fetch_sub(1, std::memory_order_release) - 1)
{
std::atomic_thread_fence(std::memory_order_acquire);
delete this;
}
}
//! Returns the number of different instances (this included) referencing the current object.
uint32_t getUseCount_abi() noexcept override
{
return m_refCount;
}
};
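// A hedged sketch of implementing an ABI interface with Implements<> (the
// invoke_abi signature is taken from IScheduleFunction.h; the class itself is
// hypothetical, and Status::eSuccess is assumed from Status.h):
//
//   class MyFunction : public Implements<IScheduleFunction>
//   {
//   protected:
//       Status invoke_abi() noexcept override
//       {
//           return Status::eSuccess;
//       }
//   };
//
//   auto fn = omni::core::steal(new MyFunction); // refcount already starts at 1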
#ifndef DOXYGEN_BUILD
namespace details
{
//! Given a type, this function walks the inheritance chain for the type, checking if the id of the type matches the
//! given id.
//!
//! Implementation detail. Do not call.
template <typename T>
inline void* castWithoutAcquire(T* obj, omni::core::TypeId id) noexcept
{
if (T::kTypeId == id)
{
return obj;
}
else
{
return castWithoutAcquire<typename T::BaseType>(obj, id); // call cast again, but with the parent type
}
}
//! Specialization of `castWithoutAcquire<T>(T*, TypeId)` for @ref omni::graph::exec::unstable::IBase. @ref
//! omni::graph::exec::unstable::IBase always terminates the recursive template since it does not have a base class.
//!
//! Implementation detail. Do not call.
template <>
inline void* castWithoutAcquire<IBase>(IBase* obj, omni::core::TypeId id) noexcept
{
if (IBase::kTypeId == id)
{
return obj;
}
else
{
return nullptr;
}
}
} // namespace details
#endif
//! Helper utility to access the number of different instances referencing the given object.
//!
//! It does it without modifying the referencing count.
template <typename T>
inline uint32_t useCount(T* ptr) noexcept
{
static_assert(std::is_base_of<IBase, T>::value, "useCount can only be used with classes that derive from IBase");
if (ptr)
{
return ptr->getUseCount();
}
else
{
return 0;
}
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IBase.gen.h>
| 11,483 | C | 30.206522 | 120 | 0.671514 |
omniverse-code/kit/include/omni/graph/exec/unstable/IDef.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for all node definitions
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Since definitions can be shared by multiple nodes, and nodes can be executed in parallel, implementations of
//! this interface should expect its methods to be called in parallel.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IDef_abi> : public omni::graph::exec::unstable::IDef_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IDef")
//! Execute the node definition.
//!
//! See thread safety information in interface description.
omni::graph::exec::unstable::Status execute(omni::graph::exec::unstable::ExecutionTask& info) noexcept;
    //! Provide runtime information about the scheduling constraints a particular task has.
//!
//! The provided @ref omni::graph::exec::unstable::ExecutionTask can be used to determine the path of the current
//! definition.
//!
//! The given task must not be @c nullptr.
//!
//! See thread safety information in interface description.
omni::graph::exec::unstable::SchedulingInfo getSchedulingInfo(
const omni::graph::exec::unstable::ExecutionTask& info) noexcept;
//! Return unique definition identifier.
//!
//! See thread safety information in interface description.
const omni::graph::exec::unstable::ConstName& getName() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Status omni::core::Generated<omni::graph::exec::unstable::IDef_abi>::execute(
omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
return execute_abi(&info);
}
inline omni::graph::exec::unstable::SchedulingInfo omni::core::Generated<omni::graph::exec::unstable::IDef_abi>::getSchedulingInfo(
const omni::graph::exec::unstable::ExecutionTask& info) noexcept
{
return getSchedulingInfo_abi(&info);
}
inline const omni::graph::exec::unstable::ConstName& omni::core::Generated<omni::graph::exec::unstable::IDef_abi>::getName() noexcept
{
return *(getName_abi());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 3,034 | C | 33.885057 | 133 | 0.722479 |
omniverse-code/kit/include/omni/graph/exec/unstable/IBackgroundTask.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IBackgroundTask.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IBackgroundTask.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <utility>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IBackgroundResult;
class IBackgroundResultWriter;
class IBackgroundTask;
class IBackgroundTask_abi;
//! Class representing a background task.
class IBackgroundTask_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IBackgroundTask")>
{
protected:
//! Returns a @c std::future like object used to check if the background task has completed.
//!
    //! An error is returned if this method is called more than once.
//!
//! This method is not thread safe.
virtual OMNI_ATTR("throw_result") omni::core::Result
getBackgroundResult_abi(OMNI_ATTR("not_null, throw_if_null, out, *return") IBackgroundResult** out) noexcept = 0;
    //! Completes the async computation by setting a result-writing functor as the shared state of the promise.
//!
//! It is the responsibility of the @ref omni::graph::exec::unstable::IBackgroundResult user to call @ref
//! omni::graph::exec::unstable::IBackgroundResult::write() to invoke this given @ref
//! omni::graph::exec::unstable::IBackgroundResultWriter. This allows the task's waiter to optionally not consume
//! the result of the task.
//!
//! @ref omni::core::IObject::acquire() is called on the given writer.
//!
    //! An error is returned if this method is called more than once.
//!
//! This method is not thread safe.
virtual OMNI_ATTR("no_api, throw_result") omni::core::Result
setResultWriter_abi(OMNI_ATTR("not_null, throw_if_null") IBackgroundResultWriter* writer) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IBackgroundTask.
using BackgroundTaskPtr = omni::core::ObjectPtr<IBackgroundTask>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IBackgroundTask.gen.h>
//! @copydoc omni::graph::exec::unstable::IBackgroundTask_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IBackgroundTask
: public omni::core::Generated<omni::graph::exec::unstable::IBackgroundTask_abi>
{
public:
//! Marks the task as ready and sets a completion writer that can be invoked by @ref IBackgroundResult.
//!
    //! The supplied function should have the signature `Status(ExecutionTask&)`.
template <typename Fn>
void setResultWriter(Fn&& fn);
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IBackgroundResult.h>
#include <omni/graph/exec/unstable/IBackgroundResultWriter.h>
template <typename Fn>
void omni::graph::exec::unstable::IBackgroundTask::setResultWriter(Fn&& fn)
{
class Writer : public Implements<IBackgroundResultWriter>
{
public:
Writer(Fn&& fn) : m_fn(std::move(fn))
{
}
protected:
Status write_abi(ExecutionTask* info) noexcept override
{
return m_fn(*info);
}
private:
Fn m_fn;
};
setResultWriter_abi(omni::core::steal(new Writer(std::forward<Fn>(fn))).get());
}
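// A hedged usage sketch (illustrative; `task` is assumed to be a valid
// IBackgroundTask* obtained during execution of a background-capable node,
// and Status::eSuccess is assumed from Status.h):
//
//   task->setResultWriter(
//       [](ExecutionTask& info) noexcept
//       {
//           // write the computed value into the execution context here
//           return Status::eSuccess;
//       });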
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IBackgroundTask.gen.h>
| 3,996 | C | 32.588235 | 127 | 0.716216 |
omniverse-code/kit/include/omni/graph/exec/unstable/Span.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Span.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::Span.
#pragma once
#include <cstdint>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! A pointer along with the number of items the pointer points to.
//!
//! This object is ABI-safe.
template <typename T>
class Span
{
public:
//! Constructor.
Span(T* buffer, uint64_t count) noexcept : m_buffer(buffer), m_count(count)
{
static_assert(offsetof(Span<T>, m_buffer) == 0, "unexpected buffer offset");
static_assert(offsetof(Span<T>, m_count) == 8, "unexpected count offset");
static_assert(16 == sizeof(Span<T>), "Span is an unexpected size");
static_assert(std::is_standard_layout<Span<T>>::value, "Span is expected to be abi safe");
}
//! Returns a pointer to the beginning of the array.
T* begin() noexcept
{
return m_buffer;
}
//! Returns a const pointer to the beginning of the array.
const T* begin() const noexcept
{
return m_buffer;
}
//! Returns a pointer to one past the end of the array.
T* end() noexcept
{
return m_buffer + m_count;
}
//! Returns a const pointer to one past the end of the array.
const T* end() const noexcept
{
return m_buffer + m_count;
}
//! Return @c true if the span is empty.
bool empty() const noexcept
{
return (0 == m_count);
}
//! Returns a reference to the first element.
//!
//! Calling when the span is empty is undefined behavior.
T& front() noexcept
{
return *(begin());
}
//! Returns a const reference to the first element.
//!
//! Calling when the span is empty is undefined behavior.
const T& front() const noexcept
{
return *(begin());
}
//! Returns a reference to the last element.
//!
//! Calling when the span is empty is undefined behavior.
T& back() noexcept
{
return *(end() - 1);
}
//! Returns a const reference to the last element.
//!
//! Calling when the span is empty is undefined behavior.
const T& back() const noexcept
{
return *(end() - 1);
}
//! Returns a pointer to the beginning of the array.
T* data() noexcept
{
return m_buffer;
}
//! Returns a pointer to the beginning of the array.
const T* data() const noexcept
{
return m_buffer;
}
//! Returns the number of items in the array.
uint64_t size() const noexcept
{
return m_count;
}
private:
T* m_buffer;
uint64_t m_count;
};
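// A hedged example of wrapping a C array (the data is illustrative):
//
//   int values[] = { 1, 2, 3 };
//   omni::graph::exec::unstable::Span<int> span{ values, 3 };
//   for (int v : span)
//   { /* visits 1, 2, 3 */ }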
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 3,186 | C | 23.143939 | 98 | 0.622097 |
omniverse-code/kit/include/omni/graph/exec/unstable/ElementAt.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ElementAt.h
//!
//! @brief Defines helper classes to access iterable items via an ABI.
#pragma once
#include <cstdint>
#include <iterator>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
namespace detail
{
//! Provides iterator access to an interface that defines per-element access.
//!
//! Use this object to wrap an interface that defines random element access. The resulting wrapper object can be passed
//! to any algorithm that can iterate over an iterable object (e.g. C++'s built-in range-based `for`).
//!
//! @tparam OwnerType The interface type (e.g. `IMyArray`).
//!
//! @tparam ValueType The type of the value returned from the getter.
//!
//! @tparam GetterType A struct that defines a static `getAt(OwnerType* owner, uint64_t index, ValueType* out)` method.
//! This method is used to access the element at the given index. The struct must also define a
//! static `getCount(OwnerType*)` method which returns the number of items to iterate over.
template <typename OwnerType, typename ValueType, typename GetterType>
struct ElementAt
{
//! Iterator pointing to an element in the iterable range.
struct Iterator
{
//! Type of the iterator
using iterator_category = std::forward_iterator_tag;
//! Type of the value to which the iterator points.
using value_type = ValueType;
//! Pointer to the type of the value to which the iterator points.
using pointer = value_type*;
//! Reference to the type of the value to which the iterator points.
using reference = value_type&;
//! Constructor.
Iterator(OwnerType* owner_, uint64_t index_, uint64_t count_)
: m_owner(owner_), m_index(index_), m_count(count_)
{
_get();
}
//! Dereference operator.
reference operator*()
{
return m_element;
}
//! Dereference operator.
pointer operator->()
{
return &m_element;
}
//! Move to the next item in the container.
Iterator& operator++() noexcept
{
m_index++;
_get();
return *this;
}
//! Move to the next item in the container.
Iterator operator++(int) noexcept
{
Iterator tmp = *this;
++(*this);
return tmp;
}
//! Check if the iterators are equal.
friend bool operator==(const Iterator& a, const Iterator& b) noexcept
{
return ((a.m_owner == b.m_owner) && (a.m_index == b.m_index));
};
//! Check if the iterators are not equal.
friend bool operator!=(const Iterator& a, const Iterator& b) noexcept
{
return ((a.m_owner != b.m_owner) || (a.m_index != b.m_index));
};
private:
void _get()
{
if (m_index < m_count)
{
GetterType::getAt(m_owner, m_index, &m_element);
}
}
OwnerType* m_owner;
ValueType m_element;
uint64_t m_index;
uint64_t m_count;
};
//! Constructor
ElementAt(OwnerType* owner) noexcept : m_owner(owner)
{
}
//! Returns an iterator to the first element.
Iterator begin() const noexcept
{
return Iterator(m_owner, 0, GetterType::getCount(m_owner));
}
//! Returns an invalid iterator past the last element.
Iterator end() const noexcept
{
auto count = GetterType::getCount(m_owner);
return Iterator(m_owner, count, count);
}
//! Returns element count
uint64_t getCount() const noexcept
{
return GetterType::getCount(m_owner);
}
private:
OwnerType* m_owner;
};
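// A hedged sketch of the GetterType contract (IMyArray, Item, and their member
// functions are hypothetical):
//
//   struct ItemGetter
//   {
//       static uint64_t getCount(IMyArray* owner) noexcept
//       {
//           return owner->getItemCount();
//       }
//       static void getAt(IMyArray* owner, uint64_t index, Item* out) noexcept
//       {
//           *out = owner->getItemAt(index);
//       }
//   };
//
//   for (Item& item : ElementAt<IMyArray, Item, ItemGetter>(array))
//   { /* ... */ }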
} // namespace detail
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,373 | C | 27.402597 | 119 | 0.605991 |
omniverse-code/kit/include/omni/graph/exec/unstable/RaceConditionFinder.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file RaceConditionFinder.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::RaceConditionFinder.
#pragma once
#include <carb/Defines.h>
#include <atomic>
#include <thread>
#if CARB_PLATFORM_WINDOWS
#else
# include <signal.h>
#endif
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Helper class for detecting race conditions.
//!
//! This type of a "fake lock" has been known under many names. The goal is to create a "critical section"
//! without enforcing synchronization. If more than one thread falls into this "critical section" it means
//! we haven't managed (at the orchestration level) guarantee sequential execution of particular code.
//!
//! The RC finder is used much like a lock, i.e. you allocate a shared RC finder and enter
//! the "critical section" using the Scope RAII object. When more than one thread enters this section,
//! we make them all spin forever and issue a debug break from the first thread that entered the section.
//! This allows catching all the threads and debugging easily by inspecting all the callstacks and states.
//!
//! RC finder supports recursive execution.
//!
//! This is a debugging object and shouldn't be used in a released product. Without a debugger attached, the
//! application will simply crash.
class RaceConditionFinder
{
public:
//! Create scope around the fake "critical section".
class Scope
{
public:
//! Construct the scope with a valid shared finder.
Scope(RaceConditionFinder& finder)
: m_sharedFinder(finder),
m_thisThreadId(std::this_thread::get_id()),
m_originalThreadId(m_sharedFinder.m_currentThread)
{
// handle recursive code paths
bool acquiredSuccessfully = (m_originalThreadId == m_thisThreadId);
if (!acquiredSuccessfully)
{
std::thread::id emptyId;
acquiredSuccessfully = m_sharedFinder.m_currentThread.compare_exchange_strong(emptyId, m_thisThreadId);
}
// infinite loop in here and let the other thread complete and issue debugger break in the destructor
if (!acquiredSuccessfully)
{
m_sharedFinder.m_raceDetected = true;
while (true)
{
std::this_thread::yield();
}
}
}
//! Issue a breakpoint if race condition is detected, otherwise release the section.
~Scope()
{
if (m_sharedFinder.m_raceDetected)
{
#if CARB_PLATFORM_WINDOWS
__debugbreak();
#else
raise(SIGTRAP);
#endif
}
else
{
bool resetSuccessfully =
m_sharedFinder.m_currentThread.compare_exchange_strong(m_thisThreadId, m_originalThreadId);
// this shouldn't be possible
if (!resetSuccessfully)
{
m_sharedFinder.m_raceDetected = true;
while (true)
{
std::this_thread::yield();
}
}
}
}
private:
RaceConditionFinder& m_sharedFinder; //!< Shared finder object used to communicate state across threads
std::thread::id m_thisThreadId; //!< Captured thread ID at the construction of the object
std::thread::id m_originalThreadId; //!< Captured thread ID at the construction time from shared finder
};
private:
std::atomic<std::thread::id> m_currentThread; //!< Thread ID currently holding critical section, or empty ID
std::atomic<bool> m_raceDetected{ false }; //!< Was race condition detected
};
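// A hedged usage sketch (`m_finder` is a hypothetical member shared by all
// threads that may enter the guarded section):
//
//   void computeExpectedToBeSerial()
//   {
//       RaceConditionFinder::Scope guard{ m_finder };
//       // ... work the orchestration layer must never run concurrently ...
//   }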
}
}
}
}
| 4,206 | C | 33.483606 | 119 | 0.632905 |
omniverse-code/kit/include/omni/graph/exec/unstable/IPartitionPass.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPartitionPass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPartitionPass.
#pragma once
#include <omni/graph/exec/unstable/IPass.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class INode;
class ITopology;
class IPartitionPass;
class IPartitionPass_abi;
//! Base class for graph partitioning passes.
//!
//! Partition passes are typically run just after population passes and only on newly modified
//! @ref omni::graph::exec::unstable::INodeGraphDef objects. The job of a partition pass is to recognize patterns in the
//! newly populated graph and replace them with a new definition or augment existing one.
//!
//! Partition passes can only mutate the graph from the @ref omni::graph::exec::unstable::IPartitionPass::commit method
//! using provided @ref omni::graph::exec::unstable::IGraphBuilder. This will guarantee that the rest of the pipeline
//! is aware of changes made to the graph and avoid potential threading issues.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPartitionPass_abi : public omni::core::Inherits<IPass, OMNI_TYPE_ID("omni.graph.exec.unstable.IPartitionPass")>
{
protected:
//! Call from pass pipeline to initialize the pass for @p topology.
//!
    //! This interface method implementation can't mutate the given @p topology. Multiple passes can run concurrently
    //! on it.
//!
    //! Returns @c true if initialization was successful and the pipeline should issue calls to run and commit.
//! Otherwise this pass will be destroyed and won't participate in partitioning @p topology.
virtual bool initialize_abi(OMNI_ATTR("not_null, throw_if_null") ITopology* topology) noexcept = 0;
//! Call from pass pipeline to discover nodes requiring partitioning.
//!
//! No topology changes are permitted at this point. Multiple passes will get a chance to receive this
//! notification.
//!
//! Call to this method comes from graph traversal that may run multiple passes concurrently.
virtual void run_abi(OMNI_ATTR("not_null, throw_if_null") INode* node) noexcept = 0;
    //! Call to verify generated partitions and commit new definition(s) replacing discovered partitions.
//!
    //! Commit of partitions is done serially and in the priority order of the passes. Passes with higher priority
    //! get the chance first. This is the only partition pass method that can mutate the graph.
virtual void commit_abi(OMNI_ATTR("not_null, throw_if_null") IGraphBuilder* builder) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IPartitionPass.
using PartitionPassPtr = omni::core::ObjectPtr<IPartitionPass>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPartitionPass.gen.h>
//! @copydoc omni::graph::exec::unstable::IPartitionPass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPartitionPass
: public omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>
{
};
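// A hedged skeleton of a partition pass (illustrative only; pattern matching
// and pass registration are elided):
//
//   class MyPartitionPass : public Implements<IPartitionPass>
//   {
//   protected:
//       bool initialize_abi(ITopology* topology) noexcept override
//       {
//           return true; // participate in partitioning this topology
//       }
//       void run_abi(INode* node) noexcept override
//       {
//           // collect nodes matching the pattern; no topology mutation here
//       }
//       void commit_abi(IGraphBuilder* builder) noexcept override
//       {
//           // the only method allowed to mutate the graph, in priority order
//       }
//   };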
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INode.h>
#include <omni/graph/exec/unstable/ITopology.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPartitionPass.gen.h>
| 4,015 | C | 39.979591 | 120 | 0.756663 |
omniverse-code/kit/include/omni/graph/exec/unstable/Assert.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Assert.h
//!
//! @brief Defines macros for assertions.
#pragma once
#include <carb/extras/Debugging.h>
#include <omni/core/Assert.h>
#include <omni/core/ResultError.h>
#include <omni/graph/exec/unstable/IBase.h>
//! Debug build assertion.
#define OMNI_GRAPH_EXEC_ASSERT(cond, ...) OMNI_ASSERT(cond, ##__VA_ARGS__)
#ifndef DOXYGEN_BUILD
# define OMNI_GRAPH_EXEC_BREAK_ON_ERROR
#endif
#if defined(OMNI_GRAPH_EXEC_BREAK_ON_ERROR) || defined(DOXYGEN_BUILD)
//! Returns the given @ref omni::core::Result. If a debugger is attached, it will break.
# define OMNI_GRAPH_EXEC_RETURN_ERROR(e_) \
carb::extras::debuggerBreak(); \
return e_;
#else
# define OMNI_GRAPH_EXEC_RETURN_ERROR(e_) return e_;
#endif
//! When authoring ABI methods, use this macro to convert exceptions to @ref omni::core::Result codes.
#define OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION() \
catch (const omni::core::ResultError& e_) \
{ \
OMNI_GRAPH_EXEC_RETURN_ERROR(e_.getResult()); \
} \
catch (...) \
{ \
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultFail); \
}
//! Casts @p obj_ to an object of type @p type_. If the cast fails, an exception is thrown.
//!
//! The resulting pointer is stored in @p var_.
#define OMNI_GRAPH_EXEC_CAST_OR_RETURN(var_, type_, obj_) \
auto var_ = omni::graph::exec::unstable::cast<type_>(obj_); \
do \
{ \
if (!var_) \
{ \
OMNI_GRAPH_EXEC_RETURN_ERROR(omni::core::kResultNoInterface); \
} \
} while (0)
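// A hedged example of the intended ABI pattern (doThing_abi and
// doSomethingThatMayThrow are hypothetical):
//
//   omni::core::Result doThing_abi(omni::graph::exec::unstable::IBase* obj) noexcept
//   {
//       try
//       {
//           OMNI_GRAPH_EXEC_CAST_OR_RETURN(node, omni::graph::exec::unstable::INode, obj);
//           doSomethingThatMayThrow(node); // may throw; converted to a Result below
//           return omni::core::kResultSuccess;
//       }
//       OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
//   }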
| 3,539 | C | 57.999999 | 120 | 0.374117 |
omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDefDebug.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Interface containing debugging methods for @ref omni::graph::exec::unstable::INodeGraphDef.
//!
//! Implementation of this interface is optional.
template <>
class omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>
: public omni::graph::exec::unstable::INodeGraphDefDebug_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::INodeGraphDefDebug")
//! Returns the current execution count. A value of 0 means the graph is not executing.
uint64_t getExecutionCount() noexcept;
//! Increments the execution count.
void incrementExecutionCount() noexcept;
    //! Decrements the execution count. It is undefined behavior to call decrement more times than increment.
void decrementExecutionCount() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::getExecutionCount() noexcept
{
return getExecutionCount_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::incrementExecutionCount() noexcept
{
incrementExecutionCount_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::INodeGraphDefDebug_abi>::decrementExecutionCount() noexcept
{
decrementExecutionCount_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 2,190 | C | 30.3 | 122 | 0.750228 |
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutorFactory.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutorFactory.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::ExecutorFactory
#pragma once
#include <omni/graph/exec/unstable/IExecutor.h>
#include <functional>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class ExecutionTask;
class ITopology;
//! Factory owned by a node graph definition, used to instantiate an executor that generates the work
//!
//! May throw.
using ExecutorFactory =
std::function<omni::core::ObjectPtr<IExecutor>(omni::core::ObjectParam<ITopology>, const ExecutionTask&)>;
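// A minimal sketch of providing a custom executor factory (illustrative only; MyExecutor is a
// hypothetical IExecutor implementation; the lambda signature follows the alias above):
//
// ExecutorFactory factory = [](omni::core::ObjectParam<ITopology> topology,
//                              const ExecutionTask& thisTask) -> omni::core::ObjectPtr<IExecutor>
// {
//     return MyExecutor::create(topology, thisTask); // may throw
// };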
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 1,074 | C | 25.219512 | 110 | 0.76257 |
omniverse-code/kit/include/omni/graph/exec/unstable/INode.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INode.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INode.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IDef;
class INode_abi;
class INode;
class INodeDef;
class INodeGraphDef;
class ITopology;
//! Represents work in a graph. Nodes point to a shared execution definition that describes the actual work.
//!
//! @ref omni::graph::exec::unstable::INode is the main structural component used to build a graph's topology. @ref
//! omni::graph::exec::unstable::INode stores edges to *parents* (i.e. predecessors) and *children* (i.e. successors).
//! These edges set an ordering between nodes. See @ref omni::graph::exec::unstable::INode::getParents() and @ref
//! omni::graph::exec::unstable::INode::getChildren() respectively.
//!
//! A node represents work to be performed. The description of the work to be performed is stored in a *definition*
//! (i.e. @ref omni::graph::exec::unstable::IDef). Each node wishing to perform work points to a definition (see @ref
//! omni::graph::exec::unstable::INode::getDef()).
//!
//! The definition to which a node points can be one of two types. The first type, @ref
//! omni::graph::exec::unstable::INodeDef, defines work opaquely (i.e. EF is unable to view the work definition and
//! potentially optimize it). The second type, @ref omni::graph::exec::unstable::INodeGraphDef, defines work with a
//! graph. This last representation is the most powerful, as it allows for both *extensibility* and *composability* in EF.
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//! :align: center
//!
//! @endrst
//!
//! Above, we see that nodes point to graph definitions, which contain other nodes that point to other graph
//! definitions. This structure of graphs pointing to other graphs is where EF gets its *graph of graphs* name.
//!
//! Not all nodes will point to a definition. For example, the @rstref{root node <ef_root_node>} in each graph
//! definition will not point to a definition.
//!
//! A node is always part of a graph definition and the graph definition's executor is responsible for orchestrating and
//! generating work to the scheduler.
//!
//! Nodes within a graph definition are assigned a unique index, between zero and the number of nodes in the
//! definition. This index is often used as a lookup into transient arrays used to store state during graph traversals.
//! See @ref omni::graph::exec::unstable::INode::getIndexInTopology().
//!
//! Nodes have a notion of validity. See @rstref{Graph Invalidation <ef_graph_invalidation>} for details.
//!
//! @ref omni::graph::exec::unstable::INode does not contain methods for either setting the node's definition or
//! connecting nodes to each other. This functionality is reserved for @ref omni::graph::exec::unstable::IGraphBuilder.
//! See @rstref{Graph Construction <ef_pass_concepts>} for details.
//!
//! See @rstref{Graph Concepts <ef_graph_concepts>} for a guide on how this object relates to other objects in the
//! Execution Framework.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Users may wish to implement this interface to store meaningful authoring level data in EF. For example, OmniGraph
//! uses an implementation of this node to store graph instancing information. See @ref
//! omni::graph::exec::unstable::Node for a concrete implementation of this interface suitable for sub-classing.
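//!
//! A short sketch of inspecting a node's attached definition (illustrative only):
//!
//! @code{.cpp}
//! void inspect(omni::graph::exec::unstable::INode* node)
//! {
//!     if (auto* graphDef = node->getNodeGraphDef())
//!     {
//!         // the work is itself a graph; recurse from graphDef->getRoot()
//!     }
//!     else if (auto* nodeDef = node->getNodeDef())
//!     {
//!         // the work is defined opaquely
//!     }
//!     // otherwise the node carries no definition (e.g. a root node)
//! }
//! @endcode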
class INode_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.INode")>
{
public:
using NodeArray = Span<INode* const>; //!< Stores the list of parents and children.
protected:
//! Access topology owning this node
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;
//! Access node's unique identifier name.
virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0;
    //! Access the node's unique index within the owning topology. The index will always be smaller than the topology size.
virtual NodeIndexInTopology getIndexInTopology_abi() noexcept = 0;
//! Access parents.
virtual Span<INode* const> getParents_abi() noexcept = 0;
//! Access children.
virtual Span<INode* const> getChildren_abi() noexcept = 0;
//! Return number of parents that cause cycles within the graph during traversal over this node.
virtual uint32_t getCycleParentCount_abi() noexcept = 0;
//! Check if topology/connectivity of nodes is valid within current topology version.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
virtual bool isValidTopology_abi() noexcept = 0;
//! Make topology valid for current topology version. Drop all the connections if topology changed.
//!
//! See @rstref{Graph Invalidation <ef_graph_invalidation>} for details on invalidation.
virtual void validateOrResetTopology_abi() noexcept = 0;
//! Access base node definition (can be empty).
//!
//! When you wish to determine if the attached definition is either opaque or a graph, consider calling @ref
//! omni::graph::exec::unstable::INode::getNodeDef() or @ref omni::graph::exec::unstable::INode::getNodeGraphDef()
//! rather than this method.
//!
//! The returned @ref omni::graph::exec::unstable::IDef will *not* have @ref omni::core::IObject::acquire() called
//! before being returned.
virtual OMNI_ATTR("no_acquire") IDef* getDef_abi() noexcept = 0;
//! Access node definition (can be empty).
//!
//! If the returned pointer is @c nullptr, either the definition does not implement @ref
//! omni::graph::exec::unstable::INodeDef or there is no definition attached to the node.
//!
//! The returned @ref omni::graph::exec::unstable::INodeDef will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
//!
//! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
//! omni::graph::exec::unstable::INode::getNodeGraphDef().
virtual OMNI_ATTR("no_acquire") INodeDef* getNodeDef_abi() noexcept = 0;
//! Access node's graph definition (can be empty)
//!
//! The returned graph definition pointer is the graph definition which defines the work this node represents. The
//! returned pointer **is not** the graph definition that contains this node.
//!
//! If the returned pointer is @c nullptr, either the definition does not implement @ref
//! omni::graph::exec::unstable::INodeGraphDef or there is no definition attached to the node.
//!
//! The returned @ref omni::graph::exec::unstable::INodeGraphDef will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
//!
//! Also see @ref omni::graph::exec::unstable::INode::getDef() and @ref
//! omni::graph::exec::unstable::INode::getNodeDef().
virtual OMNI_ATTR("no_acquire") INodeGraphDef* getNodeGraphDef_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref INode.
using NodePtr = omni::core::ObjectPtr<INode>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INode.gen.h>
//! @copydoc omni::graph::exec::unstable::INode_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INode : public omni::core::Generated<omni::graph::exec::unstable::INode_abi>
{
public:
//! Returns the root of the graph definition of which this node is a part.
inline INode* getRoot() noexcept;
//! Check if this node is the root of the graph/topology.
inline bool isRoot() noexcept;
//! Check if a given node is a parent of this node.
inline bool hasParent(omni::core::ObjectParam<INode> parent) noexcept;
//! Check if a given node is a child of this node.
inline bool hasChild(omni::core::ObjectParam<INode> child) noexcept;
};
#include <omni/graph/exec/unstable/INodeDef.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
#include <omni/graph/exec/unstable/ITopology.h>
inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::INode::getRoot() noexcept
{
return getTopology()->getRoot();
}
inline bool omni::graph::exec::unstable::INode::isRoot() noexcept
{
return (getRoot() == this);
}
inline bool omni::graph::exec::unstable::INode::hasParent(omni::core::ObjectParam<INode> parent) noexcept
{
auto parents = getParents();
return std::find(parents.begin(), parents.end(), parent.get()) != parents.end();
}
inline bool omni::graph::exec::unstable::INode::hasChild(omni::core::ObjectParam<INode> child) noexcept
{
auto children = getChildren();
return std::find(children.begin(), children.end(), child.get()) != children.end();
}
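// A brief usage sketch of the inline helpers above (illustrative only; `node` is assumed to be a
// valid INode* inside some graph definition):
//
// auto* root = node->getRoot();
// if (!node->isRoot())
// {
//     for (omni::graph::exec::unstable::INode* child : root->getChildren())
//     {
//         bool direct = child->hasParent(root); // true for direct children of the root
//     }
// }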
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INode.gen.h>
| 9,771 | C | 43.217194 | 120 | 0.712721 |
omniverse-code/kit/include/omni/graph/exec/unstable/GraphBuilderContext.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file GraphBuilderContext.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderContext.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/IGraphBuilderContext.h>
#include <omni/graph/exec/unstable/IPassPipeline.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::IGraphBuilderContext
template <typename... Bases>
class GraphBuilderContextT : public Implements<Bases...>
{
public:
//! Construct graph builder context for a given @ref IGraph with a given pass transformation pipeline.
//!
//! May throw.
static omni::core::ObjectPtr<GraphBuilderContextT> create(omni::core::ObjectParam<IGraph> graph,
omni::core::ObjectParam<IPassPipeline> passPipeline)
{
OMNI_THROW_IF_ARG_NULL(graph);
OMNI_THROW_IF_ARG_NULL(passPipeline);
return omni::core::steal(new GraphBuilderContextT(graph.get(), passPipeline.get()));
}
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::getConstructionStamp_abi
Stamp getConstructionStamp_abi() noexcept override
{
return m_constructionStamp;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::getGraph_abi
IGraph* getGraph_abi() noexcept override
{
return m_owner;
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::report_abi
void report_abi(const char* diagnose) noexcept override
{
// Default implementation doesn't report anything
}
//! Core implementation of @ref omni::graph::exec::unstable::IGraphBuilderContext::runTransformations_abi
void runTransformations_abi(INodeGraphDef* nodeGraphDef) noexcept override
{
m_pipeline->execute(this, nodeGraphDef);
m_constructionStamp.next();
}
//! Constructor
GraphBuilderContextT(IGraph* graph, IPassPipeline* pipeline)
: m_owner{ graph }, m_pipeline{ pipeline, omni::core::kBorrow }
{
}
private:
IGraph* m_owner; //!< Owner of all graphs this context touches
PassPipelinePtr m_pipeline; //!< Graph transformations pipeline used in this context
Stamp m_constructionStamp; //!< Construction version incremented after pipeline run.
};
//! Core GraphBuilderContext implementation for @ref omni::graph::exec::unstable::IGraphBuilderContext
using GraphBuilderContext = GraphBuilderContextT<IGraphBuilderContext>;
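// A minimal creation sketch (illustrative only; `graph` and `pipeline` are assumed to be valid
// IGraph and IPassPipeline instances obtained elsewhere, and runTransformations() is assumed to
// be the generated wrapper over runTransformations_abi()):
//
// auto context = GraphBuilderContext::create(graph, pipeline);
// context->runTransformations(graph->getNodeGraphDef());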
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 3,207 | C | 34.252747 | 114 | 0.718117 |
omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionContext.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutionContext.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ExecutionContext.
#pragma once
#include <carb/thread/RecursiveSharedMutex.h>
#include <carb/thread/SharedMutex.h>
#include <carb/thread/Spinlock.h>
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/ExecutionPath.h>
#include <omni/graph/exec/unstable/Executor.h>
#include <omni/graph/exec/unstable/IExecutionContext.h>
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/INodeGraphDefDebug.h>
#include <omni/graph/exec/unstable/SmallVector.h>
#include <omni/graph/exec/unstable/Traversal.h>
#include <thread>
#include <unordered_map>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Implementation details for omni::graph::exec. Items in this namespace should not be relied on outside of the API.
namespace detail
{
//! Utility class for discovering all execution paths for a given definition
//!
//! Searches are cached until topology of execution graph changes.
//! Invalidation of the cache happens lazy upon request.
//!
//! This class is thread-safe and can be utilized recurrently.
class ExecutionPathCache
{
public:
//! Default constructor is removed
ExecutionPathCache() = delete;
//! Constructor
explicit ExecutionPathCache(IGraph& graph) noexcept : m_graph(graph)
{
}
//! Call given function for every execution path that points to given node or node graph definition
//!
//! Function should have the signature of `void(const ExecutionPath&)`
template <typename Key>
void applyOnEach(const Key& key, IApplyOnEachFunction& applyFn)
{
if (m_graph.inBuild())
{
            // traversing the entire graph while building it isn't allowed since multiple threads may be building it
OMNI_GRAPH_EXEC_ASSERT(!m_graph.inBuild());
return;
}
if (!m_graph.getTopology()->isValid())
{
return;
}
auto discoverAndApplyOnNodesWithDefinitionFn = [this, &key, &applyFn](
const ExecutionPath& upstreamPath, INodeGraphDef& graph,
Paths& collectedPaths, auto recursionFn) -> void
{
traversal_dfs<VisitFirst>(
graph.getRoot(),
[this, &upstreamPath, &key, &recursionFn, &applyFn, &collectedPaths](auto info, INode* prev, INode* curr)
{
auto currNodeGraph = curr->getNodeGraphDef();
if (currNodeGraph)
{
ExecutionPath newUpstreamPath(upstreamPath, curr);
recursionFn(newUpstreamPath, *currNodeGraph, collectedPaths, recursionFn);
}
auto def = curr->getDef();
if (def && _isMatch(key, def))
{
collectedPaths.emplace_back(upstreamPath, curr);
applyFn.invoke(collectedPaths.back());
}
info.continueVisit(curr);
});
};
        // check if this cache is in-sync with the current topology. since we can run this method in parallel, we
// need a read lock to m_mutex to safely read m_topologyStamp
std::shared_lock<MutexType> readLock(m_mutex);
auto topologyStamp = *m_graph.getGlobalTopologyStamp();
if (!m_topologyStamp.inSync(topologyStamp))
{
// cache is out-of-sync. upgrade to a write lock.
readLock.unlock();
{
// here we once again check to see if the cache is in-sync since another thread may have beat this
// thread to the write lock and brought the cache into sync.
std::lock_guard<MutexType> writeLock(m_mutex);
if (m_topologyStamp.makeSync(topologyStamp))
{
// we're the thread that got to the write lock first. its our job to clear the cache.
m_defCache.clear();
m_nameCache.clear();
}
}
// grab the read lock again so we can safely read the cache
readLock.lock();
}
auto& cache = _getCache(key);
auto findIt = cache.find(key);
if (findIt != cache.end())
{
// We've seen this name before. Make a copy of the paths so we can release the readLock. This is
// required because an invocation can result in re-entering and taking the writeLock.
auto pathsCopy = findIt->second;
readLock.unlock();
for (ExecutionPath& path : pathsCopy)
{
applyFn.invoke(path);
}
}
else
{
// Release readLock because apply below can result in re-entry of this function
readLock.unlock();
// either the key wasn't found or we're building the graph
Paths paths;
discoverAndApplyOnNodesWithDefinitionFn(
ExecutionPath::getEmpty(), *m_graph.getNodeGraphDef(), paths, discoverAndApplyOnNodesWithDefinitionFn);
// Insert only once we collected all the paths. Some other thread may be looking for this definition at
// the same time.
std::lock_guard<MutexType> writeLock(m_mutex);
cache.emplace(key, std::move(paths));
}
}
private:
bool _isMatch(const ConstName& desired, IDef* candidate)
{
return (desired == candidate->getName());
}
bool _isMatch(IDef* desired, IDef* candidate)
{
return (desired == candidate);
}
auto& _getCache(const ConstName&)
{
return m_nameCache;
}
auto& _getCache(IDef*)
{
return m_defCache;
}
using Paths = SmallVector<ExecutionPath, 2>;
using DefCache = std::unordered_map<IDef*, Paths>;
using NameCache = std::unordered_map<ConstName, Paths>;
using MutexType = carb::thread::recursive_shared_mutex;
IGraph& m_graph; //!< Execution graph to search for execution paths
DefCache m_defCache; //!< Storage for already discovered paths (keyed on def ptr)
NameCache m_nameCache; //!< Storage for already discovered paths (keyed on def name)
MutexType m_mutex; //!< Mutex to allow concurrent utilization of cache and serialized insertion
SyncStamp m_topologyStamp; //!< Topology of execution graph this cache is valid for
};
} // namespace detail
//! @copydoc omni::graph::exec::unstable::IExecutionContext
template <typename StorageType, typename ParentInterface = IExecutionContext>
class ExecutionContext : public Implements<ParentInterface>
{
protected:
//! Helper RAII object controlling in execution flag.
class ScopedInExecute
{
public:
//! Constructor
ScopedInExecute(ExecutionContext& context) : m_context(context)
{
std::lock_guard<carb::thread::Spinlock> lock(m_context.m_threadIdSpinlock);
++m_context.m_contextThreadIds[std::this_thread::get_id()];
}
//! Destructor
~ScopedInExecute()
{
std::lock_guard<carb::thread::Spinlock> lock(m_context.m_threadIdSpinlock);
--m_context.m_contextThreadIds[std::this_thread::get_id()];
if (m_context.m_contextThreadIds[std::this_thread::get_id()] == 0)
{
m_context.m_contextThreadIds.erase(std::this_thread::get_id());
}
}
private:
ExecutionContext& m_context; //!< Context in execution
};
//! @copydoc omni::graph::exec::unstable::IExecutionContext::getExecutionStamp_abi
Stamp getExecutionStamp_abi() noexcept override
{
return m_executionStamp;
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::inExecute_abi
bool inExecute_abi() noexcept override
{
std::lock_guard<carb::thread::Spinlock> lock(m_threadIdSpinlock);
return !m_contextThreadIds.empty();
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::isExecutingThread_abi
bool isExecutingThread_abi() noexcept override
{
std::lock_guard<carb::thread::Spinlock> lock(m_threadIdSpinlock);
return m_contextThreadIds.find(std::this_thread::get_id()) != m_contextThreadIds.end();
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::execute_abi
Status execute_abi() noexcept override
{
if (!m_initStamp.inSync(m_graph->getTopology()->getStamp()))
{
this->initialize();
}
m_executionStamp = _getNextGlobalExecutionStamp();
ScopedInExecute scopedInExecute(*this);
ScopedExecutionDebug scopedDebug{ m_graph->getNodeGraphDef() };
return getCurrentThread()->executeGraph(m_graph, this);
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::executeNode_abi
Status executeNode_abi(const ExecutionPath* path, INode* node) noexcept override
{
if (!m_initStamp.inSync(m_graph->getTopology()->getStamp()))
{
this->initialize();
}
m_executionStamp = _getNextGlobalExecutionStamp();
ScopedInExecute scopedInExecute(*this);
ScopedExecutionDebug scopedDebug{ m_graph->getNodeGraphDef() };
auto def = node->getDef();
if (def)
{
ExecutionTask newTask{ this, node, *path };
auto tmpExecutor = ExecutorFallback::create(node->getTopology(), newTask);
return newTask.execute(tmpExecutor);
}
else
{
return Status::eFailure;
}
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::initialize_abi
omni::core::Result initialize_abi() noexcept override
{
try
{
if (!m_initStamp.makeSync(m_graph->getTopology()->getStamp()))
{
return omni::core::kResultSuccess;
}
auto traversalFn = [this](INodeGraphDef* nodeGraphDef, const ExecutionPath& path, auto& recursionFn) -> void
{
ExecutionTask info(this, nodeGraphDef->getRoot(), path);
nodeGraphDef->initializeState(info); // may throw
traversal_dfs<VisitFirst>(nodeGraphDef->getRoot(),
[&path, &recursionFn, nodeGraphDef](auto info, INode* prev, INode* curr)
{
auto currNodeGraphDef = curr->getNodeGraphDef();
if (currNodeGraphDef)
{
ExecutionPath newPath{ path, curr }; // may throw
recursionFn(currNodeGraphDef, newPath, recursionFn);
}
info.continueVisit(curr);
});
};
ExecutionPath path;
traversalFn(m_graph->getNodeGraphDef(), path, traversalFn); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::getStateInfo_abi
virtual omni::core::Result getStateInfo_abi(const ExecutionPath* path,
INode* node,
IExecutionStateInfo** out) noexcept override
{
try
{
*out = m_storage.getStateInfo(*path, node); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::getNodeData_abi
virtual omni::core::Result getNodeData_abi(const ExecutionPath* path,
INode* node,
NodeDataKey key,
omni::core::TypeId* outTypeId,
void** outPtr,
uint64_t* outItemSize,
uint64_t* outBufferSize) noexcept override
{
try
{
m_storage.getNodeData(*path, node, key, outTypeId, outPtr, outItemSize, outBufferSize); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::setNodeData_abi
virtual omni::core::Result setNodeData_abi(const ExecutionPath* path,
INode* node,
NodeDataKey key,
omni::core::TypeId typeId,
void* data,
uint64_t dataByteCount,
uint64_t dataItemCount,
NodeDataDeleterFn* deleter) noexcept override
{
try
{
m_storage.setNodeData(*path, node, key, typeId, data, dataByteCount, dataItemCount, deleter); // may throw
return omni::core::kResultSuccess;
}
OMNI_GRAPH_EXEC_CATCH_ABI_EXCEPTION()
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::applyOnEachDef_abi
void applyOnEachDef_abi(IDef* def, IApplyOnEachFunction* callback) noexcept override
{
m_pathCache.applyOnEach(def, *callback);
}
//! @copydoc omni::graph::exec::unstable::IExecutionContext::applyOnEachDefWithName_abi
void applyOnEachDefWithName_abi(const ConstName* name, IApplyOnEachFunction* callback) noexcept override
{
m_pathCache.applyOnEach(*name, *callback);
}
//! Constructor
ExecutionContext(IGraph* graph) noexcept
: m_graph(graph), m_executionStamp(_getNextGlobalExecutionStamp()), m_pathCache(*graph)
{
}
StorageType m_storage; //!< Data store.
private:
static Stamp _getNextGlobalExecutionStamp() noexcept
{
        // since this is private, and will only be accessed indirectly via virtual methods, declaring this
        // function-local static should be ok
static Stamp gExecutionStamp;
gExecutionStamp.next();
return gExecutionStamp;
}
IGraph* m_graph{ nullptr }; //!< Graph associated with this context.
Stamp m_executionStamp; //!< Execution version incremented with each execution.
SyncStamp m_initStamp; //!< State initialization version. Synchronized with graph topology.
detail::ExecutionPathCache m_pathCache; //!< Cache of execution paths for a given definition. Populated lazily and
//!< thread-safe.
std::unordered_map<std::thread::id, size_t> m_contextThreadIds; //!< Unordered map of thread ids that kickstarted
//!< context execution, along with a counter that
//!< tracks the number of times that
//!< nested/recursive execution has been triggered
//!< by those context-starting threads.
carb::thread::Spinlock m_threadIdSpinlock; //!< Mutex to protect m_contextThreadIds from concurrent write
//!< operations.
};
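// A minimal subclassing sketch (illustrative only; `MyStorage` is a hypothetical datastore type
// satisfying the StorageType requirements used above, and `graph` a valid IGraph*). The
// constructor is protected, so a small derived class exposes creation:
//
// class MyContext : public ExecutionContext<MyStorage>
// {
// public:
//     static omni::core::ObjectPtr<MyContext> create(IGraph* graph)
//     {
//         return omni::core::steal(new MyContext(graph));
//     }
//
// private:
//     MyContext(IGraph* graph) noexcept : ExecutionContext<MyStorage>(graph)
//     {
//     }
// };
//
// auto context = MyContext::create(graph);
// Status status = context->execute(); // assumed generated wrapper over execute_abi()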
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 16,459 | C | 38.284009 | 121 | 0.575734 |
omniverse-code/kit/include/omni/graph/exec/unstable/NodePartition.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file NodePartition.h
//!
//! @brief Defines omni::graph::exec::unstable::NodePartition.
#pragma once
#include <omni/graph/exec/unstable/Span.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations
class INode;
//! Type definition used to pass node partitions in the ABI.
using NodePartition = omni::graph::exec::unstable::Span<INode* const>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 921 | C | 24.61111 | 77 | 0.758958 |
omniverse-code/kit/include/omni/graph/exec/unstable/IGlobalPass.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGlobalPass.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGlobalPass.
#pragma once
#include <omni/graph/exec/unstable/IPass.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraphBuilder;
class IGlobalPass;
class IGlobalPass_abi;
//! Base class for global passes.
//!
//! The purpose of a global pass is to perform global transformations on the graph.
//!
//! This transformation category should be considered as a last resort given its global impact on the topology which
//! prevents threading at the pass pipeline level.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IGlobalPass_abi : public omni::core::Inherits<IPass, OMNI_TYPE_ID("omni.graph.exec.unstable.IGlobalPass")>
{
protected:
//! Call from pass pipeline to apply global graph transformations.
virtual OMNI_ATTR("throw_result") omni::core::Result run_abi(IGraphBuilder* builder) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGlobalPass.
using GlobalPassPtr = omni::core::ObjectPtr<IGlobalPass>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGlobalPass.gen.h>
//! @copydoc omni::graph::exec::unstable::IGlobalPass_abi
//!
//! @ingroup groupOmniGraphExecPasses groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGlobalPass
: public omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraphBuilder.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGlobalPass.gen.h>
| 2,291 | C | 30.833333 | 116 | 0.766478 |
omniverse-code/kit/include/omni/graph/exec/unstable/CompactUniqueIndex.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file CompactUniqueIndex.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::CompactUniqueIndex.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/Types.h>
#include <vector>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Registry of unique indexes with recycling of released indexes.
//!
//! Call @ref acquireUniqueIndex() to retrieve a unique index. Indexes are "compact", meaning abandoned indices will be
//! reused. This means that if @ref releaseUniqueIndex() is called with a value of 6, the next call to @ref
//! acquireUniqueIndex() will return 6.
//!
//! This class is useful for assigning a stable unique index to a set of dynamic items.
//!
//! Methods are not thread safe unless otherwise stated.
class CompactUniqueIndex
{
public:
    //! Invalid index is used when no free indexes are available, as well as
    //! a value for reserved elements of the allocation array (an implementation detail)
enum : std::size_t
{
kInvalidIndex = kInvalidNodeIndexInTopology
};
//! Constructor
CompactUniqueIndex() noexcept = default;
//! Destructor
~CompactUniqueIndex() noexcept = default;
//! Returns a unique index.
//!
//! If @ref releaseUniqueIndex() was previously called, the value passed to it will be returned (i.e. the index will
//! be recycled). Otherwise, a new index is allocated that is one greater than the current max index.
//!
//! May throw.
inline std::size_t acquireUniqueIndex();
//! Marks an index as no longer used.
//!
//! A subsequent call to @ref acquireUniqueIndex() will prefer reusing the index given to this method.
//!
//! If @p indexToFree was not previously returned by @ref acquireUniqueIndex, undefined behavior will result.
inline void releaseUniqueIndex(std::size_t indexToFree);
//! Returns the size of the registry.
//!
//! The maximum number of indices is returned, not the current number of "active" indices. Said differently, if
//! @ref acquireUniqueIndex() is called followed by @ref releaseUniqueIndex(), @ref size() would return 1 not 0.
std::size_t size() const
{
return m_allocatedIndexes.size();
}
private:
//! Index registry. Holds acquired and released indexes.
std::vector<std::size_t> m_allocatedIndexes;
//! All released indexes will form a list and m_lastFree points to the last released / first item of the list.
std::size_t m_lastFree{ kInvalidIndex };
};
inline std::size_t CompactUniqueIndex::acquireUniqueIndex()
{
// no free index to recycle, allocate a new one
if (m_lastFree == kInvalidIndex)
{
m_allocatedIndexes.emplace_back(kInvalidIndex);
OMNI_GRAPH_EXEC_ASSERT(m_allocatedIndexes.size() > 0);
return m_allocatedIndexes.size() - 1;
}
// recycle existing index
else
{
OMNI_GRAPH_EXEC_ASSERT(m_lastFree < m_allocatedIndexes.size());
std::size_t recycledIndex = m_lastFree;
m_lastFree = m_allocatedIndexes[recycledIndex];
m_allocatedIndexes[recycledIndex] = kInvalidIndex;
return recycledIndex;
}
}
inline void CompactUniqueIndex::releaseUniqueIndex(std::size_t indexToFree)
{
OMNI_GRAPH_EXEC_ASSERT(indexToFree < m_allocatedIndexes.size());
OMNI_GRAPH_EXEC_ASSERT(m_allocatedIndexes[indexToFree] == kInvalidIndex);
if (indexToFree < m_allocatedIndexes.size() && m_allocatedIndexes[indexToFree] == kInvalidIndex)
{
if (m_lastFree == kInvalidIndex)
m_lastFree = indexToFree;
else
{
m_allocatedIndexes[indexToFree] = m_lastFree;
m_lastFree = indexToFree;
}
}
}
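// A short behavioral sketch (illustrative only):
//
// CompactUniqueIndex indices;
// std::size_t a = indices.acquireUniqueIndex(); // returns 0
// std::size_t b = indices.acquireUniqueIndex(); // returns 1
// indices.releaseUniqueIndex(a);
// std::size_t c = indices.acquireUniqueIndex(); // returns 0 again; released indexes are recycled
// // indices.size() == 2 here: size() reports the maximum number of indexes, not the "active" count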
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 4,278 | C | 32.960317 | 120 | 0.695185 |
omniverse-code/kit/include/omni/graph/exec/unstable/PassRegistry.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file PassRegistry.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassRegistry.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/IGlobalPass.h>
#include <omni/graph/exec/unstable/IPartitionPass.h>
#include <omni/graph/exec/unstable/IPassFactory.h>
#include <omni/graph/exec/unstable/IPassRegistry.h>
#include <omni/graph/exec/unstable/IPopulatePass.h>
#include <omni/graph/exec/unstable/Types.h>
#include <memory>
#include <string>
#include <vector>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Scoped object that registers a pass factory in its constructor and deregisters in the objects destructor.
//!
//! Useful for temporarily registering @ref IPassFactory, for example, in a unit test.
//!
//! When registering a pass in a plugin, rather than using this object, prefer using one of the pass registration macros
//! (e.g. @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS()). See @ref groupOmniGraphExecPassRegistration for a list of
//! registration macros.
class ScopedPassRegistration
{
public:
//! Constructor. Calls @ref IPassRegistry::registerPass().
//!
//! May throw.
ScopedPassRegistration(PassType type,
const char* name,
omni::core::ObjectParam<IPassFactory> factory,
const ConstName& nameToMatch = ConstName(),
PassPriority priority = 0)
: m_type(type), m_name(name)
{
OMNI_THROW_IF_ARG_NULL(name);
_register(factory.get(), nameToMatch, priority);
}
//! Constructor. Calls @ref IPassRegistry::registerPass().
//!
//! The given function should have the signature `IPass*(IGraphBuilder*)`.
//!
//! May throw.
template <typename Fn>
ScopedPassRegistration(
PassType type, const char* name, Fn&& fn, const ConstName& nameToMatch = ConstName(), PassPriority priority = 0)
: m_type(type), m_name(name)
{
OMNI_THROW_IF_ARG_NULL(name);
_register(createPassFactory(std::forward<Fn>(fn)).get(), nameToMatch, priority);
}
//! Constructor. Calls @ref IPassRegistry::registerPass().
//!
//! May throw.
ScopedPassRegistration(PassType type,
std::string&& name,
omni::core::ObjectParam<IPassFactory> factory,
const ConstName& nameToMatch = ConstName(),
PassPriority priority = 0)
: m_type(type), m_name(std::move(name))
{
_register(factory.get(), nameToMatch, priority);
}
//! Destructor. Calls @ref IPassRegistry::deregisterPass().
~ScopedPassRegistration() noexcept
{
if (m_registry)
{
m_registry->deregisterPass(m_type, m_name.c_str());
}
}
private:
CARB_PREVENT_COPY_AND_MOVE(ScopedPassRegistration);
void _register(IPassFactory* factory, const ConstName& nameToMatch, PassPriority priority)
{
OMNI_THROW_IF_ARG_NULL(factory);
m_registry = getPassRegistry();
if (m_registry)
{
getPassRegistry()->registerPass(m_type, m_name.c_str(), factory, nameToMatch, priority);
}
}
IPassRegistry* m_registry;
PassType m_type;
std::string m_name;
};
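// A minimal registration sketch, e.g. for a unit test (illustrative only; MyPopulatePass is a
// hypothetical IPopulatePass implementation whose create() mirrors the pattern used by the
// registration macros below):
//
// {
//     ScopedPassRegistration registration(
//         PassType::ePopulate, "MyPopulatePass",
//         [](IGraphBuilder* builder) { return MyPopulatePass::create(builder); },
//         ConstName("OmniGraph"));
//
//     // ... the pass participates in pass pipeline runs here ...
// } // destructor deregisters the pass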
#ifndef DOXYGEN_BUILD
namespace detail
{
struct PassRegistrationInfo
{
PassType type;
std::string name;
PassFactoryPtr factory;
ConstName nameToMatch;
PassPriority priority;
PassRegistrationInfo(
PassType type_, const char* name_, PassFactoryPtr&& factory_, ConstName&& nameToMatch_, PassPriority priority_)
: type(type_), name(name_), factory(std::move(factory_)), nameToMatch(std::move(nameToMatch_)), priority(priority_)
{
}
};
//! Return the per module (e.g. DLL) list of passes that should be registered.
//!
//! This function is an implementation detail and should not be directly used. Rather, populate this list with one of
//! the following macros:
//!
//! - @ref OMNI_GRAPH_EXEC_REGISTER_PASS()
//!
//! - @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS()
//!
//! This list is cleared after the module developer calls @ref registerModulePasses().
inline std::vector<PassRegistrationInfo>& getModulePassesToRegister()
{
static std::vector<PassRegistrationInfo> sPasses;
return sPasses;
}
//! Return the per module (e.g. DLL) list of passes that should be deregistered.
//!
//! This function is an implementation detail and should not be directly used.
//!
//! This list is populated by @ref registerModulePasses().
inline std::vector<std::unique_ptr<ScopedPassRegistration>>& getModulePassesToDeregister()
{
static std::vector<std::unique_ptr<ScopedPassRegistration>> sPasses;
return sPasses;
}
} // namespace detail
# define OMNI_GRAPH_EXEC_CONCAT_(a_, b_) a_##b_
# define OMNI_GRAPH_EXEC_CONCAT(a_, b_) OMNI_GRAPH_EXEC_CONCAT_(a_, b_)
# define OMNI_GRAPH_EXEC_REGISTER_PASS_(type_, class_, var_, nameToMatch, priority_) \
static auto var_ = []() \
{ \
omni::graph::exec::unstable::detail::getModulePassesToRegister().emplace_back( \
type_, #class_, \
omni::graph::exec::unstable::createPassFactory([](omni::graph::exec::unstable::IGraphBuilder* b) \
{ return class_::create(b); }), \
omni::graph::exec::unstable::ConstName(nameToMatch), priority_); \
return 0; \
}()
#endif
//! @defgroup groupOmniGraphExecPassRegistration Pass Registration
//!
//! @brief Macros to register a plugin's passes.
//!
//! Pass registration macros should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call registration macros from a *.cpp*
//! file rather than a *.h* file.
//!
//! Registration macros only add the pass to a list of passes to register. This is useful if you have passes defined in
//! several **.cpp** files in your module. It is up to the module developer to call @ref registerModulePasses() and
//! @ref deregisterModulePasses() to perform the actual registration.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
//!
//! @ingroup groupOmniGraphExecPasses
//! Adds an @ref omni::graph::exec::unstable::IPass to a list to be registered at the module's (e.g. DLL) startup.
//!
//! @param type_ A @ref omni::graph::exec::unstable::PassType.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IPass.
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! This macro only adds the pass to a list of passes to register. This is useful if you have passes defined in several
//! **.cpp** files in your module. It is up to the module developer to call @ref
//! omni::graph::exec::unstable::registerModulePasses() and
//! @ref omni::graph::exec::unstable::deregisterModulePasses() to perform the actual registration.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_PASS(type_, class_) \
OMNI_GRAPH_EXEC_REGISTER_PASS_(type_, class_, OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", 0)
//! Adds an @ref omni::graph::exec::unstable::IPopulatePass to a list to be registered as type
//! @ref omni::graph::exec::unstable::PassType::ePopulate at the module's (e.g. DLL) startup.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IPopulatePass.
//!
//! @param defNameToPopulate_ The name of the definition, @ref omni::graph::exec::unstable::IDef, this pass should
//! populate. An example would be "OmniGraph".
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
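//!
//! A typical invocation, placed at global scope in a *.cpp* file (illustrative only; @c MyPopulatePass
//! is a hypothetical @ref omni::graph::exec::unstable::IPopulatePass implementation):
//!
//! @code{.cpp}
//! OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(MyPopulatePass, "OmniGraph");
//! @endcode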
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(class_, defNameToPopulate_) \
static_assert(std::is_base_of<omni::graph::exec::unstable::IPopulatePass, class_>::value, \
"Registering a class that doesn't implement IPopulatePass"); \
OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::ePopulate, class_, \
OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), defNameToPopulate_, 0)
//! Adds an @ref omni::graph::exec::unstable::IPartitionPass to a list to be registered as type
//! @ref omni::graph::exec::unstable::PassType::ePartitioning at the module's (e.g. DLL) startup.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IPartitionPass.
//!
//! @param priority_ @ref omni::graph::exec::unstable::PassPriority used to resolve conflicts between passes
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_PARTITION_PASS(class_, priority_) \
static_assert(std::is_base_of<omni::graph::exec::unstable::IPartitionPass, class_>::value, \
"Registering a class that doesn't implement IPartitionPass"); \
OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::ePartitioning, class_, \
OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", priority_)
//! Adds an @ref omni::graph::exec::unstable::IGlobalPass to a list to be registered as type
//! @ref omni::graph::exec::unstable::PassType::eGlobal at the module's (e.g. DLL) startup.
//!
//! @param class_ An implementation of @ref omni::graph::exec::unstable::IGlobalPass.
//!
//! This macro should be called at global scope (not within a function/method).
//!
//! In order to avoid accidentally registering a pass twice, it is recommended to call this macro from a *.cpp* file
//! rather than a *.h* file.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
#define OMNI_GRAPH_EXEC_REGISTER_GLOBAL_PASS(class_) \
static_assert(std::is_base_of<omni::graph::exec::unstable::IGlobalPass, class_>::value, \
"Registering a class that doesn't implement IGlobalPass"); \
OMNI_GRAPH_EXEC_REGISTER_PASS_(omni::graph::exec::unstable::PassType::eGlobal, class_, \
OMNI_GRAPH_EXEC_CONCAT(sRegisterPass, __LINE__), "", 0)
//! Registers the module's @ref omni::graph::exec::unstable::IPass factories with @ref
//! omni::graph::exec::unstable::IPassRegistry.
//!
//! This function should be called in the module's function registered with omni::core::OMNI_MODULE_ON_MODULE_STARTED().
//! This is usually called @c onStarted().
//!
//! When developing a Kit extension, prefer calling @c OMNI_KIT_EXEC_CORE_ON_MODULE_STARTED() rather than this function.
//!
//! May throw.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
inline void registerModulePasses()
{
auto& toRegister = detail::getModulePassesToRegister();
auto& toDeregister = detail::getModulePassesToDeregister();
for (auto& pass : toRegister)
{
toDeregister.emplace_back(std::make_unique<ScopedPassRegistration>(
pass.type, std::move(pass.name), std::move(pass.factory), std::move(pass.nameToMatch), pass.priority));
}
toRegister.clear();
}
//! Deregisters the module's @ref omni::graph::exec::unstable::IPass factories with @ref IPassRegistry.
//!
//! Failure to call this function may lead to crashes during program shutdown.
//!
//! This function should be called in the module's function registered with omni::core::OMNI_MODULE_ON_MODULE_UNLOAD().
//! This is usually called @c onUnload().
//!
//! When developing a Kit extension, prefer calling @c OMNI_KIT_EXEC_CORE_ON_MODULE_UNLOAD() rather than this function.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
inline void deregisterModulePasses() noexcept
{
detail::getModulePassesToDeregister().clear();
}
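// A minimal wiring sketch (illustrative only): a plugin typically calls these from the functions
// it registers with the omni::core module lifecycle macros.
//
// static void onStarted()
// {
//     omni::graph::exec::unstable::registerModulePasses();
// }
//
// static void onUnload()
// {
//     omni::graph::exec::unstable::deregisterModulePasses();
// }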
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 14,452 | C | 43.74613 | 123 | 0.637559 |
omniverse-code/kit/include/omni/graph/exec/unstable/AtomicBackoff.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file AtomicBackoff.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::AtomicBackoff.
#pragma once
#include <carb/Defines.h>
#if CARB_X86_64
# include <immintrin.h>
#endif
#include <thread>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Exponential backoff pattern for waiting with a capped number of pauses
//!
//! This class implements exponential backoff, where each call to pause will
//! cause busy waiting and increment the number of iterations for the next pause call.
//! All of this is capped with a maximum limit of pause calls, after which the wait
//! is considered long and the class switches to yielding.
//!
//! This class is useful when we expect short waits and would like to prevent
//! yielding the compute resources for this short period of time.
//!
//! Methods are not thread safe unless otherwise noted.
class AtomicBackoff
{
public:
//! Default constructor
constexpr AtomicBackoff() noexcept
{
}
// No use in allowing copy and assignment operators for this class
AtomicBackoff(const AtomicBackoff&) = delete;
AtomicBackoff& operator=(const AtomicBackoff&) = delete;
//! Pause execution for a short period of time.
//!
    //! Uses an exponential backoff pattern and an upper wait cap to select between busy waiting and yielding.
void pause() noexcept
{
if (m_loopCount <= kLoopBeforeYield)
{
auto loop = m_loopCount;
while (loop-- > 0)
{
#if CARB_X86_64
_mm_pause();
#elif CARB_AARCH64
// based on TBB machine_pause and BOOST pause.hpp
__asm__ __volatile__("yield" ::: "memory");
#endif
}
m_loopCount *= 2;
}
else
{
std::this_thread::yield();
}
}
//! Clear wait counter
void reset() noexcept
{
m_loopCount = 1;
}
private:
    //! Upper cap for busy waiting. Past this count, the pause call will always yield until the reset method is called.
//!
//! The number must be power of two and is approximately equal to number of pause instructions it would take
//! to do a context switch.
enum : int
{
kLoopBeforeYield = 16
};
    //! Next number of busy loop iterations. Incremented exponentially and capped at kLoopBeforeYield
int m_loopCount{ 1 };
};
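// A short usage sketch (illustrative only; requires <atomic>): spin-wait on a flag that is
// expected to flip quickly, falling back to yielding if the wait turns out to be long.
//
// std::atomic<bool> ready{ false };
// AtomicBackoff backoff;
// while (!ready.load(std::memory_order_acquire))
// {
//     backoff.pause();
// }
// backoff.reset(); // allow the object to be reused for a subsequent wait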
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
| 2,892 | C | 26.552381 | 114 | 0.664592 |
omniverse-code/kit/include/omni/graph/exec/unstable/IExecutionStateInfo.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! State associated with a given execution task
//!
//! @note We separated execution state from the execution graph to allow concurrent and/or nested execution
template <>
class omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>
: public omni::graph::exec::unstable::IExecutionStateInfo_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IExecutionStateInfo")
//! Store a "future" result for this state. The actual computation is running asynchronously outside of execution
//! frame
//!
//! @return \c true if execution state accepts "future" results.
bool storeBackgroundResult(omni::core::ObjectParam<omni::graph::exec::unstable::IBackgroundResult> result);
//! Query used by some executors to determine if computation of a node is necessary
bool needsCompute(const omni::graph::exec::unstable::Stamp& execVersion) noexcept;
//! Set to request computation
void requestCompute() noexcept;
//! Reset request to compute after computation was performed
void setComputed() noexcept;
//! Get current/last exec version set for this node during execution
omni::graph::exec::unstable::SyncStamp getExecutionStamp() noexcept;
//! Set current exec version for this node. Returns true if version wasn't in sync.
bool setExecutionStamp(const omni::graph::exec::unstable::Stamp& execVersion) noexcept;
//! Returns a value from a node's key/value datastore.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is returned in @p outTypeId.
//!
//! @p outPtr will be updated with a pointer to the actual data.
//!
//! @p outItemSize store the size of each item in the returned array.
//!
//! @p outItemCount contains the number of items returned (i.e. the number
//! of items @p outPtr points to). For an array, this will be greater than
//! 1.
//!
//! If the key is not found, @p outPtr is set to @c nullptr and @p
//! outItemCount is set to 0.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
void getNodeData(omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId* outTypeId,
void** outPtr,
uint64_t* outItemSize,
uint64_t* outItemCount);
//! Sets a value in a node's key/value datastore.
//!
//! The key is used as a look-up in the node's key/value datastore.
//!
//! The type of each data item is set with @p typeId.
//!
//! @p data points to an array of data items.
//!
//! @p itemSize is the size of each item in the given array.
//!
//! @p itemCount contains the number of items pointed to by @p data. For an
//! array, this will be greater than 1.
//!
//! @p deleter is a function used to delete @p data when either a new value
//! is set at the key or the context is invalidated. If @p deleter is @c
//! nullptr, it is up to the calling code to manage the lifetime of the @p
//! data.
//!
//! Accessing the node's key/value datastore is not thread safe.
//!
//! An exception is thrown on all other errors.
void setNodeData(omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId typeId,
void* data,
uint64_t itemSize,
uint64_t itemCount,
omni::graph::exec::unstable::NodeDataDeleterFn* deleter);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::storeBackgroundResult(
omni::core::ObjectParam<omni::graph::exec::unstable::IBackgroundResult> result)
{
OMNI_THROW_IF_ARG_NULL(result);
auto return_ = storeBackgroundResult_abi(result.get());
return return_;
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::needsCompute(
const omni::graph::exec::unstable::Stamp& execVersion) noexcept
{
return needsCompute_abi(execVersion);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::requestCompute() noexcept
{
requestCompute_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setComputed() noexcept
{
setComputed_abi();
}
inline omni::graph::exec::unstable::SyncStamp omni::core::Generated<
omni::graph::exec::unstable::IExecutionStateInfo_abi>::getExecutionStamp() noexcept
{
return getExecutionStamp_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setExecutionStamp(
const omni::graph::exec::unstable::Stamp& execVersion) noexcept
{
return setExecutionStamp_abi(execVersion);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::getNodeData(
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId* outTypeId,
void** outPtr,
uint64_t* outItemSize,
uint64_t* outItemCount)
{
OMNI_THROW_IF_ARG_NULL(outTypeId);
OMNI_THROW_IF_ARG_NULL(outPtr);
OMNI_THROW_IF_ARG_NULL(outItemSize);
OMNI_THROW_IF_ARG_NULL(outItemCount);
OMNI_THROW_IF_FAILED(getNodeData_abi(key, outTypeId, outPtr, outItemSize, outItemCount));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IExecutionStateInfo_abi>::setNodeData(
omni::graph::exec::unstable::NodeDataKey key,
omni::core::TypeId typeId,
void* data,
uint64_t itemSize,
uint64_t itemCount,
omni::graph::exec::unstable::NodeDataDeleterFn* deleter)
{
OMNI_THROW_IF_ARG_NULL(data);
OMNI_THROW_IF_FAILED(setNodeData_abi(key, typeId, data, itemSize, itemCount, deleter));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 6,760 | C | 35.945355 | 117 | 0.684911 |
omniverse-code/kit/include/omni/graph/exec/unstable/ConstName.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ConstName.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ConstName.
#pragma once
#include <carb/Defines.h>
#include <carb/cpp/StringView.h>
#include <omni/String.h>
#include <omni/graph/exec/unstable/Types.h>
#include <type_traits>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! An immutable name with fast hash based comparisons.
//!
//! @ref ConstName is a hash of a given string. This hash is used for all comparisons. The original string is also
//! stored in this object.
//!
//! Prefer using the overloaded comparison operators (e.g. ==, <, !=) rather than performing comparison operators with
//! the result of @ref toString() or @ref getString(). Using the comparison operators is considerably faster.
//!
//! Comparison of @ref ConstName with `const char*`, @c omni::string, or @c std::string is potentially slow and as such
//! no comparison operators exist to do so. To compare a @ref ConstName with either a `const char*`, @c omni::string,
//! or @c std::string, you must first explicitly create a @ref ConstName from the string.
//!
//! Classes like `carb::RString` and `pxr::TfToken` also utilize a hash of an original string for fast string
//! comparison. In these classes, the hash is simply passed around but the string is stored in a global lookup table.
//! When the original string is needed, the hash is used as a lookup in the global table.
//!
//! Unlike `carb::RString` and `pxr::TfToken`, @ref ConstName avoids the global table. Rather, the original string is
//! stored alongside the hash. The benefit of avoiding the global table is speed. Performance testing revealed that
//! when constructing names of objects during graph traversal, the cost of multiple threads reading and writing to the
//! global tables storing `carb::RString` and `pxr::TfToken`'s strings was a bottleneck.
//!
//! If you need speed in threaded code, use @ref ConstName. If you want to save space, use `carb::RString` or
//! `pxr::TfToken`.
//!
//! The object is ABI-safe. When returning a @ref ConstName or passing a @ref ConstName to/from an ABI method, prefer
//! using a const pointer rather than passing by value.
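//!
//! A minimal usage sketch (the name strings below are illustrative only):
//!
//! @code{.cpp}
//!     using omni::graph::exec::unstable::ConstName;
//!
//!     ConstName a("myGraph"); // hash computed once, at construction
//!     ConstName b("myGraph");
//!     if (a == b) // fast: compares only the two 64-bit hashes
//!     {
//!         // names match
//!     }
//! @endcode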
class ConstName
{
public:
//! Construct from a static compile time string.
template <std::size_t N>
explicit ConstName(const char (&s)[N]) : m_hash(carb::fnv1aHash(s, N - 1)), m_name(s, N - 1)
{
// N-1 so that we don't hash the terminating \0.
}
//! Construct from a @ref carb::cpp::string_view. This constructor also accepts `const char *`.
explicit ConstName(const carb::cpp::string_view& sv) : m_hash(carb::fnv1aHash(sv.data(), sv.size())), m_name(sv)
{
}
//! Construct from a @ref carb::cpp::string_view with an already computed hash.
explicit ConstName(const carb::cpp::string_view& sv, NameHash hash) : m_hash(hash), m_name(sv)
{
}
//! Construct from empty string.
ConstName() noexcept : m_hash(CARB_HASH_STRING(""))
{
}
//! Implementation detail. Ignore.
struct BogusArg
{
};
//! Construct from a @c std::string.
template <typename T>
explicit ConstName(const T& s, std::enable_if_t<std::is_same<std::string, T>::value, BogusArg> = {})
: m_hash(carb::fnv1aHash(s.data(), s.size())), m_name(s)
{
// the enable_if disambiguates which constructor a const char* arg should use.
// the BogusArg type keeps this constructor from being confused with the one with a hash.
}
//! Construct from a @c std::string and a pre-computed hash.
template <typename T>
explicit ConstName(const T& s, NameHash hash, std::enable_if_t<std::is_same<std::string, T>::value, BogusArg> = {})
: m_hash(hash), m_name(s)
{
// the enable_if disambiguates which constructor a const char* arg should use.
}
//! Returns the string used to generate the hash. For debugging purposes only.
const omni::string& getString() const noexcept
{
return m_name;
}
//! Returns the hash used for comparison.
//!
//! Prefer using the overloaded comparison operators (e.g. <, ==) rather than directly calling this method.
constexpr uint64_t getHash() const noexcept
{
return m_hash;
}
//! Converts to a @c std::string. For debugging purposes only.
//!
//! Prefer using @ref getString() over this method, as @ref getString() does not copy any data.
std::string toString() const
{
return std::string(m_name.data(), m_name.size());
}
//! Returns the name as a null-terminated `const char*`.
const char* c_str() const noexcept
{
return m_name.c_str();
}
private:
uint64_t m_hash;
omni::string m_name;
};
//! Compares two @ref ConstName objects.
//!
//! Returns @c true if the hashes are the same.
constexpr bool operator==(const ConstName& lhs, const ConstName& rhs) noexcept
{
return (lhs.getHash() == rhs.getHash());
}
//! Compares a @ref ConstName with a hash.
//!
//! Returns @c true if the hashes are the same.
constexpr bool operator==(const ConstName& lhs, NameHash rhs) noexcept
{
return (lhs.getHash() == rhs);
}
//! Compares a hash with a @ref ConstName
//!
//! Returns @c true if the hashes are the same.
constexpr bool operator==(NameHash lhs, const ConstName& rhs) noexcept
{
return (lhs == rhs.getHash());
}
//! Compares two @ref ConstName objects.
//!
//! Returns @c true if the hashes are not the same.
constexpr bool operator!=(const ConstName& lhs, const ConstName& rhs) noexcept
{
return (lhs.getHash() != rhs.getHash());
}
//! Compares a @ref ConstName with a hash.
//!
//! Returns @c true if the hashes are not the same.
constexpr bool operator!=(const ConstName& lhs, NameHash rhs) noexcept
{
return (lhs.getHash() != rhs);
}
//! Compares a hash with a @ref ConstName
//!
//! Returns @c true if the hashes are not the same.
constexpr bool operator!=(NameHash lhs, const ConstName& rhs) noexcept
{
return (lhs != rhs.getHash());
}
//! Compares two @ref ConstName objects.
//!
//! Returns @c true if the first hash's value is less than the second's.
constexpr bool operator<(const ConstName& lhs, const ConstName& rhs) noexcept
{
return (lhs.getHash() < rhs.getHash());
}
//! Compares a @ref ConstName with a hash.
//!
//! Returns @c true if the first hash's value is less than the second's.
constexpr bool operator<(const ConstName& lhs, NameHash rhs) noexcept
{
return (lhs.getHash() < rhs);
}
//! Compares a hash with a @ref ConstName
//!
//! Returns @c true if the first hash's value is less than the second's.
constexpr bool operator<(NameHash lhs, const ConstName& rhs) noexcept
{
return (lhs < rhs.getHash());
}
//! Output stream operator overload. Outputs the contents of @p str to the stream @p os.
//!
//! @param os Stream to output the string to.
//! @param str The string to output.
//!
//! @return @p os.
//!
//! @throws std::ios_base::failure if an exception is thrown during output.
inline std::ostream& operator<<(std::ostream& os, const ConstName& str)
{
return (os << str.getString());
}
//! Concatenates @c std::string with a @ref ConstName. Returns a @c omni::string.
inline auto operator+(const std::string& lhs, const ConstName& rhs)
{
return lhs + rhs.getString();
}
//! Concatenates a @ref ConstName with a @c std::string. Returns a @c omni::string.
inline auto operator+(const ConstName& lhs, const std::string& rhs)
{
return lhs.getString() + rhs;
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
namespace std
{
//! Hash specialization for omni::graph::exec::unstable::ConstName
template <>
struct hash<omni::graph::exec::unstable::ConstName>
{
//! Argument type alias.
using argument_type = omni::graph::exec::unstable::ConstName;
//! Result type alias.
using result_type = std::size_t;
//! Hash operator
result_type operator()(const argument_type& x) const noexcept
{
return x.getHash();
}
};
} // namespace std

omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderContext.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Common state for graph builders.
//!
//! *TODO* We will use this class to introduce reporting from graph transformation pipeline back to the application.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>
: public omni::graph::exec::unstable::IGraphBuilderContext_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilderContext")
//! Current construction version.
//!
//! Incremented after each pass pipeline run to include definitions that were created before the run.
omni::graph::exec::unstable::Stamp getConstructionStamp() noexcept;
//! Return owner of all graphs this builder touches
//!
//! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraph* getGraph() noexcept;
    //! To be overridden by an application-specific class to enable reporting from the transformation pipeline.
void report(const char* diagnose) noexcept;
//! Run transformation pipeline
void runTransformations(omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline omni::graph::exec::unstable::Stamp omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilderContext_abi>::getConstructionStamp() noexcept
{
return getConstructionStamp_abi();
}
inline omni::graph::exec::unstable::IGraph* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilderContext_abi>::getGraph() noexcept
{
return getGraph_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>::report(const char* diagnose) noexcept
{
report_abi(diagnose);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>::runTransformations(
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept
{
runTransformations_abi(nodeGraphDef.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL

omniverse-code/kit/include/omni/graph/exec/unstable/IPassRegistry.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Registry (database) of known @ref omni::graph::exec::unstable::IPass objects.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly accessing
//! methods on this interface.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>
: public omni::graph::exec::unstable::IPassRegistry_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPassRegistry")
//! Registers a new pass.
//!
    //! @p passType is the type of pass being registered (e.g. populate, partition, etc).
//!
//! @p name is the name of the pass. This name is used to deregister the pass (see @ref
//! omni::graph::exec::unstable::IPassRegistry::deregisterPass) so the name should be unique within this registry.
//! Must not be `nullptr`.
//!
//! @p factory is the interface that will instantiate the pass. Must not be `nullptr`.
//!
//! Some pass types (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a
//! graph. @p nameToMatch is used to specify the name of the node/definitions the pass wishes to affect. The meaning
//! of this field is pass type dependent. Many pass types ignore @p nameToMatch. Must not be `nullptr`. This method
//! copies the name.
//!
//! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When
//! multiple passes wish to affect an entity, @p priority can be used to resolve the conflict. The meaning of the
//! priority value is pass type specific. Many pass types ignore @p priority.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
bool registerPass(omni::graph::exec::unstable::PassType passType,
const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IPassFactory> factory,
const omni::graph::exec::unstable::ConstName& nameToMatch,
omni::graph::exec::unstable::PassPriority priority);
//! Deregisters a pass.
//!
//! Returns @c true if the pass was found and removed. Returns @c false if the pass could not be found.
//!
//! If multiple passes were registered with the same name, this method will only remove one of them.
//!
//! When deregistering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
bool deregisterPass(omni::graph::exec::unstable::PassType passType, const char* name);
//! Returns a sub-registry containing pass of the specified type.
//!
//! The returned @ref omni::graph::exec::unstable::IPassTypeRegistry will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IPassTypeRegistry* getPassTypeRegistry(omni::graph::exec::unstable::PassType type) noexcept;
//! Returns version stamp for the registry.
//!
    //! Version is incremented each time the content of the registry changes, i.e. a pass is added or removed.
omni::graph::exec::unstable::Stamp getStamp() noexcept;
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::registerPass(
omni::graph::exec::unstable::PassType passType,
const char* name,
omni::core::ObjectParam<omni::graph::exec::unstable::IPassFactory> factory,
const omni::graph::exec::unstable::ConstName& nameToMatch,
omni::graph::exec::unstable::PassPriority priority)
{
OMNI_THROW_IF_ARG_NULL(name);
OMNI_THROW_IF_ARG_NULL(factory);
auto return_ = registerPass_abi(passType, name, factory.get(), &nameToMatch, priority);
return return_;
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::deregisterPass(
omni::graph::exec::unstable::PassType passType, const char* name)
{
OMNI_THROW_IF_ARG_NULL(name);
auto return_ = deregisterPass_abi(passType, name);
return return_;
}
inline omni::graph::exec::unstable::IPassTypeRegistry* omni::core::Generated<
omni::graph::exec::unstable::IPassRegistry_abi>::getPassTypeRegistry(omni::graph::exec::unstable::PassType type) noexcept
{
return getPassTypeRegistry_abi(type);
}
inline omni::graph::exec::unstable::Stamp omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>::getStamp() noexcept
{
return getStamp_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL

omniverse-code/kit/include/omni/graph/exec/unstable/IDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IDef.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IDef.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/SchedulingInfo.h>
#include <omni/graph/exec/unstable/Status.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class IDef;
class IDef_abi;
class ExecutionTask;
//! Base class for all node definitions
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Since definitions can be shared by multiple nodes, and nodes can be executed in parallel, implementations of
//! this interface should expect its methods to be called in parallel.
class IDef_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IDef")>
{
protected:
//! Execute the node definition.
//!
//! See thread safety information in interface description.
virtual Status execute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;
//! Provide runtime information about scheduling constraints particular task have
//!
//! The provided @ref omni::graph::exec::unstable::ExecutionTask can be used to determine the path of the current
//! definition.
//!
//! The given task must not be @c nullptr.
//!
//! See thread safety information in interface description.
virtual SchedulingInfo getSchedulingInfo_abi(OMNI_ATTR("in, not_null, throw_if_null, ref")
const ExecutionTask* info) noexcept = 0;
//! Return unique definition identifier.
//!
//! See thread safety information in interface description.
virtual OMNI_ATTR("ref") const ConstName* getName_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IDef.
using DefPtr = omni::core::ObjectPtr<IDef>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IDef.gen.h>
//! @copydoc omni::graph::exec::unstable::IDef_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IDef : public omni::core::Generated<omni::graph::exec::unstable::IDef_abi>
{
};
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IDef.gen.h>

omniverse-code/kit/include/omni/graph/exec/unstable/IPassRegistry.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IPassRegistry.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IPassRegistry.
#pragma once
#include <omni/graph/exec/unstable/ConstName.h>
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
#include <omni/graph/exec/unstable/Types.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IPassFactory;
class IPassRegistry;
class IPassRegistry_abi;
class IPassTypeRegistry;
//! Registry (database) of known @ref omni::graph::exec::unstable::IPass objects.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly accessing
//! methods on this interface.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
class IPassRegistry_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase, OMNI_TYPE_ID("omni.graph.exec.unstable.IPassRegistry")>
{
protected:
//! Registers a new pass.
//!
    //! @p passType is the type of pass being registered (e.g. populate, partition, etc).
//!
//! @p name is the name of the pass. This name is used to deregister the pass (see @ref
//! omni::graph::exec::unstable::IPassRegistry::deregisterPass) so the name should be unique within this registry.
//! Must not be `nullptr`.
//!
//! @p factory is the interface that will instantiate the pass. Must not be `nullptr`.
//!
//! Some pass types (e.g. populate passes) desire to affect only a subset of the nodes and/or definitions in a
//! graph. @p nameToMatch is used to specify the name of the node/definitions the pass wishes to affect. The meaning
//! of this field is pass type dependent. Many pass types ignore @p nameToMatch. Must not be `nullptr`. This method
//! copies the name.
//!
//! Some pass types (e.g. partition passes) are designed such that only a single pass should affect an entity. When
//! multiple passes wish to affect an entity, @p priority can be used to resolve the conflict. The meaning of the
//! priority value is pass type specific. Many pass types ignore @p priority.
//!
//! When registering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
virtual bool registerPass_abi(PassType passType,
OMNI_ATTR("c_str, not_null, throw_if_null") const char* name,
OMNI_ATTR("not_null, throw_if_null") IPassFactory* factory,
OMNI_ATTR("in, not_null, throw_if_null, ref") const ConstName* nameToMatch,
PassPriority priority) noexcept = 0;
//! Deregisters a pass.
//!
//! Returns @c true if the pass was found and removed. Returns @c false if the pass could not be found.
//!
//! If multiple passes were registered with the same name, this method will only remove one of them.
//!
//! When deregistering passes, prefer using @ref groupOmniGraphExecPassRegistration helpers rather than directly
//! accessing this method.
virtual bool deregisterPass_abi(PassType passType,
OMNI_ATTR("c_str, not_null, throw_if_null") const char* name) noexcept = 0;
//! Returns a sub-registry containing pass of the specified type.
//!
//! The returned @ref omni::graph::exec::unstable::IPassTypeRegistry will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("no_acquire") IPassTypeRegistry* getPassTypeRegistry_abi(PassType type) noexcept = 0;
//! Returns version stamp for the registry.
//!
    //! Version is incremented each time the content of the registry changes, i.e. a pass is added or removed.
virtual Stamp getStamp_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref omni::graph::exec::unstable::IPassRegistry.
using PassRegistryPtr = omni::core::ObjectPtr<IPassRegistry>;
//! Returns the singleton pass registry.
inline IPassRegistry* getPassRegistry() noexcept;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IPassRegistry.gen.h>
//! @copydoc omni::graph::exec::unstable::IPassRegistry_abi
//!
//! @ingroup groupOmniGraphExecPassRegistration groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IPassRegistry
: public omni::core::Generated<omni::graph::exec::unstable::IPassRegistry_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IPassFactory.h>
#include <omni/graph/exec/unstable/IPassTypeRegistry.h>
//! Returns a singleton containing the pass registry.
//!
//! See @ref groupOmniGraphExecPassRegistration for more information about pass registration.
//!
//! @ingroup groupOmniGraphExecPassRegistration
inline omni::graph::exec::unstable::IPassRegistry* omni::graph::exec::unstable::getPassRegistry() noexcept
{
// createType() always calls acquire() and returns an ObjectPtr to make sure release() is called. we don't want to
// hold a ref here to avoid static destruction issues. here we allow the returned ObjectPtr to destruct (after
// calling get()) to release our ref. we know the DLL in which the singleton was created is maintaining a ref and
// will keep the singleton alive for the lifetime of the DLL.
static auto sSingleton = omni::core::createType<IPassRegistry>().get();
return sSingleton;
}
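// A minimal usage sketch, assuming passType and myFactory are a valid PassType
// value and an IPassFactory implementation supplied by the caller (both are
// illustrative, not part of this header):
//
//     auto* registry = omni::graph::exec::unstable::getPassRegistry();
//     registry->registerPass(passType,
//                            "myExt.myPass", // unique name within this registry
//                            myFactory,
//                            omni::graph::exec::unstable::ConstName("myNodeType"),
//                            0); // priority; ignored by many pass types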
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IPassRegistry.gen.h>

omniverse-code/kit/include/omni/graph/exec/unstable/Stamp.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file Stamp.h
//!
//! @brief Defines omni::graph::exec::unstable::Stamp class.
#pragma once
#include <limits>
#include <string>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! Low-level ABI type to represent @ref Stamp.
using Stamp_abi = int16_t;
//! Lazy, light-weight change notification system.
//!
//! The heart of the EF's invalidation system is @ref Stamp and @ref SyncStamp.
//!
//! Stamps track the state/version of a resource. Stamps are implemented as an unsigned number. If the state of a
//! resource changes, the stamp is incremented.
//!
//! Stamps are broken into two parts.
//!
//! The first part is implemented by the @ref Stamp class. As a resource changes, @ref Stamp::next() is called to
//! denote the new state of the resource. @ref Stamp objects are owned by the resource they track.
//!
//! The second part of stamps is implemented by the @ref SyncStamp class. @ref SyncStamp tracks/synchronizes to the
//! state of a @ref Stamp. @ref SyncStamp objects are owned by the entities that wish to utilize the mutating resource.
//!
//! For example, consider the following example showing how a consumer of a resource can uses stamps to detect when a
//! resource has changed and update cached data:
//!
#ifdef OMNI_GRAPH_EXEC_DOC_BUILD
//! @snippet "../tests.cpp/TestStamp.cpp" ef-docs-stamp-example
#endif
//!
//! Default-constructed @ref Stamp and @ref SyncStamp objects are never in sync, meaning reconstruction will always
//! happen at least once.
//!
//! Stamps are a lazy, light-weight alternative to heavier change notification systems such as callbacks.
//!
//! Stamps use unsigned numbers to detect changes in the tracked resource, relying on overflow behavior to wrap the
//! unsigned number. A check for @ref Stamp::kInvalidStamp is performed during this overflow.
//!
//! Because of the limited bit depth of @ref Stamp, it is possible, though improbable, that a resource at stamp *X*,
//! wraps all the way back to *X* before a @ref SyncStamp tries to synchronize with the stamp. In such a case, the @ref
//! SyncStamp will erroneously believe it is in sync with the resource. Again, this is unlikely, though possible.
//!
//! EF makes extensive use of stamps to detect changes in pass registration, graph topology, and graph construction. See
//! @rstref{Graph Invalidation <ef_graph_invalidation>} to understand how @ref Stamp is used for invalidation in EF.
//!
//! This object is ABI-safe.
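//!
//! A minimal sketch of the pattern, assuming hypothetical @c Resource and
//! @c Consumer types (they are illustrative, not part of this header):
//!
//! @code{.cpp}
//!     struct Resource
//!     {
//!         omni::graph::exec::unstable::Stamp version;
//!         void mutate() { version.next(); } // bump on every change
//!     };
//!
//!     struct Consumer
//!     {
//!         omni::graph::exec::unstable::SyncStamp seen;
//!         void update(Resource& r)
//!         {
//!             if (seen.makeSync(r.version)) // true when out of sync; also syncs
//!             {
//!                 // rebuild data cached from the resource here
//!             }
//!         }
//!     };
//! @endcode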
class Stamp
{
public:
//! Anonymous structure to define kInvalidStamp.
enum : Stamp_abi
{
kInvalidStamp = -1 //!< Value for an invalid stamp.
};
//! Bump the stamp
void next() noexcept
{
// depending on usage, we may have to turn this into atomic operator
// for now we don't expect this to be needed
m_generation = (m_generation == std::numeric_limits<decltype(m_generation)>::max()) ? 0 : m_generation + 1;
static_assert(offsetof(Stamp, m_generation) == 0, "unexpected offset");
}
//! Make stamp invalid
void invalidate() noexcept
{
m_generation = kInvalidStamp;
}
//! Check if stamp is valid
bool isValid() const noexcept
{
return m_generation != Stamp::kInvalidStamp;
}
//! Equal operator
bool operator==(const Stamp& rhs) const noexcept
{
return (m_generation == rhs.m_generation);
}
//! Not equal operator
bool operator!=(const Stamp& rhs) const noexcept
{
return (m_generation != rhs.m_generation);
}
//! Convert to string for debugging
std::string toString() const // may throw
{
std::string str;
if (isValid())
{
str = std::to_string(m_generation);
}
else
{
str = "INV";
}
return str;
}
private:
Stamp_abi m_generation{ kInvalidStamp }; //!< Stamp storage
friend class SyncStamp;
};
static_assert(std::is_standard_layout<Stamp>::value, "Stamp is expected to be abi safe");
static_assert(2 == sizeof(Stamp), "Stamp is an unexpected size");
//! Watcher of a mutating resource. Observes a resources @ref Stamp and detects if it has changed.
//!
//! Used always in pair with @ref Stamp class to detect changes in a resource. See @ref Stamp's docs for an
//! explanation on how this object is used during invalidation.
class SyncStamp
{
public:
enum
{
kInvalidStamp = Stamp::kInvalidStamp - 1
};
//! Constructor
SyncStamp() noexcept = default;
//! Construct in sync with given stamp
SyncStamp(Stamp id) noexcept : m_syncStamp(id.m_generation)
{
static_assert(offsetof(SyncStamp, m_syncStamp) == 0, "unexpected offset");
}
    //! Check if this sync stamp is in sync with the given stamp. Always returns false if this instance stores an
    //! invalid stamp.
bool inSync(const Stamp& id) const noexcept
{
if (m_syncStamp == SyncStamp::kInvalidStamp)
return false;
return (m_syncStamp == id.m_generation);
}
//! Check if two sync stamp are in sync
bool inSync(const SyncStamp& syncStamp) const noexcept
{
return (m_syncStamp == syncStamp.m_syncStamp);
}
//! Synchronize this instance with given stamp
void sync(const Stamp& id) noexcept
{
m_syncStamp = id.m_generation;
}
//! Synchronize this instance with given sync stamp
void sync(const SyncStamp& syncStamp) noexcept
{
m_syncStamp = syncStamp.m_syncStamp;
}
    //! Test and synchronize the stamp in one call. After this call, this instance is guaranteed to be in sync with
    //! the given id.
//!
//! @return False if stamps were in sync and true otherwise.
bool makeSync(const Stamp& id) noexcept
{
if (inSync(id))
return false;
sync(id);
return true;
}
//! Is this sync stamp valid
bool isValid() const noexcept
{
return m_syncStamp != SyncStamp::kInvalidStamp;
}
//! Invalidate stored stamp
void invalidate() noexcept
{
m_syncStamp = SyncStamp::kInvalidStamp;
}
//! Explicit call to convert to Stamp class
Stamp toStamp() const noexcept
{
Stamp id;
if (isValid())
id.m_generation = m_syncStamp;
return id;
}
//! Convert to string for debugging
std::string toString() const // may throw
{
std::string str;
if (isValid())
{
str = std::to_string(m_syncStamp);
}
else
{
str = "INV";
}
return str;
}
private:
Stamp_abi m_syncStamp{ kInvalidStamp }; //!< Stamp storage
};
static_assert(std::is_standard_layout<SyncStamp>::value, "SyncStamp is expected to be abi safe");
static_assert(2 == sizeof(SyncStamp), "SyncStamp is an unexpected size");
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

omniverse-code/kit/include/omni/graph/exec/unstable/ScheduleFunction.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ScheduleFunction.h
//!
//! @brief Helpers for @ref omni::graph::exec::unstable::IScheduleFunction.
#pragma once
#include <omni/graph/exec/unstable/IScheduleFunction.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
namespace detail
{
#ifndef DOXYGEN_BUILD
template <typename Fn>
struct ScheduleFunctionHelper
{
static omni::graph::exec::unstable::Status invoke(Fn&& fn)
{
return fn();
}
static auto capture(Fn&& fn)
{
return std::move(fn);
}
};
template <>
struct ScheduleFunctionHelper<IScheduleFunction*&>
{
static omni::graph::exec::unstable::Status invoke(IScheduleFunction* fn)
{
return fn->invoke();
}
static auto capture(IScheduleFunction* fn)
{
return omni::core::borrow(fn);
}
};
template <>
struct ScheduleFunctionHelper<omni::core::ObjectPtr<IScheduleFunction>&>
{
static omni::graph::exec::unstable::Status invoke(omni::core::ObjectPtr<IScheduleFunction>& fn)
{
return fn->invoke();
}
static omni::core::ObjectPtr<IScheduleFunction> capture(omni::core::ObjectPtr<IScheduleFunction>& fn)
{
return std::move(fn);
}
};
#endif
} // namespace detail
//! Helper function to efficiently call an invocable object (i.e. std::function, function ptr, IScheduleFunction*).
template <typename Fn>
omni::graph::exec::unstable::Status invokeScheduleFunction(Fn&& fn)
{
return detail::ScheduleFunctionHelper<Fn>::invoke(std::forward<Fn>(fn));
}
//! Helper function to efficiently capture an invocable object (i.e. std::function, function ptr, IScheduleFunction*).
//!
//! Suitable when capturing the invocable object in a lambda to be passed to a scheduler.
//!
//! Use this function when an @ref IScheduleFunction will be invoked at a later time by a scheduler. This function will
//! call @ref omni::core::IObject::acquire() on the @ref IScheduleFunction.
//!
//! If an invocable object that is not a @ref IScheduleFunction is passed to this function, @c std::move() will be
//! called.
template <typename Fn>
auto captureScheduleFunction(Fn&& fn) -> decltype(detail::ScheduleFunctionHelper<Fn>::capture(std::forward<Fn>(fn)))
{
return detail::ScheduleFunctionHelper<Fn>::capture(std::forward<Fn>(fn));
}
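// A minimal sketch of the intended usage, assuming a hypothetical myScheduler
// object that runs work later on a worker thread (the callable must return
// omni::graph::exec::unstable::Status):
//
//     template <typename Fn>
//     void scheduleLater(Fn&& fn)
//     {
//         // capture once: moves a lambda, or acquires an IScheduleFunction
//         auto captured = captureScheduleFunction(std::forward<Fn>(fn));
//         myScheduler.enqueue([captured]() mutable {
//             Status s = invokeScheduleFunction(captured);
//             (void)s;
//         });
//     }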
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

omniverse-code/kit/include/omni/graph/exec/unstable/INodeGraphDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file INodeGraphDef.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::INodeGraphDef.
#pragma once
#include <omni/graph/exec/unstable/IDef.h>
#include <omni/graph/exec/unstable/INodeFactory.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class INode;
class INodeGraphDef_abi;
class INodeGraphDef;
class ITopology;
//! Graph definition. Defines work to be done as a graph.
//!
//! Nodes within a graph represent work to be done. The actual work to be performed is described in a
//! @rstref{definition <ef_definition>}. Each node wanting to perform work points to a definition.
//!
//! This interface is a subclass of the work definition interface (i.e. @ref omni::graph::exec::unstable::IDef) and
//! extends @ref omni::graph::exec::unstable::IDef with methods to describe work as a graph.
//!
//! Visually:
//!
//! @rst
//!
//! .. image:: /../docs/ef-simple-w-defs.svg
//! :align: center
//!
//! @endrst
//!
//! Above, you can see the two types of definitions: opaque definitions (described by @ref
//! omni::graph::exec::unstable::INodeDef) and graph definitions (described by this interface).
//!
//! Nodes within a graph definition can point to other graph definitions. This composability is where EF gets its *graph
//! of graphs* moniker.
//!
//! Multiple node's in the execution graph can point to the same instance of a graph definition. This saves both space
//! and graph construction time. However, since each graph definition can be shared, its pointer value cannot be used
//! to uniquely identify its location in the graph. To solve this, when traversing/executing a graph definition, an
//! @ref omni::graph::exec::unstable::ExecutionPath is passed (usually via @ref
//! omni::graph::exec::unstable::ExecutionTask::getUpstreamPath()).
//!
//! When defining new graph types, it is common to create a new implementation of this interface. See @ref
//! omni::graph::exec::unstable::NodeGraphDef for an implementation of this interface that can be easily inherited from.
//! See @rstref{Definition Creation <ef_definition_creation>} for a guide on creating your own graph definition.
//!
//! How a graph definition's nodes are traversed during execution is defined by the definition's @ref
//! omni::graph::exec::unstable::IExecutor. See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth
//! guide on how executors and graph definitions work together during execution.
//!
//! See also @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::IExecutor, and @ref
//! omni::graph::exec::unstable::ExecutionTask.
class INodeGraphDef_abi : public omni::core::Inherits<IDef, OMNI_TYPE_ID("omni.graph.exec.unstable.INodeGraphDef")>
{
protected:
//! Return this graph's topology object.
//!
//! Each @ref omni::graph::exec::unstable::INodeGraphDef owns a @ref omni::graph::exec::unstable::ITopology.
//!
//! The returned @ref omni::graph::exec::unstable::ITopology. will *not* have @ref omni::core::IObject::acquire()
//! called before being returned.
virtual OMNI_ATTR("no_acquire") ITopology* getTopology_abi() noexcept = 0;
//! Initialize the state of the graph.
//!
//! It is up to the implementation of the graph type to decide whether this call needs to be propagated over all
//! nodes within the graph or a single shared state is owned by the graph.
//!
//! @param rootTask State will be initialized for every instance of this graph. Root task will provide a path to
//! allow discovery of the state. Must not be @c nullptr.
virtual OMNI_ATTR("throw_result") omni::core::Result
initializeState_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* rootTask) noexcept = 0;
//! Pre-execution call can be used to setup the graph state prior to execution or skip entirely the execution.
//!
//! The given task must not be @c nullptr.
virtual Status preExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;
//! Post-execution call can be used to finalize the execution, e.g. transfer computation results to consumers.
//!
//! The given task must not be @c nullptr.
virtual Status postExecute_abi(OMNI_ATTR("in, out, not_null, throw_if_null, ref") ExecutionTask* info) noexcept = 0;
//! Acquire factory object allowing for allocating new node instances for this node graph def.
//!
    //! The provided factory may be empty when the graph definition doesn't allow allocating new nodes outside of the
    //! pass that constructed the definition in the first place.
//!
//! Accessing node factory is thread-safe but mutating graphs topology is not. This includes node creation.
virtual INodeFactory* getNodeFactory_abi() noexcept = 0;
};
//! Smart pointer managing an instance of @ref INodeGraphDef.
using NodeGraphDefPtr = omni::core::ObjectPtr<INodeGraphDef>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/INodeGraphDef.gen.h>
//! @copydoc omni::graph::exec::unstable::INodeGraphDef_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::INodeGraphDef
: public omni::core::Generated<omni::graph::exec::unstable::INodeGraphDef_abi>
{
public:
//! Access graph's root node.
//!
//! The returned @ref INode. will *not* have @ref omni::core::IObject::acquire() called before being returned.
inline INode* getRoot() noexcept;
};
#include <omni/graph/exec/unstable/ITopology.h>
inline omni::graph::exec::unstable::INode* omni::graph::exec::unstable::INodeGraphDef::getRoot() noexcept
{
return getTopology()->getRoot();
}
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/INodeGraphDef.gen.h>

omniverse-code/kit/include/omni/graph/exec/unstable/NodeDef.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file NodeDef.h
//!
//! @brief Declares @ref omni::graph::exec::unstable::INodeDef.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/INodeDef.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
//! @copydoc omni::graph::exec::unstable::INodeDef
template <typename... Bases>
class NodeDefT : public Implements<Bases...>
{
public:
//! Construct node definition
//!
    //! @param definitionName The definition name is treated as a token that transformation passes can register against.
    //!
    //! *TODO* Replace runtime hashing of the node definition name with compile-time hash generation.
//!
//! May throw.
static omni::core::ObjectPtr<NodeDefT> create(const char* definitionName)
{
OMNI_THROW_IF_ARG_NULL(definitionName);
return omni::core::steal(new NodeDefT(definitionName));
}
protected:
//! Core implementation of @ref omni::graph::exec::unstable::IDef::execute_abi for @ref NodeDef
//!
//! Returns success without executing anything
Status execute_abi(ExecutionTask* info) noexcept override
{
return Status::eSuccess;
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::getSchedulingInfo_abi for @ref NodeDef
//!
//! Returns serial scheduling
SchedulingInfo getSchedulingInfo_abi(const ExecutionTask* info) noexcept override
{
return SchedulingInfo::eSerial;
}
//! Core implementation of @ref omni::graph::exec::unstable::IDef::getName_abi for @ref NodeDef
const ConstName* getName_abi() noexcept override
{
return &m_name;
}
//! Constructor
NodeDefT(const char* definitionName) : m_name{ definitionName }
{
}
private:
ConstName m_name; //!< We associate a name with each opaque definition. This is where we store it.
};
//! Core NodeDef implementation for @ref omni::graph::exec::unstable::INodeDef
using NodeDef = NodeDefT<INodeDef>;
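// A minimal usage sketch (the definition name is illustrative only):
//
//     auto def = omni::graph::exec::unstable::NodeDef::create("myExt.myOpaqueDef");
//     // def->getName() returns a ConstName built from "myExt.myOpaqueDef";
//     // its execute() reports success without performing any work.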
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

omniverse-code/kit/include/omni/graph/exec/unstable/ExecutionPath.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file ExecutionPath.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::ExecutionPath.
#pragma once
#include <omni/graph/exec/unstable/Assert.h>
#include <omni/graph/exec/unstable/SmallStack.h>
#include <omni/graph/exec/unstable/Span.h>
#include <omni/graph/exec/unstable/Types.h>
#include <atomic>
#include <initializer_list>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
class INode;
//! Path representing a unique location of an instantiated node during execution.
//!
//! The @ref omni::graph::exec::unstable::ExecutionPath class is an efficient utility class used to store the *execution
//! path* of an @ref omni::graph::exec::unstable::INode. There's subtlety to what an execution path is. That subtlety is
//! best explained with a diagram:
//!
//! @rst
//!
//! .. image:: /../docs/ef-execution-path-point-k.svg
//! :align: center
//!
//! @endrst
//!
//! Above, nodes are labelled with lower-case letters (e.g. *a*, *b*, etc.). Node can point to either an
//! @ref omni::graph::exec::unstable::INodeDef (which defines opaque computation) or an @ref
//! omni::graph::exec::unstable::INodeGraphDef (which defines its computation with a subgraph). In the diagram above,
//! @ref omni::graph::exec::unstable::INodeGraphDef objects are labelled with upper-case letters (e.g. *X*, *Y*).
//!
//! Observe that @ref omni::graph::exec::unstable::INodeGraphDef *X* is used by both nodes *e* and *f*. This illustrates
//! that @ref omni::graph::exec::unstable::INodeGraphDef objects can be reused within the graph. This makes sense
//! because @ref omni::graph::exec::unstable::INodeGraphDef is defining computational logic and that logic may be needed
//! in multiple places in the graph (e.g. instancing). Likewise, though not illustrated above, @ref
//! omni::graph::exec::unstable::INodeDef objects can be reused.
//!
//! Consider node *k* above (pointed to by the yellow arrow). When *k* is executing, what is its execution path? One
//! way to describe the path is to store the nodes traversed to get to the node. For instance, */a/c/m/n/h/i/k* could be
//! a likely path. Lets call this representation of a path the *traversal path*.
//!
//! EF (via @ref omni::graph::exec::unstable::ExecutionPath) does not store *traversal paths*. Rather, it uses a much
//! more compact representation called the *execution path*. In the diagram above, the execution path for *k* is
//! */f/p/k*.
//!
//! @ref omni::graph::exec::unstable::ExecutionPath stores enough information to **uniquely identify the node**. That's
//! important, since *k* is being shared in the diagram above. The execution path */e/k* points to the same *k* node's
//! memory but logically */e/k* and */f/p/k* are different nodes. This illustrates the main purpose of this object:
//! @ref omni::graph::exec::unstable::INode, @ref omni::graph::exec::unstable::INodeDef, *and* @ref
//! omni::graph::exec::unstable::INodeGraphDef *objects can not be uniquely identified by their raw pointer value.*
//! @ref omni::graph::exec::unstable::ExecutionPath *must be used to uniquely identify a node.*
//!
//! @ref omni::graph::exec::unstable::ExecutionPath is often used as a key in a key/value store to access a node's state
//! data.
//!
//! See @rstref{Execution Concepts <ef_execution_concepts>} for an in-depth guide on how this object is used during
//! execution.
//!
//! Unless otherwise noted, methods in this class are not thread-safe.
//!
//! To reduce the number of new paths we create, we only create a new path when entering a node graph definition
//! execution. All tasks generated for computing nodes within the same node graph will point to the same path.
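//!
//! A minimal sketch, assuming @c nodeF, @c nodeP, and @c nodeK are valid @ref INode
//! pointers kept alive by the caller (they mirror the diagram above):
//!
//! @code{.cpp}
//!     using omni::graph::exec::unstable::ExecutionPath;
//!
//!     ExecutionPath path(nodeF);            // /f
//!     ExecutionPath inner(path, nodeP);     // /f/p
//!     auto hash = inner.getHashWith(nodeK); // key for /f/p/k, no new path allocated
//! @endcode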
class ExecutionPath
{
enum : ExecutionPathHash
{
kEmptyPathHash = 0
};
public:
    //! Default constructor for an empty path. Consider using getEmpty() if you need one.
ExecutionPath() noexcept
{
}
//! Copy constructor
ExecutionPath(const ExecutionPath& src) : m_path{ src.m_path }, m_cachedHash(src.m_cachedHash.load())
{
}
//! Assignment operator
ExecutionPath& operator=(const ExecutionPath& rhs)
{
m_path = rhs.m_path;
m_cachedHash.store(rhs.m_cachedHash.load());
return *this;
}
//! Construct a path for a node (used only at the beginning of the execution).
explicit ExecutionPath(omni::core::ObjectParam<INode> node) noexcept : m_path{ node.get() }
{
OMNI_GRAPH_EXEC_ASSERT(node.get());
}
//! Construct a path from an upstream path and a node. Mostly used when entering a node graph definition.
//!
//! May throw.
ExecutionPath(const ExecutionPath& upPath, omni::core::ObjectParam<INode> node)
: m_path{ upPath.m_path, node.get() }
{
OMNI_GRAPH_EXEC_ASSERT(node.get());
}
//! Convenience method for constructing paths from initializer list.
//!
//! May throw.
explicit ExecutionPath(std::initializer_list<omni::core::ObjectParam<INode>> path)
: m_path{ const_cast<INode**>(reinterpret_cast<INode* const*>(path.begin())),
const_cast<INode**>(reinterpret_cast<const INode* const*>(path.end())) }
{
static_assert(sizeof(INode*) == sizeof(omni::core::ObjectParam<INode>), "unexpected ObjectParam size");
}
private:
ExecutionPath(INode** begin, INode** end) : m_path{ begin, end }
{
}
public:
//! Append a node to the path.
//!
//! The given node is not internally acquired and it is up to the calling code to ensure the node remains alive
//! while in use by this object.
//!
//! May throw.
void push(INode* node)
{
OMNI_GRAPH_EXEC_ASSERT(node);
m_path.push(node);
m_cachedHash = kEmptyPathHash;
}
//! Return a new path with a last node removed
//!
//! May throw.
ExecutionPath copyWithoutTop() const
{
int delta = (m_path.empty() ? 0 : -1);
return ExecutionPath{ const_cast<INode**>(m_path.begin()), const_cast<INode**>(m_path.end() + delta) };
}
//! Compute unique index using pairing function and unique indexes of nodes (within owning topology)
//!
//! This is one strategy to generate a hash for a path. Other hashing strategies can be built outside of the class
//! and used for example when retrieving state from execution context.
//!
//! The result is cached and method is thread-safe.
inline ExecutionPathHash getHash() const noexcept;
//! Compute unique index using pairing function and unique indexes of nodes (within owning topology)
//!
//! @param node Include given node as the last node in the path. This allows us to avoid creating a new path
//! when fetching a state for an execution task.
//!
//! This method is thread-safe.
inline ExecutionPathHash getHashWith(omni::core::ObjectParam<INode> node) const noexcept;
//! Check if path is empty.
bool isEmpty() const noexcept
{
return m_path.empty();
}
//! Access to underlying path container
Span<INode* const> getData() const noexcept
{
return Span<INode* const>{ m_path.begin(), m_path.size() };
}
//! Return the node at the top of the stack.
//!
//! Undefined behavior if the stack is empty.
INode* getTop() const noexcept
{
return m_path.top();
}
//! An instance of an empty path.
//!
    //! @warning A different empty path may be returned over multiple calls of this method. Do not rely on a pointer
    //!          to the returned object to check if another path is the empty path. Rather, use the @ref isEmpty()
    //!          method to check if a path is empty.
static const ExecutionPath& getEmpty() noexcept
{
static ExecutionPath sPath; // unique per DLL
return sPath;
}
//! Pairing function used by the hashing algorithm
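    //!
    //! This is the Cantor pairing function: it folds two values into one, letting a
    //! path's hash be accumulated node by node. For example,
    //! pairingFunction(2, 3) == (2 + 3) * (2 + 3 + 1) / 2 + 3 == 18.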
static ExecutionPathHash pairingFunction(ExecutionPathHash a, ExecutionPathHash b)
{
return static_cast<ExecutionPathHash>(((a + b) * (a + b + 1) * 0.5) + b);
}
private:
// Container for nodes forming the path
//
// We use a small stack to reduce the need of heap allocations.
using PathStorage = detail::SmallStack<INode*>;
PathStorage m_path; //!< Path is defined by a series of nodes that we visit during task generation
//! Cache used to accelerate getHash method. We decided to go with mutable version, since we want to preserve
//! the const correctness in places that receive the path, i.e. we don't want anyone to attempt mutating the
//! path from these places. The alternative would be to not provide caching which means we give up performance
//! and that is not acceptable.
mutable std::atomic<ExecutionPathHash> m_cachedHash{ kEmptyPathHash };
};
static_assert(std::is_standard_layout<ExecutionPath>::value, "ExecutionPath is expected to be abi safe");
static_assert(72 == sizeof(ExecutionPath), "ExecutionPath is an unexpected size");
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// includes needed for method implementations
#include <omni/graph/exec/unstable/INode.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
inline ExecutionPathHash ExecutionPath::getHash() const noexcept
{
if (m_path.empty())
{
return kEmptyPathHash;
}
if (m_cachedHash != kEmptyPathHash)
return m_cachedHash;
ExecutionPathHash result = kEmptyPathHash;
for (INode* node : m_path)
{
result = pairingFunction(result, node->getIndexInTopology());
}
// no need for compare and exchange...all threads that potentially computed this cache will generate the same result
// and since write is atomic, all reads will see correct value
m_cachedHash = result;
return result;
}
inline ExecutionPathHash ExecutionPath::getHashWith(omni::core::ObjectParam<INode> node) const noexcept
{
OMNI_GRAPH_EXEC_ASSERT(node.get());
ExecutionPathHash result = getHash();
return pairingFunction(result, node->getIndexInTopology());
}
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni

omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderNode.gen.h
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Describes a node @ref omni::graph::exec::unstable::IGraphBuilder can manipulate.
//!
//! Only @ref omni::graph::exec::unstable::IGraphBuilder should use @ref omni::graph::exec::unstable::IGraphBuilderNode.
//! One way to think about this interface is that it is a private interface used by
//! @ref omni::graph::exec::unstable::IGraphBuilder to connect instances of @ref omni::graph::exec::unstable::INode.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>
: public omni::graph::exec::unstable::IGraphBuilderNode_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGraphBuilderNode")
//! Adds the given node as a parent (i.e. upstream) of this node.
//!
//! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
//! node persists while in use by this interface.
//!
//! @p parent must not be @c nullptr.
//!
//! It is undefined behavior to add a parent multiple times to a node.
//!
//! This method is not thread safe.
//!
//! May throw.
void _addParent(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent);
//! Removes the given node as a parent.
//!
    //! If the given node is not a parent, this method returns success.
//!
//! This method is not thread safe.
//!
//! May throw.
void _removeParent(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent);
//! Adds the given node as a child (i.e. downstream) of this node.
//!
//! @ref omni::core::IObject::acquire() is not called on the given node. It is up to the calling code to ensure the
//! node persists while in use by this interface.
//!
//! @p child must not be @c nullptr.
//!
//! It is undefined behavior to add a child multiple times to a node.
//!
//! This method is not thread safe.
//!
//! May throw.
void _addChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child);
//! Removes the given node as a child.
//!
    //! If the given node is not a child, this method returns success.
//!
//! This method is not thread safe.
//!
//! May throw.
void _removeChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child);
    //! Remove from the container parent nodes that no longer exist in the current topology, i.e. are invalid.
//!
//! @ref omni::core::IObject::release() is not called on the invalid nodes.
//!
//! This method is not thread safe.
void _removeInvalidParents() noexcept;
    //! Remove from the container child nodes that no longer exist in the current topology, i.e. are invalid.
//!
//! @ref omni::core::IObject::release() is not called on the invalid nodes.
//!
//! This method is not thread safe.
void _removeInvalidChildren() noexcept;
//! Invalidate all children and parents connections by invalidating the topology this node is sync with.
//!
//! This method is thread safe.
void _invalidateConnections() noexcept;
//! Sets the number of parents that are part of a cycle.
//!
//! This method is not thread safe.
void setCycleParentCount(uint32_t count) noexcept;
//! Sets the definition for this node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeGraphDef().
//!
//! This method is not thread safe.
void _setNodeDef(omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept;
//! Sets the definition for this node.
//!
//! If a definition is already set, it will be replaced by the given definition.
//!
//! The given definition may be @c nullptr.
//!
//! @ref omni::core::IObject::acquire() is called on the given pointer.
//!
//! See also @ref omni::graph::exec::unstable::IGraphBuilderNode::_setNodeDef().
//!
//! This method is not thread safe.
void _setNodeGraphDef(omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept;
//! Unsets this node's definition.
//!
//! If the definition is already @c nullptr, this method does nothing.
//!
//! This method is not thread safe.
void _clearDef() noexcept;
//! Access the topology owning this node.
//!
//! The returned @ref omni::graph::exec::unstable::ITopology will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
//!
//! This method is not thread safe.
omni::graph::exec::unstable::ITopology* getTopology() noexcept;
//! Makes the topology valid for the current topology version. Drops all connections if the topology changed.
//!
//! This method is not thread safe.
void validateOrResetTopology() noexcept;
//! Access parent at the given index.
//!
//! If the given index is greater than the parent count, an error is returned.
//!
//! This method is not thread safe.
//!
//! May throw due to internal casting.
//!
//! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getParentCount().
//!
//! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getParents()
//! for a modern C++ wrapper to this method.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraphBuilderNode* getParentAt(uint64_t index);
//! Returns the number of parents.
//!
//! This method is not thread safe.
uint64_t getParentCount() noexcept;
//! Access child at the given index.
//!
//! If the given index is greater than the child count, an error is returned.
//!
//! This method is not thread safe.
//!
//! May throw due to internal casting.
//!
//! See @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildCount().
//!
//! Consider using @ref omni::graph::exec::unstable::IGraphBuilderNode::getChildren()
//! for a modern C++ wrapper to this method.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::graph::exec::unstable::IGraphBuilderNode* getChildAt(uint64_t index);
//! Returns the number of children.
//!
//! This method is not thread safe.
uint64_t getChildCount() noexcept;
//! Returns @c true if the given node is an immediate child of this node.
//!
//! @p node may be @c nullptr.
//!
//! This method is not thread safe.
bool hasChild(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> node) noexcept;
//! Returns @c true if this node is the root of the topology.
//!
//! This method is not thread safe.
bool isRoot() noexcept;
//! Returns the root node of the topology of which this node is a part.
//!
//! This method is not thread safe.
//!
//! May throw due to internal casting.
//!
//! The returned @ref omni::graph::exec::unstable::IGraphBuilderNode will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
omni::core::Result getRoot(omni::graph::exec::unstable::IGraphBuilderNode** out);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_addParent(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent)
{
OMNI_THROW_IF_ARG_NULL(parent);
OMNI_THROW_IF_FAILED(_addParent_abi(parent.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeParent(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> parent)
{
OMNI_THROW_IF_ARG_NULL(parent);
OMNI_THROW_IF_FAILED(_removeParent_abi(parent.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_addChild(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child)
{
OMNI_THROW_IF_ARG_NULL(child);
OMNI_THROW_IF_FAILED(_addChild_abi(child.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeChild(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> child)
{
OMNI_THROW_IF_ARG_NULL(child);
OMNI_THROW_IF_FAILED(_removeChild_abi(child.get()));
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeInvalidParents() noexcept
{
_removeInvalidParents_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_removeInvalidChildren() noexcept
{
_removeInvalidChildren_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_invalidateConnections() noexcept
{
_invalidateConnections_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::setCycleParentCount(uint32_t count) noexcept
{
setCycleParentCount_abi(count);
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_setNodeDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INodeDef> nodeDef) noexcept
{
_setNodeDef_abi(nodeDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_setNodeGraphDef(
omni::core::ObjectParam<omni::graph::exec::unstable::INodeGraphDef> nodeGraphDef) noexcept
{
_setNodeGraphDef_abi(nodeGraphDef.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::_clearDef() noexcept
{
_clearDef_abi();
}
inline omni::graph::exec::unstable::ITopology* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilderNode_abi>::getTopology() noexcept
{
return getTopology_abi();
}
inline void omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::validateOrResetTopology() noexcept
{
validateOrResetTopology_abi();
}
inline omni::graph::exec::unstable::IGraphBuilderNode* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilderNode_abi>::getParentAt(uint64_t index)
{
omni::graph::exec::unstable::IGraphBuilderNode* out;
OMNI_THROW_IF_FAILED(getParentAt_abi(index, &out));
return out;
}
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getParentCount() noexcept
{
return getParentCount_abi();
}
inline omni::graph::exec::unstable::IGraphBuilderNode* omni::core::Generated<
omni::graph::exec::unstable::IGraphBuilderNode_abi>::getChildAt(uint64_t index)
{
omni::graph::exec::unstable::IGraphBuilderNode* out;
OMNI_THROW_IF_FAILED(getChildAt_abi(index, &out));
return out;
}
inline uint64_t omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getChildCount() noexcept
{
return getChildCount_abi();
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::hasChild(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilderNode> node) noexcept
{
return hasChild_abi(node.get());
}
inline bool omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::isRoot() noexcept
{
return isRoot_abi();
}
inline omni::core::Result omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderNode_abi>::getRoot(
omni::graph::exec::unstable::IGraphBuilderNode** out)
{
OMNI_THROW_IF_ARG_NULL(out);
auto return_ = getRoot_abi(out);
return return_;
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IGlobalPass.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for global passes.
//!
//! The purpose of a global pass is to perform global transformations on the graph.
//!
//! This transformation category should be considered a last resort, given that its global impact on the
//! topology prevents threading at the pass pipeline level.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
template <>
class omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi>
: public omni::graph::exec::unstable::IGlobalPass_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IGlobalPass")
//! Call from pass pipeline to apply global graph transformations.
void run(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void omni::core::Generated<omni::graph::exec::unstable::IGlobalPass_abi>::run(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder)
{
OMNI_THROW_IF_FAILED(run_abi(builder.get()));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IPopulatePass.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for populate passes.
//!
//! Register a populate pass with @ref OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(). When registering a pass, a "name to
//! match" is also specified. This name is the name of a node or definition on which the registered pass should
//! populate.
//!
//! Populate passes are typically the first pass type to run in the pass pipeline. When a node is encountered during
//! construction, only a single populate pass will get a chance to populate the newly discovered node. If no pass is
//! registered against the node's name, the node definition's name is used to find a population pass to run.
//!
//! Populate pass is allowed to attach a new definition to a node it runs on.
//!
//! The pass should consider a minimal rebuild of the execution graph topology each time it runs. The pass
//! pipeline leaves the responsibility of deciding whether the pass needs to run to the implementation. At
//! minimum, the pass can verify that the topology of the @ref omni::graph::exec::unstable::NodeGraphDef it
//! generated before is still valid, or that the @ref omni::graph::exec::unstable::NodeDef has not changed.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
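//!
//! Example registration (a minimal sketch; @c MyPopulatePass and the matched name are hypothetical):
//!
//! @code
//!   OMNI_GRAPH_EXEC_REGISTER_POPULATE_PASS(MyPopulatePass, "myNodeOrDefinitionName");
//! @endcode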
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi>
: public omni::graph::exec::unstable::IPopulatePass_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPopulatePass")
//! Call from pass pipeline to apply graph transformations on a given node (definition or topology).
void run(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline void omni::core::Generated<omni::graph::exec::unstable::IPopulatePass_abi>::run(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder,
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
OMNI_THROW_IF_FAILED(run_abi(builder.get(), node.get()));
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/exec/unstable/IGraphBuilderContext.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file IGraphBuilderContext.h
//!
//! @brief Defines @ref omni::graph::exec::unstable::IGraphBuilderContext.
#pragma once
#include <omni/graph/exec/unstable/IBase.h>
#include <omni/graph/exec/unstable/Stamp.h>
namespace omni
{
namespace graph
{
namespace exec
{
namespace unstable
{
// forward declarations needed by interface declaration
class IGraph;
class IGraphBuilder;
class IGraphBuilderContext;
class IGraphBuilderContext_abi;
class INodeGraphDef;
//! Common state for graph builders.
//!
//! *TODO* We will use this class to introduce reporting from the graph transformation pipeline back to the application.
class IGraphBuilderContext_abi
: public omni::core::Inherits<omni::graph::exec::unstable::IBase,
OMNI_TYPE_ID("omni.graph.exec.unstable.IGraphBuilderContext")>
{
protected:
//! Current construction version.
//!
//! Incremented after each pass pipeline run to include definitions that were created before the run.
virtual Stamp getConstructionStamp_abi() noexcept = 0;
//! Returns the owner of all graphs this builder touches.
//!
//! The returned @ref omni::graph::exec::unstable::IGraph will *not* have
//! @ref omni::core::IObject::acquire() called before being returned.
virtual OMNI_ATTR("no_acquire") IGraph* getGraph_abi() noexcept = 0;
//! To be overridden by an application-specific class to enable reporting from the transformation pipeline.
virtual void report_abi(OMNI_ATTR("in, c_str, not_null") const char* diagnose) noexcept = 0;
//! Runs the transformation pipeline.
virtual void runTransformations_abi(OMNI_ATTR("not_null") INodeGraphDef* nodeGraphDef) noexcept = 0;
};
//! Smart pointer managing an instance of @ref IGraphBuilderContext.
using GraphBuilderContextPtr = omni::core::ObjectPtr<IGraphBuilderContext>;
} // namespace unstable
} // namespace exec
} // namespace graph
} // namespace omni
// generated API declaration
#define OMNI_BIND_INCLUDE_INTERFACE_DECL
#include <omni/graph/exec/unstable/IGraphBuilderContext.gen.h>
//! @copydoc omni::graph::exec::unstable::IGraphBuilderContext_abi
//!
//! @ingroup groupOmniGraphExecInterfaces
class omni::graph::exec::unstable::IGraphBuilderContext
: public omni::core::Generated<omni::graph::exec::unstable::IGraphBuilderContext_abi>
{
};
// additional headers needed for API implementation
#include <omni/graph/exec/unstable/IGraph.h>
#include <omni/graph/exec/unstable/IGraphBuilder.h>
#include <omni/graph/exec/unstable/INodeGraphDef.h>
// generated API implementation
#define OMNI_BIND_INCLUDE_INTERFACE_IMPL
#include <omni/graph/exec/unstable/IGraphBuilderContext.gen.h>
omniverse-code/kit/include/omni/graph/exec/unstable/IPartitionPass.gen.h
// Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Base class for graph partitioning passes.
//!
//! Partition passes are typically run just after population passes and only on newly modified
//! @ref omni::graph::exec::unstable::INodeGraphDef objects. The job of a partition pass is to recognize patterns in the
//! newly populated graph and replace them with a new definition or augment existing one.
//!
//! Partition passes can only mutate the graph from the @ref omni::graph::exec::unstable::IPartitionPass::commit method
//! using provided @ref omni::graph::exec::unstable::IGraphBuilder. This will guarantee that the rest of the pipeline
//! is aware of changes made to the graph and avoid potential threading issues.
//!
//! See @ref groupOmniGraphExecPasses for more pass related functionality.
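//!
//! The contract above implies the following call sequence for a participating pass (a sketch of the
//! lifecycle, not additional API; @c pass, @c topology, @c node, and @c builder are placeholders):
//!
//! @code
//!   pass->initialize(topology); // once; returning false opts the pass out
//!   pass->run(node);            // per candidate node, possibly concurrently
//!   pass->commit(builder);      // once, serially; the only point where mutation is allowed
//! @endcode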
template <>
class omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>
: public omni::graph::exec::unstable::IPartitionPass_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::exec::unstable::IPartitionPass")
//! Call from pass pipeline to initialize the pass for @p topology.
//!
//! This interface method implementation can't mutate the given @p topology. Multiple passes can run concurrently on it.
//!
//! Returns @c true if initialization was successful and the pipeline should issue calls to run and commit.
//! Otherwise, this pass will be destroyed and won't participate in partitioning @p topology.
bool initialize(omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology);
//! Call from pass pipeline to discover nodes requiring partitioning.
//!
//! No topology changes are permitted at this point. Multiple passes will get a chance to receive this
//! notification.
//!
//! Call to this method comes from graph traversal that may run multiple passes concurrently.
void run(omni::core::ObjectParam<omni::graph::exec::unstable::INode> node);
//! Call to verify generated partitions and commit new definition/s replacing discovered partitions.
//!
//! Commit of partitions is done serially, in pass priority order; passes with higher priority get their
//! chance first. This is the only partition pass method that can mutate the graph.
void commit(omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder);
};
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::initialize(
omni::core::ObjectParam<omni::graph::exec::unstable::ITopology> topology)
{
OMNI_THROW_IF_ARG_NULL(topology);
auto return_ = initialize_abi(topology.get());
return return_;
}
inline void omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::run(
omni::core::ObjectParam<omni::graph::exec::unstable::INode> node)
{
OMNI_THROW_IF_ARG_NULL(node);
run_abi(node.get());
}
inline void omni::core::Generated<omni::graph::exec::unstable::IPartitionPass_abi>::commit(
omni::core::ObjectParam<omni::graph::exec::unstable::IGraphBuilder> builder)
{
OMNI_THROW_IF_ARG_NULL(builder);
commit_abi(builder.get());
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
omniverse-code/kit/include/omni/graph/ui/IOmniGraphUi.h
// Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
namespace omni
{
namespace graph
{
namespace ui
{
OMNI_DECLARE_INTERFACE(IOmniGraphUi);
class IOmniGraphUi_abi
: public omni::core::Inherits<omni::core::IObject, OMNI_TYPE_ID("omni.graph.ui.IOmniGraphUi")>
{
// No functionality but exists so that we can provide C++ nodes
};
} // namespace ui
} // namespace graph
} // namespace omni
#include "IOmniGraphUi.gen.h" // generated file
omniverse-code/kit/include/omni/graph/image/unstable/any.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <typeinfo>
#include <type_traits>
#include <utility>
#include <carb/logging/Log.h> // defines CARB_LOG_ERROR, used by the any_cast error paths below
namespace omni
{
namespace graph
{
namespace image
{
namespace unstable
{
namespace cpp17
{
// A type-safe container for a single value of any copy-constructible type.
// This class is a not-quite-standards-conformant implementation of std::any.
// It does not support everything std::any supports; the API is limited to the
// subset actually used in the project. For example, it is missing the
// constructors that use std::in_place_type_t<ValueType> disambiguation tags,
// as well as std::make_any. Additionally, this implementation *does not throw
// exceptions* on failed casts; instead, it asserts and logs errors.
// The long-term intention is to move to a C++17 compiler and import the std
// version of this class, removing this code from our codebase. It is therefore
// very important that this class doesn't do anything the std version can't,
// though the opposite is permissible.
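//
// Example usage (a minimal sketch of the subset implemented here):
//
//     cpp17::any a = 42;                      // small, trivially movable types are stored inline
//     if (int* p = cpp17::any_cast<int>(&a))  // pointer form returns nullptr on mismatch
//         *p += 1;
//     a = std::string("hello");               // replaces the held value and its vtable
//     std::string s = cpp17::any_cast<std::string>(a);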
class any final
{
public:
any()
: vtable(nullptr)
{
}
any(const any& rhs)
: vtable(rhs.vtable)
{
if (rhs.has_value())
{
rhs.vtable->copy(rhs.storage, this->storage);
}
}
any(any&& rhs) noexcept
: vtable(rhs.vtable)
{
if (rhs.has_value())
{
rhs.vtable->move(rhs.storage, this->storage);
rhs.vtable = nullptr;
}
}
~any()
{
this->reset();
}
template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
any(ValueType&& value)
{
static_assert(std::is_copy_constructible<
typename std::decay<ValueType>::type>::value,
"T needs to be copy constructible");
this->construct(std::forward<ValueType>(value));
}
any& operator=(const any& rhs)
{
any(rhs).swap(*this);
return *this;
}
any& operator=(any&& rhs) noexcept
{
any(std::move(rhs)).swap(*this);
return *this;
}
template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
any& operator=(ValueType&& value)
{
static_assert(std::is_copy_constructible<
typename std::decay<ValueType>::type>::value,
"T needs to be copy constructible");
any(std::forward<ValueType>(value)).swap(*this);
return *this;
}
void reset() noexcept
{
if (has_value())
{
this->vtable->destroy(storage);
this->vtable = nullptr;
}
}
bool has_value() const noexcept
{
return this->vtable != nullptr;
}
const std::type_info& type() const noexcept
{
return !has_value() ? typeid(void) : this->vtable->type();
}
void swap(any& rhs) noexcept
{
if (this->vtable != rhs.vtable)
{
any tmp(std::move(rhs));
rhs.vtable = this->vtable;
if (this->vtable != nullptr)
{
this->vtable->move(this->storage, rhs.storage);
}
this->vtable = tmp.vtable;
if (tmp.vtable != nullptr)
{
tmp.vtable->move(tmp.storage, this->storage);
tmp.vtable = nullptr;
}
}
else
{
if (this->vtable != nullptr)
{
this->vtable->swap(this->storage, rhs.storage);
}
}
}
private:
union storage_union
{
using stack_storage_t = typename std::aligned_storage<2 * sizeof(void*), std::alignment_of<void*>::value>::type;
void* dynamic;
stack_storage_t stack;
};
struct vtable_type
{
const std::type_info& (*type)() noexcept;
void(*destroy)(storage_union&) noexcept;
void(*copy)(const storage_union& src, storage_union& dest);
void(*move)(storage_union& src, storage_union& dest) noexcept;
void(*swap)(storage_union& lhs, storage_union& rhs) noexcept;
};
template<typename T>
struct vtable_dynamic
{
static const std::type_info& type() noexcept
{
return typeid(T);
}
static void destroy(storage_union& storage) noexcept
{
delete reinterpret_cast<T*>(storage.dynamic);
}
static void copy(const storage_union& src, storage_union& dest)
{
dest.dynamic = new T(*reinterpret_cast<const T*>(src.dynamic));
}
static void move(storage_union& src, storage_union& dest) noexcept
{
dest.dynamic = src.dynamic;
src.dynamic = nullptr;
}
static void swap(storage_union& lhs, storage_union& rhs) noexcept
{
std::swap(lhs.dynamic, rhs.dynamic);
}
};
template<typename T>
struct vtable_stack
{
static const std::type_info& type() noexcept
{
return typeid(T);
}
static void destroy(storage_union& storage) noexcept
{
reinterpret_cast<T*>(&storage.stack)->~T();
}
static void copy(const storage_union& src, storage_union& dest)
{
new (&dest.stack) T(reinterpret_cast<const T&>(src.stack));
}
static void move(storage_union& src, storage_union& dest) noexcept
{
new (&dest.stack) T(std::move(reinterpret_cast<T&>(src.stack)));
destroy(src);
}
static void swap(storage_union& lhs, storage_union& rhs) noexcept
{
storage_union tmp_storage;
move(rhs, tmp_storage);
move(lhs, rhs);
move(tmp_storage, lhs);
}
};
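    // Small-buffer optimization policy: T is stored in the inline stack buffer only if it is
    // nothrow-move-constructible and fits the size and alignment of storage_union::stack;
    // otherwise it is heap-allocated behind storage.dynamic.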
template<typename T>
struct requires_allocation :
std::integral_constant<bool,
!(std::is_nothrow_move_constructible<T>::value
&& sizeof(T) <= sizeof(storage_union::stack) &&
std::alignment_of<T>::value <= std::alignment_of<
storage_union::stack_storage_t>::value)>
{};
template<typename T>
static vtable_type* vtable_for_type()
{
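        // A single vtable instance is shared by every `any` holding the same decayed T;
        // whether T uses heap or inline storage is decided here at compile time.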
using VTableType = typename std::conditional<requires_allocation<T>::value, vtable_dynamic<T>, vtable_stack<T>>::type;
static vtable_type table =
{
VTableType::type,
VTableType::destroy,
VTableType::copy, VTableType::move,
VTableType::swap,
};
return &table;
}
protected:
template<typename T>
friend const T* any_cast(const any* operand) noexcept;
template<typename T>
friend T* any_cast(any* operand) noexcept;
bool is_typed(const std::type_info& t) const
{
return is_same(this->type(), t);
}
static bool is_same(const std::type_info& a, const std::type_info& b)
{
return a == b;
}
template<typename T>
const T* cast() const noexcept
{
return requires_allocation<typename std::decay<T>::type>::value ?
reinterpret_cast<const T*>(storage.dynamic) :
reinterpret_cast<const T*>(&storage.stack);
}
template<typename T>
T* cast() noexcept
{
return requires_allocation<typename std::decay<T>::type>::value ?
reinterpret_cast<T*>(storage.dynamic) :
reinterpret_cast<T*>(&storage.stack);
}
private:
storage_union storage;
vtable_type* vtable;
template<typename ValueType, typename T>
typename std::enable_if<requires_allocation<T>::value>::type
do_construct(ValueType&& value)
{
storage.dynamic = new T(std::forward<ValueType>(value));
}
template<typename ValueType, typename T>
typename std::enable_if<!requires_allocation<T>::value>::type
do_construct(ValueType&& value)
{
new (&storage.stack) T(std::forward<ValueType>(value));
}
template<typename ValueType>
void construct(ValueType&& value)
{
using T = typename std::decay<ValueType>::type;
this->vtable = vtable_for_type<T>();
do_construct<ValueType, T>(std::forward<ValueType>(value));
}
};
namespace detail
{
template<typename ValueType>
inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::true_type)
{
return std::move(*p);
}
template<typename ValueType>
inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::false_type)
{
return *p;
}
}
template<typename ValueType>
inline ValueType any_cast(const any& operand)
{
using T = typename std::add_const<typename std::remove_reference<ValueType>::type>::type;
auto p = any_cast<T>(&operand);
if (p == nullptr)
{
CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
}
return *p;
}
template<typename ValueType>
inline ValueType any_cast(any& operand)
{
using T = typename std::remove_reference<ValueType>::type;
auto p = any_cast<T>(&operand);
if (p == nullptr)
{
CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
}
return *p;
}
template<typename ValueType>
inline ValueType any_cast(any&& operand)
{
using can_move = std::integral_constant<bool,
std::is_move_constructible<ValueType>::value
&& !std::is_lvalue_reference<ValueType>::value>;
using T = typename std::remove_reference<ValueType>::type;
auto p = any_cast<T>(&operand);
if (p == nullptr)
{
CARB_LOG_ERROR("cpp17::any: Unable to cast value of type %s to type %s", operand.type().name(), typeid(T).name());
}
return detail::any_cast_move_if_true<ValueType>(p, can_move());
}
template<typename ValueType>
inline const ValueType* any_cast(const any* operand) noexcept
{
using T = typename std::decay<ValueType>::type;
if (operand && operand->is_typed(typeid(T)))
return operand->cast<ValueType>();
return nullptr;
}
template<typename ValueType>
inline ValueType* any_cast(any* operand) noexcept
{
using T = typename std::decay<ValueType>::type;
if (operand && operand->is_typed(typeid(T)))
return operand->cast<ValueType>();
return nullptr;
}
} // namespace cpp17
} // namespace unstable
} // namespace image
} // namespace graph
} // namespace omni
omniverse-code/kit/include/omni/graph/image/unstable/ComputeParamsBuilder.h
// Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//
// This ABI is unstable and subject to change
/* _ _ _____ ______ _______ __ ______ _ _ _____ ______ ___ _ _____ _____ _____ _ __
| | | |/ ____| ____| /\|__ __| \ \ / / __ \| | | | __ \ / __ \ \ / / \ | | | __ \|_ _|/ ____| |/ /
| | | | (___ | |__ / \ | | \ \_/ / | | | | | | |__) | | | | \ \ /\ / /| \| | | |__) | | | | (___ | ' /
| | | |\___ \| __| / /\ \ | | \ /| | | | | | | _ / | | | |\ \/ \/ / | . ` | | _ / | | \___ \| <
| |__| |____) | |____ / ____ \| | | | | |__| | |__| | | \ \ | |__| | \ /\ / | |\ | | | \ \ _| |_ ____) | . \
\____/|_____/|______| /_/ \_\_| |_| \____/ \____/|_| \_\ \____/ \/ \/ |_| \_| |_| \_\_____|_____/|_|\_|
*/
#pragma once
#include <carb/cudainterop/CudaInterop.h>
#include <carb/renderer/Renderer.h>
#include <omni/fabric/IToken.h>
#include <omni/graph/core/GpuInteropEntryUserData.h>
#include <omni/graph/core/ogn/Database.h>
#include <omni/kit/renderer/IGpuFoundation.h>
#include <rtx/rendergraph/RenderGraphBuilder.h>
#include <rtx/rendergraph/RenderGraphTypes.h>
#include <rtx/resourcemanager/ResourceManager.h>
#include <rtx/resourcemanager/ResourceManagerTypes.h>
#include <rtx/hydra/HydraRenderResults.h>
#include <vector>
#include <tuple>
#include <unordered_map>
#include "any.h"
namespace omni
{
namespace graph
{
namespace image
{
namespace unstable
{
/**
* @brief Structure for holding arbitrary parameters.
*
* The ComputeParams class is used to hold and access arbitrary parameters of various types.
* It allows adding parameters with a specified key and retrieving parameters by their key and type.
*
* Example usage:
* ComputeParams<std::string> params;
* params.add("param1", 42);
* params.add("param2", "hello");
* params.add("param3", 3.14);
*
* int param1Value = params.get<int>("param1");
* std::string param2Value = params.get<std::string>("param2");
* double param3Value = params.get<double>("param3");
*/
template <typename TKey>
class ComputeParams
{
public:
/**
* @brief Constructor.
*
* @param[in] initialCapacity The initial capacity of the container where the parameters are stored.
*/
explicit ComputeParams(std::size_t initialCapacity = 32)
{
m_data.reserve(initialCapacity);
}
/**
* @brief Adds a new entry in the parameter map.
* If an element with the given key is already in the container, it is replaced.
*
* @param[in] key The unique identifier of a parameter value.
* @param[in] value The value of the parameter.
*/
template <typename T>
void add(TKey const& key, T&& value)
{
m_data[key] = std::forward<T>(value);
}
/**
* @brief Gets a value from the parameter map.
*
* The return type must match the type of the value stored for that key. If the type of the stored value does not match the requested type,
* the function logs an error and terminates the program.
*
* @param[in] key The unique identifier of the parameter.
* @return Returns the value of the specified type.
* @exception std::out_of_range if there is no data for the given key
*/
template <typename T>
T const& get(TKey const& key) const
{
return cpp17::any_cast<T const&>(m_data.at(key));
}
/**
* @brief Gets a value from the parameter map.
*
* If there is no value for the given key, or if the value type is different from the requested type, returns nullptr.
*
* @param[in] key The unique identifier of the parameter.
* @return Returns the value of the specified type.
*/
template<typename T>
T const* tryGet(TKey const& key) const noexcept
{
if (m_data.find(key) != m_data.end())
{
auto const& a = m_data.at(key);
return cpp17::any_cast<T const>(&a);
}
return nullptr;
}
/**
* @brief Checks if a key is present in the container.
*
* @param[in] key The unique identifier of the parameter.
* @return Returns true if the key is found, otherwise returns false.
*/
bool hasKey(TKey const& key) const noexcept
{
return m_data.find(key) != m_data.end();
}
private:
std::unordered_map<TKey, cpp17::any> m_data;
};
/**
* @brief A builder class for constructing instances of the ComputeParams class.
*
* The ComputeParamsBuilder provides a fluent interface for building ComputeParams objects.
* It allows setting multiple parameters of different types and creates a ComputeParams object
 * with the provided parameter values. The object is intended to be used from OmniGraph nodes.
*
* Example usage:
* ComputeParams<std::string> params = ComputeParamsBuilder<std::string>{gpu, rp, db}
* .addValue("param1", 42)
* .addValue("param2", "hello")
* .addValue("param3", 3.14)
* .build();
*
 * The main purpose of this builder is to facilitate the scheduling of CUDA tasks. For this purpose,
* the builder provides some specialized APIs for adding input AOVs, for allocating new AOVs and for
* scheduling the work on the GPU.
*
* Once built, the ComputeParams instance can be passed to the CUDA task using the scheduleCudaTask function.
*
* Alternatively, the builder can directly build the params and schedule the CUDA task in the same chain of method calls.
*
* Example usage for scheduling CUDA tasks:
* ComputeParamsBuilder<std::string>{ gpu, rp, db }
* .addValue("multiplier", db.inputs.multiplier())
* .addInputTexture("inputAOV", db.inputs.inputAOV(),
* [](cudaMipmappedArray_t cudaPtr, carb::graphics::TextureDesc const* desc, ComputeParams<std::string>& params)
* {
* params.add("width", desc->width);
* params.add("height", desc->height);
* })
* .addOutputTexture("outputAOV", db.inputs.outputAOV(), db.inputs.width(), db.inputs.height(), "TestTexture")
* .scheduleCudaTask("TestCudaTask",
* [](ComputeParams<std::string>* data, cudaStream_t stream)
* {
* auto multiplier = data->get<float>("multiplier");
* auto inputAOV = data->get<cudaMipmappedArray_t>("inputAOV");
* auto outputAOV = data->get<cudaMipmappedArray_t>("outputAOV");
* auto width = data->get<uint32_t>("width");
* auto height = data->get<uint32_t>("height");
*
* // ... call CUDA kernel
* });
*
 * Note: after building the ComputeParams or scheduling the CUDA task, the ComputeParamsBuilder instance cannot be modified anymore.
* This restriction is imposed in order to provide the guarantee that the AOV pointers built by the builder are not invalidated by further
* modifications of the render product through the builder API.
*/
template<typename TKey>
class ComputeParamsBuilder
{
public:
/**
* @brief Callback invoked after extracting a texture AOV with a given token from the Render Product.
* Allows adding additional parameters from the TextureDesc of the AOV, such as the width, height, etc.
*/
using TextureDescCallback = std::function<void(cudaMipmappedArray_t, carb::graphics::TextureDesc const*, ComputeParams<TKey>&)>;
/**
* @brief Callback invoked after extracting a buffer AOV with a given token from the Render Product.
* Allows adding additional parameters from the BufferDesc of the AOV, such as the bufferSize.
*/
using BufferDescCallback = std::function<void(cudaMipmappedArray_t, carb::graphics::BufferDesc const*, ComputeParams<TKey>&)>;
/**
* @brief Callback invoked by the builder to explicitly allocate an AOV.
* Allows explicit control over the parameters of the new AOV.
*/
using AllocateAOVCallback = std::function<cudaMipmappedArray_t(ComputeParams<TKey> const&,
omni::graph::core::GpuFoundationsInterfaces*,
omni::usd::hydra::HydraRenderProduct*,
rtx::resourcemanager::SyncScopeId,
uint32_t)>;
/**
* @brief Callback invoked by the builder after the allocation of a new AOV.
* Allows setting fabric attributes of the node.
*/
using PostAllocateAOVCallback = std::function<void(cudaMipmappedArray_t)>;
private:
enum class AOVType
{
Buffer,
Texture
};
template <typename T>
struct AOVParams
{
AOVType aovType;
T key;
omni::fabric::TokenC aovToken;
union
{
carb::graphics::TextureDesc textureDesc;
carb::graphics::BufferDesc bufferDesc;
};
//cpp17::any callback; // cpp17::any does not seem to work with lambdas. std::any should work, but can't use it for now
// TODO: find a better way to define the callbacks
TextureDescCallback inputTextureCb;
BufferDescCallback inputBufferCb;
AllocateAOVCallback allocateAOVCb;
PostAllocateAOVCallback postAllocateCb;
};
public:
/**
* @brief Constructor.
*
* @param[in] gpu The GPU interface.
* @param[in] rp The render product on which the CUDA computation is applied.
* @param[in] db The node database.
* @param[in] initialCapacity The initial capacity of the container where the parameters are stored.
*/
ComputeParamsBuilder(
omni::graph::core::GpuFoundationsInterfaces* gpu,
omni::usd::hydra::HydraRenderProduct* rp,
omni::graph::core::ogn::OmniGraphDatabase& db,
std::size_t initialCapacity = 32)
: m_gpu(gpu)
, m_rp(rp)
, m_db(db)
, m_data(initialCapacity)
, m_deviceIndex(s_invalidDeviceIndex)
, m_buildError(BuildError::NoError)
{
m_inputAOVs.reserve(initialCapacity);
m_outputAOVs.reserve(initialCapacity);
m_outputAOVTokens.reserve(initialCapacity);
}
/**
* @brief Set a parameter value of type T with the specified key.
*
* @param[in] key The unique identifier of the parameter value.
* @param[in] value The parameter value.
* @return Returns an r-value reference of the ComputeParamsBuilder.
*/
template<typename TValue>
ComputeParamsBuilder&& addValue(TKey const& key, TValue&& value) &&
{
m_data.add(key, value);
return std::move(*this);
}
/**
* @brief Add a texture AOV. The AOV is expected to be already allocated.
*
* @param[in] key The unique identifier of the parameter value.
* @param[in] aovToken The token used to extract the AOV from the Render Product.
* @param[in] cb Callback which can be used to add additional parameters from the texture description of the AOV.
* @return Returns an r-value reference of the ComputeParamsBuilder.
*/
ComputeParamsBuilder&& addInputTexture(TKey const& key, omni::fabric::TokenC aovToken, TextureDescCallback const& cb = nullptr) &&
{
m_inputAOVs.emplace_back(AOVParams<TKey>
{
/*.aovType =*/ AOVType::Texture,
/*.key =*/ key,
/*.aovToken =*/ aovToken
});
m_inputAOVs.back().inputTextureCb = cb;
return std::move(*this);
}
/**
* @brief Add a buffer AOV. The AOV is expected to be already allocated.
*
* @param[in] key The unique identifier of the parameter value.
* @param[in] aovToken The token used to extract the AOV from the Render Product.
* @param[in] cb Callback which can be used to add additional parameters from the buffer description of the AOV.
* @return Returns an r-value reference of the ComputeParamsBuilder.
*/
ComputeParamsBuilder&& addInputBuffer(TKey const& key, omni::fabric::TokenC aovToken, BufferDescCallback const& cb = nullptr) &&
{
m_inputAOVs.emplace_back(AOVParams<TKey>
{
/*.aovType =*/ AOVType::Buffer,
/*.key =*/ key,
/*.aovToken =*/ aovToken
});
m_inputAOVs.back().inputBufferCb = cb;
return std::move(*this);
}
/**
* @brief Allocates a new texture AOV which will be filled in the CUDA task.
*
* @param[in] key The unique identifier of the parameter value.
* @param[in] aovToken The token used to extract the AOV from the Render Product.
* @param[in] width The width of the allocated texture.
* @param[in] height The height of the allocated texture.
* @param[in] format The texture format.
* @param[in] debugName A string used to identify the new AOV in the debugger.
* @param[in] postAllocateCb A callback which allows the binding of the new AOV to an output attribute of a node.
* @return Returns an r-value reference of the ComputeParamsBuilder.
*/
ComputeParamsBuilder&& addOutputTexture(
TKey const& key,
omni::fabric::TokenC aovToken,
uint32_t width,
uint32_t height,
carb::graphics::Format format,
const char* debugName,
PostAllocateAOVCallback postAllocateCb = nullptr) &&
{
m_outputAOVTokens.emplace_back(aovToken);
m_outputAOVs.emplace_back(AOVParams<TKey>
{
/*.aovType =*/ AOVType::Texture,
/*.key =*/ key,
/*.aovToken =*/ aovToken
});
m_outputAOVs.back().textureDesc =
carb::graphics::TextureDesc{ carb::graphics::TextureType::e2D,
carb::graphics::kTextureUsageFlagShaderResourceStorage |
carb::graphics::kTextureUsageFlagShaderResource |
carb::graphics::kTextureUsageFlagExportShared,
width,
height,
1,
1,
format,
carb::graphics::SampleCount::e1x,
{ { 0, 0, 0, 0 }, nullptr },
debugName,
nullptr };
m_outputAOVs.back().postAllocateCb = postAllocateCb;
return std::move(*this);
}
/**
* @brief Allocates a new texture AOV which will be filled in the CUDA task.
*
* Allows explicit definition and initialization of the AOV.
*
* @param[in] key The unique identifier of the parameter value.
* @param[in] aovToken The token used to extract the AOV from the Render Product.
* @param[in] callback The callback where the initialization of the AOV must be done.
* @return Returns an r-value reference of the ComputeParamsBuilder.
*/
ComputeParamsBuilder&& addOutputTexture(
TKey const& key,
omni::fabric::TokenC aovToken,
AllocateAOVCallback callback) &&
{
m_outputAOVTokens.emplace_back(aovToken);
m_outputAOVs.emplace_back(AOVParams<TKey>
{
/*.aovType =*/ AOVType::Texture,
/*.key =*/ key,
/*.aovToken =*/ aovToken
});
m_outputAOVs.back().allocateAOVCb = callback;
return std::move(*this);
}
/**
* @brief Allocates a new buffer AOV which will be filled in the CUDA task.
*
* @param[in] key The unique identifier of the parameter value.
* @param[in] aovToken The token used to extract the AOV from the Render Product.
* @param[in] bufferSize The size of the allocated buffer.
* @param[in] debugName A string used to identify the new AOV in the debugger.
* @param[in] postAllocateCb A callback which allows the binding of the new AOV to an output attribute of a node.
* @return Returns an r-value reference of the ComputeParamsBuilder.
*/
ComputeParamsBuilder&& addOutputBuffer(
TKey const& key,
omni::fabric::TokenC aovToken,
uint32_t bufferSize,
const char* debugName,
PostAllocateAOVCallback postAllocateCb = nullptr) &&
{
m_outputAOVTokens.emplace_back(aovToken);
m_outputAOVs.emplace_back(AOVParams<TKey>
{
/*.aovType =*/ AOVType::Buffer,
/*.key =*/ key,
/*.aovToken =*/ aovToken,
});
m_outputAOVs.back().bufferDesc = carb::graphics::BufferDesc
{
carb::graphics::kBufferUsageFlagExportShared,
bufferSize,
debugName,
nullptr
};
m_outputAOVs.back().postAllocateCb = postAllocateCb;
return std::move(*this);
}
/**
* @brief Allocates a new buffer AOV which will be filled in the CUDA task.
*
* Allows explicit definition and initialization of the AOV.
*
* @param[in] key The unique identifier of the parameter value.
* @param[in] aovToken The token used to extract the AOV from the Render Product.
* @param[in] callback The callback where the initialization of the AOV must be done.
* @return Returns an r-value reference of the ComputeParamsBuilder.
*/
ComputeParamsBuilder&& addOutputBuffer(
TKey const& key,
omni::fabric::TokenC aovToken,
AllocateAOVCallback callback) &&
{
m_outputAOVTokens.emplace_back(aovToken);
m_outputAOVs.emplace_back(AOVParams<TKey>
{
/*.aovType =*/ AOVType::Buffer,
/*.key =*/ key,
/*.aovToken =*/ aovToken
});
m_outputAOVs.back().allocateAOVCb = callback;
return std::move(*this);
}
/**
* @brief Builds the final ComputeParams structure.
*
* The following steps are performed when building the final params, in this order:
* 1) The new AOVs are added to the Render Product
* 2) The input AOVs (already allocated) are extracted from the Render Product
* 3) The new AOVs are allocated
*
* This sequence ensures that all the AOV pointers in the resulting ComputeParams structure are valid.
*
* No further values can be added to the builder after this call.
*
* @return Returns the constructed ComputeParams object.
*/
ComputeParams<TKey> build()
{
if (!isValid())
return m_data;
// append the outputs first to avoid further structural changes which will invalidate the AOV pointers
if (!m_outputAOVTokens.empty())
appendUninitializedRenderVars(m_rp, m_outputAOVTokens);
auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(m_gpu->resourceManagerContext);
auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(m_gpu->resourceManager);
for (auto const& aovParams : m_inputAOVs)
{
auto aovPtr = omni::usd::hydra::getRenderVarFromProduct(m_rp, aovParams.aovToken.token);
if (aovPtr == nullptr)
{
m_db.logWarning("Missing RenderVar %s", m_db.tokenToString(aovParams.aovToken));
continue;
}
if (aovPtr->resource == nullptr)
{
m_db.logWarning("RenderVar %s is an invalid resource.", m_db.tokenToString(aovParams.aovToken));
continue;
}
const uint32_t deviceIndex = rm->getFirstDeviceIndex(*rmCtx, *aovPtr->resource);
if (m_deviceIndex != deviceIndex)
{
if (m_deviceIndex == s_invalidDeviceIndex)
{
m_deviceIndex = deviceIndex;
}
else
{
m_db.logWarning("RenderVar %s has an inconsistend device index (%lu/%lu).",
m_db.tokenToString(aovParams.aovToken), static_cast<unsigned long>(deviceIndex),
static_cast<unsigned long>(m_deviceIndex));
m_buildError = BuildError::InconsistentDeviceIndex;
break;
}
}
switch (aovParams.aovType)
{
case AOVType::Texture:
{
auto cudaPtr = (cudaMipmappedArray_t)rm->getCudaMipmappedArray(*aovPtr->resource, m_deviceIndex);
if (aovParams.inputTextureCb)
{
const auto textureDesc = rm->getTextureDesc(*rmCtx, aovPtr->resource);
aovParams.inputTextureCb(cudaPtr, textureDesc, m_data);
}
m_data.add(aovParams.key, cudaPtr);
break;
}
case AOVType::Buffer:
{
auto cudaPtr = (cudaMipmappedArray_t)rm->getCudaMipmappedArray(*aovPtr->resource, m_deviceIndex);
if (aovParams.inputBufferCb)
{
const auto bufferDesc = rm->getBufferDesc(aovPtr->resource);
aovParams.inputBufferCb(cudaPtr, bufferDesc, m_data);
}
m_data.add(aovParams.key, cudaPtr);
break;
}
}
}
if (m_buildError != BuildError::NoError)
{
return m_data;
}
// the device index is not set, get the index of the first device render variable
if (m_deviceIndex == s_invalidDeviceIndex)
{
for (uint32_t i = 0; i < m_rp->renderVarCnt; i++)
{
if (m_rp->vars[i].isRpResource)
{
m_deviceIndex = rm->getFirstDeviceIndex(*rmCtx, *m_rp->vars[i].resource);
break;
}
}
// the render product has no device render variable, use the render product device index
if (m_deviceIndex == s_invalidDeviceIndex)
{
m_deviceIndex = carb::graphics::DeviceMask(m_rp->deviceMask).getFirstIndex();
}
}
auto iRenderGraph = reinterpret_cast<gpu::rendergraph::IRenderGraph*>(m_gpu->renderGraph);
auto rgBuilder = reinterpret_cast<rtx::rendergraph::RenderGraphBuilder*>(m_gpu->renderGraphBuilder);
auto renderGraph = iRenderGraph->getRenderGraph(m_deviceIndex);
const auto syncScopeId = rgBuilder->getRenderGraphDesc(*renderGraph).syncScopeId;
for (auto const& aovParams : m_outputAOVs)
{
cudaMipmappedArray_t ptr = 0;
if (aovParams.allocateAOVCb != nullptr)
{
// custom AOV allocation
auto cb = aovParams.allocateAOVCb;
ptr = cb(m_data, m_gpu, m_rp, syncScopeId, m_deviceIndex);
}
else
{
// standard AOV allocation
switch (aovParams.aovType)
{
case AOVType::Texture:
ptr = (cudaMipmappedArray_t)allocateRenderVarTexture(
m_gpu, m_rp, syncScopeId, m_deviceIndex, aovParams.aovToken, aovParams.textureDesc);
break;
case AOVType::Buffer:
ptr = (cudaMipmappedArray_t)allocateRenderVarBuffer(
m_gpu, m_rp, syncScopeId, m_deviceIndex, aovParams.aovToken, aovParams.bufferDesc);
break;
default:
break;
}
}
m_data.add(aovParams.key, ptr);
if (aovParams.postAllocateCb != nullptr)
aovParams.postAllocateCb(ptr);
}
return m_data;
}
/**
* @brief Builds the final ComputeParams structure and schedules the CUDA task.
*
* No further values can be added to the builder after this call.
*
* @param[in] renderOpName The name of the render op in the render graph.
* @param[in] computeCuda The entry point to the CUDA computation kernel.
* @return Returns true if the builder is valid and the CUDA task was scheduled, otherwise returns false.
*/
bool scheduleCudaTask(const char* renderOpName,
void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream)) &&;
/**
* @brief Builds the final ComputeParams structure and schedules the CUDA task.
*
* Allows validation of the ComputeParams before scheduling the CUDA task.
* No further values can be added to the builder after this call.
*
* @param[in] renderOpName The name of the render op in the render graph.
* @param[in] computeCuda The entry point to the CUDA computation kernel.
* @param[in] validateCb A callback to validate the parameters before scheduling the CUDA task.
* @return Returns true if the builder is valid, the params are validated by the user callback
* and the CUDA task was scheduled, otherwise returns false.
*/
bool scheduleCudaTask(
const char* renderOpName,
void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream),
bool (*validateCb)(ComputeParams<TKey> const& params)
) &&;
private:
bool isValid() const
{
if (!m_gpu || !m_rp || (m_buildError != BuildError::NoError))
{
CARB_LOG_WARN_ONCE("ComputeParamsBuilder: invalid RenderProduct inputs");
return false;
}
return true;
}
static void appendUninitializedRenderVars(omni::usd::hydra::HydraRenderProduct* rp, const std::vector<omni::fabric::TokenC>& renderVarTokens)
{
using TokenC = omni::fabric::TokenC;
// filter already existing aovs
std::vector<TokenC> filteredRenderVarTokens;
filteredRenderVarTokens.reserve(renderVarTokens.size());
for (const auto token : renderVarTokens)
{
if (!omni::usd::hydra::getRenderVarFromProduct(rp, token.token))
{
filteredRenderVarTokens.emplace_back(token);
}
}
using namespace omni::usd::hydra;
const size_t numRenderVars = filteredRenderVarTokens.size();
HydraRenderVar* newVars = new HydraRenderVar[rp->renderVarCnt + numRenderVars];
const size_t varArraySize = sizeof(HydraRenderVar) * rp->renderVarCnt;
std::memcpy(newVars, rp->vars, varArraySize);
for (size_t i = 0; i < numRenderVars; ++i)
{
newVars[rp->renderVarCnt + i].aov = filteredRenderVarTokens[i].token;
newVars[rp->renderVarCnt + i].isRpResource = true;
newVars[rp->renderVarCnt + i].resource = nullptr;
newVars[rp->renderVarCnt + i].isBufferRpResource = true;
newVars[rp->renderVarCnt + i].isFrameLifetimeRsrc = false;
}
delete[] rp->vars;
rp->vars = newVars;
rp->renderVarCnt += static_cast<uint32_t>(numRenderVars);
}
static uint64_t allocateRenderVarBuffer(omni::graph::core::GpuFoundationsInterfaces* gpu,
omni::usd::hydra::HydraRenderProduct* rp,
rtx::resourcemanager::SyncScopeId syncScopeId,
uint32_t deviceIndex,
omni::fabric::TokenC deviceRenderVarToken,
carb::graphics::BufferDesc const& buffDesc)
{
auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(gpu->resourceManagerContext);
auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(gpu->resourceManager);
using namespace carb::graphics;
auto deviceRenderVar = omni::usd::hydra::getRenderVarFromProduct(rp, deviceRenderVarToken.token);
CARB_ASSERT(deviceRenderVar && deviceRenderVar->isRpResource && deviceRenderVar->isBufferRpResource);
if (!deviceRenderVar || !deviceRenderVar->isRpResource || !deviceRenderVar->isBufferRpResource)
{
return 0;
}
const rtx::resourcemanager::ResourceDesc resourceDesc = { rtx::resourcemanager::ResourceMode::ePooled,
MemoryLocation::eDevice,
rtx::resourcemanager::ResourceCategory::eOtherBuffer,
rtx::resourcemanager::kResourceUsageFlagCudaShared,
DeviceMask::getDeviceMaskFromIndex(deviceIndex),
deviceIndex,
syncScopeId };
CARB_ASSERT(!deviceRenderVar->resource);
if (!deviceRenderVar->resource)
{
deviceRenderVar->resource = buffDesc.size > 0 ? rm->getResourceFromBufferDesc(*rmCtx, buffDesc, resourceDesc) : nullptr;
deviceRenderVar->isFrameLifetimeRsrc = true;
}
auto cudaDevicePointer =
deviceRenderVar->resource ? rm->getCudaDevicePointer(*deviceRenderVar->resource, deviceIndex) : nullptr;
return reinterpret_cast<uint64_t>(cudaDevicePointer);
}
static uint64_t allocateRenderVarTexture(omni::graph::core::GpuFoundationsInterfaces* gpu,
omni::usd::hydra::HydraRenderProduct* rp,
rtx::resourcemanager::SyncScopeId syncScopeId,
uint32_t deviceIndex,
omni::fabric::TokenC deviceRenderVarToken,
carb::graphics::TextureDesc const& texDesc)
{
auto rmCtx = reinterpret_cast<rtx::resourcemanager::Context*>(gpu->resourceManagerContext);
auto rm = reinterpret_cast<rtx::resourcemanager::ResourceManager*>(gpu->resourceManager);
using namespace carb::graphics;
auto deviceRenderVar = omni::usd::hydra::getRenderVarFromProduct(rp, deviceRenderVarToken.token);
CARB_ASSERT(deviceRenderVar && deviceRenderVar->isRpResource && deviceRenderVar->isBufferRpResource);
if (!deviceRenderVar || !deviceRenderVar->isRpResource || !deviceRenderVar->isBufferRpResource)
{
return 0;
}
const rtx::resourcemanager::ResourceDesc resDesc =
{
rtx::resourcemanager::ResourceMode::ePooled,
carb::graphics::MemoryLocation::eDevice,
rtx::resourcemanager::ResourceCategory::eOtherTexture,
rtx::resourcemanager::kResourceUsageFlagCudaShared,
carb::graphics::DeviceMask::getDeviceMaskFromIndex(deviceIndex),
deviceIndex,
syncScopeId
};
deviceRenderVar->resource = rm->getResourceFromTextureDesc(*rmCtx, texDesc, resDesc);
deviceRenderVar->isBufferRpResource = false;
deviceRenderVar->isFrameLifetimeRsrc = true;
auto cudaDevicePointer = deviceRenderVar->resource ? rm->getCudaDevicePointer(*deviceRenderVar->resource, deviceIndex) : nullptr;
return reinterpret_cast<uint64_t>(cudaDevicePointer);
}
protected:
omni::graph::core::GpuFoundationsInterfaces* m_gpu;
omni::usd::hydra::HydraRenderProduct* m_rp;
omni::graph::core::ogn::OmniGraphDatabase& m_db;
std::vector<AOVParams<TKey>> m_inputAOVs;
std::vector<AOVParams<TKey>> m_outputAOVs;
std::vector<omni::fabric::TokenC> m_outputAOVTokens;
ComputeParams<TKey> m_data;
uint32_t m_deviceIndex;
enum class BuildError
{
NoError,
InconsistentDeviceIndex
} m_buildError;
static constexpr uint32_t s_invalidDeviceIndex = std::numeric_limits<uint32_t>::max();
};
namespace
{
// Temporary structure for passing the params and the computeCuda callback to the cudaInterop lambda.
template <typename TParams>
struct UserData
{
ComputeParams<TParams>* params;
void (*computeCuda)(ComputeParams<TParams>* data, cudaStream_t stream);
};
} // namespace
/**
 * @brief Schedule a CUDA task on the post render graph.
 *
 * @param[in] gpu The GPU interface.
 * @param[in] rp The render product on which the CUDA computation is applied.
 * @param[in] deviceIndex The index of the GPU device on which the task is scheduled.
 * @param[in] computeParams The parameters of the computation.
 * @param[in] renderOpName The name of the render op in the render graph.
 * @param[in] computeCuda The CUDA computation entry point.
 */
template <typename TParams>
inline void scheduleCudaTask(omni::graph::core::GpuFoundationsInterfaces* gpu,
omni::usd::hydra::HydraRenderProduct* rp,
uint32_t deviceIndex,
ComputeParams<TParams> const& computeParams,
const char* renderOpName,
void (*computeCuda)(ComputeParams<TParams>* data, cudaStream_t stream))
{
CARB_ASSERT(gpu);
CARB_ASSERT(rp);
CARB_ASSERT(computeCuda);
auto iRenderGraph = reinterpret_cast<gpu::rendergraph::IRenderGraph*>(gpu->renderGraph);
auto rgBuilder = reinterpret_cast<rtx::rendergraph::RenderGraphBuilder*>(gpu->renderGraphBuilder);
auto renderGraph = iRenderGraph->getRenderGraph(deviceIndex);
// Heap-allocate the params and the callback wrapper; ownership passes to the
// CUDA interop callback below, which deletes both once the task has run.
// Note: computeParams is a const reference, so this is necessarily a copy.
auto computeParamsPtr = new ComputeParams<TParams>(computeParams);
auto cudaData = new UserData<TParams>{ computeParamsPtr, computeCuda };
const rtx::rendergraph::ParamBlockRefs paramBlockRefs{ 0, {} };
rtx::rendergraph::RenderOpParams* renderOpParams = rgBuilder->createParams(*renderGraph, paramBlockRefs);
rtx::rendergraph::addRenderOpLambdaEx(
*rgBuilder, *renderGraph, renderOpName, renderOpParams, rtx::rendergraph::kRenderOpFlagNoAnnotation,
        [cudaData](rtx::rendergraph::RenderOpInputCp renderOpInput)
{
renderOpInput->graphicsMux->cmdCudaInterop(
renderOpInput->commandList,
[](cudaStream_t cudaStream, void* userData) -> void
{
auto cudaData = reinterpret_cast<UserData<TParams>*>(userData);
auto params = cudaData->params;
auto computeCuda = cudaData->computeCuda;
computeCuda(params, cudaStream);
delete params;
delete cudaData;
},
cudaData, carb::graphicsmux::CudaInteropFlags::eNone);
});
}
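// Illustrative usage sketch (not part of this header): `MyKey` and
// `myCudaEntryPoint` are hypothetical names for a node's key type and its
// kernel launcher; `params` is a ComputeParams<MyKey> built elsewhere.
//
// static void myCudaEntryPoint(ComputeParams<MyKey>* data, cudaStream_t stream)
// {
//     // launch CUDA kernels on `stream` that consume the AOV data in `data`
// }
//
// scheduleCudaTask(gpu, rp, deviceIndex, params, "MyRenderOp", myCudaEntryPoint);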
template <typename TKey>
inline bool ComputeParamsBuilder<TKey>::scheduleCudaTask(
    const char* renderOpName, void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream)) &&
{
if (!isValid())
return false;
auto computeParams = build();
omni::graph::image::unstable::scheduleCudaTask(m_gpu, m_rp, m_deviceIndex, computeParams, renderOpName, computeCuda);
return true;
}
template <typename TKey>
inline bool ComputeParamsBuilder<TKey>::scheduleCudaTask(
const char* renderOpName,
void (*computeCuda)(ComputeParams<TKey>* data, cudaStream_t stream),
bool (*validateCb)(ComputeParams<TKey> const& params)) &&
{
if (!isValid())
return false;
auto computeParams = build();
if (validateCb && validateCb(computeParams))
{
omni::graph::image::unstable::scheduleCudaTask(
m_gpu, m_rp, m_deviceIndex, computeParams, renderOpName, computeCuda);
return true;
}
return false;
}
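// Note that both scheduleCudaTask overloads above are rvalue-qualified (&&),
// so they consume the builder. An illustrative call, assuming `builder` is a
// fully configured ComputeParamsBuilder<MyKey> (its construction is defined
// earlier in this header and not shown here):
//
// bool scheduled = std::move(builder).scheduleCudaTask("MyRenderOp", myCudaEntryPoint);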
} // namespace unstable
} // namespace image
} // namespace graph
} // namespace omni
| 36,245 | C | 40.329532 | 154 | 0.597186 |
omniverse-code/kit/include/omni/graph/core/IVariable.gen.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <omni/core/ResultError.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
/**
* Object that contains a value that is local to a graph, available from anywhere in the graph
*/
template <>
class omni::core::Generated<omni::graph::core::IVariable_abi> : public omni::graph::core::IVariable_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::core::IVariable")
/**
     * Returns the name of the variable object. The name is derived by
     * removing any variable-specific prefixes from the underlying attribute.
*
* @return The name of the variable.
*/
const char* getName() noexcept;
/**
     * Returns the full path to the variable's underlying attribute
*
* @return The full usd path of the variable
*/
const char* getSourcePath() noexcept;
/**
* Returns the type of the variable
*
* @return The type of the variable
*/
omni::graph::core::Type getType() noexcept;
/**
* Returns the category of the variable
*
* @return The category of the variable, or an empty string if it is not set.
*/
const char* getCategory() noexcept;
/**
* Sets the category of the variable
*
* @param[in] category A string representing the variable category
*/
void setCategory(const char* category) noexcept;
/**
* Gets the display name of the variable. By default the display name is the same
* as the variable name.
*
* @return The display name of the variable, or an empty string if it is not set.
*/
const char* getDisplayName() noexcept;
/**
* Set the display name of the variable.
*
* @param[in] displayName A string to set the display name to
*/
void setDisplayName(const char* displayName) noexcept;
/**
* Get the tooltip used for the variable.
*
     * @return The tooltip of the variable, or an empty string if none is set.
*/
const char* getTooltip() noexcept;
/**
* Set the tooltip used for the variable
*
* @param[in] toolTip A description used as a tooltip.
*/
void setTooltip(const char* toolTip) noexcept;
/**
* Get the scope of the variable. The scope determines which graphs can read and write the value.
*
* @return The scope of the variable.
*/
omni::graph::core::eVariableScope getScope() noexcept;
/**
* Sets the scope of the variable.
*
* @param[in] scope The scope to set on the variable.
*/
void setScope(omni::graph::core::eVariableScope scope) noexcept;
/**
* Returns whether this variable is valid
*
* @return True if the variable is valid, false otherwise
*/
bool isValid() noexcept;
};
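// Illustrative usage sketch (assumes `variable` is a valid
// omni::core::ObjectPtr<omni::graph::core::IVariable> obtained from a graph):
//
// if (variable->isValid())
// {
//     const char* name = variable->getName();
//     omni::graph::core::Type type = variable->getType();
//     variable->setTooltip("Hypothetical description shown in the UI");
// }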
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getName() noexcept
{
return getName_abi();
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getSourcePath() noexcept
{
return getSourcePath_abi();
}
inline omni::graph::core::Type omni::core::Generated<omni::graph::core::IVariable_abi>::getType() noexcept
{
return getType_abi();
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getCategory() noexcept
{
return getCategory_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setCategory(const char* category) noexcept
{
setCategory_abi(category);
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getDisplayName() noexcept
{
return getDisplayName_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setDisplayName(const char* displayName) noexcept
{
setDisplayName_abi(displayName);
}
inline const char* omni::core::Generated<omni::graph::core::IVariable_abi>::getTooltip() noexcept
{
return getTooltip_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setTooltip(const char* toolTip) noexcept
{
setTooltip_abi(toolTip);
}
inline omni::graph::core::eVariableScope omni::core::Generated<omni::graph::core::IVariable_abi>::getScope() noexcept
{
return getScope_abi();
}
inline void omni::core::Generated<omni::graph::core::IVariable_abi>::setScope(omni::graph::core::eVariableScope scope) noexcept
{
setScope_abi(scope);
}
inline bool omni::core::Generated<omni::graph::core::IVariable_abi>::isValid() noexcept
{
return isValid_abi();
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 5,283 | C | 26.664921 | 127 | 0.680485 |
omniverse-code/kit/include/omni/graph/core/GpuInteropEntryUserData.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <unordered_map>
#include <carb/graphics/GraphicsTypes.h>
namespace omni
{
namespace usd
{
namespace hydra
{
struct HydraRenderProduct;
} // namespace hydra
} // namespace usd
namespace graph
{
namespace core
{
// Less than ideal, but GpuInteropCudaEntryUserData and GpuInteropRpEntryUserData
// are filled out by RenderGraphScheduler.cpp and passed to the top-level GpuInterop
// CudaEntry or RenderProductEntry nodes that mark the head of the post-processing
// chain for the RTX Hydra renderer.
struct GpuInteropCudaResourceData
{
void* cudaResource;
uint32_t width;
uint32_t height;
uint32_t depthOrArraySize;
uint16_t mipCount;
carb::graphics::Format format;
bool isBuffer;
uint32_t deviceIndex;
};
typedef std::unordered_map<std::string, GpuInteropCudaResourceData> GpuInteropCudaResourceMap;
struct GpuInteropCudaEntryUserData
{
void* cudaStream;
double simTime;
double hydraTime;
int64_t frameId;
int64_t externalTimeOfSimFrame;
GpuInteropCudaResourceMap cudaRsrcMap;
};
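// Illustrative sketch of how a CUDA entry callback might consume this data.
// The callback shape and the "ldrColor" AOV name are assumptions for the
// example, not an API defined in this header.
//
// void onCudaEntry(GpuInteropCudaEntryUserData* entry)
// {
//     auto it = entry->cudaRsrcMap.find("ldrColor");
//     if (it != entry->cudaRsrcMap.end() && !it->second.isBuffer)
//     {
//         // it->second.cudaResource refers to a width x height texture
//         // resident on the device identified by it->second.deviceIndex
//     }
// }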
// GPU Foundations initialization inside Kit remains problematic; until that is
// resolved, we pass the required interfaces through this struct as raw pointers.
struct GpuFoundationsInterfaces
{
void* graphics;
void* graphicsMux;
void* deviceGroup;
void* renderGraphBuilder;
void* resourceManager;
void* resourceManagerContext;
void* renderGraph;
};
struct GpuInteropRpEntryUserData
{
double simTime;
double hydraTime;
GpuFoundationsInterfaces* gpu;
omni::usd::hydra::HydraRenderProduct* rp;
};
} // namespace core
} // namespace graph
} // namespace omni
| 2,028 | C | 23.154762 | 94 | 0.757396 |
omniverse-code/kit/include/omni/graph/core/OgnWrappers.h | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#pragma message ("OgnWrappers.h is deprecated - include the specific omni/graph/core/ogn/ file you require")
// This file contains simple interface classes which wrap data in the OGN database for easier use
//
// WARNING: These interfaces are subject to change without warning and are only meant to be used by generated code.
// If you call them directly you may have to modify your code when they change.
//
#include <omni/graph/core/CppWrappers.h>
#include <omni/graph/core/iComputeGraph.h>
#include <omni/graph/core/TemplateUtils.h>
#include <omni/graph/core/ogn/Types.h>
#include <omni/graph/core/ogn/StringAttribute.h>
#include <omni/graph/core/ogn/ArrayAttribute.h>
#include <omni/graph/core/ogn/SimpleAttribute.h>
| 1,181 | C | 44.461537 | 115 | 0.777307 |
omniverse-code/kit/include/omni/graph/core/StringUtils.h | // Copyright (c) 2021-2021 NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
// This file contains helpful string utilities that can be implemented entirely as inlines, preventing the
// need for a bunch of tiny little extensions.
#include <carb/logging/Log.h>
#include <omni/graph/core/Type.h>
#include <omni/graph/core/PreUsdInclude.h>
#include <pxr/base/tf/token.h>
#include <omni/graph/core/PostUsdInclude.h>
#include <string>
#include <vector>
// snprintf becomes _snprintf on Windows, but we want to use std::snprintf
#ifdef HAVE_SNPRINTF
# undef snprintf
#endif
// The namespace is merely to ensure uniqueness. There's nothing inherently associated with OmniGraph in here
namespace omni {
namespace graph {
namespace core {
// ==============================================================================================================
inline void tokenizeString(const char* input, const std::string& separator, std::vector<pxr::TfToken> & output)
{
std::string remainder = input;
size_t separatorLocation = remainder.find(separator);
while (separatorLocation != std::string::npos)
{
std::string tokenStr = remainder.substr(0, separatorLocation);
output.emplace_back(tokenStr);
remainder = remainder.substr(separatorLocation + separator.size());
separatorLocation = remainder.find(separator);
}
if (!remainder.empty())
{
output.emplace_back(remainder);
}
}
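// Example: given input "inputs:a.b" and separator ".", tokenizeString appends
// the tokens ["inputs:a", "b"] to the output vector.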
// ==============================================================================================================
// This is like tokenizeString, except returns a vector of strings, not tokens
inline std::vector<std::string> splitString(const char* input, char delimiter)
{
    std::vector<std::string> strings;
    const char* segmentStart = input;
    for (const char* pos = input; *pos; ++pos)
    {
        // Close off the current segment each time the delimiter is found
        if (*pos == delimiter)
        {
            strings.emplace_back(segmentStart, pos);
            segmentStart = pos + 1;
        }
    }
    // Append the final segment, if any (this also covers input with no delimiter)
    if (*segmentStart)
        strings.emplace_back(segmentStart);
    return strings;
}
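// Example: splitString("a,b,c", ',') returns {"a", "b", "c"}. Empty segments
// produced by leading or repeated delimiters are preserved; an empty trailing
// segment is dropped.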
// ==============================================================================================================
// Return a formatted string.
// On error will return an empty string.
template <typename... Args>
std::string formatString(const char* format, Args&&... args)
{
int fmtSize = std::snprintf(nullptr, 0, format, args...) + 1; // Extra space for '\0'
if (fmtSize <= 0)
{
CARB_LOG_ERROR("Error formating string %s", format);
return {};
}
auto size = static_cast<size_t>(fmtSize);
auto buf = std::make_unique<char[]>(size);
std::snprintf(buf.get(), size, format, args...);
return std::string(buf.get(), buf.get() + size - 1); // We don't want the '\0' inside
}
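// Example: formatString("%s has %d attributes", "myNode", 3) returns the
// std::string "myNode has 3 attributes".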
// Early versions of GCC emit a warning if the "format" string passed to std::snprintf
// does not contain any formatting characters.
// Specialize the function for this use case to prevent that warning.
inline std::string formatString(const char* format)
{
return std::string(format);
}
} // namespace core
} // namespace graph
} // namespace omni
| 3,565 | C | 33.288461 | 128 | 0.62216 |
omniverse-code/kit/include/omni/graph/core/IConstBundle.gen.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
//! @file
//!
//! @brief This file was generated by <i>omni.bind</i>.
#include <omni/core/Interface.h>
#include <omni/core/OmniAttr.h>
#include <functional>
#include <type_traits>
#include <utility>
#ifndef OMNI_BIND_INCLUDE_INTERFACE_IMPL
//! Provide read-only access to recursive bundles.
//!
template <>
class omni::core::Generated<omni::graph::core::IConstBundle2_abi> : public omni::graph::core::IConstBundle2_abi
{
public:
OMNI_PLUGIN_INTERFACE("omni::graph::core::IConstBundle2")
//! Return true if this bundle is valid, false otherwise.
bool isValid() noexcept;
//! Return the context of this bundle.
omni::graph::core::GraphContextObj getContext() noexcept;
//! Return Handle to this bundle. Invalid handle is returned if this bundle is invalid.
omni::graph::core::ConstBundleHandle getConstHandle() noexcept;
//! Return full path of this bundle.
carb::flatcache::PathC getPath() noexcept;
//! Return name of this bundle
omni::graph::core::NameToken getName() noexcept;
//! Return handle to the parent of this bundle. Invalid handle is returned if bundle has no parent.
omni::graph::core::ConstBundleHandle getConstParentBundle() noexcept;
//! @brief Get the names and types of all attributes in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when names and types are `nullptr`. When in this mode, *nameAndTypeCount
//! will be populated with the number of attributes in the bundle.
//!
    //! **Get mode** is enabled when names or types is not `nullptr`. Upon entering the function, *nameAndTypeCount
    //! stores the number of entries in names and types. In **Get mode**, if names is not `nullptr`, the names array
    //! is populated with attribute names; if types is not `nullptr`, the types array is populated with attribute types.
//!
//! @param names The names of the attributes.
//! @param types The types of the attributes.
//! @param nameAndTypeCount must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getAttributeNamesAndTypes(omni::graph::core::NameToken* const names,
omni::graph::core::Type* const types,
size_t* const nameAndTypeCount) noexcept;
//! @brief Get read only handles to all attributes in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when attributes is `nullptr`. When in this mode, *attributeCount
//! will be populated with the number of attributes in the bundle.
//!
//! **Get mode** is enabled when attributes is not `nullptr`. Upon entering the function, *attributeCount
//! stores the number of entries in attributes.
    //! In **Get mode**, the attributes array is populated with the attribute handles in the bundle.
//!
//! @param attributes The buffer to store handles of the attributes in this bundle.
//! @param attributeCount Size of attributes buffer. Must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstAttributes(omni::graph::core::ConstAttributeDataHandle* const attributes,
size_t* const attributeCount) noexcept;
//! @brief Search for read only handles of the attribute in this bundle by using attribute names.
//!
//! @param names The name of the attributes to be searched for.
//! @param nameCount Size of names buffer.
//! @param attributes The buffer to store handles of the attributes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstAttributesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept;
//! @brief Get read only handles to all child bundles in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when bundles is `nullptr`. When in this mode, *bundleCount
//! will be populated with the number of bundles in the bundle.
//!
//! **Get mode** is enabled when bundles is not `nullptr`. Upon entering the function, *bundleCount
//! stores the number of entries in bundles.
    //! In **Get mode**, the bundles array is populated with the child bundle handles in the bundle.
//!
//! @param bundles The buffer to save child bundle handles.
//! @param bundleCount Size of the bundles buffer. Must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstChildBundles(omni::graph::core::ConstBundleHandle* const bundles,
size_t* const bundleCount) noexcept;
//! @brief Get read only handle to child bundle by index.
//!
//! @param bundleIndex Bundle index in range [0, childBundleCount).
//! @param bundle Handle under the index. If bundle index is out of range, then invalid handle is returned.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstChildBundle(size_t bundleIndex,
omni::graph::core::ConstBundleHandle* const bundle) noexcept;
//! @brief Lookup for read only handles to child bundles under specified names.
//!
//! For children that are not found invalid handles are returned.
//!
//! @param names The names of the child bundles in this bundle.
//! @param nameCount The number of child bundles to be searched.
//! @param foundBundles Output handles to the found bundles.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstChildBundlesByName(const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstBundleHandle* const foundBundles) noexcept;
    //! Return a const bundle handle to the metadata storage of this bundle.
omni::graph::core::ConstBundleHandle getConstMetadataStorage() noexcept;
//! @brief Get the names and types of all bundle metadata fields in this bundle.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
//! **Query mode** is enabled when fieldNames and fieldTypes are `nullptr`. When in this mode, *fieldCount
//! will be populated with the number of metadata fields in this bundle.
//!
//! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function,
    //! *fieldCount stores the number of entries in fieldNames and fieldTypes.
//!
    //! In **Get mode**, if fieldNames is not `nullptr`, the fieldNames array is populated with field names;
    //! if fieldTypes is not `nullptr`, the fieldTypes array is populated with field types.
//!
//! @param fieldNames Output field names in this bundle.
//! @param fieldTypes Output field types in this bundle.
//! @param fieldCount must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getBundleMetadataNamesAndTypes(omni::graph::core::NameToken* const fieldNames,
omni::graph::core::Type* const fieldTypes,
size_t* const fieldCount) noexcept;
//! @brief Search for field handles in this bundle by using field names.
//!
    //! @param fieldNames Names of the bundle metadata fields to be searched for.
    //! @param fieldCount Size of the fieldNames and bundleMetadata arrays.
    //! @param bundleMetadata Handles to the metadata fields in this bundle.
    //! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
    //! invalid.
omni::core::Result getConstBundleMetadataByName(const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept;
//! @brief Get the names and types of all attribute metadata fields in the attribute.
//!
//! This method operates in two modes: **query mode** or **get mode**.
//!
    //! **Query mode** is enabled when fieldNames and fieldTypes are `nullptr`. When in this mode, *fieldCount
//! will be populated with the number of metadata fields in the attribute.
//!
//! **Get mode** is enabled when fieldNames or fieldTypes is not `nullptr`. Upon entering the function,
//! *fieldCount stores the number of entries in fieldNames and fieldTypes.
//!
    //! In **Get mode**, if fieldNames is not `nullptr`, the fieldNames array is populated with field names;
    //! if fieldTypes is not `nullptr`, the fieldTypes array is populated with field types.
//!
//! @param attribute Name of the attribute.
//! @param fieldNames Output field names in the attribute.
//! @param fieldTypes Output field types in the attribute.
//! @param fieldCount must not be `nullptr` in both modes.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getAttributeMetadataNamesAndTypes(omni::graph::core::NameToken attribute,
omni::graph::core::NameToken* const fieldNames,
omni::graph::core::Type* const fieldTypes,
size_t* const fieldCount) noexcept;
//! @brief Search for read only field handles in the attribute by using field names.
//!
//! @param attribute The name of the attribute.
//! @param fieldNames The names of attribute metadata fields to be searched for.
//! @param fieldCount Size of fieldNames and attributeMetadata arrays.
//! @param attributeMetadata Handles to attribute metadata fields in the attribute.
//! @return Success if executed successfully, Fail for unsuccessful execution, InvalidArgument if arguments are
//! invalid.
omni::core::Result getConstAttributeMetadataByName(
omni::graph::core::NameToken attribute,
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept;
};
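// Illustrative two-call pattern for the query/get methods above (sketch only;
// `bundle` is assumed to be a valid pointer to this generated interface):
//
// size_t count = 0;
// bundle->getConstAttributes(nullptr, &count); // query mode: fetch the count
// std::vector<omni::graph::core::ConstAttributeDataHandle> handles(count);
// bundle->getConstAttributes(handles.data(), &count); // get mode: fill handles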
#endif
#ifndef OMNI_BIND_INCLUDE_INTERFACE_DECL
inline bool omni::core::Generated<omni::graph::core::IConstBundle2_abi>::isValid() noexcept
{
return isValid_abi();
}
inline omni::graph::core::GraphContextObj omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getContext() noexcept
{
return getContext_abi();
}
inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstHandle() noexcept
{
return getConstHandle_abi();
}
inline carb::flatcache::PathC omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getPath() noexcept
{
return getPath_abi();
}
inline omni::graph::core::NameToken omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getName() noexcept
{
return getName_abi();
}
inline omni::graph::core::ConstBundleHandle omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstParentBundle() noexcept
{
return getConstParentBundle_abi();
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeNamesAndTypes(
omni::graph::core::NameToken* const names, omni::graph::core::Type* const types, size_t* const nameAndTypeCount) noexcept
{
return getAttributeNamesAndTypes_abi(names, types, nameAndTypeCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributes(
omni::graph::core::ConstAttributeDataHandle* const attributes, size_t* const attributeCount) noexcept
{
return getConstAttributes_abi(attributes, attributeCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributesByName(
const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstAttributeDataHandle* const attributes) noexcept
{
return getConstAttributesByName_abi(names, nameCount, attributes);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundles(
omni::graph::core::ConstBundleHandle* const bundles, size_t* const bundleCount) noexcept
{
return getConstChildBundles_abi(bundles, bundleCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundle(
size_t bundleIndex, omni::graph::core::ConstBundleHandle* const bundle) noexcept
{
return getConstChildBundle_abi(bundleIndex, bundle);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstChildBundlesByName(
const omni::graph::core::NameToken* const names,
size_t nameCount,
omni::graph::core::ConstBundleHandle* const foundBundles) noexcept
{
return getConstChildBundlesByName_abi(names, nameCount, foundBundles);
}
inline omni::graph::core::ConstBundleHandle omni::core::Generated<
omni::graph::core::IConstBundle2_abi>::getConstMetadataStorage() noexcept
{
return getConstMetadataStorage_abi();
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getBundleMetadataNamesAndTypes(
omni::graph::core::NameToken* const fieldNames, omni::graph::core::Type* const fieldTypes, size_t* const fieldCount) noexcept
{
return getBundleMetadataNamesAndTypes_abi(fieldNames, fieldTypes, fieldCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstBundleMetadataByName(
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const bundleMetadata) noexcept
{
return getConstBundleMetadataByName_abi(fieldNames, fieldCount, bundleMetadata);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getAttributeMetadataNamesAndTypes(
omni::graph::core::NameToken attribute,
omni::graph::core::NameToken* const fieldNames,
omni::graph::core::Type* const fieldTypes,
size_t* const fieldCount) noexcept
{
return getAttributeMetadataNamesAndTypes_abi(attribute, fieldNames, fieldTypes, fieldCount);
}
inline omni::core::Result omni::core::Generated<omni::graph::core::IConstBundle2_abi>::getConstAttributeMetadataByName(
omni::graph::core::NameToken attribute,
const omni::graph::core::NameToken* const fieldNames,
size_t fieldCount,
omni::graph::core::ConstAttributeDataHandle* const attributeMetadata) noexcept
{
return getConstAttributeMetadataByName_abi(attribute, fieldNames, fieldCount, attributeMetadata);
}
#endif
#undef OMNI_BIND_INCLUDE_INTERFACE_DECL
#undef OMNI_BIND_INCLUDE_INTERFACE_IMPL
| 16,589 | C | 48.969879 | 136 | 0.687624 |
omniverse-code/kit/include/omni/graph/core/ISchedulingHints2.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include <omni/core/IObject.h>
#include <omni/core/Omni.h>
#include <omni/inspect/IInspector.h>
#include <omni/graph/core/ISchedulingHints.h>
namespace omni
{
namespace graph
{
namespace core
{
//! The purity of the node implementation. For some context, a "pure" node is
//! one whose initialize, compute, and release methods are entirely deterministic,
//! i.e. they will always produce the same output attribute values for a given set
//! of input attribute values, and do not access, rely on, or otherwise mutate data
//! external to the node's scope
enum class ePurityStatus
{
//! Node is assumed to not be pure
eImpure,
//! Node can be considered pure if explicitly specified by the node author
ePure
};
//! Declare the ISchedulingHints2 interface definition
OMNI_DECLARE_INTERFACE(ISchedulingHints2);
//! Interface extension for ISchedulingHints that adds a new "pure" hint
class ISchedulingHints2_abi
: public omni::core::Inherits<ISchedulingHints, OMNI_TYPE_ID("omni.graph.core.ISchedulingHints2")>
{
protected:
/**
* Get the flag describing the node's purity state.
*
* @returns Value of the PurityStatus flag.
*/
virtual ePurityStatus getPurityStatus_abi() noexcept = 0;
/**
* Set the flag describing the node's purity status.
*
* @param[in] newPurityStatus New value of the PurityStatus flag.
*/
virtual void setPurityStatus_abi(ePurityStatus newPurityStatus) noexcept = 0;
};
} // namespace core
} // namespace graph
} // namespace omni
#include "ISchedulingHints2.gen.h"
//! @cond Doxygen_Suppress
//!
//! API part of the scheduling hints 2 interface
//! @copydoc omni::graph::core::ISchedulingHints2_abi
OMNI_DEFINE_INTERFACE_API(omni::graph::core::ISchedulingHints2)
//! @endcond
{
public:
//! @copydoc omni::graph::core::ISchedulingHints2::getPurityStatus_abi
inline omni::graph::core::ePurityStatus getPurityStatus() noexcept
{
return getPurityStatus_abi();
}
//! @copydoc omni::graph::core::ISchedulingHints2::setPurityStatus_abi
inline void setPurityStatus(omni::graph::core::ePurityStatus newPurityStatus) noexcept
{
setPurityStatus_abi(newPurityStatus);
}
};
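// Illustrative usage sketch (assumes `hints` is an ISchedulingHints pointer
// for a node type whose compute is fully deterministic):
//
// if (auto hints2 = omni::core::cast<omni::graph::core::ISchedulingHints2>(hints))
//     hints2->setPurityStatus(omni::graph::core::ePurityStatus::ePure);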
| 2,661 | C | 30.690476 | 102 | 0.73168 |
omniverse-code/kit/include/omni/graph/core/PyISchedulingHints2.gen.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// --------- Warning: This is a build system generated file. ----------
//
#pragma once
#include <omni/core/ITypeFactory.h>
#include <omni/python/PyBind.h>
#include <omni/python/PyString.h>
#include <omni/python/PyVec.h>
#include <sstream>
auto bindePurityStatus(py::module& m)
{
py::enum_<omni::graph::core::ePurityStatus> e(
m, "ePurityStatus", R"OMNI_BIND_RAW_(The purity of the node implementation. For some context, a "pure" node is
one whose initialize, compute, and release methods are entirely deterministic,
i.e. they will always produce the same output attribute values for a given set
of input attribute values, and do not access, rely on, or otherwise mutate data
external to the node's scope)OMNI_BIND_RAW_");
e.value("E_IMPURE", omni::graph::core::ePurityStatus::eImpure,
R"OMNI_BIND_RAW_(Node is assumed to not be pure)OMNI_BIND_RAW_");
e.value("E_PURE", omni::graph::core::ePurityStatus::ePure,
R"OMNI_BIND_RAW_(Node can be considered pure if explicitly specified by the node author)OMNI_BIND_RAW_");
return e;
}
auto bindISchedulingHints2(py::module& m)
{
// hack around pybind11 issues with C++17
// - https://github.com/pybind/pybind11/issues/2234
// - https://github.com/pybind/pybind11/issues/2666
// - https://github.com/pybind/pybind11/issues/2856
py::class_<omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>,
omni::python::detail::PyObjectPtr<omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>>,
omni::core::Api<omni::graph::core::ISchedulingHints_abi>>
clsParent(m, "_ISchedulingHints2");
py::class_<omni::graph::core::ISchedulingHints2, omni::core::Generated<omni::graph::core::ISchedulingHints2_abi>,
omni::python::detail::PyObjectPtr<omni::graph::core::ISchedulingHints2>,
omni::core::Api<omni::graph::core::ISchedulingHints_abi>>
cls(m, "ISchedulingHints2",
R"OMNI_BIND_RAW_(Interface extension for ISchedulingHints that adds a new "pure" hint)OMNI_BIND_RAW_");
cls.def(py::init(
[](const omni::core::ObjectPtr<omni::core::IObject>& obj)
{
auto tmp = omni::core::cast<omni::graph::core::ISchedulingHints2>(obj.get());
if (!tmp)
{
throw std::runtime_error("invalid type conversion");
}
return tmp;
}));
cls.def(py::init(
[]()
{
auto tmp = omni::core::createType<omni::graph::core::ISchedulingHints2>();
if (!tmp)
{
throw std::runtime_error("unable to create omni::graph::core::ISchedulingHints2 instantiation");
}
return tmp;
}));
cls.def_property("purity_status", &omni::graph::core::ISchedulingHints2::getPurityStatus,
&omni::graph::core::ISchedulingHints2::setPurityStatus);
return omni::python::PyBind<omni::graph::core::ISchedulingHints2>::bind(cls);
}
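// Once registered, the binding above is usable from Python roughly as follows
// (the module name `og` is an assumption for illustration):
//
// hints = og.ISchedulingHints2()
// hints.purity_status = og.ePurityStatus.E_PURE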
| 3,455 | C | 45.079999 | 118 | 0.657308 |