file_path
stringlengths 32
153
| content
stringlengths 0
3.14M
|
---|---|
omniverse-code/kit/include/carb/thread/Spinlock.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Carbonite Spinlock implementation.
#pragma once
#include "../Defines.h"
#include <atomic>
#include <thread>
namespace carb
{
namespace thread
{
/** Namespace for Carbonite private threading details. */
namespace detail
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//! Recursion policy that forbids re-entry: a thread that already owns the lock fatals on re-lock.
class RecursionPolicyDisallow
{
public:
    constexpr RecursionPolicyDisallow() = default;

    //! Reports whether the calling thread is the recorded owner.
    bool ownsLock() const
    {
        return m_owner == std::this_thread::get_id();
    }

    //! Records the calling thread as owner; fatal if it already owns the lock.
    void enter()
    {
        const auto self = std::this_thread::get_id();
        CARB_FATAL_UNLESS(self != m_owner, "Recursion is not allowed");
        m_owner = self;
    }

    //! Clears ownership; fatal if called by a non-owning thread. Always fully releases (returns true).
    bool tryLeave()
    {
        CARB_FATAL_UNLESS(ownsLock(), "Not owning thread");
        m_owner = std::thread::id(); // clear the owner
        return true;
    }

private:
    std::thread::id m_owner{};
};
//! Recursion policy that permits re-entry: the owning thread increments a recursion count on re-lock.
class RecursionPolicyAllow
{
public:
    constexpr RecursionPolicyAllow() = default;

    //! Reports whether the calling thread is the recorded owner.
    bool ownsLock() const
    {
        return m_owner == std::this_thread::get_id();
    }

    //! Takes (or re-takes) ownership for the calling thread, tracking recursion depth.
    void enter()
    {
        const auto self = std::this_thread::get_id();
        if (self != m_owner)
        {
            CARB_ASSERT(m_owner == std::thread::id()); // owner should be clear
            m_owner = self;
            m_recursion = 1;
        }
        else
        {
            ++m_recursion;
        }
    }

    //! Drops one level of recursion; returns true only when the outermost level is released.
    bool tryLeave()
    {
        CARB_FATAL_UNLESS(ownsLock(), "Not owning thread");
        if (--m_recursion != 0)
            return false; // still held recursively
        m_owner = std::thread::id(); // clear the owner
        return true;
    }

private:
    std::thread::id m_owner{};
    size_t m_recursion{ 0 };
};
#endif
/**
* Spinlock and RecursiveSpinlock are locking primitives that never enter the kernel to wait.
*
* @note Do not use SpinlockImpl directly; instead use Spinlock or RecursiveSpinlock.
*
* This class meets Cpp17BasicLockable and Cpp17Lockable named requirements.
*
* @warning Using Spinlock is generally discouraged and can lead to worse performance than using carb::thread::mutex or
* another synchronization primitive that can wait.
*/
template <class RecursionPolicy>
class SpinlockImpl
{
public:
    /**
     * Constructor.
     */
    constexpr SpinlockImpl() = default;
    /**
     * Destructor.
     */
    ~SpinlockImpl() = default;
    CARB_PREVENT_COPY(SpinlockImpl);
    /**
     * Locks the spinlock, spinning the current thread until it becomes available.
     * If not called from RecursiveSpinlock and the calling thread already owns the lock, `std::terminate()` is called.
     * The calling thread must call unlock() at a later time to release the lock.
     */
    void lock()
    {
        // If this thread already owns the lock (only possible with RecursionPolicyAllow), skip the spin;
        // m_rp.enter() below tracks recursion (or fatals for RecursionPolicyDisallow).
        if (!m_rp.ownsLock())
        {
            // Spin trying to set the lock bit. fetch_or() returns the *previous* value, so non-zero means
            // another thread currently holds the lock. memory_order_acquire pairs with the release store in
            // unlock(), making the previous holder's writes visible once the lock is acquired.
            while (CARB_UNLIKELY(!!m_lock.fetch_or(1, std::memory_order_acquire)))
            {
                CARB_HARDWARE_PAUSE();
            }
        }
        m_rp.enter();
    }
    /**
     * Unlocks the spinlock.
     * @warning `std::terminate()` is called if the calling thread does not own the spinlock.
     */
    void unlock()
    {
        // tryLeave() returns true only when the outermost recursion level is released (always true for the
        // non-recursive policy); only then is the lock bit actually cleared.
        if (m_rp.tryLeave())
        {
            // Released the lock
            m_lock.store(0, std::memory_order_release);
        }
    }
    /**
     * Attempts to immediately lock the spinlock.
     * If not called from RecursiveSpinlock and the calling thread already owns the lock, `std::terminate()` is called.
     * @returns `true` if the spinlock was available and the lock was taken by the calling thread (unlock() must be
     * called from the calling thread at a later time to release the lock); `false` if the spinlock could not be locked
     * by the calling thread.
     */
    bool try_lock()
    {
        // Recursive re-entry by the owner always succeeds without touching the atomic.
        if (!m_rp.ownsLock())
        {
            // See if we can set the lock bit; a non-zero previous value means someone else holds it.
            if (CARB_UNLIKELY(!!m_lock.fetch_or(1, std::memory_order_acquire)))
            {
                // Failed!
                return false;
            }
        }
        m_rp.enter();
        return true;
    }
    /**
     * Returns true if the calling thread owns this spinlock.
     * @returns `true` if the calling thread owns this spinlock; `false` otherwise.
     */
    bool isLockedByThisThread() const
    {
        return m_rp.ownsLock();
    }

private:
    // 0 = unlocked, 1 = locked. Ownership/recursion bookkeeping lives in m_rp, not in this value.
    std::atomic<size_t> m_lock{ 0 };
    RecursionPolicy m_rp;
};
} // namespace detail
/**
* A spinlock implementation that allows recursion.
*/
using RecursiveSpinlock = detail::SpinlockImpl<detail::RecursionPolicyAllow>;
/**
* A spinlock implementation that does not allow recursion.
* @warning Attempts to use this class in a recursive manner will call `std::terminate()`.
*/
using Spinlock = detail::SpinlockImpl<detail::RecursionPolicyDisallow>;
} // namespace thread
} // namespace carb
|
omniverse-code/kit/include/carb/thread/IpcLock.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Defines.h"
#if CARB_PLATFORM_WINDOWS
# include "../CarbWindows.h"
#elif CARB_POSIX
# include <sys/stat.h>
# include <errno.h>
# include <fcntl.h>
# include <semaphore.h>
# include <string.h>
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
namespace carb
{
namespace thread
{
/** defines an implementation of an intra-process lock. This lock will only be functional within
* the threads of the process that creates it. The lock implementation will always be recursive.
* When a lock with the given name is created, it will initially be unlocked.
*/
#if CARB_PLATFORM_WINDOWS
/** defines an implementation of an inter-process lock. These locks are given a unique name
* to allow other processes to open a lock of the same name. The name may be any ASCII string
* that does not contain the slash character ('/'). The name may be limited to an implementation
* defined length. Lock names should be less than 250 characters in general. The name of the
* lock will be removed from the system when all processes that use that name destroy their
* objects. When a lock with the given name is created, it will initially be unlocked.
*/
class IpcLock
{
public:
IpcLock(const char* name)
{
m_mutex = CreateMutexA(nullptr, CARBWIN_FALSE, name);
CARB_FATAL_UNLESS(m_mutex != nullptr, "CreateMutex() failed: %u", ::GetLastError());
}
~IpcLock()
{
CloseHandle(m_mutex);
}
void lock()
{
WaitForSingleObject(m_mutex, CARBWIN_INFINITE);
}
void unlock()
{
ReleaseMutex(m_mutex);
}
bool try_lock()
{
return WaitForSingleObject(m_mutex, 0) == CARBWIN_WAIT_OBJECT_0;
}
private:
HANDLE m_mutex;
};
#elif CARB_POSIX
/** defines an implementation of an inter-process lock. These locks are given a unique name
* to allow other processes to open a lock of the same name. The name may be any string that
* does not contain the slash character ('/'). The name may be limited to an implementation
* defined length. Lock names should be less than 250 characters in general. The name of the
* lock will be removed from the system when all processes that use that name destroy their
* objects. When a lock with the given name is created, it will initially be unlocked.
*/
// Inter-process lock backed by a POSIX named semaphore with an initial count of 1 (binary semaphore).
class IpcLock
{
public:
    // Creates (or opens, if it already exists) the named semaphore.
    // @param name any string; embedded '/' characters are replaced since POSIX forbids them after the
    //             leading slash. NOTE(review): only CARB_ASSERT checks sem_open() failure, so a release
    //             build would continue with SEM_FAILED — confirm this is acceptable to callers.
    IpcLock(const char* name)
    {
        // create the name for the semaphore and remove all slashes within it (slashes are not
        // allowed after the first character and the first character must always be a slash).
        snprintf(m_name, CARB_COUNTOF(m_name) - 4, "/%s", name);
        for (size_t i = 1; m_name[i] != 0; i++)
        {
            if (m_name[i] == '/')
                m_name[i] = '_';
        }
        // create the named semaphore.
        m_semaphore = sem_open(m_name, O_CREAT | O_RDWR, 0644, 1);
        CARB_ASSERT(m_semaphore != SEM_FAILED);
    }
    // Closes this process's handle and unlinks the name.
    // NOTE(review): sem_unlink() removes the name system-wide; other processes keep their open handles,
    // but a subsequent sem_open() by a new process creates a *fresh* semaphore — confirm intended.
    ~IpcLock()
    {
        sem_close(m_semaphore);
        sem_unlink(m_name);
    }
    // Blocks until the semaphore is acquired.
    void lock()
    {
        int ret;
        // keep trying the wait operation as long as we get interrupted by a signal.
        do
        {
            ret = sem_wait(m_semaphore);
            // Oddly enough, on Windows Subsystem for Linux, sem_wait can fail with ETIMEDOUT. Handle that case here
        } while (ret == -1 && (errno == EINTR || errno == ETIMEDOUT));
    }
    // Releases the semaphore (increments its count back to 1).
    void unlock()
    {
        sem_post(m_semaphore);
    }
    // Attempts to acquire the semaphore without blocking.
    bool try_lock()
    {
        // keep trying the wait operation as long as we get interrupted by a signal.
        int ret = CARB_RETRY_EINTR(sem_trywait(m_semaphore));
        // if the lock was acquired, the return value will always be zero. If if failed either
        // due to a non-signal error or because it would block, 'ret' will be -1. If the call
        // was valid but would block, 'errno' is set to EAGAIN.
        return ret == 0;
    }

private:
    sem_t* m_semaphore;
    // Buffer for the transformed name: leading '/', the (possibly truncated) user name, and slack
    // beyond NAME_MAX for the terminator.
    char m_name[NAME_MAX + 10];
};
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
} // namespace thread
} // namespace carb
|
omniverse-code/kit/include/carb/thread/Util.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Carbonite thread utilities.
#pragma once
#include "../Defines.h"
#include "../extras/ScopeExit.h"
#include "../math/Util.h"
#include "../process/Util.h"
#include "../profiler/IProfiler.h"
#include "../../omni/extras/ContainerHelper.h"
#if CARB_PLATFORM_WINDOWS
# include "../CarbWindows.h"
# include "../extras/Unicode.h"
#elif CARB_POSIX
# include <sys/syscall.h>
# include <pthread.h>
# include <sched.h>
# include <unistd.h>
# include <time.h>
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
#if CARB_PLATFORM_MACOS
# pragma push_macro("min")
# pragma push_macro("max")
# undef min
# undef max
# include <mach/thread_policy.h>
# include <mach/thread_act.h>
# pragma pop_macro("max")
# pragma pop_macro("min")
#endif
#include <atomic>
#include <thread>
namespace carb
{
namespace thread
{
/** The type for a process ID. */
using ProcessId = process::ProcessId;
/** The type for a thread ID. */
using ThreadId = uint32_t;
/**
* Each entry in the vector is a bitmask for a set of CPUs.
*
* On Windows each entry corresponds to a Processor Group.
*
* On Linux the entries are contiguous, like cpu_set_t.
*/
using CpuMaskVector = std::vector<uint64_t>;
/** The number of CPUs represented by an individual cpu mask. */
constexpr uint64_t kCpusPerMask = std::numeric_limits<CpuMaskVector::value_type>::digits;
#if CARB_PLATFORM_WINDOWS
static_assert(sizeof(ThreadId) >= sizeof(DWORD), "ThreadId type is too small");
#elif CARB_POSIX
static_assert(sizeof(ThreadId) >= sizeof(pid_t), "ThreadId type is too small");
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
/** The printf format macro to print a thread ID. */
#define OMNI_PRItid PRIu32
/** The printf format macro to print a thread ID in hexadecimal. */
#define OMNI_PRIxtid PRIx32
#if CARB_PLATFORM_WINDOWS
# ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
// Exception code raised to tell an attached Visual Studio debugger a thread's name (the classic
// pre-SetThreadDescription mechanism).
const DWORD MS_VC_EXCEPTION = 0x406D1388;

// Payload layout the debugger expects for the MS_VC_EXCEPTION thread-naming exception.
#    pragma pack(push, 8)
typedef struct tagTHREADNAME_INFO
{
    DWORD dwType; // must be 0x1000
    LPCSTR szName; // pointer to name (in user address space)
    DWORD dwThreadID; // thread ID (-1 = caller thread)
    DWORD dwFlags; // reserved for future use, must be zero
} THREADNAME_INFO;
#    pragma pack(pop)

// Names a thread for an attached debugger by raising the well-known naming exception. No effect
// when no debugger is present (the exception is not raised at all in that case).
inline void setDebuggerThreadName(DWORD threadId, LPCSTR name)
{
    // Do it the old way, which is only really useful if the debugger is running
    if (::IsDebuggerPresent())
    {
        detail::THREADNAME_INFO info;
        info.dwType = 0x1000;
        info.szName = name;
        info.dwThreadID = threadId;
        info.dwFlags = 0;
// SEH is required here: the debugger handles the exception; we swallow it if it comes back to us.
#    pragma warning(push)
#    pragma warning(disable : 6320 6322)
        __try
        {
            ::RaiseException(detail::MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
        }
        __except (CARBWIN_EXCEPTION_EXECUTE_HANDLER)
        {
        }
#    pragma warning(pop)
    }
}
} // namespace detail
# endif
//! The definition of a NativeHandleType. On Windows this is a `HANDLE` and on Linux it is a `pthread_t`.
using NativeHandleType = HANDLE;
#elif CARB_POSIX
//! The definition of a NativeHandleType. On Windows this is a `HANDLE` and on Linux it is a `pthread_t`.
using NativeHandleType = pthread_t;
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
/**
* Sets the name of the given thread.
*
* @note The length of the name is limited by the system.
*
* @param h The native handle to the thread.
* @param name The desired name for the thread.
*
* @note On Mac OS, it is not possible to name a thread that is not the current
* executing thread.
*/
inline void setName(NativeHandleType h, const char* name)
{
#if CARB_PLATFORM_WINDOWS
    // Emulate CARB_NAME_THREAD but don't include Profile.h which would create a circular dependency.
    if (g_carbProfiler)
        g_carbProfiler->nameThreadDynamic(::GetThreadId(h), "%s", name);
    // SetThreadDescription is only available starting with Windows 10 1607, so resolve it dynamically
    // and cache the result in a function-local static.
    using PSetThreadDescription = HRESULT(CARBWIN_WINAPI*)(HANDLE, PCWSTR);
    static PSetThreadDescription SetThreadDescription =
        (PSetThreadDescription)::GetProcAddress(::GetModuleHandleW(L"kernel32.dll"), "SetThreadDescription");
    if (SetThreadDescription)
    {
        bool b = CARBWIN_SUCCEEDED(SetThreadDescription(h, extras::convertUtf8ToWide(name).c_str()));
        CARB_UNUSED(b);
        CARB_ASSERT(b);
    }
    else
    {
        // Pre-1607 fallback: raise the debugger thread-naming exception instead.
        detail::setDebuggerThreadName(::GetThreadId(h), name);
    }
#elif CARB_PLATFORM_LINUX
    if (h == pthread_self())
    {
        // Emulate CARB_NAME_THREAD but don't include Profile.h which would create a circular dependency.
        if (g_carbProfiler)
            g_carbProfiler->nameThreadDynamic(0, "%s", name);
    }
    // First attempt with the full name; pthread_setname_np() fails (ERANGE) for names that are too long.
    if (pthread_setname_np(h, name) != 0)
    {
        // This is limited to 16 characters including NUL according to the man page.
        char buffer[16];
        strncpy(buffer, name, 15);
        buffer[15] = '\0';
        pthread_setname_np(h, buffer);
    }
#elif CARB_PLATFORM_MACOS
    if (h == pthread_self())
    {
        // macOS's pthread_setname_np() takes no thread argument; it only names the calling thread.
        pthread_setname_np(name);
    }
    // not possible to name an external thread on mac
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* Retrieves the name of the thread previously set with setName().
*
* @note The length of the name is limited by the system.
*
* @param h The native handle to the thread.
* @return The name of the thread.
*/
inline std::string getName(NativeHandleType h)
{
#if CARB_PLATFORM_WINDOWS
    // GetThreadDescription is only available starting with Windows 10 1607, so resolve it dynamically
    // and cache the result in a function-local static.
    using PGetThreadDescription = HRESULT(CARBWIN_WINAPI*)(HANDLE, PWSTR*);
    static PGetThreadDescription GetThreadDescription =
        (PGetThreadDescription)::GetProcAddress(::GetModuleHandleW(L"kernel32.dll"), "GetThreadDescription");
    if (GetThreadDescription)
    {
        PWSTR threadName;
        if (CARBWIN_SUCCEEDED(GetThreadDescription(h, &threadName)))
        {
            // The API allocates the wide string; convert to UTF-8 and free it with LocalFree().
            std::string s = extras::convertWideToUtf8(threadName);
            ::LocalFree(threadName);
            return s;
        }
    }
    // Unavailable API or failed call: report an empty name.
    return std::string();
#elif CARB_PLATFORM_LINUX || CARB_PLATFORM_MACOS
    // 64 bytes comfortably exceeds the kernel's 16-byte thread-name limit.
    char buffer[64];
    if (pthread_getname_np(h, buffer, CARB_COUNTOF(buffer)) == 0)
    {
        return std::string(buffer);
    }
    return std::string();
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* Sets the CPU affinity for the given thread handle
*
* Each bit represents a logical CPU; bit 0 for CPU 0, bit 1 for CPU 1, etc.
*
* @param h The native handle to the thread
* @param mask The bitmask representing the desired CPU affinity. Zero (no bits set) is ignored.
*
* @note On Mac OS, the CPU affinity works differently than on other systems.
* The mask is treated as a unique ID for groups of threads that should run
* on the same core, rather than specific CPUs.
* For single CPU masks, this will function similarly to other systems
* (aside from the fact that the specific core the threads are running on
* being different).
*
* @note M1 Macs do not support thread affinity so this will do nothing on those systems.
*/
inline void setAffinity(NativeHandleType h, size_t mask)
{
#if CARB_PLATFORM_WINDOWS
    ::SetThreadAffinityMask(h, mask);
#elif CARB_PLATFORM_LINUX
    // From the man page: The cpu_set_t data type is implemented as a bit mask. However, the data structure should be
    // treated as opaque: all manipulation of the CPU sets should be done via the macros described in this page.
    if (!mask)
        return; // zero mask is documented as ignored
    cpu_set_t cpuSet;
    CPU_ZERO(&cpuSet);
    static_assert(sizeof(cpuSet) >= sizeof(mask), "Invalid assumption: use CPU_ALLOC");
    do
    {
        // Fixed: was `__builtin_ctz(mask)`, which takes an `unsigned int` and silently truncates the
        // 64-bit mask (and is undefined for 0, which would be hit for masks with only bits 32-63 set).
        // cpp::countr_zero() handles the full width, consistent with the CpuMaskVector overload below.
        int bit = cpp::countr_zero(mask);
        CPU_SET(bit, &cpuSet);
        mask &= ~(size_t(1) << bit); // clear the bit just handled
    } while (mask != 0);
    pthread_setaffinity_np(h, sizeof(cpu_set_t), &cpuSet);
#elif CARB_PLATFORM_MACOS
    // Mac OS treats the value as an affinity *tag* (threads with equal tags are grouped), not a CPU mask;
    // see the function documentation above.
    thread_affinity_policy policy{ static_cast<integer_t>(mask) };
    thread_policy_set(pthread_mach_thread_np(h), THREAD_AFFINITY_POLICY, reinterpret_cast<thread_policy_t>(&policy),
                      THREAD_AFFINITY_POLICY_COUNT);
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* Sets the CPU Affinity for the thread.
*
* On Windows each entry in the CpuMaskVector represents a Processor Group. Each thread can only belong to a single
* Processor Group, so this function will only set the CPU Affinity to the first non-zero entry in the provided
* CpuMaskVector. That is to say, if both \c masks[0] and \c masks[1] both have bits sets, only the CPUs in \c masks[0]
* will be set for the affinity.
*
* On Linux, the CpuMaskVector is analogous to a cpu_set_t. There are no restrictions on the number of CPUs that the
* affinity mask can contain.
*
* @param h The thread to set CPU Affinity for.
* @param masks Affinity masks to set.
*
* @return True if the function succeeded, false otherwise. If \c masks is empty, or has no bits set, false will be
* returned. If the underlying function for setting affinity failed, then \c errno or \c last-error will be set.
*
* @note On Mac OS, the CPU affinity works differently than on other systems.
* The mask is treated as a unique ID for groups of threads that should run
* on cores that share L2 cache, rather than specific CPUs.
* For single CPU masks, this will function somewhat similarly to other
* systems, but threads won't be pinned to a specific core.
*/
inline bool setAffinity(NativeHandleType h, const CpuMaskVector& masks)
{
    // An empty mask vector cannot express an affinity; fail per the documented contract.
    if (masks.empty())
    {
        return false;
    }
#if CARB_PLATFORM_WINDOWS
    // Find the lowest mask with a value set. That is the CPU Group that we'll set the affinity for.
    // (A Windows thread belongs to exactly one Processor Group, so remaining entries are ignored.)
    for (uint64_t i = 0; i < masks.size(); ++i)
    {
        if (masks[i])
        {
            CARBWIN_GROUP_AFFINITY affinity{};
            affinity.Group = (WORD)i;
            affinity.Mask = masks[i];
            return ::SetThreadGroupAffinity(h, (const GROUP_AFFINITY*)&affinity, nullptr);
        }
    }
    // Would only reach here if no affinity mask had a cpu set.
    return false;
#elif CARB_PLATFORM_LINUX
    // Build a dynamically-sized cpu_set_t large enough for every mask entry, then translate each set
    // bit into the corresponding absolute CPU index.
    uint64_t numCpus = kCpusPerMask * masks.size();
    cpu_set_t* cpuSet = CPU_ALLOC(numCpus);
    if (!cpuSet)
    {
        return false;
    }
    // Free the allocated set on every exit path.
    CARB_SCOPE_EXIT
    {
        CPU_FREE(cpuSet);
    };
    CPU_ZERO_S(CPU_ALLOC_SIZE(numCpus), cpuSet);
    for (uint64_t i = 0; i < masks.size(); ++i)
    {
        CpuMaskVector::value_type mask = masks[i];
        while (mask != 0)
        {
            int bit = cpp::countr_zero(mask);
            CPU_SET(bit + (i * kCpusPerMask), cpuSet);
            mask &= ~(CpuMaskVector::value_type(1) << bit);
        }
    }
    if (pthread_setaffinity_np(h, CPU_ALLOC_SIZE(numCpus), cpuSet) != 0)
    {
        return false;
    }
    else
    {
        return true;
    }
#elif CARB_PLATFORM_MACOS
    // NOTE(review): this shifts by the *value* of each entry (`1ULL << masks[i]`), treating entries as
    // indices rather than bitmasks as the other platforms do — presumably intentional for the macOS
    // affinity-tag model described above, but confirm against getAffinity()'s inverse mapping.
    size_t mask = 0;
    for (uint64_t i = 0; i < masks.size(); ++i)
    {
        mask |= 1ULL << masks[i];
    }
    setAffinity(h, mask);
    return true;
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* Gets the current CPU Affinity for the thread.
*
* On Windows each entry in the CpuMaskVector represents a Processor Group.
* On Linux, the CpuMaskVector is analogous to a cpu_set_t.
*
* @param h The thread to get CPU Affinity for.
*
* @return A CpuMaskVector containing the cpu affinities for the thread. If the underlying functions to get thread
* affinity return an error, the returned CpuMaskVector will be empty and \c errno or \c last-error will be set.
*
* @note M1 Macs do not support thread affinity so this will always return an
* empty vector.
*/
inline CpuMaskVector getAffinity(NativeHandleType h)
{
    CpuMaskVector results;
#if CARB_PLATFORM_WINDOWS
    // A thread belongs to exactly one Processor Group; report its mask in the entry for that group,
    // with all lower-group entries zeroed.
    CARBWIN_GROUP_AFFINITY affinity;
    if (!::GetThreadGroupAffinity(h, (PGROUP_AFFINITY)&affinity))
    {
        return results; // empty on failure; last-error is set by the API
    }
    results.resize(affinity.Group + 1, 0);
    results.back() = affinity.Mask;
    return results;
#elif CARB_PLATFORM_LINUX
    // Get the current affinity
    cpu_set_t cpuSet;
    CPU_ZERO(&cpuSet);
    if (pthread_getaffinity_np(h, sizeof(cpu_set_t), &cpuSet) != 0)
    {
        return results; // empty on failure; errno is set by the call
    }
    // Convert the cpu_set_t to a CpuMaskVector by reinterpreting its storage as 64-bit words.
    results.reserve(sizeof(cpu_set_t) / sizeof(CpuMaskVector::value_type));
    CpuMaskVector::value_type* ptr = reinterpret_cast<CpuMaskVector::value_type*>(&cpuSet);
    for (uint64_t i = 0; i < (sizeof(cpu_set_t) / sizeof(CpuMaskVector::value_type)); i++)
    {
        results.push_back(ptr[i]);
    }
    return results;
#elif CARB_PLATFORM_MACOS
    boolean_t def = false; // if the value retrieved was the default
    mach_msg_type_number_t count = 0; // the length of the returned struct in integer_t
    thread_affinity_policy policy{ 0 };
    int res = thread_policy_get(
        pthread_mach_thread_np(h), THREAD_AFFINITY_POLICY, reinterpret_cast<thread_policy_t>(&policy), &count, &def);
    if (res != 0 || def)
    {
        return results; // empty if the call failed or no explicit affinity was ever set
    }
    // NOTE(review): each entry pushed here is a set *bit index* of the affinity tag, not a bitmask as
    // on the other platforms — this mirrors the setAffinity(masks) macOS encoding above; confirm that
    // round-tripping through these two functions is the intended contract.
    for (uint64_t i = 0; i < (sizeof(policy.affinity_tag) * CHAR_BIT); i++)
    {
        if ((policy.affinity_tag & (1ULL << i)) != 0)
        {
            results.push_back(i);
        }
    }
    return results;
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* A utility class for providing a growing number of pause instructions, followed by yielding.
*
* The pause instruction is effectively \ref CARB_HARDWARE_PAUSE().
*
* Very fast to construct, often used in a loop such as:
* ```cpp
* for (AtomicBackoff<> b;; b.pause())
* if (condition())
* break;
* ```
* @tparam PausesBeforeYield the number of pauses that will occur before yielding begins. See
* \ref AtomicBackoff::pause().
*/
//! Exponential backoff helper: issues a doubling number of CPU pause instructions per call, switching
//! to an OS yield once the count exceeds \ref kPausesBeforeYield. See the documentation above.
template <size_t PausesBeforeYield = 16>
class AtomicBackoff
{
public:
    //! The number of pauses that should be executed before yielding the thread.
    static constexpr size_t kPausesBeforeYield = PausesBeforeYield;
    static_assert(carb::cpp::has_single_bit(kPausesBeforeYield), "Must be a power of 2");

    //! Constructor.
    constexpr AtomicBackoff() noexcept = default;
    CARB_PREVENT_COPY_AND_MOVE(AtomicBackoff);

    //! Executes the CPU pause instruction (\ref CARB_HARDWARE_PAUSE) `count` times.
    static void pauseLoop(size_t count) noexcept
    {
        for (; count != 0; --count)
            CARB_HARDWARE_PAUSE();
    }

    //! Resets the internal pause count back to 1.
    void reset() noexcept
    {
        m_pauseCount = 1;
    }

    //! Pauses for exponentially longer on each call (1, 2, 4, ... pause instructions); once the count
    //! exceeds \ref kPausesBeforeYield, yields to the OS instead. \ref reset() restarts the sequence.
    void pause() noexcept
    {
        if (m_pauseCount > kPausesBeforeYield)
        {
            // Too much contention; just yield to the OS
            std::this_thread::yield();
            return;
        }
        pauseLoop(m_pauseCount);
        m_pauseCount *= 2; // pause twice as many times next time
    }

    //! Like \ref pause() but never yields: pauses, then reports whether further backoff is available.
    //! @returns `true` while the internal count is below \ref kPausesBeforeYield; `false` once it reaches it.
    bool pauseWithoutYield() noexcept
    {
        pauseLoop(m_pauseCount);
        if (m_pauseCount >= kPausesBeforeYield)
            return false;
        m_pauseCount *= 2; // pause twice as many times next time
        return true;
    }

private:
    size_t m_pauseCount{ 1 };
};
/**
* Similar to `std::thread::hardware_concurrency()`, but pays attention to docker cgroup config and CPU limits.
*
* Docker container CPU limits are based on the ratio of `/sys/fs/cgroup/cpu/cpu.cfs_quota_us` to
* `/sys/fs/cgroup/cpu/cpu.cfs_period_us`. Fractional CPUs of a half or larger will round up to a full CPU. It is
* possible to have an odd number reported by this function.
* Examples:
* * Docker `--cpus="3.75"` will produce `4` (rounds fractional up)
* * Docker `--cpus="3.50"` will produce `4` (rounds fractional up)
* * Docker `--cpus="3.25"` will produce `3` (rounds fractional down)
* * Docker `--cpus="0.25"` will produce `1` (minimum of 1)
*
* @returns The number of CPUs available on the current system or within the current container, if applicable.
*/
inline unsigned hardware_concurrency() noexcept
{
#if CARB_PLATFORM_LINUX
    // Honor docker cgroup CPU limits when present; the value is computed once and cached.
    static auto dockerLimit = omni::extras::getDockerCpuLimit();
    if (dockerLimit > 0)
        return unsigned(dockerLimit);
#endif
    // No container limit (or non-Linux): fall back to the standard library's value.
    return std::thread::hardware_concurrency();
}
} // namespace thread
/**
* Namespace for utilities that operate on the current thread specifically.
*/
namespace this_thread
{
/**
* A simple sleep for the current thread that does not include the overhead of `std::chrono`.
*
* @param microseconds The number of microseconds to sleep for
*/
inline void sleepForUs(uint32_t microseconds) noexcept
{
#if CARB_PLATFORM_WINDOWS
::Sleep(microseconds / 1000);
#elif CARB_POSIX
uint64_t nanos = uint64_t(microseconds) * 1'000;
struct timespec rem, req{ time_t(nanos / 1'000'000'000), long(nanos % 1'000'000'000) };
while (nanosleep(&req, &rem) != 0 && errno == EINTR)
req = rem; // Complete remaining sleep
#else
CARB_PLATFORM_UNSUPPORTED()
#endif
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
// Returns a pseudo-random spin count in roughly [kSpinsMin, kSpinsMax] so that contending threads do
// not back off in lockstep.
inline unsigned contentionSpins()
{
    // These must be power-of-two-minus-one so that they function as bitmasks
    constexpr static unsigned kSpinsMax = 128 - 1;
    constexpr static unsigned kSpinsMin = 32 - 1;
    // Use randomness to prevent threads from resonating at the same frequency and permanently contending. Use a
    // simple LCG for randomness.
    // NOTE(review): the comment below says "random initialization value", but a static std::atomic_uint
    // is zero-initialized, so the sequence always starts from the same seed — confirm intent.
    static std::atomic_uint _seed; // Use random initialization value as the starting seed
    // Relaxed ordering is sufficient: the seed only needs to differ between calls, not synchronize data.
    unsigned int next = _seed.load(std::memory_order_relaxed);
    _seed.store(next * 1103515245 + 12345, std::memory_order_relaxed);
    // Take the high byte (better-mixed bits of the LCG), clamp to the max, and OR in the minimum.
    return ((next >> 24) & kSpinsMax) | kSpinsMin;
}
// This function name breaks naming paradigms so that it shows up prominently in stack traces. As the name implies, this
// function waits until f() returns true.
template <class Func>
void __CONTENDED_WAIT__(Func&& f) noexcept(noexcept(f()))
{
    // Exponential pause/yield backoff between predicate attempts; see AtomicBackoff.
    thread::AtomicBackoff<> backoff;
    while (CARB_UNLIKELY(!f()))
        backoff.pause();
}
} // namespace detail
#endif
/**
* Returns the native handle for the current thread.
*
* @note Windows: This handle is not unique to the thread but instead is a pseudo-handle representing "current thread".
* To obtain a unique handle to the current thread, use the Windows API function `DuplicateHandle()`.
*
* @return The native handle for the current thread.
*/
inline thread::NativeHandleType get()
{
#if CARB_PLATFORM_WINDOWS
    // Pseudo-handle meaning "the current thread"; see the note above about DuplicateHandle() for a
    // unique handle.
    return ::GetCurrentThread();
#elif CARB_POSIX
    return pthread_self();
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* Returns the ID of the currently executing process.
* @returns The current ID of the process.
*/
CARB_DEPRECATED("Use this_process::getId() instead") static inline thread::ProcessId getProcessId()
{
    // Deprecated forwarder kept for source compatibility.
    return this_process::getId();
}
/**
* Get the ID of the currently executing process.
* @note Linux: This value is cached, so this can be unsafe if you are using fork() or clone() without calling exec()
* after. This should be safe if you're only using @ref carb::launcher::ILauncher to launch processes.
* @returns The current ID of the process.
*/
CARB_DEPRECATED("Use this_process::getIdCached() instead") static inline thread::ProcessId getProcessIdCached()
{
    // Deprecated forwarder kept for source compatibility.
    return this_process::getIdCached();
}
/**
* Retrieve the thread ID for the current thread.
* @return The thread ID for the current thread.
*/
inline thread::ThreadId getId()
{
#if CARB_PLATFORM_WINDOWS
    return thread::ThreadId(::GetCurrentThreadId());
#elif CARB_PLATFORM_LINUX
    // This value is stored internally within the pthread_t, but this is opaque and there is no public API for
    // retrieving it. Therefore, we can only do this for the current thread.
    // NOTE: We do not store this in a thread_local because on older versions of glibc (especially 2.17, which is what
    // Centos7 uses), this will require a lock that is also shared with loading shared libraries, which can cause a
    // deadlock.
    // The kernel thread ID is obtained via the gettid syscall (no glibc wrapper on older glibc).
    thread::ThreadId tid = (thread::ThreadId)(pid_t)syscall(SYS_gettid);
    return tid;
#elif CARB_PLATFORM_MACOS
    // Mach port of the current pthread serves as the thread ID on macOS.
    return thread::ThreadId(pthread_mach_thread_np(pthread_self()));
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* Sets the name for the current thread.
*
* @note The length of the name is limited by the system and may be truncated.
*
* @param name The desired name for the current thread.
*/
inline void setName(const char* name)
{
    // Forward to the handle-based overload with the current thread's native handle.
    thread::setName(get(), name);
}
/**
* Retrieves the name of the current thread.
*
* @return The name of the current thread.
*/
inline std::string getName()
{
    // Forward to the handle-based overload with the current thread's native handle.
    return thread::getName(get());
}
/**
* Sets the affinity of the current thread.
*
* Each bit represents a logical CPU; bit 0 for CPU 0, bit 1 for CPU 1, etc.
*
* @note This function is limited to the first 64 CPUs in a system.
*
* @param mask The bitmask representing the desired CPU affinity. Zero (no bits set) is ignored.
*/
inline void setAffinity(size_t mask)
{
    // Forward to the handle-based overload with the current thread's native handle.
    thread::setAffinity(get(), mask);
}
/**
* Sets the CPU Affinity for the current thread.
*
* On Windows each entry in the CpuMaskVector represents a Processor Group. Each thread can only belong to a single
* Processor Group, so this function will only set the CPU Affinity to the first non-zero entry in the provided
* CpuMaskVector. That is to say, if both \c masks[0] and \c masks[1] have bits sets, only the CPUs in \c masks[0]
* will be set for the affinity.
*
* On Linux, the CpuMaskVector is analogous to a cpu_set_t. There are no restrictions on the number of CPUs that the
* affinity mask can contain.
*
* @param masks Affinity masks to set.
*
* @return True if the function succeeded, false otherwise. If \c masks is empty, or has no bits set, false will be
* returned. If the underlying function for setting affinity failed, then \c errno or \c last-error will be set.
*/
inline bool setAffinity(const thread::CpuMaskVector& masks)
{
    // Forward to the handle-based overload with the current thread's native handle.
    return thread::setAffinity(get(), masks);
}
/**
* Gets the current CPU Affinity for the current thread.
*
* On Windows each entry in the CpuMaskVector represents a Processor Group.
* On Linux, the CpuMaskVector is analogous to a cpu_set_t.
*
* @return A CpuMaskVector containing the cpu affinities for the thread. If the underlying functions to get thread
* affinity return an error, the returned CpuMaskVector will be empty and \c errno or \c last-error will be set.
*/
inline thread::CpuMaskVector getAffinity()
{
    // Forward to the handle-based overload with the current thread's native handle.
    return thread::getAffinity(get());
}
/**
* Calls a predicate repeatedly until it returns \c true.
*
* This function is recommended only for situations where exactly one thread is waiting on another thread. For multiple
* threads waiting on a predicate, use \ref spinWaitWithBackoff().
*
* @param f The predicate to call repeatedly until it returns `true`.
*/
template <class Func>
void spinWait(Func&& f) noexcept(noexcept(f()))
{
    // Busy-wait on the predicate, issuing the CPU pause instruction between attempts.
    for (;;)
    {
        if (CARB_LIKELY(f()))
            break;
        CARB_HARDWARE_PAUSE();
    }
}
/**
* Calls a predicate until it returns true with progressively increasing delays between calls.
*
* This function is a low-level utility for high-contention cases where multiple threads will be calling @p f
* simultaneously, @p f needs to succeed (return `true`) before continuing, but @p f will only succeed for one thread at
* a time. This function does not return until @p f returns `true`, at which point this function returns immediately.
* High contention is assumed when @p f returns `false` for several calls, at which point the calling thread will
* progressively sleep between bursts of calls to @p f. This is a back-off mechanism to allow one thread to move forward
* while other competing threads wait their turn.
*
* @param f The predicate to call repeatedly until it returns `true`.
*/
template <class Func>
void spinWaitWithBackoff(Func&& f) noexcept(noexcept(f()))
{
    // Fast path: predicate succeeds immediately; otherwise enter the contended backoff loop.
    if (CARB_LIKELY(f()))
        return;
    detail::__CONTENDED_WAIT__(std::forward<Func>(f));
}
/**
* Calls a predicate until it returns true or a random number of attempts have elapsed.
*
* This function is a low-level utility for high-contention cases where multiple threads will be calling @p f
* simultaneously, and @p f needs to succeed (return `true`) before continuing, but @p f will only succeed for one
* thread at a time. This function picks a pseudo-random maximum number of times to call the function (the randomness is
* so that multiple threads will not choose the same number and perpetually block each other) and repeatedly calls the
* function that number of times. If @p f returns `true`, spinTryWait() immediately returns `true`.
*
* @param f The predicate to call repeatedly until it returns `true`.
* @returns `true` immediately when @p f returns `true`. If a random number of attempts to call @p f all return `false`
* then `false` is returned.
*/
template <class Func>
bool spinTryWait(Func&& f) noexcept(noexcept(f()))
{
thread::AtomicBackoff<> backoff;
while (CARB_UNLIKELY(!f()))
if (!backoff.pauseWithoutYield())
return false;
return true;
}
/**
* A replacement function for `std::atomic_thread_fence(std::memory_order_seq_cst)` that performs better on some older
* compilers.
*/
inline void atomic_fence_seq_cst() noexcept
{
#if CARB_X86_64 && CARB_COMPILER_GNUC && __GNUC__ < 11
    // On x86_64 CPUs we can use any lock-prefixed instruction as a StoreLoad operation to achieve sequential
    // consistency (see https://shipilev.net/blog/2014/on-the-fence-with-dependencies/). The 'notb' instruction here has
    // the added benefit of not affecting flags or other registers (see https://www.felixcloutier.com/x86/not).
    // It is also likely that our 'unused' variable at the top of the stack is in L1 cache.
    unsigned char unused{};
    // "+m" makes `unused` an in/out memory operand; the "memory" clobber stops the
    // compiler from reordering memory accesses across this point.
    __asm__ __volatile__("lock; notb %0" : "+m"(unused)::"memory");
#else
    // All other targets/compilers: fall back to the standard sequentially-consistent fence.
    std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
} // namespace this_thread
} // namespace carb
|
omniverse-code/kit/include/carb/thread/RecursiveSharedMutex.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Recursive Shared Mutex implementation.
#pragma once
#include "SharedMutex.h"
#include "ThreadLocal.h"
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <exception>
#include <memory>
#include <mutex>
#include <new>
#include <utility>
#include <vector>
namespace carb
{
namespace thread
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
class recursive_shared_mutex;
namespace detail
{
// One bookkeeping record per locked mutex: the second member is the lock depth for the
// calling thread, where depth > 0 counts shared locks and depth < 0 counts exclusive
// locks (see recursive_shared_mutex::lock()/unlock() below).
using LockEntry = std::pair<recursive_shared_mutex*, ptrdiff_t>;
// Per-thread list of every recursive_shared_mutex the calling thread currently holds.
using LockList = std::vector<LockEntry>;
// TL;DR: Gymnastics to get around SIOF (Static Initialization Order Fiasco) with supported compilers
//
// For GCC this is pretty easy. The init_priority attribute allows us to specify a priority value to use for
// initialization order. For recursive_shared_mutex's lockList, we really only care that it's constructed before
// application initializers run.
//
// We have to jump through some hoops here for MSVC since this is a header-only class. MSVC does have pragma init_seg,
// BUT a given translation unit (i.e. cpp files) may have only one. Since this exists as a header-only class and we
// don't want to force linkage of a cpp file specifically for this, we can get around it by injecting our initializer
// function into the appropriate segment for initializer order at link time.
//
// This is a fairly good reference for the various C-Runtime initializer sections:
// https://gist.github.com/vaualbus/622099d88334fbba1d4ae703642c2956
//
// #pragma init_seg(lib) corresponds to section .CRT$XCL (the L seems to indicate `lib`). Ironically, C=compiler,
// L=lib, and U=user are also in alphabetical order and make nice delimiters between .CRT$XCA (__xc_a) and .CRT$XCZ
// (__xc_z).
# if CARB_COMPILER_MSC
// If we just specified a variable of type carb::thread::ThreadLocal<LockList> (even allocating it into a specific
// custom section) the compiler will still try to instantiate it during the init_seg(user) order. To circumvent this
// behavior, we instead contain this variable inside `DataContainer`, but are careful to have the DataContainer()
// constructor well defined with zero side-effects. This is because constructLockList() will be called first (during the
// compiler's init_seg(lib) initialization order), which will construct the TLS member inside of DataContainer, but the
// DataContainer() constructor for lockListData runs after (during the compiler's init_seg(user) initialization order).
// clang-format off
// (for brevity)
struct DataContainer
{
    // Trivial placeholder type so the union below can be constant-initialized
    // without running the ThreadLocal constructor.
    struct DummyType { constexpr DummyType() noexcept {} };
    union
    {
        DummyType empty;
        carb::thread::ThreadLocal<LockList> tls; // placement-constructed by constructLockList()
    };
    // Deliberately has zero side effects: the TLS member is placement-new'ed separately
    // during the compiler's init_seg(lib) phase (see constructLockList()).
    constexpr DataContainer() noexcept : empty() {}
    // Destruction of `tls` is handled by the atexit handler registered in
    // constructLockList(), not here.
    ~DataContainer() noexcept {}
} __declspec(selectany) lockListData;
// clang-format on
// Tracks whether lockListData.tls has been placement-constructed yet.
__declspec(selectany) bool constructed{ false };
// Accessor for the per-thread lock list; valid only after construction has happened.
inline carb::thread::ThreadLocal<LockList>& lockList() noexcept
{
    // Should have been constructed with either pConstructLockList (initializer) or ensureLockList()
    CARB_ASSERT(constructed);
    return lockListData.tls;
}
// Placement-constructs the TLS lock list inside lockListData. Intended to run during the
// CRT's init_seg(lib) phase via the pConstructLockList pointer injected below, i.e.
// before ordinary (init_seg(user)) global initializers.
inline void constructLockList() noexcept
{
    // Construct the lock list and then register a function to destroy it at exit time
    CARB_ASSERT(!constructed);
    new (&lockListData.tls) carb::thread::ThreadLocal<LockList>();
    constructed = true;
    ::atexit([] {
        lockList().~ThreadLocal();
        constructed = false;
    });
}
inline void ensureLockList() noexcept
{
    // OVCC-1298: With LTCG turned on sometimes the linker doesn't obey the segment information below and puts
    // pConstructLockList that is supposed to construct the lock list into the wrong segment, not in the initializer
    // list. Which means it gets skipped at startup. As a work-around we can construct it when the
    // recursive_shared_mutex constructor is called, though this may be late and cause SIOF issues (see OM-18917).
    if (CARB_UNLIKELY(!constructed))
    {
        // std::call_once guarantees exactly one thread runs the construction even if
        // several recursive_shared_mutex constructors race here.
        static std::once_flag flag;
        std::call_once(flag, [] {
            // Re-check: the link-time initializer may have run between the outer test
            // and acquiring the once_flag.
            if (!constructed)
                constructLockList();
        });
        CARB_ASSERT(constructed);
    }
}
extern "C"
{
    // Declare these so the linker knows to include them
    using CRTConstructor = void(__cdecl*)();
    // __xc_a/__xc_z delimit the CRT's C++ initializer function-pointer table.
    extern CRTConstructor __xc_a[], __xc_z[];
    // Force the linker to include this symbol
#    pragma comment(linker, "/include:pConstructLockList")
    // Inject a pointer to our constructLockList() function at XCL, the same section that #pragma init_seg(lib) uses
#    pragma section(".CRT$XCL", long, read)
    __declspec(allocate(".CRT$XCL")) __declspec(selectany) CRTConstructor pConstructLockList = constructLockList;
}
# else
// According to this GCC bug: https://gcc.gnu.org/bugzilla//show_bug.cgi?id=65115
// The default priority if init_priority is not specified is 65535. So we use one value lower than that.
#    define DEFAULT_INIT_PRIORITY (65535)
#    define LIBRARY_INIT_PRIORITY (DEFAULT_INIT_PRIORITY - 1)
// Sentinel whose constructor/destructor bracket the lifetime of lockListTls below:
// both are initialized at LIBRARY_INIT_PRIORITY, i.e. before ordinary globals, so
// `constructed` reports whether the TLS object is currently alive.
struct Constructed
{
    bool constructed;
    constexpr Constructed() : constructed{ true }
    {
    }
    ~Constructed()
    {
        // Cleared at destruction so late uses (after static destruction) can be caught.
        constructed = false;
    }
    explicit operator bool() const
    {
        return constructed;
    }
} constructed CARB_ATTRIBUTE(weak, init_priority(LIBRARY_INIT_PRIORITY));
// The per-thread lock list itself; `weak` keeps one definition across translation units.
carb::thread::ThreadLocal<LockList> lockListTls CARB_ATTRIBUTE(weak, init_priority(LIBRARY_INIT_PRIORITY));
// Accessor for the per-thread lock list; asserts against use outside its lifetime (SIOF).
inline carb::thread::ThreadLocal<LockList>& lockList()
{
    CARB_ASSERT(constructed);
    return lockListTls;
}
// No-op on this platform: init_priority guarantees construction order, so there is
// nothing to construct lazily here.
constexpr inline void ensureLockList() noexcept
{
}
# endif
} // namespace detail
#endif
/**
* A recursive shared mutex. Similar to `std::shared_mutex` or carb::thread::shared_mutex, but can be used recursively.
*
* This primitive supports lock conversion: If a thread already holds one or more shared locks and attempts to take an
* exclusive lock, the shared locks are released and the same number of exclusive locks are added. However, this is not
* done atomically. @see recursive_shared_mutex::lock() for more info.
*
* A single thread-local storage entry is used to track the list of recursive_shared_mutex objects that a thread has
* locked and their recursive lock depth. However, as this is a header-only class, all modules that use this class will
* allocate their own thread-local storage entry.
*/
class recursive_shared_mutex : private carb::thread::shared_mutex
{
public:
    /**
     * Constructor.
     */
#if !CARB_DEBUG && !CARB_COMPILER_MSC
    constexpr
#endif
    recursive_shared_mutex()
    {
        // Work-around for OVCC-1298: make sure the thread-local lock list exists even if
        // the link-time initializer was skipped (no-op on non-MSVC builds).
        detail::ensureLockList();
    }
    /**
     * Destructor.
     *
     * Debug builds assert that the mutex is not busy (locked) when destroyed.
     */
    // NOTE(review): this destructor is defaulted, so the busy assertion presumably lives
    // in the base shared_mutex destructor -- confirm.
    ~recursive_shared_mutex() = default;
    /**
     * Blocks until an exclusive lock can be obtained.
     *
     * When this function returns, the calling thread exclusively owns the mutex. At some later point the calling thread
     * will need to call unlock() to release the exclusive lock.
     *
     * @note If the calling thread has taken shared locks on this mutex, all of the shared locks are converted to
     * exclusive locks.
     *
     * @warning If existing shared locks must be converted to exclusive locks, the mutex must convert these shared locks
     * to exclusive locks. In order to do this, it must first release all shared locks which potentially allows another
     * thread to gain exclusive access and modify the shared resource. Therefore, any time an exclusive lock is taken,
     * assume that the shared resource may have been modified, even if the calling thread held a shared lock before.
     */
    void lock();
    /**
     * Attempts to immediately take an exclusive lock, but will not block if one cannot be obtained.
     *
     * @note If the calling thread has taken shared locks on this mutex, `false` is returned and no attempt to convert
     * the locks is made. If the calling thread already has an exclusive lock on this mutex, `true` is always returned.
     *
     * @returns `true` if an exclusive lock could be obtained, and at some later point unlock() will need to be called
     * to release the lock. If an exclusive lock could not be obtained immediately, `false` is returned.
     */
    bool try_lock();
    /**
     * Releases either a single shared or exclusive lock on this mutex. Synonymous with unlock_shared().
     *
     * @note If the calling thread has recursively locked this mutex, unlock() will need to be called symmetrically for
     * each call to a successful locking function.
     *
     * @warning `std::terminate()` will be called if the calling thread does not have the mutex locked.
     */
    void unlock();
    /**
     * Blocks until a shared lock can be obtained.
     *
     * When this function returns, the calling thread has obtained a shared lock on the resources protected by the
     * mutex. At some later point the calling thread must call unlock_shared() to release the shared lock.
     *
     * @note If the calling thread already owns an exclusive lock, then calling lock_shared() will actually increase the
     * exclusive lock count.
     */
    void lock_shared();
    /**
     * Attempts to immediately take a shared lock, but will not block if one cannot be obtained.
     *
     * @note If the calling thread already owns an exclusive lock, then calling try_lock_shared() will always return
     * `true` and will actually increase the exclusive lock count.
     *
     * @returns `true` if a shared lock could be obtained, and at some later point unlock_shared() will need to be
     * called to release the lock. If a shared lock could not be obtained immediately, `false` is returned.
     */
    bool try_lock_shared();
    /**
     * Releases either a single shared or exclusive lock on this mutex. Synonymous with unlock().
     *
     * @note If the calling thread has recursively locked this mutex, unlock() or unlock_shared() will need to be called
     * symmetrically for each call to a successful locking function.
     *
     * @warning `std::terminate()` will be called if calling thread does not have the mutex locked.
     */
    void unlock_shared();
    /**
     * Returns true if the calling thread owns the lock.
     * @note Use \ref owns_lock_shared() or \ref owns_lock_exclusive() for a more specific check.
     * @returns `true` if the calling thread owns the lock, either exclusively or shared; `false` otherwise.
     */
    bool owns_lock() const;
    /**
     * Returns true if the calling thread owns a shared lock.
     * @returns `true` if the calling thread owns a shared lock; `false` otherwise.
     */
    bool owns_lock_shared() const;
    /**
     * Returns true if the calling thread owns an exclusive lock.
     * @returns `true` if the calling thread owns an exclusive lock; `false` otherwise.
     */
    bool owns_lock_exclusive() const;
private:
    // Returns the calling thread's bookkeeping entry for this mutex, or nullptr if the
    // thread currently holds no locks on it. Does not modify the per-thread list.
    const detail::LockEntry* hasLockEntry() const
    {
        auto& list = detail::lockList().get();
        auto iter = std::find_if(list.begin(), list.end(), [this](detail::LockEntry& e) { return e.first == this; });
        return iter == list.end() ? nullptr : std::addressof(*iter);
    }
    // Returns the calling thread's bookkeeping entry for this mutex, creating a fresh
    // zero-depth entry if none exists yet. A zero-depth entry must either be incremented
    // (lock taken) or removed via removeLockEntry() before returning to the caller.
    detail::LockEntry& lockEntry()
    {
        auto& list = detail::lockList().get();
        auto iter = std::find_if(list.begin(), list.end(), [this](detail::LockEntry& e) { return e.first == this; });
        if (iter == list.end())
            iter = (list.emplace_back(this, 0), list.end() - 1);
        return *iter;
    }
    // Removes an entry from the calling thread's list with swap-with-last + pop_back;
    // list order is not significant, so removal is O(1).
    void removeLockEntry(detail::LockEntry& e)
    {
        auto& list = detail::lockList().get();
        // `e` must point into the thread's own list (it came from lockEntry()).
        CARB_ASSERT(std::addressof(e) >= std::addressof(list.front()) && std::addressof(e) <= std::addressof(list.back()));
        e = list.back();
        list.pop_back();
    }
};
// Function implementations
// Acquires (or deepens) an exclusive lock; converts any shared holds to exclusive.
inline void recursive_shared_mutex::lock()
{
    detail::LockEntry& e = lockEntry();
    if (e.second < 0)
    {
        // Already locked exclusively (negative lock count). Increase the negative count.
        --e.second;
    }
    else
    {
        if (e.second > 0)
        {
            // This thread already has shared locks for this lock. We need to convert to exclusive.
            // NOTE: releasing here opens a window where another thread can take the lock and
            // modify the protected state before we re-acquire below (see the class docs).
            shared_mutex::unlock_shared();
        }
        // Acquire the lock exclusively
        shared_mutex::lock();
        // Now inside the lock
        // Fold the N previous shared holds plus this request into -(N + 1) exclusive holds.
        e.second = -(e.second + 1);
    }
}
// Attempts an exclusive lock without blocking; never converts existing shared holds.
inline bool recursive_shared_mutex::try_lock()
{
    detail::LockEntry& e = lockEntry();
    if (e.second < 0)
    {
        // Already locked exclusively (negative lock count). Increase the negative count.
        --e.second;
        return true;
    }
    else if (e.second == 0)
    {
        if (shared_mutex::try_lock())
        {
            // Inside the lock
            e.second = -1;
            return true;
        }
        // Lock failed
        // Discard the zero-depth bookkeeping entry lockEntry() just created.
        removeLockEntry(e);
    }
    // Either we already have shared locks (that can't be converted to exclusive without releasing the lock and possibly
    // not being able to acquire it again) or the try_lock failed.
    return false;
}
// Releases one level of lock depth; the depth's sign selects the underlying release.
inline void recursive_shared_mutex::unlock()
{
    detail::LockEntry& e = lockEntry();
    // A zero depth here means unlock() without a matching lock.
    CARB_CHECK(e.second != 0);
    if (e.second > 0)
    {
        // Positive depth: shared holds.
        if (--e.second == 0)
        {
            shared_mutex::unlock_shared();
            removeLockEntry(e);
        }
    }
    else if (e.second < 0)
    {
        // Negative depth: exclusive holds.
        if (++e.second == 0)
        {
            shared_mutex::unlock();
            removeLockEntry(e);
        }
    }
    else
    {
        // unlock() without being locked!
        std::terminate();
    }
}
inline void recursive_shared_mutex::lock_shared()
{
detail::LockEntry& e = lockEntry();
if (e.second < 0)
{
// We already own an exclusive lock, which is stronger than shared. So just increase the exclusive lock.
--e.second;
}
else
{
if (e.second == 0)
{
shared_mutex::lock_shared();
// Now inside the lock
}
++e.second;
}
}
inline bool recursive_shared_mutex::try_lock_shared()
{
detail::LockEntry& e = lockEntry();
if (e.second < 0)
{
// We already own an exclusive lock, which is stronger than shared. So just increase the exclusive lock.
--e.second;
return true;
}
else if (e.second == 0 && !shared_mutex::try_lock_shared())
{
// Failed to get the shared lock
removeLockEntry(e);
return false;
}
++e.second;
return true;
}
// Shared and exclusive releases share one implementation: the sign of the per-thread
// lock depth tells unlock() which underlying release to perform.
inline void recursive_shared_mutex::unlock_shared()
{
    unlock();
}
inline bool recursive_shared_mutex::owns_lock() const
{
auto entry = hasLockEntry();
return entry ? entry->second != 0 : false;
}
inline bool recursive_shared_mutex::owns_lock_exclusive() const
{
auto entry = hasLockEntry();
return entry ? entry->second < 0 : false;
}
inline bool recursive_shared_mutex::owns_lock_shared() const
{
auto entry = hasLockEntry();
return entry ? entry->second > 0 : false;
}
} // namespace thread
} // namespace carb
|
omniverse-code/kit/include/carb/l10n/IL10n.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
/** @file
* @brief The L10n interface.
*/
#pragma once
#include "../Defines.h"
#include "../logging/Log.h"
namespace carb
{
/** Utilities for localizing text. */
namespace l10n
{
/** The return type for @ref IL10n::getHashFromKeyString(). */
using StringIdentifier = uint64_t;
/** An opaque struct representing a localization table. */
struct LanguageTable
{
};
/** An opaque struct representing a language ID. */
struct LanguageIdentifier
{
};
/** Use the main language table for the process if this is passed. */
const LanguageTable* const kLanguageTableMain = nullptr;
/** The currently set language will be used when this is passed. */
const LanguageIdentifier* const kLanguageCurrent = nullptr;
/** The entry point to getLocalizedStringFromHash().
 * @copydoc IL10n::getLocalizedStringFromHash
 */
using localizeStringFn = const char*(CARB_ABI*)(const LanguageTable* table,
                                                StringIdentifier id,
                                                const LanguageIdentifier* language);
/** The default language will be used when this is passed.
 * The default language will always be US English.
 * @note Sentinel address only -- presumably compared by identity and never
 *       dereferenced; confirm against the plugin implementation.
 */
const LanguageIdentifier* const kLanguageDefault = reinterpret_cast<const LanguageIdentifier*>(0xFFFFFFFFFFFFFFFF);
/** This is returned from some interface functions when an unknown language is
 * requested.
 * @note Sentinel address only -- presumably compared by identity and never
 *       dereferenced; confirm against the plugin implementation.
 */
const LanguageIdentifier* const kLanguageUnknown = reinterpret_cast<const LanguageIdentifier*>(0xFFFFFFFFFFFFFFFE);
/** A definition that can be used for loading a language table embedded in C++ code. */
struct LanguageTableData
{
    /** The number of languages in the table. */
    size_t languagesLength;
    /** The number of translation entries in the table.
     * Any valid language table will have at least 4 rows, since the first 4
     * rows have special meanings.
     */
    size_t keysLength;
    /** The list of translation languages. These are specified as POSIX locale identifiers.
     * The length of this array is @ref languagesLength.
     * The first language in this array must be "en_US*"
     */
    const char* const* languages;
    /** The hashes of the key strings for the translations.
     * The length of this array is @ref keysLength.
     * Note that this contains keys for the first 4 rows in the table, even
     * though the first 4 rows have a special purpose. The first 4 keys are
     * never read.
     */
    const uint64_t* keys;
    /** The translation table.
     * This is a matrix with @ref languagesLength columns and @ref keysLength rows,
     * stored as a flat array. The translation of keys[i] into languages[j] is
     * presumably languageTable[i * languagesLength + j] (row-major with one row per
     * key) -- confirm against the table generator.
     * The first 4 rows have special usages:
     *  0: The language names for each column in US English
     *  1: The territory names for each column in US English
     *  2: The language names for each column in the language for that column
     *  3: The territory names for each column in the language for that column
     */
    const char* const* languageTable;
};
/** Boolean value tags for the getLanguageName() and getTerritoryName()
* functions. These determine how the language and territory names will be
* returned. Note, returning the name of the language in any other arbitrary
* supported language is beyond the scope of the automatic behavior of the
* tables. If such an arbitrary translation is needed, the language's name
* would have to be added to each table and translated into each target
* language. Accessing the arbitrary translations in that case would end up
* as a lookupString() call.
*/
/** Selects the language in which getLanguageName()/getTerritoryName() return their result. */
enum class LocalizedName
{
    /** Retrieve the name in US English (ie: "Polish"). Note that this will
     * always be the first or second string entries in any given translation
     * table.
     */
    eUsEnglish,
    /** Retrieve the name in the language the identifier specifies (ie:
     * "Polski"). Note that this will always be the third or fourth string
     * entries in any given translation table.
     */
    eLocalized,
};
/** The localization interface. */
struct IL10n
{
CARB_PLUGIN_INTERFACE("carb::l10n::IL10n", 1, 0)
/** Calculates the lookup hash for a US English key string.
*
* @param[in] keyString The string to calculate the hash identifier for.
* This may not be nullptr or an empty string.
* @returns The calculated hash of the string. This will be the same
* algorithm that is used by the `String Table Conversion Tool`
* to generate the table and mapping structure.
* @returns 0 if the input string is nullptr or empty.
*
* @remarks This calculates the hash value for a string. This is useful
* for scripts to be able to pre-hash and cache their string
* identifiers for quicker lookups later.
*
* @note This is not intended to be directly used in most situations.
* Typical C++ code should use CARB_LOCALIZE() and typical python
* code should use carb_localize() or carb_localize_hashed().
*/
StringIdentifier(CARB_ABI* getHashFromKeyString)(const char* keyString) noexcept;
/** Looks up a string's translation in the localization system.
*
* @param[in] table The optional local language table to search first
* for the requested key string. If this is
* non-nullptr and the key string is not found in this
* table or the requested language is not supported by
* this table, the framework's registered main table
* will be checked as well. This may be nullptr to
* only search the framework's main table.
* @param[in] id The hashed string identifier of the string to look
* up.
* @param[in] language The language to retrieve the translated string in.
* This may be set to @ref kLanguageCurrent to use the
* current language for the localization system (this
* is the default behavior). This can also be any
* specific language identifier to retrieve the string
* in another supported language. This may also be
* @ref kLanguageDefault to retrieve the string in the
* system's default language if a translation is
* available.
* @returns The translated string is a supported language is requested and
* the string with the requested hash is found in the table.
* @returns nullptr if no translation is found in the table, if an
* unsupported language is requested, or if the key string has no
* mapping in the table.
* @returns An error message if the config setting to return noticeable
* failure strings is enabled.
*
* @note This is not intended to be directly used in most situations.
* Typical C++ code should use CARB_LOCALIZE() and typical python
* code should use carb_localize() or carb_localize_hashed().
*/
const char*(CARB_ABI* getLocalizedStringFromHash)(const LanguageTable* table,
StringIdentifier id,
const LanguageIdentifier* language) noexcept;
/** Retrieves the current system locale information.
*
* @returns A language identifier for the current system language if it
* matches one or more of the supported translation tables.
* @returns The language identifier for US English if no matching
* translation tables are found.
*/
const LanguageIdentifier*(CARB_ABI* getSystemLanguage)() noexcept;
/** Enumerates available/supported language identifiers in the localization system.
*
* @param[in] table The optional local table to also search for unique
* language identifiers to return. If this is
* non-nullptr, the supported language identifiers in
* this table will be enumerated first, followed by any
* new unique language identifiers in the framework's
* registered main table. This may be nullptr to only
* enumerate identifiers in the main table.
* @param[in] index The index of the language identifier number to be
* returned. Set this to 0 to retrieve the first
* supported language (this will always return the
* language identifier corresponding to US English as the
* first supported language identifier). Set this to
* increasing consecutive indices to retrieve following
* supported language codes.
* @returns The language identifier corresponding to the supported
* language at index @p index.
* @retval kLanguageUnknown if the given index is out of range of
* the supported languages.
*/
const LanguageIdentifier*(CARB_ABI* enumLanguageIdentifiers)(const LanguageTable* table, size_t index) noexcept;
/** Retrieves the language identifier for a given locale name.
*
* @param[in] table The optional local table to also search for a
* matching language identifier in. This may be
* nullptr to only search the framework's 'main'
* table.
* @param[in] language The standard Unix locale name in the format
* "<language>_<territory>" where "<language>" is a
* two character ISO-639-1 language code and
* "<territory>" is a two-character ISO-3166-1
* Alpha-2 territory code. An optional encoding
* string may follow this but will be ignored. This
* must not be nullptr or an empty string.
* @returns The language identifier corresponding to the selected Unix
* locale name if a table for the requested language and
* territory is found. If multiple matching supported tables are
* found for the requested language (ie: Canadian French, France
* French, Swiss French, etc), the one for the matching territory
* will be returned instead. If no table exists for the
* requested territory in the given language, the language
* identifier for an arbitrary table for the requested language
* will be returned instead. This behavior may be modified by a
* runtime config setting that instead causes @ref
* kLanguageUnknown to be returned if no exact language/territory
* match exists.
* @retval kLanguageUnknown if the requested language does not have
* a translation table for it in the localization system, or if
* the config setting to only allow exact matches is enabled and
* no exact language/territory match could be found.
*/
const LanguageIdentifier*(CARB_ABI* getLanguageIdentifier)(const LanguageTable* table, const char* language) noexcept;
/** Retrieves a language's or territory's name as a friendly string.
*
* @param[in] table The optional local language table to check for
* the requested name first. If this is nullptr
* or the requested language identifier is not
* supported by the given table, the framework's
* main registered table will be checked.
* @param[in] language The language identifier of the language or
* territory name to retrieve. This may not be
* @ref kLanguageUnknown. This may be @ref
* kLanguageCurrent to retrieve the name for the
* currently selected language.
* @param[in] retrieveIn The language to return the string in. This can
* be used to force the language's or territory's
* name to be returned in US English or the name
* of @p language in @p language.
* @returns The name of the language or territory in the specified
* localization.
* @returns An empty string if the no translation table exists for the
* requested language or an invalid language identifier is given.
* @returns An error message if the config setting to return noticeable
* failure strings is enabled.
*
* @note This will simply return the strings in the second and third, or
* fourth and fifth rows of the CSV table (which should have become
* properties of the table once loaded).
*/
const char*(CARB_ABI* getLanguageName)(const LanguageTable* table,
const LanguageIdentifier* language,
LocalizedName retrieveIn) noexcept;
/** @copydoc getLanguageName */
const char*(CARB_ABI* getTerritoryName)(const LanguageTable* table,
const LanguageIdentifier* language,
LocalizedName retrieveIn) noexcept;
/** Retrieves the standard Unix locale name for the requested language identifier.
*
* @param[in] table The optional local language table to retrieve the
* locale identifier from. This may be nullptr to
* only search the framework's registered main
* language table.
* @param[in] language The language identifier to retrieve the Unix locale
* name for. This may not be @ref kLanguageUnknown.
* This may be @ref kLanguageCurrent to retrieve the
* locale name for the currently selected language.
* @returns The standard Unix locale name for the requested language
* identifier.
* @returns an empty string if the language identifier is invalid or no translation table exist
* for it.
* @returns an error message if the config setting to return noticeable failure string is
* enabled.
*/
const char*(CARB_ABI* getLocaleIdentifierName)(const LanguageTable* table,
const LanguageIdentifier* language) noexcept;
/** Sets the new current language from a standard Unix locale name or language identifier.
*
* @param[in] table The optional local language table to check to see
* if the requested language is supported or not.
* This may be nullptr to only search the framework's
* registered main table. If the local table doesn't
* support the requested language, the framework's
* main table will still be searched.
* @param[in] language Either the locale name or identifier for the new
* language to set as current for the calling process.
* For the string version, this may be nullptr or an
* empty string to switch back to the system default
* language. For the language identifier version,
* this may be set to @ref kLanguageDefault to switch
* back to the system default language.
* @returns true if the requested language is supported and is
* successfully set.
* @returns false if the requested language is not supported.
* In this case, the current language will not be modified.
*
* @note the variant that takes a string locale identifier will just be a
* convenience helper function that first looks up the language
* identifier for the locale then passes it to the other variant.
* If the locale lookup fails, the call will fail since it would be
* requesting an unsupported language.
*/
bool(CARB_ABI* setCurrentLanguage)(const LanguageTable* table, const LanguageIdentifier* language) noexcept;
/** @copydoc setCurrentLanguage */
bool(CARB_ABI* setCurrentLanguageFromString)(const LanguageTable* table, const char* language) noexcept;
/** Retrieves the language identifier for the current language.
*
* @returns The identifier for the current language.
* @retval kLanguageDefault if an error occurs.
*/
const LanguageIdentifier*(CARB_ABI* getCurrentLanguage)() noexcept;
/** Registers the host app's main language translation table.
*
* @param[in] table The table to register as the app's main lookup table.
* This may be nullptr to indicate that no language table
* should be used and that only US English strings will
* be used by the app.
* @returns true if the new main language table is successfully set.
* @returns false if the new main language table could not be set.
*
* @note This is a per-process setting.
*/
bool(CARB_ABI* setMainLanguageTable)(const LanguageTable* table) noexcept;
/** Creates a new local language translation table.
*
* @param[in] data The language table to load.
* This language table must remain valid and constant
* until unloadLanguageTable() is called.
* The intended use of this function is to load a static
* constant data table.
* @returns The newly loaded and created language table if the data file
* exists and was successfully loaded. This must be destroyed
* with unloadLanguageTable() when it is no longer needed.
* @returns nullptr if an unrecoverable error occurred.
*/
LanguageTable*(CARB_ABI* loadLanguageTable)(const LanguageTableData* data) noexcept;
/** Creates a new local language translation table from a data file.
*
* @param[in] filename The name of the data file to load as a language
* translation table. This may not be nullptr or an
* empty string. If this does not have an extension,
* both the given filename and one ending in ".lang"
* will be tried.
* @returns The newly loaded and created language table if the data file
* exists and was successfully loaded. This must be destroyed
* with unloadLanguageTable() when it is no longer needed.
* @returns nullptr if the data file was not found with or without the
* ".lang" extension, or the file was detected as corrupt while
* loading.
*
* @note The format of the localization file is as follows:
* byte count | segment description
* [0-13] | File signature. The exact UTF-8 text: "nvlocalization".
* [14-15] | File format version. Current version is 00.
* | This version number is 2 hex characters.
* [16-19] | Number of languages.
* | This corresponds to @ref LanguageTableData::languagesLength.
* [20-23] | Number of keys.
* | This corresponds to @ref LanguageTableData::keysLength.
* [24-..] | Table of @ref LanguageTableData::keysLength 64 bit keys.
* | This is @ref LanguageTableData::keysLength * 8 bytes long.
* | This corresponds to @ref LanguageTableData::keys.
* [..-..] | Block of @ref LanguageTableData::languagesLength null
* | terminated language names.
* | This will contain exactly @ref LanguageTableData::languagesLength
* | 0x00 bytes; each of those bytes indicates the end of a string.
* | The length of this segment depends on the data within it;
* | the full segment must be read to find the start of the
* | next section.
* | This corresponds to @ref LanguageTableData::languages.
* [..-..] | Block of @ref LanguageTableData::languagesLength *
* | @ref LanguageTableData::keysLength
* | null terminated translations.
* | This will contain exactly @ref LanguageTableData::languagesLength *
* | @ref LanguageTableData::keysLength 0x00 bytes; each of those bytes
* | indicates the end of a string.
* | The last byte of the file should be the null terminator of the last
* | string in the file.
* | The length of this section also depends on the length of
* | the data contained within these strings.
* | If the end of the file is past the final 0x00 byte in this
* | segment, the reader will assume the file is corrupt.
* | This corresponds to @ref LanguageTableData::languageTable.
*/
LanguageTable*(CARB_ABI* loadLanguageTableFromFile)(const char* fileName) noexcept;
/** Destroys a previously loaded language table.
*
* @param[in] table The language table to be destroyed.
* This must not be nullptr.
* This should be a table that was previously returned
* from loadLanguageTable().
* It is the caller's responsibility to ensure this table
* will no longer be needed or accessed.
*/
void(CARB_ABI* unloadLanguageTable)(LanguageTable* table) noexcept;
/** Sets the current search path for finding localization files for a module.
*
* @param[in] searchPath The search path for where to look for
* localization data files.
* This can be an absolute or relative path.
* @returns true if the new search path is successfully set.
* @returns false if the new search path could not be set.
*
* @remarks This sets the search path to use for finding localization
* files when modules load. By default, only the same directory
* as the loaded module or script will be searched. This can be
* used to specify additional directories to search for
* localization files in. For example, the localization files
* may be stored in the 'lang/' folder for the app instead of in
* the 'bin/' folder.
*/
bool(CARB_ABI* addLanguageSearchPath)(const char* searchPath) noexcept;
/** Removes a search path from the list used to find localization files for a module.
*
* @param[in] searchPath The search path to remove from the search path
* list.
* @returns true if the search path was successfully removed.
* @returns false if the search path was not found.
*
* @remarks This removes a search path added by addLanguageSearchPath().
* If the same path was added multiple times, it will have to be
* removed multiple times.
*
* @note The executable directory can be removed from the search path
* list, if that is desired.
*/
bool(CARB_ABI* removeLanguageSearchPath)(const char* searchPath) noexcept;
/** Enumerate the search paths that are currently set.
* @param[in] index The index of the search path to retrieve.
* The first search path index will always be 0.
* The valid search paths are a contiguous range of
* indices, so the caller can pass incrementing values
* beginning at 0 for @p index to enumerate all of the
* search paths.
*
* @returns The search path corresponding to @p index if one exists at index.
* @returns nullptr if there is no search path corresponding to @p index.
*
* @remarks The example usage of this function would be to call this in a
* loop where @p index starts at 0 and increments until a call to
* enumLanguageSearchPaths(@p index) returns nullptr. This would
* enumerate all search paths that are currently set.
* The index is no longer valid if the search path list has been
* modified.
*/
const char*(CARB_ABI* enumLanguageSearchPaths)(size_t index) noexcept;
};
/** Fallback for getLocalizedStringFromHash() used while the localization plugin is not loaded.
 * @param[in] table The localization table that would be used for the lookup (ignored).
 * @param[in] id The hash of the keystring to look up (ignored).
 * @param[in] language The language the lookup would be performed in (ignored).
 * @returns nullptr always; callers are expected to fall back to the keystring itself.
 */
inline const char* CARB_ABI getLocalizedStringFromHashNoPlugin(
    const LanguageTable* table, StringIdentifier id, const LanguageIdentifier* language) noexcept
{
    // Nothing can be looked up without the plugin; silence unused-parameter warnings.
    CARB_UNUSED(table, id, language);

    // Emit a diagnostic so a missing carb.l10n plugin is discoverable from the log.
    CARB_LOG_ERROR("localization is being used with carb.l10n.plugin not loaded");
    return nullptr;
}
} // namespace l10n
} // namespace carb
/** Pointer to the interface for use from CARB_LOCALIZE(). Defined in @ref CARB_LOCALIZATION_GLOBALS.
 * Remains nullptr until registerLocalizationForClient() successfully acquires the IL10n interface. */
CARB_WEAKLINK CARB_HIDDEN carb::l10n::IL10n* g_carbLocalization;
/** Pointer to the function called by CARB_LOCALIZE(). Defined in @ref CARB_LOCALIZATION_GLOBALS.
 * Defaults to the no-plugin fallback so CARB_LOCALIZE() is always safe to call, even before the
 * plugin is registered or after it has been deregistered. */
CARB_WEAKLINK CARB_HIDDEN carb::l10n::localizeStringFn g_localizationFn = carb::l10n::getLocalizedStringFromHashNoPlugin;
/* Exhale can't handle these for some reason and they aren't meant to be used directly */
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace carb
{
namespace l10n
{
/** An internal helper for CARB_LOCALIZE()
 * @param[in] id The hash of @p string.
 * @param[in] string The localization keystring.
 * @returns The translated string if a supported language is requested and
 *          the string with the requested hash is found in the table.
 * @returns @p string if no translation is found in the table, if an
 *          unsupported language is requested, or if the key string has no
 *          mapping in the table.
 * @returns An error message if the config setting to return noticeable
 *          failure strings is enabled.
 *
 * @note This is an internal implementation for CARB_LOCALIZE() as well as the
 *       script bindings. Do not directly call this function.
 */
inline const char* getLocalizedString(StringIdentifier id, const char* string) noexcept
{
    // Ask the currently installed lookup function (plugin-backed, or the no-plugin fallback).
    const char* translated = g_localizationFn(kLanguageTableMain, id, kLanguageCurrent);
    if (translated != nullptr)
        return translated;

    // No translation available; hand the caller's keystring back unchanged.
    return string;
}
} // namespace l10n
} // namespace carb
#endif
/** Look up a string from the localization database for the current plugin.
 * @param string A string literal.
 *               This must not be nullptr.
 *               This is the key string to look up in the database.
 *
 * @returns The localized string for the keystring @p string, given the current
 *          localization that has been set for the process.
 * @returns If there is no localized string for the given keystring @p string,
 *          the US English string will be returned.
 * @returns If @p string is not found in the localization database at all,
 *          @p string will be returned.
 * @returns An error message if the localized string is not found and the config
 *          setting to return noticeable failure strings is enabled.
 */
#define CARB_LOCALIZE(string) carb::l10n::getLocalizedString(CARB_HASH_STRING(string), string)
|
omniverse-code/kit/include/carb/l10n/L10nUtils.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
/** @file
* @brief The L10n interface.
*/
#pragma once
#include "../Framework.h"
#include "IL10n.h"
/** Placeholder for global scope work that needs to be done for localization. Do not call
* this directly. This is called by @ref CARB_GLOBALS().
*/
#define CARB_LOCALIZATION_GLOBALS()
namespace carb
{
namespace l10n
{
/** Called during client initialization to obtain the globals needed for localization. */
inline void registerLocalizationForClient() noexcept
{
g_carbLocalization = getFramework()->tryAcquireInterface<IL10n>();
if (g_carbLocalization != nullptr)
g_localizationFn = g_carbLocalization->getLocalizedStringFromHash;
}
/** Called during client shutdown to clear out the global state. */
inline void deregisterLocalizationForClient() noexcept
{
g_carbLocalization = nullptr;
g_localizationFn = carb::l10n::getLocalizedStringFromHashNoPlugin;
}
} // namespace l10n
} // namespace carb
|
omniverse-code/kit/include/carb/l10n/L10nBindingsPython.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "IL10n.h"
#include "L10nUtils.h"
namespace carb
{
namespace l10n
{
/** Defines the carb.l10n python bindings on the given module.
 * @param[in] m The pybind11 module object to attach the bindings to.
 * @returns No return value.
 *
 * @note Every binding releases the GIL for the duration of the call via
 *       py::call_guard<py::gil_scoped_release>.
 */
inline void definePythonModule(py::module& m)
{
    m.doc() = "pybind11 carb.l10n bindings";
    // Opaque handle types; exposed only so they can be passed through bindings.
    py::class_<LanguageTable>(m, "LanguageTable");
    py::class_<LanguageIdentifier>(m, "LanguageIdentifier");
    m.def("register_for_client", carb::l10n::registerLocalizationForClient,
          R"(Register the l10n plugin for the current client.
This must be called before using any of the localization plugins, if
carb::startupFramework() has not been called.
This use case is only encountered in the tests. Standard Carbonite applications
call carb::startupFramework() so they should never have to call this.
If this is not called, the localization system will be non-functional.)",
          py::call_guard<py::gil_scoped_release>());
    m.def("deregister_localization_for_client", carb::l10n::deregisterLocalizationForClient,
          R"(Deregister the localization plugin for the current client.
This can be called to deregister the localization plugin for the current client,
if carb::shutdownFramework will not be called.)",
          py::call_guard<py::gil_scoped_release>());
    // Hashes the keystring on the fly; falls back to hash 0 when the plugin is not
    // loaded, in which case getLocalizedString() just returns the keystring.
    // NOTE(review): the docstring below says "given its hash" but this overload takes
    // the keystring itself -- it appears copied from get_localized_string_from_hash;
    // confirm before changing the user-visible text.
    m.def("get_localized_string",
          [](const char* string) {
              return carb::l10n::getLocalizedString(
                  (g_carbLocalization == nullptr) ? 0 : g_carbLocalization->getHashFromKeyString(string), string);
          },
          R"(Retrieve a string from the localization database, given its hash.
This function retrieves a localized string based on the hash of the keystring.
This should be used on all strings found in the UI, so that they can
automatically be shown in the correct language. Strings returned from this
function should never be cached, so that changing the language at runtime will
not result in stale strings being shown in the UI.
Args:
string: The keystring that identifies the set of localized strings to
return. This will typically correspond to the US English string
for this UI text. This string will be returned if there is no
localization table entry for this key.
Returns:
The localized string for the input hash in the currently set language, if
a string exists for that language.
If no localized string from the currently set language exists for the hash,
the US English string will be returned.
If the hash is not found in the localization database, the string parameter
will be returned. Alternatively, if a config setting is enabled, error
messages will be returned in this case.)",
          py::call_guard<py::gil_scoped_release>());
    // Returns 0 when the plugin is not loaded (0 is the "not found" hash).
    m.def("get_hash_from_key_string",
          [](const char* string) {
              return (g_carbLocalization == nullptr) ? 0 : g_carbLocalization->getHashFromKeyString(string);
          },
          R"(Hash a keystring for localization.
This hashes a keystring that can be looked up with carb_localize_hashed().
Strings must be hashed before passing them into carb_localize_hashed(); this is done
largely so that automated tools can easily find all of the localized strings in
scripts by searching for this function name.
In cases where a string will be looked up many times, it is ideal to cache the
hash returned, so that it is not recalculated excessively.
Args:
string: The keystring to hash.
This must be a string.
This must not be None.
Returns:
The hash for the string argument.
This hash can be used in carb_localize_hashed().)",
          py::call_guard<py::gil_scoped_release>());
    // Hash-based variant; the keystring argument is only the fallback/display text.
    m.def("get_localized_string_from_hash",
          [](StringIdentifier id, const char* string) { return carb::l10n::getLocalizedString(id, string); },
          R"(Retrieve a string from the localization database, given its hash.
This function retrieves a localized string based on the hash of the keystring.
This should be used on all strings found in the UI, so that they can
automatically be shown in the correct language. Strings returned from this
function should never be cached, so that changing the language at runtime will
not result in stale strings being shown in the UI.
Args:
id: A hash that was previously returned by hash_localization_string().
string: The keystring that was hashed with hash_localization_string().
This is passed to ensure that a readable string is returned if
the hash is not found in the localization table.
Returns:
The localized string for the input hash in the currently set language, if
a string exists for that language.
If no localized string from the currently set language exists for the hash,
the US English string will be returned.
If the hash is not found in the localization database, the string parameter
will be returned. Alternatively, if a config setting is enabled, error
messages will be returned in this case.)",
          py::arg("id") = 0, py::arg("string") = "{TRANSLATION NOT FOUND}", py::call_guard<py::gil_scoped_release>());
}
} // namespace l10n
} // namespace carb
|
omniverse-code/kit/include/carb/crashreporter/ICrashReporter.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
///! @file
///! @brief Main interface header for ICrashReporter and related types and values.
#pragma once
#include "../Interface.h"
#include "../Types.h"
namespace carb
{
/** Namespace for the crash reporter. */
namespace crashreporter
{
/** Prototype for a callback that indicates when a crash dump upload has completed.
*
* @param[in] userData The opaque user data object that was originally passed to the
* @ref ICrashReporter::sendAndRemoveLeftOverDumpsAsync() function
* in the @a userData parameter.
* @returns No return value.
*
* @remarks This callback function will be performed when the upload of old crash dump files
* has completed, successfully or otherwise. At this point, the upload request made
* by the corresponding @ref ICrashReporter::sendAndRemoveLeftOverDumpsAsync() call
* has completed. However, this does not necessarily mean that the thread created
* by it has exited. If another call was made, a new request would have been queued
* on that same thread and would be serviced next by the same thread.
*
* @note This callback is both separate and different from the callback specified by the
* @ref OnCrashSentFn prototype. This particular callback is only performed when the
* full upload request of all existing old crash dump files completes whereas the
* @ref OnCrashSentFn callback is performed every time any single upload completes.
*/
using OnDumpSubmittedFn = void (*)(void* userData);
/** Result codes passed to crash-dump upload subscribers to report whether a given upload
 * succeeded. These values are delivered to callback functions registered through
 * @ref ICrashReporter::addCrashSentCallback().
 */
enum class CrashSentResult
{
    eSuccess = 0, ///< The upload completed successfully.
    eFailure = 1, ///< The upload failed for some unspecified reason.
};
/** Enumerates the primitive types a volatile metadata callback may produce. The type selects
 * which member of the callback union is invoked and how the resulting value is converted into
 * a string to be sent as metadata. Restricting callbacks to these common primitive types
 * discourages implementors from using their own potentially dangerous string conversions.
 */
enum class MetadataValueType
{
    eInteger = 0, ///< The callback produces a signed 64-bit integer value.
    eUInteger = 1, ///< The callback produces an unsigned 64-bit integer value.
    eFloat = 2, ///< The callback produces a 64-bit floating point value.
    eString = 3, ///< The callback produces an arbitrary-length UTF-8 encoded string.
    eWideString = 4, ///< The callback produces an arbitrary-length wide string (`wchar_t` characters).
};
/** Provides a single piece of additional information or context to a crash upload complete
 * callback function. This is stored as a key/value pair. An array of these objects is
 * passed to the @ref OnCrashSentFn callback to provide extra context to why a crash dump
 * upload may have failed or additional information about a successful upload. This
 * information is typically only useful for display to a user or to be output to a log.
 */
struct CrashSentInfo
{
    const char* key; ///< The key name for this piece of information.
    const char* value; ///< The specific value associated with the given key. Per the
                       ///< @ref OnCrashSentFn notes, copy this string if it needs to
                       ///< persist beyond the callback.
};
/** Prototype for a callback function that is performed any time a dump is successfully uploaded.
*
* @param[in] crashSentResult The result code of the upload operation. Currently this only
* indicates whether the upload was successful or failed. Further
* information about the upload operation can be found in the
* @p infoData array.
* @param[in] infoData An array of zero or more key/value pairs containing additional
* information for the upload operation. On failure, this may
* include the status code or status message from the server. On
* success, this may include a unique fingerprint for the crash
* dump that was uploaded. This array will contain exactly
* @p infoDataCount items.
* @param[in] infoDataCount The total number of items in the @p infoData array.
* @param[in] userData The opaque caller specified data object that was provided when
* this callback was originally registered. It is the callee's
* responsibility to know how to successfully make use of this
* value.
* @returns No return value.
*
* @remarks This callback is performed every time a crash dump file upload completes. This
* will be called whether the upload is successful or not. This will not however
* be called if crash dump uploads are disabled (ie: the
* `/crashreporter/devOnlyOverridePrivacyAndForceUpload` setting is false and the
* user has not provided 'performance' consent) or the file that an upload was requested
* for was missing some required metadata (ie: the `/crashreporter/product` and
* `/crashreporter/version` settings). In both those cases, no upload attempt will be
* made.
*
* @remarks The following key/value pair is defined for this callback when using the
* `carb.crashreporter-breakpad.plugin` implementation:
* * "response": A string containing the HTTP server's response to the upload
* attempt. If this string needs to persist, it must be copied
* by the callee.
*
* @thread_safety Calls to this callback will be serialized. It is however the callee's
* responsibility to safely access any additional objects including the
* @p userData object and any global resources.
*/
using OnCrashSentFn = void (*)(CrashSentResult crashSentResult,
const CrashSentInfo* infoData,
size_t infoDataCount,
void* userData);
/** Opaque handle for a single registered @ref OnCrashSentFn callback function. This is
* returned from ICrashReporter::addCrashSentCallback() and can be passed back to
* ICrashReporter::removeCrashSentCallback() to unregister it.
*/
struct CrashSentCallbackId;
/** Prototype for a callback function used to resolve symbol information.
*
* @param[in] address The address of the symbol being resolved.
* @param[in] name If the symbol resolution was successful, this will be the name of the
* symbol that @p address is contained in. If the resolution fails, this
* will be `nullptr`. If non-`nullptr`, this string must be copied before
* returning from the callback function if it needs to persist.
* @param[in] userData The opaque user data passed to the @ref ICrashReporter::resolveSymbol()
* function.
* @returns No return value.
*
* @remarks This callback is used to deliver the results of an attempt to resolve the name of
* a symbol in the current process. This callback is always performed synchronously
* to the call to ICrashReporter::resolveSymbol().
*/
using ResolveSymbolFn = void (*)(const void* address, const char* name, void* userData);
/** Metadata value callback function prototype.
*
* @param[in] context The opaque context value that was used when the metadata value was
* originally registered.
* @returns The current value of the metadata at the time of the call.
*
* @note Because these callbacks may be called during the handling of a crash, the calling thread
* and other threads may be in an unstable or undefined state when these are called.
* Implementations of these callbacks should avoid any allocations and locks if in any way
* avoidable. See ICrashReporter::addVolatileMetadataValue() for more information on how
* these callbacks should behave.
*/
using OnGetMetadataIntegerFn = int64_t (*)(void* context);
/** @copydoc OnGetMetadataIntegerFn */
using OnGetMetadataUIntegerFn = uint64_t (*)(void* context);
/** @copydoc OnGetMetadataIntegerFn */
using OnGetMetadataFloatFn = double (*)(void* context);
/** Metadata value callback function prototype.
*
* @param[out] buffer Receives the string value. This must be UTF-8 encoded and must not
* exceed @p maxLength bytes including the null terminator. This buffer
* will never be `nullptr`. Writing a null terminator is optional.
* @param[in] maxLength The maximum number of bytes including the null terminator that can fit
* in the buffer @p buffer. This will never be 0. It is the callback's
* responsibility to ensure no more than this many bytes is written to
* the output buffer.
* @param[in] context The opaque context value that was used when the metadata value was
* originally registered.
* @returns The total number of bytes **not including the null terminator character** that were
* written to the output buffer. This value **MUST** not exceed \p maxLength.
*/
using OnGetMetadataStringFn = size_t (*)(char* buffer, size_t maxLength, void* context);
/** Metadata value callback function prototype.
*
* @param[out] buffer Receives the string value. This must be wide characters (`wchar_t`) and not
* exceed @p maxLength *characters* including the null terminator. This buffer
* will never be `nullptr`. Writing a null terminator is optional.
* @param[in] maxLength The maximum number of *characters* including the null terminator that can fit
* in the buffer @p buffer. This will never be 0. It is the callback's
* responsibility to ensure no more than this many characters is written to
* the output buffer.
* @param[in] context The opaque context value that was used when the metadata value was
* originally registered.
* @returns The total number of *characters* **not including any null terminator** that were
* written to the output buffer. This value **MUST** not exceed \p maxLength.
*/
using OnGetMetadataWideStringFn = size_t (*)(wchar_t* buffer, size_t maxLength, void* context);
/** Descriptor of a single metadata callback function. This describes which type of callback is
 * being contained and the pointer to the function to call.
 */
struct MetadataValueCallback
{
    /** The type of the callback. This indicates which of the callbacks in the @ref fn union
     * below will be called to retrieve the value.
     */
    MetadataValueType type;
    /** A union containing the different types of function pointers for this callback. Exactly
     * one of these will be chosen based on @ref type:
     * eInteger -> getInteger, eUInteger -> getUInteger, eFloat -> getFloat,
     * eString -> getString, eWideString -> getWString.
     */
    union
    {
        OnGetMetadataIntegerFn getInteger; ///< Callback returning a signed 64-bit integer.
        OnGetMetadataUIntegerFn getUInteger; ///< Callback returning an unsigned 64-bit integer.
        OnGetMetadataFloatFn getFloat; ///< Callback returning a 64-bit floating point value.
        OnGetMetadataStringFn getString; ///< Callback returning an arbitrary length string.
        OnGetMetadataWideStringFn getWString; ///< Callback returning an arbitrary length wide string.
    } fn;
};
/** Registration identifier for a single metadata value. This is only used to unregister the
 * callback that was registered with the original metadata.
 */
using MetadataId = size_t;
/** Special metadata identifier to indicate an invalid metadata value or general failure in
 * registering the value with addVolatileMetadata*().
 */
// static_cast of -1 yields the maximum MetadataId value (well-defined unsigned conversion);
// named casts are preferred over C-style casts.
constexpr MetadataId kInvalidMetadataId = static_cast<MetadataId>(-1);
/** Special metadata identifier to indicate that a bad parameter was passed into one of the
 * ICrashReporter::addVolatileMetadata*() functions. This is not a valid identifier and will be
 * ignored if passed to ICrashReporter::removeVolatileMetadataValue().
 */
constexpr MetadataId kMetadataFailBadParameter = static_cast<MetadataId>(-2);
/** Special metadata identifier to indicate that the key being registered is either a known
 * reserved key or has already been registered as a volatile metadata key. This is not a valid
 * identifier and will be ignored if passed to ICrashReporter::removeVolatileMetadataValue().
 */
constexpr MetadataId kMetadataFailKeyAlreadyUsed = static_cast<MetadataId>(-3);
/** ICrashReporter is the interface to implement a plugin that catches and reports information
* about the crash to either a local file, a server, or both.
*
* ICrashReporter is an optional plugin that is automatically loaded by the framework and doesn't
* need to be specifically listed in the configuration. If an ICrashReporter plugin is found,
* it's enabled. Only one ICrashReporter instance is supported at a time.
*
* The crash report itself consists of multiple parts. Some parts are only present on certain
* supported platforms. All generated crash dump files will appear in the directory named by the
* "/crashreporter/dumpDir" setting. If no value is provided, the current working directory
* is used instead. The following parts could be expected:
* * A minidump file. This is only generated on Windows. This file will contain the state of
* the process's threads, stack memory, global memory space, register values, etc at the time
* of the crash. This file will end in '.dmp'.
* * A stack trace of the crash point file. This could be produced on all platforms. This
* file will end in '.txt'.
* * A metadata file. This is a TOML formatted file that contains all the metadata values that
* were known by the crash reporter at the time of the crash. This file will end in '.toml'.
*
* The crash reporter may have any number of arbitrary metadata values associated with it. These
* values are defined as key/value pair strings. There are two ways a metadata value can be
* defined:
* * Add a value to the `/crashreporter/data/` branch of the settings registry. This can be
* done directly through the ISettings interface, adding a value to one of the app's config
* files, or by using the addCrashMetadata() utility function. These values should be set
* once and either never or very rarely modified. There is a non-trivial amount of work
* related to collecting a new metadata value in this manner that could lead to an overall
* performance impact if done too frequently.
* * Add a key and data callback to collect the current value of a metadata key for something
* that changes frequently. This type of metadata value is added with addVolatileMetadata()
* on this interface. These values may change as frequently as needed. The current value
* will only ever be collected when a crash does occur or when the callback is removed.
*
* Once a metadata value has been added to the crash reporter, it cannot be removed. The value
* will remain even if the key is removed from `/crashreporter/data/` or its value callback is
* removed. This is intentional so that as much data as possible can be collected to be sent
* with the crash report as is possible.
*
* If a metadata key is registered as a volatile value, it will always override a key of the
* same name that is found under the `/crashreporter/data/` branch of the settings registry.
* Even if the volatile metadata value is removed or unregistered, it will still override any
* key of the same name found in the settings registry.
*
* Metadata key names may or may not be case sensitive depending on their origin. If a metadata
* value comes from the settings registry, its name is case sensitive since the settings registry
* is also case sensitive. Metadata values that are registered as volatile metadata values do
* not have case sensitive names. Attempting to register a new value under the same key but with
* different casing will fail since it would overwrite an existing name. This difference is
* intentional to avoid confusion in the metadata output. When adding metadata values through
* the settings registry, care should be taken to use consistent casing to avoid confusion in
* the output.
*/
struct ICrashReporter
{
    // 2.3: Added MetadataValueType::eWideString
    CARB_PLUGIN_INTERFACE("carb::crashreporter::ICrashReporter", 2, 3)

    /**
     * Upon crash, a crash dump is written to disk, uploaded, and then removed. However, due to settings or because the
     * application is in an undefined state, the upload may fail. This method can be used on subsequent runs of the
     * application to attempt to upload/cleanup previous failed uploads.
     *
     * This method returns immediately, performing all uploads/removals asynchronously. Supply an optional callback to
     * be notified when the uploads/removals have been completed. The callback will be performed regardless of whether
     * the upload is successful. However, each crash dump file will only be removed from the local file system if its
     * upload was successful and the "/crashreporter/preserveDump" setting is `false`. A future call to this function
     * will try the upload again on failed crash dumps.
     *
     * The callback will be performed on the calling thread before return if there is no upload task to perform or if
     * the crash reporter is currently disabled. In all other cases, the callback will be performed in the context of
     * another thread. It is the caller's responsibility to ensure all accesses made in the callback are thread safe.
     * The supplied callback may neither directly nor indirectly access this instance of ICrashReporter.
     *
     * @thread_safety This method is thread safe and can be called concurrently.
     *
     * @param onDumpSubmitted The callback function to be called when the dumps are uploaded and deleted.
     * @param userData The user data to be passed to the callback function.
     */
    void(CARB_ABI* sendAndRemoveLeftOverDumpsAsync)(OnDumpSubmittedFn onDumpSubmitted, void* userData);

    /**
     * Adds a new callback that is called after sending (successfully or not) a crash dump to a server.
     *
     * Registration of multiple callbacks is allowed and all registered callbacks will be called serially (the order in
     * which callbacks are called is undefined). It is allowed to use the same callback function (and userData) multiple
     * times.
     *
     * This method is thread safe and can be called concurrently.
     *
     * The supplied callback may neither directly nor indirectly access this instance of ICrashReporter.
     *
     * @param onCrashSent The new callback to register, must not be nullptr.
     * @param userData The user data to be passed to the callback function, can be nullptr.
     *
     * @return Not null if the provided callback was successfully registered, nullptr otherwise.
     */
    CrashSentCallbackId*(CARB_ABI* addCrashSentCallback)(OnCrashSentFn onCrashSent, void* userData);

    /**
     * Removes previously registered callback.
     *
     * This method is thread safe and can be called concurrently.
     *
     * The given parameter is the id returned from addCrashSentCallback.
     *
     * The given callback id can be nullptr or an invalid id.
     *
     * @param callbackId The callback to remove. A null or invalid pointer is accepted (though may produce an error
     *                   message).
     */
    void(CARB_ABI* removeCrashSentCallback)(CrashSentCallbackId* callbackId);

    /**
     * Attempts to resolve a given address to a symbolic name using debugging features available to the system.
     *
     * If symbol resolution fails or is not available, @p func is called with a `nullptr` name.
     *
     * @note This function can be extremely slow. Use for debugging only.
     *
     * @param address The address to attempt to resolve.
     * @param func The func to call upon resolution
     * @param user User-specific data to be passed to @p func
     *
     * @thread_safety The callback function is always performed synchronously to this call. It
     *                is the callee's responsibility to ensure safe access to both the @p user
     *                pointer and any global resources.
     */
    void(CARB_ABI* resolveSymbol)(const void* address, ResolveSymbolFn func, void* user);

    /** Adds a new volatile metadata value to the crash report.
     *
     * @param[in] keyName   The name of the metadata key to set. This must only contain
     *                      printable ASCII characters except for a double quote ('"'),
     *                      slash ('/'), or whitespace. It is the caller's responsibility
     *                      to ensure the key name will not be overwriting another system's
     *                      metadata value. One way to do this is to prefix the key name
     *                      with the name of the extension or plugin (sanitized to follow
     *                      the above formatting rules). Volatile metadata key names are
     *                      not case sensitive. This may not be nullptr or an empty string.
     * @param[in] maxLength The maximum number of characters, including the null terminator,
     *                      that the metadata's value will occupy when its value is retrieved.
     *                      This is ignored for integer and floating point values (the maximum
     *                      size for those types will always be used regardless of the value).
     *                      When retrieved, if the value is longer than this limit, the new
     *                      metadata value will be truncated. This may be 0 for integer and
     *                      floating point value types. For string values, there may be
     *                      an arbitrary amount of extra space added internally. This is
     *                      often for padding or alignment purposes. Callers should however
     *                      neither count on this space being present nor expect any strings
     *                      to always be truncated at an exact length.
     * @param[in] callback  The callback and data type that will provide the value for the new
     *                      metadata key. This may not contain a `nullptr` callback function.
     *                      See below for notes on what the callback function may and may not
     *                      do.
     * @param[in] context   An opaque context pointer that will be passed to the callback
     *                      function when called. This will not be accessed or evaluated in
     *                      any way, but must remain valid for the entire duration that the
     *                      callback is registered here.
     * @returns An identifier that can be used to unregister the callback in the event that the
     *          owning module needs to be unloaded. It is the caller's responsibility to ensure
     *          that the metadata callback is properly unregistered with a call to
     *          removeVolatileMetadataValue() before it unloads.
     *
     * @retval kMetadataFailBadParameter if an invalid parameter is passed in.
     * @retval kMetadataFailKeyAlreadyUsed if the given key name is already in use or is a reserved name.
     * @retval kInvalidMetadataId if a crash dump is currently in progress during this call.
     *
     * @remarks This registers a new volatile metadata value with the crash reporter. This new
     *          value includes a callback that will be used to acquire the most recent value of
     *          the metadata key when a crash does occur. The value may be provided as either a
     *          signed or unsigned integer (64 bit), a floating point value (64 bit), or a string
     *          of arbitrary length. Callback types are intentionally provided for each type to
     *          discourage the implementations from doing their own string conversions that could
     *          be dangerous while handling a crash event.
     *
     * @remarks Because the process may be in an unstable or delicate state when the callback
     *          is performed to retrieve the metadata values, there are several restrictions on
     *          what the callback function can and cannot do. In general, the callback function
     *          should provide the metadata value as quickly and simply as possible. An ideal
     *          case would be just to return the current value of a local, global, or member
     *          variable. Some guidelines are:
     *          * Do not perform any allocations or call into anything that may perform an
     *            allocation. At the time of a crash many things could have gone wrong and the
     *            allocations could fail or hang for various reasons.
     *          * Do not use any STL container classes other than to retrieve a current value.
     *            Many STL container class operations can implicitly perform an allocation
     *            to resize a buffer, array, new node, etc. If a resize, copy, or assign
     *            operation is unavoidable, try to use a container class that provides the
     *            possibility to reserve space for expected operations early (ie: string,
     *            vector, etc).
     *          * Avoid doing anything that may use a mutex or other locking primitive that
     *            is not in a strictly known state at the time. During a crash, the state of
     *            any lock could be undefined leading to a hang if an attempt is made to
     *            acquire it. If thread safety is a concern around accessing the value, try
     *            using an atomic variable instead of depending on a lock.
     *          * Do not make any calls into ICrashReporter from the callback function. This
     *            will result in a deadlock.
     *          * Under no circumstances should a new thread be created by the callback.
     *
     * @note The addVolatileMetadata() helper functions have been provided to make it easier
     *       to register callbacks for each value type. Using these is preferable to calling
     *       into internalAddVolatileMetadata() directly.
     *
     * @sa internalAddVolatileMetadata(), addVolatileMetadata().
     */
    /** @private */
    MetadataId(CARB_ABI* internalAddVolatileMetadata)(const char* keyName,
                                                      size_t maxLength,
                                                      MetadataValueCallback* callback,
                                                      void* context);

    /** Adds a new volatile metadata value to the crash report.
     *
     * @param[in] keyName   The name of the metadata key to set. This must only contain
     *                      printable ASCII characters except for a double quote ('"'),
     *                      slash ('/'), or whitespace. It is the caller's responsibility
     *                      to ensure the key name will not be overwriting another system's
     *                      metadata value. One way to do this is to prefix the key name
     *                      with the name of the extension or plugin (sanitized to follow
     *                      the above formatting rules). Volatile metadata key names are
     *                      not case sensitive. This may not be `nullptr` or an empty string.
     * @param[in] callback  The callback function that will provide the value for the new
     *                      metadata key. This may not be a `nullptr` callback function.
     *                      See below for notes on what the callback function may and may not
     *                      do.
     * @param[in] context   An opaque context pointer that will be passed to the callback
     *                      function when called. This will not be accessed or evaluated in
     *                      any way, but must remain valid for the entire duration that the
     *                      callback is registered here.
     * @returns An identifier that can be used to unregister the callback in the event that the
     *          owning module needs to be unloaded. It is the caller's responsibility to ensure
     *          that the metadata callback is properly unregistered with a call to
     *          removeVolatileMetadataValue() before it unloads.
     *
     * @retval kMetadataFailBadParameter if an invalid parameter is passed in.
     * @retval kMetadataFailKeyAlreadyUsed if the given key name is already in use or is a reserved name.
     * @retval kInvalidMetadataId if a crash dump is currently in progress during this call.
     *
     * @remarks This registers a new volatile metadata value with the crash reporter. This new
     *          value includes a callback that will be used to acquire the most recent value of
     *          the metadata key when a crash does occur. The value may be provided as either a
     *          signed or unsigned integer (64 bit), a floating point value (64 bit), or a string
     *          of arbitrary length. Callback types are intentionally provided for each type to
     *          discourage the implementations from doing their own string conversions that could
     *          be dangerous while handling a crash event.
     *
     * @remarks Because the process may be in an unstable or delicate state when the callback
     *          is performed to retrieve the metadata values, there are several restrictions on
     *          what the callback function can and cannot do. In general, the callback function
     *          should provide the metadata value as quickly and simply as possible. An ideal
     *          case would be just to return the current value of a local, global, or member
     *          variable. Some guidelines are:
     *          * Do not perform any allocations or call into anything that may perform an
     *            allocation. At the time of a crash, many things could have gone wrong and
     *            allocations could fail or hang for various reasons.
     *          * Do not use any STL container classes other than to retrieve a current value.
     *            Many STL container class operations can implicitly perform an allocation
     *            to resize a buffer, array, new node, etc. If a resize, copy, or assign
     *            operation is unavoidable, try to use a container class that provides the
     *            possibility to reserve space for expected operations early (ie: string,
     *            vector, etc).
     *          * Avoid doing anything that may use a mutex or other locking primitive that
     *            is not in a strictly known state at the time. During a crash, the state of
     *            any lock could be undefined leading to a hang if an attempt is made to
     *            acquire it. If thread safety is a concern around accessing the value, try
     *            using an atomic variable instead of depending on a lock.
     *          * Do not make any calls into ICrashReporter from the callback function. This
     *            will result in a deadlock.
     *          * Under no circumstances should a new thread be created by the callback.
     *
     * @thread_safety This call is thread safe.
     */
    MetadataId addVolatileMetadata(const char* keyName, OnGetMetadataIntegerFn callback, void* context);

    /** @copydoc addVolatileMetadata(const char*,OnGetMetadataIntegerFn,void*) */
    MetadataId addVolatileMetadata(const char* keyName, OnGetMetadataUIntegerFn callback, void* context);

    /** @copydoc addVolatileMetadata(const char*,OnGetMetadataIntegerFn,void*) */
    MetadataId addVolatileMetadata(const char* keyName, OnGetMetadataFloatFn callback, void* context);

    /** @copydoc addVolatileMetadata(const char*,OnGetMetadataIntegerFn,void*)
     * @param[in] maxLength The maximum number of characters, including the null terminator,
     *                      that the metadata's value will occupy when its value is retrieved.
     *                      When retrieved, if the value is longer than this limit, this new
     *                      metadata value will be truncated. There may be an arbitrary
     *                      amount of extra space added internally. This is often done for
     *                      padding or alignment purposes. Callers should however neither
     *                      count on this space being present nor expect any strings to always
     *                      be truncated at an exact length.
     */
    MetadataId addVolatileMetadata(const char* keyName, size_t maxLength, OnGetMetadataStringFn callback, void* context);

    /** @copydoc addVolatileMetadata(const char*,size_t,OnGetMetadataStringFn,void*) */
    MetadataId addVolatileMetadata(const char* keyName, size_t maxLength, OnGetMetadataWideStringFn callback, void* context);

    /** Removes a previously registered volatile metadata value.
     *
     * @param[in] id The identifier of the metadata value to remove. This was returned from
     *               a previous successful call to addVolatileMetadata*(). This call will be
     *               ignored if the identifier is invalid.
     * @returns No return value.
     *
     * @remarks This removes a volatile metadata value from the crash reporter. The value will
     *          be retrieved from the callback and stored internally before it is removed from
     *          the crash reporter. The given identifier will be invalid upon return.
     *
     * @sa internalAddVolatileMetadata(), addVolatileMetadata().
     */
    void(CARB_ABI* removeVolatileMetadataValue)(MetadataId id);
};
/** Registers a signed-integer volatile metadata callback; see the declaration for details. */
inline MetadataId ICrashReporter::addVolatileMetadata(const char* keyName, OnGetMetadataIntegerFn callback, void* context)
{
    // Wrap the typed callback in the generic descriptor expected by the ABI entry point.
    MetadataValueCallback desc;
    desc.fn.getInteger = callback;
    desc.type = MetadataValueType::eInteger;

    // Integer values ignore the length limit, so pass zero.
    return internalAddVolatileMetadata(keyName, 0, &desc, context);
}
/** Registers an unsigned-integer volatile metadata callback; see the declaration for details. */
inline MetadataId ICrashReporter::addVolatileMetadata(const char* keyName, OnGetMetadataUIntegerFn callback, void* context)
{
    // Wrap the typed callback in the generic descriptor expected by the ABI entry point.
    MetadataValueCallback desc;
    desc.fn.getUInteger = callback;
    desc.type = MetadataValueType::eUInteger;

    // Integer values ignore the length limit, so pass zero.
    return internalAddVolatileMetadata(keyName, 0, &desc, context);
}
/** Registers a floating-point volatile metadata callback; see the declaration for details. */
inline MetadataId ICrashReporter::addVolatileMetadata(const char* keyName, OnGetMetadataFloatFn callback, void* context)
{
    // Wrap the typed callback in the generic descriptor expected by the ABI entry point.
    MetadataValueCallback desc;
    desc.fn.getFloat = callback;
    desc.type = MetadataValueType::eFloat;

    // Floating point values ignore the length limit, so pass zero.
    return internalAddVolatileMetadata(keyName, 0, &desc, context);
}
/** Registers a string volatile metadata callback; see the declaration for details. */
inline MetadataId ICrashReporter::addVolatileMetadata(const char* keyName,
                                                      size_t maxLength,
                                                      OnGetMetadataStringFn callback,
                                                      void* context)
{
    // Wrap the typed callback in the generic descriptor expected by the ABI entry point.
    MetadataValueCallback desc;
    desc.fn.getString = callback;
    desc.type = MetadataValueType::eString;
    return internalAddVolatileMetadata(keyName, maxLength, &desc, context);
}
/** Registers a wide-string volatile metadata callback; see the declaration for details. */
inline MetadataId ICrashReporter::addVolatileMetadata(const char* keyName,
                                                      size_t maxLength,
                                                      OnGetMetadataWideStringFn callback,
                                                      void* context)
{
    // Wrap the typed callback in the generic descriptor expected by the ABI entry point.
    MetadataValueCallback desc;
    desc.fn.getWString = callback;
    desc.type = MetadataValueType::eWideString;
    return internalAddVolatileMetadata(keyName, maxLength, &desc, context);
}
} // namespace crashreporter
} // namespace carb
|
omniverse-code/kit/include/carb/crashreporter/CrashReporterUtils.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief Utility helper functions for the crash reporter.
#pragma once
#include "../Framework.h"
#include "../InterfaceUtils.h"
#include "../logging/Log.h"
#include "../settings/ISettings.h"
#include "ICrashReporter.h"
#if CARB_PLATFORM_WINDOWS && !defined(_DLL)
# include "../CarbWindows.h"
# include "../extras/Library.h"
#endif
#include <signal.h>
#include <string.h>
#include <future>
/** Global accessor object for the loaded ICrashReporter object. This is intended to be used
* as a shortcut for accessing the @ref carb::crashreporter::ICrashReporter instance if the
* crash reporter plugin has been loaded in the process. This will be `nullptr` if the
* crash reporter plugin is not loaded. This symbol is unique to each plugin module and
* will be filled in by the framework upon load if the crash reporter plugin is present.
* Callers should always check if this value is `nullptr` before accessing it. This should
* not be accessed during or after framework shutdown.
*/
CARB_WEAKLINK carb::crashreporter::ICrashReporter* g_carbCrashReporter;
#ifdef DOXYGEN_BUILD
/** Defines global symbols specifically related to the crash reporter. */
# define CARB_CRASH_REPORTER_GLOBALS()
#else
// only install the signal handler for modules that have been statically linked to the
// Windows CRT (OVCC-1379). This is done because plugins that statically link to the
// CRT have their own copies of the signal handlers table and the crash reporter is
// unable to directly manipulate those. By setting the signal handler here in the
// context of the statically linked plugin, we provide a way to relay that abort
// signal to the crash reporter.
# if CARB_PLATFORM_WINDOWS && !defined(_DLL)
# if !CARB_COMPILER_MSC
static_assert(false, "Unsupported compiler!");
# endif
# define CARB_CRASH_REPORTER_GLOBALS() \
bool g_carbSignalHandlerInstalled = carb::crashreporter::detail::installSignalHandler();
namespace carb
{
namespace crashreporter
{
namespace detail
{
/** Installs a SIGABRT signal handler to act as a relay.
*
* @returns `true` if the signal handler is successfully installed. Returns `false` otherwise.
*
* @remarks This installs a SIGABRT signal handler to act as an event relay for plugins and apps
* that are statically linked to the Windows CRT. This allows each statically linked
* module's own signal handler chain to catch `abort()` and `std::terminate()` calls
* and pass that on to a special handler function in `carb.dll` that will then relay
* the event to the crash reporter plugin.
*
* @note This signal handler can be disabled by defining the `CARB_DISABLE_ABORT_HANDLER`
* environment variable and setting its value to '1'.
*
* @note This should not be called directly. This is called automatically as needed during
* plugin and app load.
*/
inline bool installSignalHandler()
{
using SignalHandlerFn = void (*)(int);
static bool disableHandler = []() {
WCHAR envVarValue[32] = { 0 };
return GetEnvironmentVariableW(L"CARB_DISABLE_ABORT_HANDLER", envVarValue, CARB_COUNTOF32(envVarValue)) != 0 &&
envVarValue[0] == '1' && envVarValue[1] == '\0';
}();
if (disableHandler)
return false;
static SignalHandlerFn handler = []() -> SignalHandlerFn {
SignalHandlerFn fn;
carb::extras::LibraryHandle handle = carb::extras::loadLibrary(
"carb", carb::extras::fLibFlagMakeFullLibName | carb::extras::fLibFlagLoadExisting);
if (handle == carb::extras::kInvalidLibraryHandle)
return nullptr;
fn = carb::extras::getLibrarySymbol<SignalHandlerFn>(handle, "carbSignalHandler");
if (fn == nullptr)
{
carb::extras::unloadLibrary(handle);
return nullptr;
}
return fn;
}();
if (handler == nullptr)
return false;
// install the signal handler for this thread in this module. Since signals on Windows
// are a bonus feature and rarely used, we don't care about preserving the previous
// signal handler.
signal(SIGABRT, handler);
return true;
}
} // namespace detail
} // namespace crashreporter
} // namespace carb
# else
# define CARB_CRASH_REPORTER_GLOBALS()
# endif
#endif
namespace carb
{
/** Namespace for the crash reporter. */
namespace crashreporter
{
/** Base magic signature value used to verify crash reporter resources. The lower 8 bits of this
* value can be incremented to allow for different versions of this resources this signature
* protects.
*/
constexpr uintptr_t kBaseMagicSignature = 0xc7a547e907137700ull;
/** Current magic signature used to verify crash reporter resources. This value is intended to
* be incremented for versioning purposes if the handling of the crash reporter resources needs
* to change in the future.
*/
constexpr uintptr_t kMagicSignature = kBaseMagicSignature;
#if CARB_PLATFORM_LINUX || defined(DOXYGEN_BUILD)
/** Signal number to use to handle external termination requests. This signal is intentionally
* a seldom used one so that it is unlikely to interfere with other normal signal usage in the
* process. Even with the rarely used signal, we'll still include other safety checks on the
* received signal before scheduling an intentional termination.
*/
static int kExternalTerminationSignal = SIGRTMAX - 1;
#elif CARB_PLATFORM_MACOS
/** MacOS doesn't support realtime signals. We'll use SIGUSR2 instead. */
constexpr int kExternalTerminationSignal = SIGUSR2;
#endif
/** Registers the crash reporter for this process and sets it up.
*
* @returns No return value.
*
* @remarks This installs the crash reporter in the calling process. This will include
* installing the crash handler hook and setting up its state according to the
* current values in the `/crashreporter/` branch of the settings registry.
* If the ISettings interface is not available, the crash reporter will only
* use its default settings and many features will be disabled. In this case
* the disabled features will include monitoring for changes to the various
* `/crashreporter/` settings, specifying metadata to include in crash reports,
* and controlling how and where the crash dump files are written out.
*
* @note When the process is shutting down, the crash reporter should be disabled
* by calling @ref carb::crashreporter::deregisterCrashReporterForClient().
* It is the host app's responsibility to properly disable the crash reporter
* before the plugin is unloaded.
*
* @thread_safety This operation is not thread safe. It is the caller's responsibility
* to ensure this is only called from a single thread at any given time.
* However, this will be automatically called during Carbonite framework
* startup (in carb::startupFramework()) and does not necessarily need
* to be called directly.
*/
inline void registerCrashReporterForClient()
{
g_carbCrashReporter = getFramework()->tryAcquireInterface<ICrashReporter>();
}
/** Deregisters and disables the crash reporter for the calling process.
*
* @returns No return value.
*
* @remarks This removes the crash reporter interface from the global variable
* @ref g_carbCrashReporter so that callers cannot access it further.
* The crash reporter plugin is also potentially unloaded.
*
* @thread_safety This operation is not thread safe. It is the caller's responsibility
* to ensure this is only called from a single thread at any given time.
* However, this will be automatically called during Carbonite framework
* shutdown (in carb::shutdownFramework()) and does not necessarily need
* to be called directly.
*/
inline void deregisterCrashReporterForClient()
{
    // Nothing to do if the crash reporter was never acquired.
    if (g_carbCrashReporter == nullptr)
        return;

    getFramework()->releaseInterface(g_carbCrashReporter);
    g_carbCrashReporter = nullptr;
}
/** Attempts to upload any crash dump files left by a previously crashed process.
*
* @returns A future that can be used to check on the completion of the upload operation.
* The operation is fully asynchronous and will proceed on its own. The future
* object will be signaled once the operation completes, successfully or otherwise.
*
* @remarks This starts off the process of checking for and uploading old crash dump files
* that may have been left over by a previous crashed process. This situation can
* occur if the upload failed in the previous process (ie: network connection
* issue, etc), or the process crashed again during the upload. A list of old
* crash dump files will be searched for in the currently set dump directory
* (as set by `/crashreporter/dumpDir`). If any are found, they will be uploaded
* one by one to the currently set upload URL (`/crashreporter/url`). Each
* crash dump file will be uploaded with its original metadata if the matching
* metadata file can be found. Once a file has been successfully uploaded to
* the given upload URL, it will be deleted from local storage unless the
* `/crashreporter/preserveDump` setting is `true`. This entire process will
* be skipped if the `/crashreporter/skipOldDumpUpload` setting is `true` and
* this call will simply return immediately.
*
* @thread_safety This function is thread safe. If multiple calls are made while an upload
* is still in progress, a new task will just be added to the upload queue
* instead of starting off another upload thread.
*
* @note If an upload is in progress when the process tries to exit or the crash reporter
* plugin tries to unload, any remaining uploads will be canceled, but the current
* upload operation will wait to complete. If this is a large file being uploaded
* or the internet connection's upload speed is particularly slow, this could potentially
* take a long time. There is unfortunately no reliable way to cancel this upload
* in progress currently.
*/
inline std::future<void> sendAndRemoveLeftOverDumpsAsync()
{
    // The promise is heap-allocated because its lifetime must extend into the
    // crash reporter's asynchronous callback; it is owned here until released
    // to the callback below.
    std::unique_ptr<std::promise<void>> sentPromise(new std::promise<void>());
    std::future<void> sentFuture(sentPromise->get_future());

    if (g_carbCrashReporter)
    {
        const auto finishCallback = [](void* promisePtr) {
            // Reclaim ownership of the promise released below. static_cast is the
            // correct named cast for void* -> T*, and the unique_ptr guarantees the
            // promise is destroyed even if set_value() throws.
            std::unique_ptr<std::promise<void>> promise(static_cast<std::promise<void>*>(promisePtr));
            promise->set_value();
        };

        // Ownership of the promise transfers to the callback (released here,
        // reclaimed above once the uploads/removals complete).
        g_carbCrashReporter->sendAndRemoveLeftOverDumpsAsync(finishCallback, sentPromise.release());
    }
    else
    {
        // No crash reporter loaded: signal completion immediately on this thread.
        CARB_LOG_WARN("No crash reporter present, dumps uploading isn't available.");
        sentPromise->set_value();
    }
    return sentFuture;
}
/** Namespace for internal helper functions. */
namespace detail
{
/** Sanitizes a string to be usable as a key name in the settings registry.
*
* @param[in] keyName The key string to be sanitized. This may be any string in theory, but
* should really be a short descriptive name for a crash metadata value or
* extra crash file. All non-ASCII, non-numeric characters will be replaced
* with underscores.
* @returns A string containing the sanitized key name.
*
* @note This is called internally by the addExtraCrashFile() and isExtraCrashFileKeyUsed()
* functions and should not be called directly.
*/
inline std::string sanitizeExtraCrashFileKey(const char* keyName)
{
    // Printable ASCII characters that are still not database friendly; everything
    // outside [33, 126] is rejected by the range check below.
    static const char kBadChars[] = "\"'\\/,#$%^&*()!~`[]{}|<>?;:=+.\t\b\n\r ";

    std::string sanitized = keyName;
    for (auto& ch : sanitized)
    {
        const bool outOfRange = ch <= ' ' || ch >= 127;
        if (outOfRange || strchr(kBadChars, ch) != nullptr)
            ch = '_';
    }
    return sanitized;
}
} // namespace detail
/** Adds a metadata value to the crash reporter.
*
* @tparam T The type of the value to set. This may be any value type that is
* compatible with @a std::to_string().
* @param[in] keyName The name of the metadata key to set. This must only contain printable
* ASCII characters except for a double quote ('"'), slash ('/'), or
* whitespace. These rules get the key to a format that can be accepted
* by the settings registry. Note that further sanitization on the key
* name may also occur later. Any character that is not suitable for a
* database key name will be replaced by an underscore ('_'). It is the
* caller's responsibility to ensure the key name will not be overwriting
* another system's metadata value. One way to do this is to prefix the
* key name with the name of the extension or plugin (sanitized to follow
* the above formatting rules).
* @param[in] value The value to add to the crash reporter's metadata table. This may be
* any string that is accepted by carb::settings::ISettings::setString()
* as a new value. Note that this will remove the metadata value if it is
* set to `nullptr` or an empty string.
* @returns `true` if the new metadata value is successfully set. Returns `false` otherwise.
*
* @remarks This adds a new metadata value to the crash reporter. When a crash occurs, all
* values added through here will be collected and transmitted as metadata to
 *          accompany the crash report. The metadata value will be added (or
* updated) to the crash reporter by adding (or updating) a key under the
* "/crashreporter/data/" settings branch.
*
* @note This should not be called frequently to update the value of a piece of metadata.
* Doing so will be likely to incur a performance hit since the crash reporter watches
* for changes on the "/crashreporter/data/" settings branch that is modified here.
* Each time the branch changes, the crash reporter's metadata list is updated. If
* possible, the value for any given piece of metadata should only be updated when
* it either changes or just set once on startup and left alone.
*/
template <typename T>
inline bool addCrashMetadata(const char* keyName, T value)
{
    // Stringify the value, then delegate to the string specialization which
    // performs the actual settings-registry write.
    const std::string text = std::to_string(value);
    return addCrashMetadata(keyName, text.c_str());
}
/** @copydoc carb::crashreporter::addCrashMetadata(const char*,T). */
template <>
inline bool addCrashMetadata(const char* keyName, const char* value)
{
    carb::settings::ISettings* settings = carb::getCachedInterface<carb::settings::ISettings>();
    if (settings == nullptr)
        return false;

    // Sanitize the key and store the value under the settings branch that the
    // crash reporter watches for metadata.
    const std::string path = std::string("/crashreporter/data/") + detail::sanitizeExtraCrashFileKey(keyName);
    settings->setString(path.c_str(), value);
    return true;
}
/** Retrieves the value of a crash metadata value (if defined).
*
* @param[in] keyName The name of the metadata key to retrieve the value for. This must only
* contain printable ASCII characters except for a double quote ('"'), slash
* ('/'), or whitespace. These rules get the key to a format that can be
* accepted by the settings registry. Note that further sanitization on the
* key name may also occur later. Any character that is not suitable for a
* database key name will be replaced by an underscore ('_'). It is the
* caller's responsibility to ensure the key name will not be overwriting
* another system's metadata value. One way to do this is to prefix the
* key name with the name of the extension or plugin (sanitized to follow
* the above formatting rules).
* @returns The value of the requested crash metadata if it is defined. Returns `nullptr` if
* the requested metadata value has not been defined. This will not modify any existing
* crash metadata keys or values.
*/
inline const char* getCrashMetadataValue(const char* keyName)
{
    carb::settings::ISettings* settings = carb::getCachedInterface<carb::settings::ISettings>();
    if (settings == nullptr)
        return nullptr;

    // Read the value back from the same sanitized settings path used when the
    // metadata was added; nullptr if the key was never defined.
    const std::string path = "/crashreporter/data/" + detail::sanitizeExtraCrashFileKey(keyName);
    return settings->getStringBuffer(path.c_str());
}
/** Adds an extra file to be uploaded when a crash occurs.
*
* @param[in] keyName The name of the key to give to the file. This is what the file will be
* uploaded as. Using the file's original name should be fine in most
* cases, however it should not contain characters such as '/' or '\'
* at the very least. Non-ASCII characters should be avoided if possible
* too. It is the caller's responsibility to ensure adding this new file
* will not overwrite another upload file with the same key name. This
* may not use the reserved name 'upload_file_minidump'. This key name
* string will always be sanitized to only contain database friendly
* characters. All invalid characters will be replaced by an underscore
* ('_').
* @param[in] filename The full path to the file to upload. This may be a relative or absolute
* path. The file may or may not exist at the time of this call, it will
* still be added to the list of files to be uploaded. If the file does not
* exist at the time of the crash, it will be filtered out of the list at
* that point. A warnings message will be written out for each listed file
* that is missing at the time of the crash however.
* @returns `true` if the new entry is added to the list. Returns `false` if the file could
* not be added. This failure will only occur if the \ref carb::settings::ISettings interface is not
* available. Note that a `true` return does not necessarily mean that the new file
* was fully added to the list. It would have been written to the list in the settings
* registry, but may have been ignored by the crash reporter if the same key was given
* as a previous file.
*
* @remarks This adds a filename to be tracked to upload with the next crash report that is
* generated. This setting is not persistent across sessions. If no crash occurs,
* the file will not be uploaded anywhere. This cannot be used to rename a file that
* has already been added to the upload list (ie: change the filename under an existing
* key). If a second filename is specified with the same key, it will be ignored.
*
* @note Extra files added with this function will not be deleted once a crash report is
* successfully uploaded. Only the crash report's main dump file and metadata files
* will be deleted in this case.
*/
inline bool addExtraCrashFile(const char* keyName, const char* filename)
{
    carb::settings::ISettings* settings = carb::getCachedInterface<carb::settings::ISettings>();
    if (settings == nullptr)
        return false;

    // Sanitize the key name so that it contains only database friendly characters,
    // then record the file under the crash reporter's upload-file list.
    const std::string key = detail::sanitizeExtraCrashFileKey(keyName);
    settings->setString(("/crashreporter/files/" + key).c_str(), filename);
    return true;
}
/** Checks whether a key for an extra crash report file has already been used.
*
* @param[in] keyName The name of the key to be used. This will be used to identify the extra
* file in the settings registry. See addExtraCrashFile() for more
* information on how this value is used.
* @returns `true` if the crash file key has been used already. Returns `false` otherwise.
*
* @remarks When adding new extra files to a crash report, it is the caller's responsibility
* that an existing filename will not be overwritten by addExtraCrashFile(). This
* function can be used to check whether a given key had already been used to add
* an extra file to the crash report.
*/
inline bool isExtraCrashFileKeyUsed(const char* keyName)
{
    auto* settings = carb::getCachedInterface<carb::settings::ISettings>();
    if (settings == nullptr)
        return false;

    // Sanitize the key name the same way addExtraCrashFile() does so we test the
    // exact settings path that would have been written.
    const std::string path = "/crashreporter/files/" + detail::sanitizeExtraCrashFileKey(keyName);
    return settings->isAccessibleAs(carb::dictionary::ItemType::eString, path.c_str());
}
} // namespace crashreporter
} // namespace carb
|
omniverse-code/kit/include/carb/input/InputTypes.h | // Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Types.h"
namespace carb
{
namespace input
{
// Forward declarations of the opaque input device types used throughout this header.
struct InputDevice;
struct Keyboard;
struct Mouse;
struct Gamepad;
/**
 * Type used as an identifier for all subscriptions.
 *
 * Returned by the IInput subscribeTo*Events() functions and passed back to the matching
 * unsubscribe function to cancel the subscription.
 */
typedef uint32_t SubscriptionId;
/**
* Subscription order.
*
* [0..N-1] requires to insert before the position from the beginning and shift tail on the right.
* [-1..-N] requires to insert after the position relative from the end and shift head on the left.
*
* Please look at the examples below:
*
* Assume we initially have a queue of N subscribers a b c .. y z:
* +---+---+---+-- --+---+---+
* | a | b | c | | y | z | -----events--flow--->
* +---+---+---+-- --+---+---+
* | 0 | 1 | 2 | |N-2|N-1| ---positive-order--->
* +---+---+---+-- --+---+---+
* | -N| | | | -2| -1| <---negative-order---
* +---+---+---+-- --+---+---+
* first last
*
* After inserting subscriber e with the order 1:
* +---+---+---+---+-- --+---+---+
* | a | e | b | c | | y | z |
* +---+---+---+---+-- --+---+---+
* | 0 | 1 | 2 | 3 | |N-1| N |
* +---+---+---+---+-- --+---+---+
* first last
*
* After inserting subscriber f with the order -1:
* +---+---+---+---+-- --+---+---+---+
* | a | e | b | c | | y | z | f |
* +---+---+---+---+-- --+---+---+---+
* | 0 | 1 | 2 | 3 | |N-1| N |N+1|
* +---+---+---+---+-- --+---+---+---+
* | 0 | 1 | 2 | 3 | |M-3|M-2|M-1|
* +---+---+---+---+-- --+---+---+---+
* first last
*
*/
using SubscriptionOrder = int32_t;
/**
 * Common subscription order values (see the SubscriptionOrder description above for the
 * full positive/negative ordering semantics).
 */
static constexpr SubscriptionOrder kSubscriptionOrderFirst = 0; ///< Insert at the front of the subscriber queue.
static constexpr SubscriptionOrder kSubscriptionOrderLast = -1; ///< Append at the end of the subscriber queue.
static constexpr SubscriptionOrder kSubscriptionOrderDefault = kSubscriptionOrderLast; ///< Default order: append last.
/**
 * Defines possible input event types.
 * TODO: This is not supported yet.
 */
enum class EventType : uint32_t
{
    eUnknown
};
/**
 * Defines event type mask.
 * TODO: Flags are not customized yet.
 */
typedef uint32_t EventTypeMask;
//! Mask value that matches all event types.
static constexpr EventTypeMask kEventTypeAll = EventTypeMask(-1);
/**
 * Defines possible press states.
 */
typedef uint32_t ButtonFlags;
const uint32_t kButtonFlagTransitionUp = 1; ///< The button transitioned to the up state (was released).
const uint32_t kButtonFlagStateUp = (1 << 1); ///< The button is currently in the up state.
const uint32_t kButtonFlagTransitionDown = (1 << 2); ///< The button transitioned to the down state (was pressed).
const uint32_t kButtonFlagStateDown = (1 << 3); ///< The button is currently in the down state.
/**
 * Defines possible device types.
 */
enum class DeviceType
{
    eKeyboard, ///< A keyboard device.
    eMouse, ///< A mouse device.
    eGamepad, ///< A gamepad device.
    eCount, ///< The number of valid device types.
    eUnknown = eCount ///< An unknown or invalid device type.
};
/**
 * Defines keyboard modifiers.
 */
typedef uint32_t KeyboardModifierFlags;
const uint32_t kKeyboardModifierFlagShift = 1 << 0; ///< Shift modifier.
const uint32_t kKeyboardModifierFlagControl = 1 << 1; ///< Control modifier.
const uint32_t kKeyboardModifierFlagAlt = 1 << 2; ///< Alt modifier.
const uint32_t kKeyboardModifierFlagSuper = 1 << 3; ///< Super modifier.
const uint32_t kKeyboardModifierFlagCapsLock = 1 << 4; ///< Caps Lock modifier.
const uint32_t kKeyboardModifierFlagNumLock = 1 << 5; ///< Num Lock modifier.
/**
 * Defines the total number of keyboard modifiers.
 */
const uint32_t kKeyboardModifierFlagCount = 6;
/**
 * Defines keyboard event type.
 *
 * See KeyboardEvent for the payload that accompanies each event type.
 */
enum class KeyboardEventType
{
    eKeyPress, ///< Sent when key is pressed the first time.
    eKeyRepeat, ///< Sent after a platform-specific delay if key is held down.
    eKeyRelease, ///< Sent when the key is released.
    eChar, ///< Sent when a character is produced by the input actions, for example during key presses.
    // Must always be last
    eCount ///< The number of KeyboardEventType elements.
};
/**
 * Defines input code type.
 *
 * This is the underlying integral type shared by the device-specific input
 * enumerations (KeyboardInput, MouseInput, GamepadInput).
 */
typedef uint32_t InputType;
/**
* Defines keyboard key codes
*
* The key code represents the physical key location in the standard US keyboard layout keyboard, if they exist
* in the US keyboard.
*
* eUnknown is sent for key events that do not have a key code.
*/
enum class KeyboardInput : InputType
{
    eUnknown,
    // Printable keys (standard US layout positions).
    eSpace,
    eApostrophe,
    eComma,
    eMinus,
    ePeriod,
    eSlash,
    eKey0,
    eKey1,
    eKey2,
    eKey3,
    eKey4,
    eKey5,
    eKey6,
    eKey7,
    eKey8,
    eKey9,
    eSemicolon,
    eEqual,
    eA,
    eB,
    eC,
    eD,
    eE,
    eF,
    eG,
    eH,
    eI,
    eJ,
    eK,
    eL,
    eM,
    eN,
    eO,
    eP,
    eQ,
    eR,
    eS,
    eT,
    eU,
    eV,
    eW,
    eX,
    eY,
    eZ,
    eLeftBracket,
    eBackslash,
    eRightBracket,
    eGraveAccent,
    // Function / navigation keys.
    eEscape,
    eTab,
    eEnter,
    eBackspace,
    eInsert,
    eDel,
    eRight,
    eLeft,
    eDown,
    eUp,
    ePageUp,
    ePageDown,
    eHome,
    eEnd,
    eCapsLock,
    eScrollLock,
    eNumLock,
    ePrintScreen,
    ePause,
    eF1,
    eF2,
    eF3,
    eF4,
    eF5,
    eF6,
    eF7,
    eF8,
    eF9,
    eF10,
    eF11,
    eF12,
    // Numeric keypad keys.
    eNumpad0,
    eNumpad1,
    eNumpad2,
    eNumpad3,
    eNumpad4,
    eNumpad5,
    eNumpad6,
    eNumpad7,
    eNumpad8,
    eNumpad9,
    eNumpadDel,
    eNumpadDivide,
    eNumpadMultiply,
    eNumpadSubtract,
    eNumpadAdd,
    eNumpadEnter,
    eNumpadEqual,
    // Modifier keys (left/right variants).
    eLeftShift,
    eLeftControl,
    eLeftAlt,
    eLeftSuper,
    eRightShift,
    eRightControl,
    eRightAlt,
    eRightSuper,
    eMenu,
    eCount ///< The number of KeyboardInput elements; must always be last.
};
/**
 * UTF8 RFC3629 - max 4 bytes per character
 */
const uint32_t kCharacterMaxNumBytes = 4;
/**
 * Defines a keyboard event.
 */
struct KeyboardEvent
{
    //! Source device; both members refer to the same device.
    union
    {
        Keyboard* keyboard;
        InputDevice* device;
    };
    KeyboardEventType type; //!< Event type; selects which member of the union below is valid.
    union
    {
        KeyboardInput key; //!< Valid for eKeyPress/eKeyRepeat/eKeyRelease events.
        InputType inputType; //!< Raw input code view of @ref key.
        //! Valid for eChar events: UTF-8 encoded character, not necessarily null-terminated.
        char character[kCharacterMaxNumBytes];
    };
    KeyboardModifierFlags modifiers; //!< Keyboard modifiers active at the time of the event.
};
/**
 * Defines the mouse event types.
 */
enum class MouseEventType
{
    eLeftButtonDown,
    eLeftButtonUp,
    eMiddleButtonDown,
    eMiddleButtonUp,
    eRightButtonDown,
    eRightButtonUp,
    eMove,
    eScroll,
    // Must always be last
    eCount ///< The number of MouseEventType elements.
};
/**
 * Defines the mouse event.
 */
struct MouseEvent
{
    //! Source device; both members refer to the same device.
    union
    {
        Mouse* mouse;
        InputDevice* device;
    };
    MouseEventType type; //!< Event type; selects which member of the union below is valid.
    union
    {
        //! Move events only: coordinates normalized to [0.0, 1.0] relative to the associated window size.
        Float2 normalizedCoords;
        //! Scroll events only: the scroll delta.
        Float2 scrollDelta;
    };
    KeyboardModifierFlags modifiers; //!< Keyboard modifiers active at the time of the event.
    Float2 pixelCoords; //!< Move events only: unscaled (pixel) coordinates, not normalized.
};
/**
 * Defines a mouse input.
 *
 * Covers buttons as well as scroll and move directions so any mouse input can be
 * expressed as a single code (see InputType).
 */
enum class MouseInput : InputType
{
    eLeftButton,
    eRightButton,
    eMiddleButton,
    eForwardButton,
    eBackButton,
    eScrollRight,
    eScrollLeft,
    eScrollUp,
    eScrollDown,
    eMoveRight,
    eMoveLeft,
    eMoveUp,
    eMoveDown,
    eCount ///< The number of MouseInput elements; must always be last.
};
/**
* Defines a gamepad input.
*
* Expected ABXY buttons layout:
* Y
* X B
* A
* eMenu1 - maps to View (XBone) / Share (DS4)
* eMenu2 - maps to Menu (XBone) / Options (DS4)
*/
enum class GamepadInput : InputType
{
    // Analog stick axes, one code per half-axis direction.
    eLeftStickRight,
    eLeftStickLeft,
    eLeftStickUp,
    eLeftStickDown,
    eRightStickRight,
    eRightStickLeft,
    eRightStickUp,
    eRightStickDown,
    // Analog triggers.
    eLeftTrigger,
    eRightTrigger,
    // Face and shoulder buttons (see the ABXY layout in the comment above).
    eA,
    eB,
    eX,
    eY,
    eLeftShoulder,
    eRightShoulder,
    eMenu1,
    eMenu2,
    // Stick click buttons.
    eLeftStick,
    eRightStick,
    // Directional pad.
    eDpadUp,
    eDpadRight,
    eDpadDown,
    eDpadLeft,
    eCount ///< The number of GamepadInput elements; must always be last.
};
/**
 * Defines a gamepad event.
 */
struct GamepadEvent
{
    //! Source device; both members refer to the same device.
    union
    {
        Gamepad* gamepad;
        InputDevice* device;
    };
    //! The input that changed.
    union
    {
        GamepadInput input;
        InputType inputType; //!< Raw input code view of @ref input.
    };
    float value; //!< The new value of the input.
};
/**
 * Defines the gamepad connection event types.
 */
enum class GamepadConnectionEventType
{
    eCreated, ///< The gamepad device object was created.
    eConnected, ///< The gamepad was connected.
    eDisconnected, ///< The gamepad was disconnected.
    eDestroyed ///< The gamepad device object was destroyed.
};
/**
 * Defines the gamepad connection event.
 */
struct GamepadConnectionEvent
{
    //! The gamepad the connection event refers to; both members refer to the same device.
    union
    {
        Gamepad* gamepad;
        InputDevice* device;
    };
    GamepadConnectionEventType type; //!< The type of connection event.
};
/**
 * Defines the unified input event.
 */
struct InputEvent
{
    DeviceType deviceType; //!< Selects which member of the union below is valid.
    union
    {
        KeyboardEvent keyboardEvent; //!< Valid when deviceType == DeviceType::eKeyboard.
        MouseEvent mouseEvent; //!< Valid when deviceType == DeviceType::eMouse.
        GamepadEvent gamepadEvent; //!< Valid when deviceType == DeviceType::eGamepad.
        //! Generic device pointer; aliases the device pointer stored first in each event struct.
        InputDevice* device;
    };
};
/**
 * Defines action mapping description.
 */
struct ActionMappingDesc
{
    DeviceType deviceType; //!< Selects the active members of the unions below.
    //! The device the mapping is bound to.
    union
    {
        Keyboard* keyboard; //!< Valid when deviceType == DeviceType::eKeyboard.
        Mouse* mouse; //!< Valid when deviceType == DeviceType::eMouse.
        Gamepad* gamepad; //!< Valid when deviceType == DeviceType::eGamepad.
        InputDevice* device; //!< Generic device pointer view.
    };
    //! The input the mapping is bound to.
    union
    {
        KeyboardInput keyboardInput; //!< Valid when deviceType == DeviceType::eKeyboard.
        MouseInput mouseInput; //!< Valid when deviceType == DeviceType::eMouse.
        GamepadInput gamepadInput; //!< Valid when deviceType == DeviceType::eGamepad.
        InputType inputType; //!< Raw input code view.
    };
    KeyboardModifierFlags modifiers; //!< Keyboard modifiers required by the mapping.
};
/**
 * Defines an action event.
 */
struct ActionEvent
{
    const char* action; //!< Name of the action that generated the event.
    float value; //!< Current value of the action.
    ButtonFlags flags; //!< Button state flags associated with the action.
};
/**
 * Function type that describes action event callback.
 *
 * @param evt The event description.
 * @param userData Pointer to the user data.
 * @return Whether event should be processed by subsequent event subscribers.
 */
typedef bool (*OnActionEventFn)(const ActionEvent& evt, void* userData);
/**
 * Function type that describes keyboard event callback.
 *
 * @param evt The event description.
 * @param userData Pointer to the user data.
 * @return Whether event should be processed by subsequent event subscribers.
 */
typedef bool (*OnKeyboardEventFn)(const KeyboardEvent& evt, void* userData);
/**
 * Function type that describes mouse event callback.
 *
 * @param evt The event description.
 * @param userData Pointer to the user data.
 * @return Whether event should be processed by subsequent event subscribers.
 */
typedef bool (*OnMouseEventFn)(const MouseEvent& evt, void* userData);
/**
 * Function type that describes gamepad event callback.
 *
 * @param evt The event description.
 * @param userData Pointer to the user data.
 * @return Whether event should be processed by subsequent event subscribers.
 */
typedef bool (*OnGamepadEventFn)(const GamepadEvent& evt, void* userData);
/**
 * Function type that describes gamepad connection event callback.
 *
 * Note: unlike the other event callbacks this returns nothing, so it cannot stop
 * subsequent subscribers from receiving the event.
 *
 * @param evt The event description.
 * @param userData Pointer to the user data.
 */
typedef void (*OnGamepadConnectionEventFn)(const GamepadConnectionEvent& evt, void* userData);
/**
 * Function type that describes input event callback.
 *
 * @param evt The event description.
 * @param userData Pointer to the user data.
 * @return Whether event should be processed by subsequent event subscribers.
 */
typedef bool (*OnInputEventFn)(const InputEvent& evt, void* userData);
/**
 * The result returned by InputEventFilterFn.
 */
enum class FilterResult : uint8_t
{
    //! The event should be retained and sent later when IInput::distributeBufferedEvents() is called.
    eRetain = 0,
    //! The event has been fully processed by InputEventFilterFn and should NOT be sent later when
    //! IInput::distributeBufferedEvents() is called.
    eConsume = 1,
};
/**
 * Callback function type for filtering events.
 *
 * @see IInput::filterBufferedEvents() for more information.
 *
 * @param evt A reference to the unified event description. The event may be modified.
 * @param userData A pointer to the user data passed to IInput::filterBufferedEvents().
 * @return The FilterResult indicating what should happen with the event.
 */
typedef FilterResult (*InputEventFilterFn)(InputEvent& evt, void* userData);
//! Special device-name constant (a null name). NOTE(review): presumably indicates "any
//! device" where a device name is expected -- confirm against the IInput documentation.
static const char* const kAnyDevice = nullptr;
} // namespace input
} // namespace carb
|
omniverse-code/kit/include/carb/input/InputBindingsPython.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "../BindingsPythonTypes.h"
#include "../Framework.h"
#include "IInput.h"
#include "InputProvider.h"
#include "InputUtils.h"
#include <memory>
#include <string>
#include <vector>
namespace carb
{
namespace input
{
// Empty definitions for the opaque device/mapping-set types so that pybind11 can register
// them as Python classes (see the py::class_<...> registrations in definePythonModule);
// instances are only ever handled through pointers returned by the IInput interface.
struct InputDevice
{
};
struct Gamepad
{
};
class ActionMappingSet
{
};
} // namespace input
namespace input
{
namespace detail
{
/**
 * Builds an ActionMappingDesc describing a mouse binding.
 *
 * @param mouse The mouse device for the mapping.
 * @param input The mouse input for the mapping.
 * @param modifiers The keyboard modifiers required by the mapping.
 * @return The populated mapping descriptor.
 */
inline ActionMappingDesc toMouseMapping(Mouse* mouse, MouseInput input, KeyboardModifierFlags modifiers)
{
    ActionMappingDesc desc{};
    desc.modifiers = modifiers;
    desc.mouseInput = input;
    desc.mouse = mouse;
    desc.deviceType = DeviceType::eMouse;
    return desc;
}
/**
 * Builds an ActionMappingDesc describing a gamepad binding.
 *
 * @param gamepad The gamepad device for the mapping.
 * @param input The gamepad input for the mapping.
 * @return The populated mapping descriptor (modifiers left zero-initialized).
 */
inline ActionMappingDesc toGamepadMapping(Gamepad* gamepad, GamepadInput input)
{
    ActionMappingDesc desc{};
    desc.gamepadInput = input;
    desc.gamepad = gamepad;
    desc.deviceType = DeviceType::eGamepad;
    return desc;
}
} // namespace detail
inline void definePythonModule(py::module& m)
{
m.doc() = "pybind11 carb.input bindings";
py::class_<InputDevice> device(m, "InputDevice");
py::class_<Keyboard>(m, "Keyboard", device);
py::class_<Mouse>(m, "Mouse", device);
py::class_<Gamepad>(m, "Gamepad", device);
py::class_<ActionMappingSet>(m, "ActionMappingSet");
py::enum_<EventType>(m, "EventType").value("UNKNOWN", EventType::eUnknown);
m.attr("EVENT_TYPE_ALL") = py::int_(kEventTypeAll);
m.attr("SUBSCRIPTION_ORDER_FIRST") = py::int_(kSubscriptionOrderFirst);
m.attr("SUBSCRIPTION_ORDER_LAST") = py::int_(kSubscriptionOrderLast);
m.attr("SUBSCRIPTION_ORDER_DEFAULT") = py::int_(kSubscriptionOrderDefault);
py::enum_<DeviceType>(m, "DeviceType")
.value("KEYBOARD", DeviceType::eKeyboard)
.value("MOUSE", DeviceType::eMouse)
.value("GAMEPAD", DeviceType::eGamepad);
py::enum_<KeyboardEventType>(m, "KeyboardEventType")
.value("KEY_PRESS", KeyboardEventType::eKeyPress)
.value("KEY_REPEAT", KeyboardEventType::eKeyRepeat)
.value("KEY_RELEASE", KeyboardEventType::eKeyRelease)
.value("CHAR", KeyboardEventType::eChar);
py::enum_<KeyboardInput>(m, "KeyboardInput")
.value("UNKNOWN", KeyboardInput::eUnknown)
.value("SPACE", KeyboardInput::eSpace)
.value("APOSTROPHE", KeyboardInput::eApostrophe)
.value("COMMA", KeyboardInput::eComma)
.value("MINUS", KeyboardInput::eMinus)
.value("PERIOD", KeyboardInput::ePeriod)
.value("SLASH", KeyboardInput::eSlash)
.value("KEY_0", KeyboardInput::eKey0)
.value("KEY_1", KeyboardInput::eKey1)
.value("KEY_2", KeyboardInput::eKey2)
.value("KEY_3", KeyboardInput::eKey3)
.value("KEY_4", KeyboardInput::eKey4)
.value("KEY_5", KeyboardInput::eKey5)
.value("KEY_6", KeyboardInput::eKey6)
.value("KEY_7", KeyboardInput::eKey7)
.value("KEY_8", KeyboardInput::eKey8)
.value("KEY_9", KeyboardInput::eKey9)
.value("SEMICOLON", KeyboardInput::eSemicolon)
.value("EQUAL", KeyboardInput::eEqual)
.value("A", KeyboardInput::eA)
.value("B", KeyboardInput::eB)
.value("C", KeyboardInput::eC)
.value("D", KeyboardInput::eD)
.value("E", KeyboardInput::eE)
.value("F", KeyboardInput::eF)
.value("G", KeyboardInput::eG)
.value("H", KeyboardInput::eH)
.value("I", KeyboardInput::eI)
.value("J", KeyboardInput::eJ)
.value("K", KeyboardInput::eK)
.value("L", KeyboardInput::eL)
.value("M", KeyboardInput::eM)
.value("N", KeyboardInput::eN)
.value("O", KeyboardInput::eO)
.value("P", KeyboardInput::eP)
.value("Q", KeyboardInput::eQ)
.value("R", KeyboardInput::eR)
.value("S", KeyboardInput::eS)
.value("T", KeyboardInput::eT)
.value("U", KeyboardInput::eU)
.value("V", KeyboardInput::eV)
.value("W", KeyboardInput::eW)
.value("X", KeyboardInput::eX)
.value("Y", KeyboardInput::eY)
.value("Z", KeyboardInput::eZ)
.value("LEFT_BRACKET", KeyboardInput::eLeftBracket)
.value("BACKSLASH", KeyboardInput::eBackslash)
.value("RIGHT_BRACKET", KeyboardInput::eRightBracket)
.value("GRAVE_ACCENT", KeyboardInput::eGraveAccent)
.value("ESCAPE", KeyboardInput::eEscape)
.value("TAB", KeyboardInput::eTab)
.value("ENTER", KeyboardInput::eEnter)
.value("BACKSPACE", KeyboardInput::eBackspace)
.value("INSERT", KeyboardInput::eInsert)
.value("DEL", KeyboardInput::eDel)
.value("RIGHT", KeyboardInput::eRight)
.value("LEFT", KeyboardInput::eLeft)
.value("DOWN", KeyboardInput::eDown)
.value("UP", KeyboardInput::eUp)
.value("PAGE_UP", KeyboardInput::ePageUp)
.value("PAGE_DOWN", KeyboardInput::ePageDown)
.value("HOME", KeyboardInput::eHome)
.value("END", KeyboardInput::eEnd)
.value("CAPS_LOCK", KeyboardInput::eCapsLock)
.value("SCROLL_LOCK", KeyboardInput::eScrollLock)
.value("NUM_LOCK", KeyboardInput::eNumLock)
.value("PRINT_SCREEN", KeyboardInput::ePrintScreen)
.value("PAUSE", KeyboardInput::ePause)
.value("F1", KeyboardInput::eF1)
.value("F2", KeyboardInput::eF2)
.value("F3", KeyboardInput::eF3)
.value("F4", KeyboardInput::eF4)
.value("F5", KeyboardInput::eF5)
.value("F6", KeyboardInput::eF6)
.value("F7", KeyboardInput::eF7)
.value("F8", KeyboardInput::eF8)
.value("F9", KeyboardInput::eF9)
.value("F10", KeyboardInput::eF10)
.value("F11", KeyboardInput::eF11)
.value("F12", KeyboardInput::eF12)
.value("NUMPAD_0", KeyboardInput::eNumpad0)
.value("NUMPAD_1", KeyboardInput::eNumpad1)
.value("NUMPAD_2", KeyboardInput::eNumpad2)
.value("NUMPAD_3", KeyboardInput::eNumpad3)
.value("NUMPAD_4", KeyboardInput::eNumpad4)
.value("NUMPAD_5", KeyboardInput::eNumpad5)
.value("NUMPAD_6", KeyboardInput::eNumpad6)
.value("NUMPAD_7", KeyboardInput::eNumpad7)
.value("NUMPAD_8", KeyboardInput::eNumpad8)
.value("NUMPAD_9", KeyboardInput::eNumpad9)
.value("NUMPAD_DEL", KeyboardInput::eNumpadDel)
.value("NUMPAD_DIVIDE", KeyboardInput::eNumpadDivide)
.value("NUMPAD_MULTIPLY", KeyboardInput::eNumpadMultiply)
.value("NUMPAD_SUBTRACT", KeyboardInput::eNumpadSubtract)
.value("NUMPAD_ADD", KeyboardInput::eNumpadAdd)
.value("NUMPAD_ENTER", KeyboardInput::eNumpadEnter)
.value("NUMPAD_EQUAL", KeyboardInput::eNumpadEqual)
.value("LEFT_SHIFT", KeyboardInput::eLeftShift)
.value("LEFT_CONTROL", KeyboardInput::eLeftControl)
.value("LEFT_ALT", KeyboardInput::eLeftAlt)
.value("LEFT_SUPER", KeyboardInput::eLeftSuper)
.value("RIGHT_SHIFT", KeyboardInput::eRightShift)
.value("RIGHT_CONTROL", KeyboardInput::eRightControl)
.value("RIGHT_ALT", KeyboardInput::eRightAlt)
.value("RIGHT_SUPER", KeyboardInput::eRightSuper)
.value("MENU", KeyboardInput::eMenu)
.value("COUNT", KeyboardInput::eCount);
py::enum_<MouseEventType>(m, "MouseEventType")
.value("LEFT_BUTTON_DOWN", MouseEventType::eLeftButtonDown)
.value("LEFT_BUTTON_UP", MouseEventType::eLeftButtonUp)
.value("MIDDLE_BUTTON_DOWN", MouseEventType::eMiddleButtonDown)
.value("MIDDLE_BUTTON_UP", MouseEventType::eMiddleButtonUp)
.value("RIGHT_BUTTON_DOWN", MouseEventType::eRightButtonDown)
.value("RIGHT_BUTTON_UP", MouseEventType::eRightButtonUp)
.value("MOVE", MouseEventType::eMove)
.value("SCROLL", MouseEventType::eScroll);
py::enum_<MouseInput>(m, "MouseInput")
.value("LEFT_BUTTON", MouseInput::eLeftButton)
.value("RIGHT_BUTTON", MouseInput::eRightButton)
.value("MIDDLE_BUTTON", MouseInput::eMiddleButton)
.value("FORWARD_BUTTON", MouseInput::eForwardButton)
.value("BACK_BUTTON", MouseInput::eBackButton)
.value("SCROLL_RIGHT", MouseInput::eScrollRight)
.value("SCROLL_LEFT", MouseInput::eScrollLeft)
.value("SCROLL_UP", MouseInput::eScrollUp)
.value("SCROLL_DOWN", MouseInput::eScrollDown)
.value("MOVE_RIGHT", MouseInput::eMoveRight)
.value("MOVE_LEFT", MouseInput::eMoveLeft)
.value("MOVE_UP", MouseInput::eMoveUp)
.value("MOVE_DOWN", MouseInput::eMoveDown)
.value("COUNT", MouseInput::eCount);
py::enum_<GamepadInput>(m, "GamepadInput")
.value("LEFT_STICK_RIGHT", GamepadInput::eLeftStickRight)
.value("LEFT_STICK_LEFT", GamepadInput::eLeftStickLeft)
.value("LEFT_STICK_UP", GamepadInput::eLeftStickUp)
.value("LEFT_STICK_DOWN", GamepadInput::eLeftStickDown)
.value("RIGHT_STICK_RIGHT", GamepadInput::eRightStickRight)
.value("RIGHT_STICK_LEFT", GamepadInput::eRightStickLeft)
.value("RIGHT_STICK_UP", GamepadInput::eRightStickUp)
.value("RIGHT_STICK_DOWN", GamepadInput::eRightStickDown)
.value("LEFT_TRIGGER", GamepadInput::eLeftTrigger)
.value("RIGHT_TRIGGER", GamepadInput::eRightTrigger)
.value("A", GamepadInput::eA)
.value("B", GamepadInput::eB)
.value("X", GamepadInput::eX)
.value("Y", GamepadInput::eY)
.value("LEFT_SHOULDER", GamepadInput::eLeftShoulder)
.value("RIGHT_SHOULDER", GamepadInput::eRightShoulder)
.value("MENU1", GamepadInput::eMenu1)
.value("MENU2", GamepadInput::eMenu2)
.value("LEFT_STICK", GamepadInput::eLeftStick)
.value("RIGHT_STICK", GamepadInput::eRightStick)
.value("DPAD_UP", GamepadInput::eDpadUp)
.value("DPAD_RIGHT", GamepadInput::eDpadRight)
.value("DPAD_DOWN", GamepadInput::eDpadDown)
.value("DPAD_LEFT", GamepadInput::eDpadLeft)
.value("COUNT", GamepadInput::eCount);
m.attr("BUTTON_FLAG_RELEASED") = py::int_(kButtonFlagTransitionUp);
m.attr("BUTTON_FLAG_UP") = py::int_(kButtonFlagStateUp);
m.attr("BUTTON_FLAG_PRESSED") = py::int_(kButtonFlagTransitionDown);
m.attr("BUTTON_FLAG_DOWN") = py::int_(kButtonFlagStateDown);
m.attr("KEYBOARD_MODIFIER_FLAG_SHIFT") = py::int_(kKeyboardModifierFlagShift);
m.attr("KEYBOARD_MODIFIER_FLAG_CONTROL") = py::int_(kKeyboardModifierFlagControl);
m.attr("KEYBOARD_MODIFIER_FLAG_ALT") = py::int_(kKeyboardModifierFlagAlt);
m.attr("KEYBOARD_MODIFIER_FLAG_SUPER") = py::int_(kKeyboardModifierFlagSuper);
m.attr("KEYBOARD_MODIFIER_FLAG_CAPS_LOCK") = py::int_(kKeyboardModifierFlagCapsLock);
m.attr("KEYBOARD_MODIFIER_FLAG_NUM_LOCK") = py::int_(kKeyboardModifierFlagNumLock);
py::class_<KeyboardEvent>(m, "KeyboardEvent")
.def_readonly("device", &KeyboardEvent::device)
.def_readonly("keyboard", &KeyboardEvent::keyboard)
.def_readonly("type", &KeyboardEvent::type)
.def_property_readonly("input",
[](const KeyboardEvent& desc) {
switch (desc.type)
{
case KeyboardEventType::eChar:
return pybind11::cast(std::string(
desc.character, strnlen(desc.character, kCharacterMaxNumBytes)));
default:
return pybind11::cast(desc.key);
}
})
.def_readonly("modifiers", &KeyboardEvent::modifiers);
py::class_<MouseEvent>(m, "MouseEvent")
.def_readonly("device", &MouseEvent::device)
.def_readonly("mouse", &MouseEvent::mouse)
.def_readonly("type", &MouseEvent::type)
.def_readonly("normalized_coords", &MouseEvent::normalizedCoords)
.def_readonly("pixel_coords", &MouseEvent::pixelCoords)
.def_readonly("scrollDelta", &MouseEvent::scrollDelta)
.def_readonly("modifiers", &MouseEvent::modifiers);
py::class_<GamepadEvent>(m, "GamepadEvent")
.def_readonly("device", &GamepadEvent::device)
.def_readonly("gamepad", &GamepadEvent::gamepad)
.def_readonly("input", &GamepadEvent::input)
.def_readonly("value", &GamepadEvent::value);
py::enum_<GamepadConnectionEventType>(m, "GamepadConnectionEventType")
.value("CREATED", GamepadConnectionEventType::eCreated)
.value("CONNECTED", GamepadConnectionEventType::eConnected)
.value("DISCONNECTED", GamepadConnectionEventType::eDisconnected)
.value("DESTROYED", GamepadConnectionEventType::eDestroyed);
py::class_<GamepadConnectionEvent>(m, "GamepadConnectionEvent")
.def_readonly("type", &GamepadConnectionEvent::type)
.def_readonly("gamepad", &GamepadConnectionEvent::gamepad)
.def_readonly("device", &GamepadConnectionEvent::device);
py::class_<InputEvent>(m, "InputEvent")
.def_readonly("deviceType", &InputEvent::deviceType)
.def_readonly("device", &InputEvent::device)
.def_property_readonly("event", [](const InputEvent& desc) {
switch (desc.deviceType)
{
case DeviceType::eKeyboard:
return pybind11::cast(desc.keyboardEvent);
case DeviceType::eMouse:
return pybind11::cast(desc.mouseEvent);
case DeviceType::eGamepad:
return pybind11::cast(desc.gamepadEvent);
default:
return py::cast(nullptr);
}
});
py::class_<ActionMappingDesc>(m, "ActionMappingDesc")
.def_readonly("deviceType", &ActionMappingDesc::deviceType)
.def_readonly("modifiers", &ActionMappingDesc::modifiers)
.def_property_readonly("device",
[](const ActionMappingDesc& desc) {
switch (desc.deviceType)
{
case DeviceType::eKeyboard:
return pybind11::cast(desc.keyboard);
case DeviceType::eMouse:
return pybind11::cast(desc.mouse);
case DeviceType::eGamepad:
return pybind11::cast(desc.gamepad);
default:
return py::cast(nullptr);
}
})
.def_property_readonly("input", [](const ActionMappingDesc& desc) {
switch (desc.deviceType)
{
case DeviceType::eKeyboard:
return pybind11::cast(desc.keyboardInput);
case DeviceType::eMouse:
return pybind11::cast(desc.mouseInput);
case DeviceType::eGamepad:
return pybind11::cast(desc.gamepadInput);
default:
return py::cast(nullptr);
}
});
py::class_<ActionEvent>(m, "ActionEvent")
.def_readonly("action", &ActionEvent::action)
.def_readonly("value", &ActionEvent::value)
.def_readonly("flags", &ActionEvent::flags);
m.def("get_action_mapping_desc_from_string", [](const std::string& str) {
std::string deviceId;
ActionMappingDesc actionMappingDesc;
{
py::gil_scoped_release nogil;
actionMappingDesc = getActionMappingDescFromString(str.c_str(), &deviceId);
}
py::tuple t(4);
t[0] = actionMappingDesc.deviceType;
t[1] = actionMappingDesc.modifiers;
switch (actionMappingDesc.deviceType)
{
case DeviceType::eKeyboard:
{
t[2] = actionMappingDesc.keyboardInput;
break;
}
case DeviceType::eMouse:
{
t[2] = actionMappingDesc.mouseInput;
break;
}
case DeviceType::eGamepad:
{
t[2] = actionMappingDesc.gamepadInput;
break;
}
default:
{
t[2] = py::none();
break;
}
}
t[3] = deviceId;
return t;
});
m.def("get_string_from_action_mapping_desc",
[](KeyboardInput keyboardInput, KeyboardModifierFlags modifiers) {
ActionMappingDesc actionMappingDesc = {};
actionMappingDesc.deviceType = DeviceType::eKeyboard;
actionMappingDesc.keyboardInput = keyboardInput;
actionMappingDesc.modifiers = modifiers;
return getStringFromActionMappingDesc(actionMappingDesc, nullptr);
},
py::call_guard<py::gil_scoped_release>())
.def("get_string_from_action_mapping_desc",
[](MouseInput mouseInput, KeyboardModifierFlags modifiers) {
ActionMappingDesc actionMappingDesc = {};
actionMappingDesc.deviceType = DeviceType::eMouse;
actionMappingDesc.mouseInput = mouseInput;
actionMappingDesc.modifiers = modifiers;
return getStringFromActionMappingDesc(actionMappingDesc, nullptr);
},
py::call_guard<py::gil_scoped_release>())
.def("get_string_from_action_mapping_desc",
[](GamepadInput gamepadInput) {
ActionMappingDesc actionMappingDesc = {};
actionMappingDesc.deviceType = DeviceType::eGamepad;
actionMappingDesc.gamepadInput = gamepadInput;
actionMappingDesc.modifiers = 0;
return getStringFromActionMappingDesc(actionMappingDesc, nullptr);
},
py::call_guard<py::gil_scoped_release>());
static ScriptCallbackRegistryPython<size_t, bool, const InputEvent&> s_inputEventCBs;
static ScriptCallbackRegistryPython<size_t, bool, const KeyboardEvent&> s_keyboardEventCBs;
static ScriptCallbackRegistryPython<size_t, bool, const MouseEvent&> s_mouseEventCBs;
static ScriptCallbackRegistryPython<size_t, bool, const GamepadEvent&> s_gamepadEventCBs;
static ScriptCallbackRegistryPython<size_t, void, const GamepadConnectionEvent&> s_gamepadConnectionEventCBs;
static ScriptCallbackRegistryPython<size_t, bool, const ActionEvent&> s_actionEventCBs;
defineInterfaceClass<IInput>(m, "IInput", "acquire_input_interface")
.def("get_device_name", wrapInterfaceFunction(&IInput::getDeviceName), py::call_guard<py::gil_scoped_release>())
.def("get_device_type", wrapInterfaceFunction(&IInput::getDeviceType), py::call_guard<py::gil_scoped_release>())
.def("subscribe_to_input_events",
[](IInput* iface, const decltype(s_inputEventCBs)::FuncT& eventFn, EventTypeMask eventTypes,
InputDevice* device, SubscriptionOrder order) {
auto eventFnCopy = s_inputEventCBs.create(eventFn);
SubscriptionId id =
iface->subscribeToInputEvents(device, eventTypes, s_inputEventCBs.call, eventFnCopy, order);
s_inputEventCBs.add(hashPair(0x3e1, id), eventFnCopy);
return id;
},
py::arg("eventFn"), py::arg("eventTypes") = kEventTypeAll, py::arg("device") = nullptr,
py::arg("order") = kSubscriptionOrderDefault, py::call_guard<py::gil_scoped_release>())
.def("unsubscribe_to_input_events",
[](IInput* iface, SubscriptionId id) {
iface->unsubscribeToInputEvents(id);
s_inputEventCBs.removeAndDestroy(hashPair(0x3e1, id));
},
py::call_guard<py::gil_scoped_release>())
.def("get_keyboard_name", wrapInterfaceFunction(&IInput::getKeyboardName),
py::call_guard<py::gil_scoped_release>())
.def("subscribe_to_keyboard_events",
[](IInput* iface, Keyboard* keyboard, const decltype(s_keyboardEventCBs)::FuncT& eventFn) {
auto eventFnCopy = s_keyboardEventCBs.create(eventFn);
SubscriptionId id = iface->subscribeToKeyboardEvents(keyboard, s_keyboardEventCBs.call, eventFnCopy);
s_keyboardEventCBs.add(hashPair(keyboard, id), eventFnCopy);
return id;
},
py::call_guard<py::gil_scoped_release>())
.def("unsubscribe_to_keyboard_events",
[](IInput* iface, Keyboard* keyboard, SubscriptionId id) {
iface->unsubscribeToKeyboardEvents(keyboard, id);
s_keyboardEventCBs.removeAndDestroy(hashPair(keyboard, id));
},
py::call_guard<py::gil_scoped_release>())
.def("get_keyboard_value", wrapInterfaceFunction(&IInput::getKeyboardValue),
py::call_guard<py::gil_scoped_release>())
.def("get_keyboard_button_flags", wrapInterfaceFunction(&IInput::getKeyboardButtonFlags),
py::call_guard<py::gil_scoped_release>())
.def("get_mouse_name", wrapInterfaceFunction(&IInput::getMouseName), py::call_guard<py::gil_scoped_release>())
.def("get_mouse_value", wrapInterfaceFunction(&IInput::getMouseValue), py::call_guard<py::gil_scoped_release>())
.def("get_mouse_button_flags", wrapInterfaceFunction(&IInput::getMouseButtonFlags),
py::call_guard<py::gil_scoped_release>())
.def("get_mouse_coords_normalized", wrapInterfaceFunction(&IInput::getMouseCoordsNormalized),
py::call_guard<py::gil_scoped_release>())
.def("get_mouse_coords_pixel", wrapInterfaceFunction(&IInput::getMouseCoordsPixel),
py::call_guard<py::gil_scoped_release>())
.def("subscribe_to_mouse_events",
[](IInput* iface, Mouse* mouse, const decltype(s_mouseEventCBs)::FuncT& eventFn) {
auto eventFnCopy = s_mouseEventCBs.create(eventFn);
SubscriptionId id = iface->subscribeToMouseEvents(mouse, s_mouseEventCBs.call, eventFnCopy);
s_mouseEventCBs.add(hashPair(mouse, id), eventFnCopy);
return id;
},
py::call_guard<py::gil_scoped_release>())
.def("unsubscribe_to_mouse_events",
[](IInput* iface, Mouse* mouse, SubscriptionId id) {
iface->unsubscribeToMouseEvents(mouse, id);
s_mouseEventCBs.removeAndDestroy(hashPair(mouse, id));
},
py::call_guard<py::gil_scoped_release>())
.def("get_gamepad_name", wrapInterfaceFunction(&IInput::getGamepadName), py::call_guard<py::gil_scoped_release>())
.def("get_gamepad_guid", wrapInterfaceFunction(&IInput::getGamepadGuid), py::call_guard<py::gil_scoped_release>())
.def("get_gamepad_value", wrapInterfaceFunction(&IInput::getGamepadValue),
py::call_guard<py::gil_scoped_release>())
.def("get_gamepad_button_flags", wrapInterfaceFunction(&IInput::getGamepadButtonFlags),
py::call_guard<py::gil_scoped_release>())
.def("subscribe_to_gamepad_events",
[](IInput* iface, Gamepad* gamepad, const decltype(s_gamepadEventCBs)::FuncT& eventFn) {
auto eventFnCopy = s_gamepadEventCBs.create(eventFn);
SubscriptionId id = iface->subscribeToGamepadEvents(gamepad, s_gamepadEventCBs.call, eventFnCopy);
s_gamepadEventCBs.add(hashPair(gamepad, id), eventFnCopy);
return id;
},
py::call_guard<py::gil_scoped_release>())
.def("unsubscribe_to_gamepad_events",
[](IInput* iface, Gamepad* gamepad, SubscriptionId id) {
iface->unsubscribeToGamepadEvents(gamepad, id);
s_gamepadEventCBs.removeAndDestroy(hashPair(gamepad, id));
},
py::call_guard<py::gil_scoped_release>())
.def("subscribe_to_gamepad_connection_events",
[](IInput* iface, const decltype(s_gamepadConnectionEventCBs)::FuncT& eventFn) {
auto eventFnCopy = s_gamepadConnectionEventCBs.create(eventFn);
SubscriptionId id =
iface->subscribeToGamepadConnectionEvents(s_gamepadConnectionEventCBs.call, eventFnCopy);
s_gamepadConnectionEventCBs.add(id, eventFnCopy);
return id;
},
py::call_guard<py::gil_scoped_release>())
.def("unsubscribe_to_gamepad_connection_events",
[](IInput* iface, SubscriptionId id) {
iface->unsubscribeToGamepadConnectionEvents(id);
s_gamepadConnectionEventCBs.removeAndDestroy(id);
},
py::call_guard<py::gil_scoped_release>())
.def("get_actions",
[](const IInput* iface, ActionMappingSet* actionMappingSet) {
std::vector<std::string> res(iface->getActionCount(actionMappingSet));
auto actions = iface->getActions(actionMappingSet);
for (size_t i = 0; i < res.size(); i++)
{
res[i] = actions[i];
}
return res;
},
py::call_guard<py::gil_scoped_release>())
.def("add_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, Keyboard* keyboard,
KeyboardInput keyboardInput, KeyboardModifierFlags modifiers) {
return iface->addActionMapping(
actionMappingSet, action,
ActionMappingDesc{ DeviceType::eKeyboard, { keyboard }, { keyboardInput }, modifiers });
},
py::call_guard<py::gil_scoped_release>())
.def("add_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, Gamepad* gamepad,
GamepadInput gamepadInput) {
return iface->addActionMapping(
actionMappingSet, action, detail::toGamepadMapping(gamepad, gamepadInput));
},
py::call_guard<py::gil_scoped_release>())
.def("add_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, Mouse* mouse,
MouseInput mouseInput, KeyboardModifierFlags modifiers) {
return iface->addActionMapping(
actionMappingSet, action, detail::toMouseMapping(mouse, mouseInput, modifiers));
},
py::call_guard<py::gil_scoped_release>())
.def("set_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, uint32_t index,
Keyboard* keyboard, KeyboardInput keyboardInput, KeyboardModifierFlags modifiers) {
return iface->setActionMapping(
actionMappingSet, action, index,
ActionMappingDesc{ DeviceType::eKeyboard, { keyboard }, { keyboardInput }, modifiers });
},
py::call_guard<py::gil_scoped_release>())
.def("set_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, uint32_t index, Gamepad* gamepad,
GamepadInput gamepadInput) {
return iface->setActionMapping(
actionMappingSet, action, index, detail::toGamepadMapping(gamepad, gamepadInput));
},
py::call_guard<py::gil_scoped_release>())
.def("set_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, uint32_t index, Mouse* mouse,
MouseInput mouseInput, KeyboardModifierFlags modifiers) {
return iface->setActionMapping(
actionMappingSet, action, index, detail::toMouseMapping(mouse, mouseInput, modifiers));
},
py::call_guard<py::gil_scoped_release>())
.def("remove_action_mapping", wrapInterfaceFunction(&IInput::removeActionMapping),
py::call_guard<py::gil_scoped_release>())
.def("clear_action_mappings", wrapInterfaceFunction(&IInput::clearActionMappings),
py::call_guard<py::gil_scoped_release>())
.def("get_action_mappings",
[](const IInput* iface, ActionMappingSet* actionMappingSet, const char* action) {
auto size = iface->getActionMappingCount(actionMappingSet, action);
std::vector<ActionMappingDesc> res;
res.reserve(size);
auto mappings = iface->getActionMappings(actionMappingSet, action);
std::copy(mappings, mappings + size, std::back_inserter(res));
return res;
},
py::call_guard<py::gil_scoped_release>())
.def("get_action_mapping_count", wrapInterfaceFunction(&IInput::getActionMappingCount),
py::call_guard<py::gil_scoped_release>())
.def("set_default_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, Keyboard* keyboard,
KeyboardInput keyboardInput, KeyboardModifierFlags modifiers) {
return setDefaultActionMapping(
iface, actionMappingSet, action,
ActionMappingDesc{ DeviceType::eKeyboard, { keyboard }, { keyboardInput }, modifiers });
},
py::call_guard<py::gil_scoped_release>())
.def("set_default_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, Gamepad* gamepad,
GamepadInput gamepadInput) {
return setDefaultActionMapping(
iface, actionMappingSet, action, detail::toGamepadMapping(gamepad, gamepadInput));
},
py::call_guard<py::gil_scoped_release>())
.def("set_default_action_mapping",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action, Mouse* mouse,
MouseInput mouseInput, KeyboardModifierFlags modifiers) {
return setDefaultActionMapping(
iface, actionMappingSet, action, detail::toMouseMapping(mouse, mouseInput, modifiers));
},
py::call_guard<py::gil_scoped_release>())
.def("get_action_value", wrapInterfaceFunction(&IInput::getActionValue), py::call_guard<py::gil_scoped_release>())
.def("get_action_button_flags", wrapInterfaceFunction(&IInput::getActionButtonFlags),
py::call_guard<py::gil_scoped_release>())
.def("subscribe_to_action_events",
[](IInput* iface, ActionMappingSet* actionMappingSet, const char* action,
const decltype(s_actionEventCBs)::FuncT& eventFn) {
auto eventFnCopy = s_actionEventCBs.create(eventFn);
SubscriptionId id =
iface->subscribeToActionEvents(actionMappingSet, action, s_actionEventCBs.call, eventFnCopy);
s_actionEventCBs.add(id, eventFnCopy);
return id;
},
py::call_guard<py::gil_scoped_release>())
.def("unsubscribe_to_action_events",
[](IInput* iface, SubscriptionId id) {
iface->unsubscribeToActionEvents(id);
s_actionEventCBs.removeAndDestroy(id);
},
py::call_guard<py::gil_scoped_release>())
.def("get_action_mapping_set_by_path", wrapInterfaceFunction(&IInput::getActionMappingSetByPath),
py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
.def("get_modifier_flags",
[](IInput* iface, KeyboardModifierFlags modifierFlags, const std::vector<const InputDevice*>& inputDev,
const std::vector<DeviceType>& inputDevTypes, const std::vector<MouseInput>& mouseButtons) {
return iface->getModifierFlags(modifierFlags, inputDev.data(), inputDev.size(), inputDevTypes.data(),
inputDevTypes.size(), mouseButtons.data(), mouseButtons.size());
},
py::arg("modifiers") = KeyboardModifierFlags(0),
py::arg("input_devices") = std::vector<const InputDevice*>(),
py::arg("device_types") = std::vector<DeviceType>(), py::arg("mouse_buttons") = std::vector<MouseInput>(),
py::call_guard<py::gil_scoped_release>())
.def("get_global_modifier_flags",
[](IInput* iface, KeyboardModifierFlags modifierFlags, const std::vector<MouseInput>& mouseButtons) {
return iface->getGlobalModifierFlags(modifierFlags, mouseButtons.data(), mouseButtons.size());
},
py::arg("modifiers") = KeyboardModifierFlags(0), py::arg("mouse_buttons") = std::vector<MouseInput>(),
py::call_guard<py::gil_scoped_release>())
;
m.def("acquire_input_provider",
[](const char* pluginName, const char* libraryPath) {
return libraryPath ? acquireInterfaceFromLibraryForBindings<IInput>(libraryPath)->getInputProvider() :
acquireInterfaceForBindings<IInput>(pluginName)->getInputProvider();
},
py::arg("plugin_name") = nullptr, py::arg("library_path") = nullptr, py::return_value_policy::reference,
py::call_guard<py::gil_scoped_release>());
py::class_<InputProvider>(m, "InputProvider")
.def("create_keyboard", wrapInterfaceFunction(&InputProvider::createKeyboard),
py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
.def("destroy_keyboard", wrapInterfaceFunction(&InputProvider::destroyKeyboard),
py::call_guard<py::gil_scoped_release>())
.def("update_keyboard", wrapInterfaceFunction(&InputProvider::updateKeyboard),
py::call_guard<py::gil_scoped_release>())
.def("buffer_keyboard_key_event",
[](InputProvider* iface, Keyboard* keyboard, KeyboardEventType type, KeyboardInput key,
KeyboardModifierFlags modifiers) {
KeyboardEvent event;
event.keyboard = keyboard;
event.type = type;
event.key = key;
event.modifiers = modifiers;
iface->bufferKeyboardEvent(event);
},
py::call_guard<py::gil_scoped_release>())
.def("buffer_keyboard_char_event",
[](InputProvider* iface, Keyboard* keyboard, py::str character, KeyboardModifierFlags modifiers) {
// Cast before releasing GIL
auto characterStr = character.cast<std::string>();
py::gil_scoped_release nogil;
KeyboardEvent event{};
event.keyboard = keyboard;
event.type = KeyboardEventType::eChar;
event.modifiers = modifiers;
size_t maxCopyBytes = ::carb_min(characterStr.length(), size_t(kCharacterMaxNumBytes));
memcpy((void*)event.character, characterStr.c_str(), maxCopyBytes);
iface->bufferKeyboardEvent(event);
})
.def("create_mouse", wrapInterfaceFunction(&InputProvider::createMouse), py::return_value_policy::reference,
py::call_guard<py::gil_scoped_release>())
.def("destroy_mouse", wrapInterfaceFunction(&InputProvider::destroyMouse),
py::call_guard<py::gil_scoped_release>())
.def("update_mouse", wrapInterfaceFunction(&InputProvider::updateMouse), py::call_guard<py::gil_scoped_release>())
.def("buffer_mouse_event",
[](InputProvider* iface, Mouse* mouse, MouseEventType type, Float2 value, KeyboardModifierFlags modifiers,
Float2 pixelValue) {
MouseEvent event;
event.mouse = mouse;
event.type = type;
if (type == MouseEventType::eScroll)
{
event.scrollDelta = value;
}
else
{
event.normalizedCoords = value;
}
event.pixelCoords = pixelValue;
event.modifiers = modifiers;
iface->bufferMouseEvent(event);
},
py::call_guard<py::gil_scoped_release>())
.def("create_gamepad", wrapInterfaceFunction(&InputProvider::createGamepad), py::return_value_policy::reference,
py::call_guard<py::gil_scoped_release>())
.def("set_gamepad_connected", wrapInterfaceFunction(&InputProvider::setGamepadConnected),
py::call_guard<py::gil_scoped_release>())
.def("destroy_gamepad", wrapInterfaceFunction(&InputProvider::destroyGamepad),
py::call_guard<py::gil_scoped_release>())
.def("update_gamepad", wrapInterfaceFunction(&InputProvider::updateGamepad),
py::call_guard<py::gil_scoped_release>())
.def("buffer_gamepad_event",
[](InputProvider* iface, Gamepad* gamepad, GamepadInput input, float value) {
GamepadEvent event;
event.gamepad = gamepad;
event.input = input;
event.value = value;
iface->bufferGamepadEvent(event);
},
py::call_guard<py::gil_scoped_release>());
}
} // namespace input
} // namespace carb
|
omniverse-code/kit/include/carb/input/InputProvider.h | // Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Defines.h"
#include "InputTypes.h"
namespace carb
{
namespace input
{
/**
* Defines an input provider interface.
* This interface is meant to be used only by the input providers. Hence it is in the separate file.
* The examples of input providers could be windowing system or network input stream.
*/
struct InputProvider
{
    /**
     * Creates a logical keyboard.
     *
     * @param name Logical keyboard name.
     * @return The keyboard created.
     */
    Keyboard*(CARB_ABI* createKeyboard)(const char* name);

    /**
     * Destroys the keyboard.
     *
     * @param keyboard The logical keyboard.
     */
    void(CARB_ABI* destroyKeyboard)(Keyboard* keyboard);

    /**
     * Input "tick" for a specific keyboard. Is meant to be called in the beginning of a new frame, right before
     * sending events. It saves the old device state, allowing pressed and released button states to be
     * differentiated. @see ButtonFlags.
     *
     * @param keyboard Logical keyboard to update.
     */
    void(CARB_ABI* updateKeyboard)(Keyboard* keyboard);

    /**
     * Sends a keyboard event.
     *
     * @param evt Keyboard event.
     */
    void(CARB_ABI* bufferKeyboardEvent)(const KeyboardEvent& evt);

    /**
     * Creates a logical mouse.
     *
     * @param name Logical mouse name.
     * @return The mouse created.
     */
    Mouse*(CARB_ABI* createMouse)(const char* name);

    /**
     * Destroys the mouse.
     *
     * @param mouse The logical mouse.
     */
    void(CARB_ABI* destroyMouse)(Mouse* mouse);

    /**
     * Input "tick" for a specific mouse. Is meant to be called in the beginning of a new frame, right before
     * sending events. It saves the old device state, allowing pressed and released button states to be
     * differentiated. @see ButtonFlags.
     *
     * @param mouse Logical mouse to update.
     */
    void(CARB_ABI* updateMouse)(Mouse* mouse);

    /**
     * Sends a mouse event.
     *
     * @param evt Mouse event.
     */
    void(CARB_ABI* bufferMouseEvent)(const MouseEvent& evt);

    /**
     * Creates a logical gamepad.
     *
     * @param name Logical gamepad name.
     * @param guid Device GUID.
     * @return The gamepad created.
     */
    Gamepad*(CARB_ABI* createGamepad)(const char* name, const char* guid);

    /**
     * Sets the connected state of a logical gamepad.
     *
     * @param gamepad The logical gamepad.
     * @param connected Whether the gamepad is connected.
     */
    void(CARB_ABI* setGamepadConnected)(Gamepad* gamepad, bool connected);

    /**
     * Destroys the gamepad.
     *
     * @param gamepad The logical gamepad.
     */
    void(CARB_ABI* destroyGamepad)(Gamepad* gamepad);

    /**
     * Input "tick" for a specific gamepad. Is meant to be called in the beginning of a new frame, right before
     * sending events. It saves the old device state, allowing pressed and released button states to be
     * differentiated. @see ButtonFlags.
     *
     * @param gamepad Logical gamepad to update.
     */
    void(CARB_ABI* updateGamepad)(Gamepad* gamepad);

    /**
     * Sends a gamepad event.
     *
     * @param evt Gamepad event.
     */
    void(CARB_ABI* bufferGamepadEvent)(const GamepadEvent& evt);

    /**
     * Sends a unified input event.
     *
     * @param evt A reference to the unified input event description.
     */
    void(CARB_ABI* bufferInputEvent)(const InputEvent& evt);
};
} // namespace input
} // namespace carb
|
omniverse-code/kit/include/carb/input/InputUtils.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../logging/Log.h"
#include "IInput.h"
#include <map>
#include <string>
#include <cstring>
#include <functional>
namespace carb
{
namespace input
{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Name Mapping //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail
{
/**
 * Looks up an entry of a static mapping table by key, building a lookup map on first use.
 *
 * The map is built once per template instantiation from @p items. Initialization relies on
 * C++11 "magic static" semantics and is therefore thread-safe; the previous hand-rolled
 * `static bool s_isInitialized` flag could race (and double-insert) when the first calls
 * arrived concurrently from multiple threads.
 *
 * @param key The key to search for.
 * @param items The static table to index; all callers of a given instantiation pass the same table.
 * @return Pointer to the matching entry, or nullptr when no entry matches.
 */
template <typename Key, typename LessFn, typename ExtractKeyFn, typename StaticMappingDesc, size_t count>
const StaticMappingDesc* getMappingByKey(Key key, const StaticMappingDesc (&items)[count])
{
    // Built exactly once; the C++ runtime guards concurrent first calls.
    static const std::map<Key, const StaticMappingDesc*, LessFn> s_mapping = [&items] {
        std::map<Key, const StaticMappingDesc*, LessFn> mapping;
        for (size_t i = 0; i < count; ++i)
        {
            mapping.emplace(ExtractKeyFn()(items[i]), &items[i]);
        }
        return mapping;
    }();
    auto found = s_mapping.find(key);
    return (found != s_mapping.end()) ? found->second : nullptr;
}
/**
 * Generic strict-weak-ordering functor used as the map comparator in getMappingByKey.
 */
template <typename T>
struct Less
{
    bool operator()(const T& a, const T& b) const
    {
        // Invoke a default-constructed std::less. The previous `std::less<T>(a, b)`
        // attempted to construct std::less from two arguments and failed to compile
        // whenever this primary template was instantiated.
        return std::less<T>{}(a, b);
    }
};

/**
 * Specialization for C strings: orders by string content rather than pointer value.
 */
template <>
struct Less<const char*>
{
    bool operator()(const char* a, const char* b) const
    {
        return std::strcmp(a, b) < 0;
    }
};
/// Functor that pulls the `name` field out of a static mapping table entry.
template <typename Struct, typename String>
struct ExtractName
{
    auto operator()(const Struct& item) const -> String
    {
        return item.name;
    }
};
/// Functor that pulls the `ident` field out of a static mapping table entry.
template <typename Struct, typename Ident>
struct ExtractIdent
{
    auto operator()(const Struct& item) const -> Ident
    {
        return item.ident;
    }
};
template <typename Ident, typename String, typename StaticMappingDesc, size_t count>
Ident getIdentByName(String name, const StaticMappingDesc (&items)[count], Ident defaultIdent)
{
using LessFn = Less<String>;
using ExtractNameFn = ExtractName<StaticMappingDesc, String>;
using ExtractIdentFn = ExtractIdent<StaticMappingDesc, Ident>;
const auto* item = getMappingByKey<String, LessFn, ExtractNameFn, StaticMappingDesc, count>(name, items);
return (item != nullptr) ? ExtractIdentFn().operator()(*item) : defaultIdent;
}
template <typename String, typename Ident, typename StaticMappingDesc, size_t count>
String getNameByIdent(Ident ident, const StaticMappingDesc (&items)[count], String defaultName)
{
using LessFn = std::less<Ident>;
using ExtractIdentFn = ExtractIdent<StaticMappingDesc, Ident>;
using ExtractNameFn = ExtractName<StaticMappingDesc, String>;
const auto* item = getMappingByKey<Ident, LessFn, ExtractIdentFn, StaticMappingDesc, count>(ident, items);
return (item != nullptr) ? ExtractNameFn().operator()(*item) : defaultName;
}
} // namespace detail
/** Maps each DeviceType value to its canonical display name (used in textual bindings). */
static constexpr struct
{
    DeviceType ident;
    const char* name;
} g_deviceTypeToName[] = {
    // clang-format off
    { DeviceType::eKeyboard, "Keyboard" },
    { DeviceType::eMouse, "Mouse" },
    { DeviceType::eGamepad, "Gamepad" }
    // clang-format on
};
/// Returns the display name for @p deviceType, or "Unknown" for unrecognized values.
inline const char* getDeviceTypeString(DeviceType deviceType)
{
    return detail::getNameByIdent(deviceType, g_deviceTypeToName, "Unknown");
}
/// Parses a device-type display name back into a DeviceType; unrecognized names
/// yield DeviceType::eUnknown.
inline DeviceType getDeviceTypeFromString(const char* deviceTypeString)
{
    return detail::getIdentByName(deviceTypeString, g_deviceTypeToName, DeviceType::eUnknown);
}
/** Maps each KeyboardInput key code to its display name (used in textual bindings). */
static constexpr struct
{
    KeyboardInput ident;
    const char* name;
} kKeyboardInputCodeName[] = {
    // clang-format off
    { KeyboardInput::eUnknown, "Unknown" },
    { KeyboardInput::eSpace, "Space" },
    { KeyboardInput::eApostrophe, "'" },
    { KeyboardInput::eComma, "," },
    { KeyboardInput::eMinus, "-" },
    { KeyboardInput::ePeriod, "." },
    { KeyboardInput::eSlash, "/" },
    { KeyboardInput::eKey0, "0" },
    { KeyboardInput::eKey1, "1" },
    { KeyboardInput::eKey2, "2" },
    { KeyboardInput::eKey3, "3" },
    { KeyboardInput::eKey4, "4" },
    { KeyboardInput::eKey5, "5" },
    { KeyboardInput::eKey6, "6" },
    { KeyboardInput::eKey7, "7" },
    { KeyboardInput::eKey8, "8" },
    { KeyboardInput::eKey9, "9" },
    { KeyboardInput::eSemicolon, ";" },
    { KeyboardInput::eEqual, "=" },
    { KeyboardInput::eA, "A" },
    { KeyboardInput::eB, "B" },
    { KeyboardInput::eC, "C" },
    { KeyboardInput::eD, "D" },
    { KeyboardInput::eE, "E" },
    { KeyboardInput::eF, "F" },
    { KeyboardInput::eG, "G" },
    { KeyboardInput::eH, "H" },
    { KeyboardInput::eI, "I" },
    { KeyboardInput::eJ, "J" },
    { KeyboardInput::eK, "K" },
    { KeyboardInput::eL, "L" },
    { KeyboardInput::eM, "M" },
    { KeyboardInput::eN, "N" },
    { KeyboardInput::eO, "O" },
    { KeyboardInput::eP, "P" },
    { KeyboardInput::eQ, "Q" },
    { KeyboardInput::eR, "R" },
    { KeyboardInput::eS, "S" },
    { KeyboardInput::eT, "T" },
    { KeyboardInput::eU, "U" },
    { KeyboardInput::eV, "V" },
    { KeyboardInput::eW, "W" },
    { KeyboardInput::eX, "X" },
    { KeyboardInput::eY, "Y" },
    { KeyboardInput::eZ, "Z" },
    { KeyboardInput::eLeftBracket, "[" },
    { KeyboardInput::eBackslash, "\\" },
    { KeyboardInput::eRightBracket, "]" },
    { KeyboardInput::eGraveAccent, "`" },
    { KeyboardInput::eEscape, "Esc" },
    { KeyboardInput::eTab, "Tab" },
    { KeyboardInput::eEnter, "Enter" },
    { KeyboardInput::eBackspace, "Backspace" },
    { KeyboardInput::eInsert, "Insert" },
    { KeyboardInput::eDel, "Del" },
    { KeyboardInput::eRight, "Right" },
    { KeyboardInput::eLeft, "Left" },
    { KeyboardInput::eDown, "Down" },
    { KeyboardInput::eUp, "Up" },
    { KeyboardInput::ePageUp, "PageUp" },
    { KeyboardInput::ePageDown, "PageDown" },
    { KeyboardInput::eHome, "Home" },
    { KeyboardInput::eEnd, "End" },
    { KeyboardInput::eCapsLock, "CapsLock" },
    { KeyboardInput::eScrollLock, "ScrollLock" },
    { KeyboardInput::eNumLock, "NumLock" },
    { KeyboardInput::ePrintScreen, "PrintScreen" },
    { KeyboardInput::ePause, "Pause" },
    { KeyboardInput::eF1, "F1" },
    { KeyboardInput::eF2, "F2" },
    { KeyboardInput::eF3, "F3" },
    { KeyboardInput::eF4, "F4" },
    { KeyboardInput::eF5, "F5" },
    { KeyboardInput::eF6, "F6" },
    { KeyboardInput::eF7, "F7" },
    { KeyboardInput::eF8, "F8" },
    { KeyboardInput::eF9, "F9" },
    { KeyboardInput::eF10, "F10" },
    { KeyboardInput::eF11, "F11" },
    { KeyboardInput::eF12, "F12" },
    { KeyboardInput::eNumpad0, "Num0" },
    { KeyboardInput::eNumpad1, "Num1" },
    { KeyboardInput::eNumpad2, "Num2" },
    { KeyboardInput::eNumpad3, "Num3" },
    { KeyboardInput::eNumpad4, "Num4" },
    { KeyboardInput::eNumpad5, "Num5" },
    { KeyboardInput::eNumpad6, "Num6" },
    { KeyboardInput::eNumpad7, "Num7" },
    { KeyboardInput::eNumpad8, "Num8" },
    { KeyboardInput::eNumpad9, "Num9" },
    { KeyboardInput::eNumpadDel, "NumDel" },
    { KeyboardInput::eNumpadDivide, "NumDivide" },
    { KeyboardInput::eNumpadMultiply, "NumMultiply" },
    { KeyboardInput::eNumpadSubtract, "NumSubtract" },
    { KeyboardInput::eNumpadAdd, "NumAdd" },
    { KeyboardInput::eNumpadEnter, "NumEnter" },
    { KeyboardInput::eNumpadEqual, "NumEqual" },
    { KeyboardInput::eLeftShift, "LeftShift" },
    { KeyboardInput::eLeftControl, "LeftControl" },
    { KeyboardInput::eLeftAlt, "LeftAlt" },
    { KeyboardInput::eLeftSuper, "LeftSuper" },
    { KeyboardInput::eRightShift, "RightShift" },
    { KeyboardInput::eRightControl, "RightControl" },
    { KeyboardInput::eRightAlt, "RightAlt" },
    { KeyboardInput::eRightSuper, "RightSuper" },
    { KeyboardInput::eMenu, "Menu" }
    // clang-format on
};
/// Returns the display name for @p key, or "" when the key has no mapping.
inline const char* getKeyboardInputString(KeyboardInput key)
{
    return detail::getNameByIdent(key, kKeyboardInputCodeName, "");
}
/// Parses a key display name back into a KeyboardInput; unrecognized names yield
/// KeyboardInput::eUnknown.
inline KeyboardInput getKeyboardInputFromString(const char* inputString)
{
    return detail::getIdentByName(inputString, kKeyboardInputCodeName, KeyboardInput::eUnknown);
}
/** Maps each keyboard modifier flag to its display name (used in hotkey strings). */
static constexpr struct
{
    KeyboardModifierFlags ident;
    const char* name;
} kModifierFlagName[] = {
    // clang-format off
    { kKeyboardModifierFlagShift, "Shift" },
    { kKeyboardModifierFlagControl, "Ctrl" },
    { kKeyboardModifierFlagAlt, "Alt" },
    { kKeyboardModifierFlagSuper, "Super" },
    { kKeyboardModifierFlagCapsLock, "CapsLock" },
    { kKeyboardModifierFlagNumLock, "NumLock" }
    // clang-format on
};
/// Returns the display name for a single modifier flag, or "" when unmapped.
inline const char* getModifierFlagString(KeyboardModifierFlags flag)
{
    return detail::getNameByIdent(flag, kModifierFlagName, "");
}
/// Parses a single modifier name (e.g. "Ctrl") into its flag; unknown names yield 0.
inline KeyboardModifierFlags getModifierFlagFromString(const char* inputString)
{
    return detail::getIdentByName(inputString, kModifierFlagName, 0);
}
const char kDeviceNameSeparator[] = "::";
const char kModifierSeparator[] = " + ";
/// Builds the textual form of a modifier bitmask (e.g. "Ctrl + Shift").
/// Flags are emitted in kModifierFlagName table order, joined by kModifierSeparator.
inline std::string getModifierFlagsString(KeyboardModifierFlags mod)
{
    std::string out;
    for (const auto& entry : kModifierFlagName)
    {
        // Emit only flags whose bits are all present in the mask.
        if ((mod & entry.ident) == entry.ident)
        {
            if (!out.empty())
                out += kModifierSeparator;
            out += entry.name;
        }
    }
    return out;
}
/// Parses a "Mod1 + Mod2" style string into a modifier bitmask. Tokens are split on
/// kModifierSeparator verbatim (no trimming); unrecognized names are skipped with a
/// verbose log message.
inline KeyboardModifierFlags getModifierFlagsFromString(const char* modString)
{
    KeyboardModifierFlags result = KeyboardModifierFlags(0);
    const std::string input(modString);
    const size_t sepLen = strlen(kModifierSeparator);
    size_t tokenBegin = 0;
    for (;;)
    {
        const size_t sepPos = input.find(kModifierSeparator, tokenBegin);
        const std::string token = (sepPos == std::string::npos) ?
                                      input.substr(tokenBegin) :
                                      input.substr(tokenBegin, sepPos - tokenBegin);
        KeyboardModifierFlags flag = getModifierFlagFromString(token.c_str());
        if (flag)
        {
            result = (KeyboardModifierFlags)((uint32_t)result | (uint32_t)flag);
        }
        else
        {
            CARB_LOG_VERBOSE("Unknown hotkey modifier encountered: %s in %s", token.c_str(), modString);
        }
        if (sepPos == std::string::npos)
        {
            break;
        }
        tokenBegin = sepPos + sepLen;
    }
    return result;
}
/** Maps each MouseInput code to its display name (used in textual bindings). */
static constexpr struct
{
    MouseInput ident;
    const char* name;
} kMouseInputCodeName[] = {
    // clang-format off
    { MouseInput::eLeftButton, "LeftButton" },
    { MouseInput::eRightButton, "RightButton" },
    { MouseInput::eMiddleButton, "MiddleButton" },
    { MouseInput::eForwardButton, "ForwardButton" },
    { MouseInput::eBackButton, "BackButton" },
    { MouseInput::eScrollRight, "ScrollRight" },
    { MouseInput::eScrollLeft, "ScrollLeft" },
    { MouseInput::eScrollUp, "ScrollUp" },
    { MouseInput::eScrollDown, "ScrollDown" },
    { MouseInput::eMoveRight, "MoveRight" },
    { MouseInput::eMoveLeft, "MoveLeft" },
    { MouseInput::eMoveUp, "MoveUp" },
    { MouseInput::eMoveDown, "MoveDown" }
    // clang-format on
};
/// Returns the display name for @p key, or "" when the mouse input has no mapping.
inline const char* getMouseInputString(MouseInput key)
{
    return detail::getNameByIdent(key, kMouseInputCodeName, "");
}
/// Parses a mouse-input display name back into a MouseInput; unrecognized names yield
/// MouseInput::eCount.
inline MouseInput getMouseInputFromString(const char* inputString)
{
    return detail::getIdentByName(inputString, kMouseInputCodeName, MouseInput::eCount);
}
/** Maps each GamepadInput code to its display name (used in textual bindings). */
static constexpr struct
{
    GamepadInput ident;
    const char* name;
} kGamepadInputCodeName[] = {
    // clang-format off
    { GamepadInput::eLeftStickRight, "LeftStickRight" },
    { GamepadInput::eLeftStickLeft, "LeftStickLeft" },
    { GamepadInput::eLeftStickUp, "LeftStickUp" },
    { GamepadInput::eLeftStickDown, "LeftStickDown" },
    { GamepadInput::eRightStickRight, "RightStickRight" },
    { GamepadInput::eRightStickLeft, "RightStickLeft" },
    { GamepadInput::eRightStickUp, "RightStickUp" },
    { GamepadInput::eRightStickDown, "RightStickDown" },
    { GamepadInput::eLeftTrigger, "LeftTrigger" },
    { GamepadInput::eRightTrigger, "RightTrigger" },
    { GamepadInput::eA, "ButtonA" },
    { GamepadInput::eB, "ButtonB" },
    { GamepadInput::eX, "ButtonX" },
    { GamepadInput::eY, "ButtonY" },
    { GamepadInput::eLeftShoulder, "LeftShoulder" },
    { GamepadInput::eRightShoulder, "RightShoulder" },
    { GamepadInput::eMenu1, "Menu1" },
    { GamepadInput::eMenu2, "Menu2" },
    { GamepadInput::eLeftStick, "LeftStick" },
    { GamepadInput::eRightStick, "RightStick" },
    { GamepadInput::eDpadUp, "DpadUp" },
    { GamepadInput::eDpadRight, "DpadRight" },
    { GamepadInput::eDpadDown, "DpadDown" },
    { GamepadInput::eDpadLeft, "DpadLeft" }
    // clang-format on
};
/// Returns the display name for @p key, or "" when the gamepad input has no mapping.
inline const char* getGamepadInputString(GamepadInput key)
{
    return detail::getNameByIdent(key, kGamepadInputCodeName, "");
}
/// Parses a gamepad-input display name back into a GamepadInput; unrecognized names
/// yield GamepadInput::eCount.
inline GamepadInput getGamepadInputFromString(const char* inputString)
{
    return detail::getIdentByName(inputString, kGamepadInputCodeName, GamepadInput::eCount);
}
/// Button state on the previous input tick.
enum class PreviousButtonState
{
    kUp,
    kDown
};

/// Converts a boolean "was down on the previous tick" into a PreviousButtonState.
inline PreviousButtonState toPreviousButtonState(bool wasDown)
{
    if (wasDown)
        return PreviousButtonState::kDown;
    return PreviousButtonState::kUp;
}
/// Button state on the current input tick.
enum class CurrentButtonState
{
    kUp,
    kDown
};

/// Converts a boolean "is down on the current tick" into a CurrentButtonState.
inline CurrentButtonState toCurrentButtonState(bool isDown)
{
    if (isDown)
        return CurrentButtonState::kDown;
    return CurrentButtonState::kUp;
}
/// Combines the previous and current button states into button flags.
/// The result always carries exactly one state flag (up or down) and, when the state
/// changed since the previous tick, the matching transition flag as well.
inline ButtonFlags toButtonFlags(PreviousButtonState previousButtonState, CurrentButtonState currentButtonState)
{
    const bool isDown = (currentButtonState == CurrentButtonState::kDown);
    const bool wasDown = (previousButtonState == PreviousButtonState::kDown);
    ButtonFlags flags = isDown ? kButtonFlagStateDown : kButtonFlagStateUp;
    if (isDown && !wasDown)
        flags |= kButtonFlagTransitionDown;
    else if (!isDown && wasDown)
        flags |= kButtonFlagTransitionUp;
    return flags;
}
/// Formats a device identity as "Type[id]" (or just "Type" when @p deviceId is null).
/// Returns an empty string for out-of-range device types.
inline std::string getDeviceNameString(DeviceType deviceType, const char* deviceId)
{
    if ((size_t)deviceType >= (size_t)DeviceType::eCount)
        return "";
    std::string name = getDeviceTypeString(deviceType);
    if (deviceId)
    {
        name += "[";
        name += deviceId;
        name += "]";
    }
    return name;
}
/**
 * Splits a device name of the form "Type[id]" (as produced by getDeviceNameString)
 * into its device type and device id parts.
 *
 * @param deviceName The device name to parse; may be nullptr (logs a warning).
 * @param deviceType Optional out parameter: receives the parsed type, DeviceType::eCount
 *                   when @p deviceName is nullptr, or the "unknown" mapping for names not
 *                   present in the device-type table.
 * @param deviceId Optional out parameter: receives the id between the brackets, or ""
 *                 when no bracketed suffix is present.
 */
inline void parseDeviceNameString(const char* deviceName, DeviceType* deviceType, std::string* deviceId)
{
    if (!deviceName)
    {
        CARB_LOG_WARN("parseDeviceNameString: Empty device name");
        if (deviceType)
        {
            *deviceType = DeviceType::eCount;
        }
        return;
    }
    // The optional "[id]" suffix starts at the first '['.
    const char* deviceIdString = strstr(deviceName, "[");
    if (deviceType)
    {
        if (deviceIdString)
        {
            // Only the text before '[' names the type.
            std::string deviceTypeString(deviceName, deviceIdString - deviceName);
            *deviceType = getDeviceTypeFromString(deviceTypeString.c_str());
        }
        else
        {
            *deviceType = getDeviceTypeFromString(deviceName);
        }
    }
    if (deviceId)
    {
        if (deviceIdString)
        {
            const char* deviceNameEnd = deviceIdString + strlen(deviceIdString);
            // Strip the surrounding brackets. NOTE(review): the "- 2" assumes the name
            // ends with a closing ']'; a malformed name without it would drop the last
            // id character -- confirm inputs are always well-formed.
            *deviceId = std::string(deviceIdString + 1, deviceNameEnd - deviceIdString - 2);
        }
        else
        {
            *deviceId = "";
        }
    }
}
/**
 * Parses a "Device::Input" binding string (e.g. "Keyboard::A", "Mouse[0]::LeftButton")
 * into a device type plus the matching device-specific input code.
 *
 * @param deviceInputString The textual binding; leading spaces are skipped. When no "::"
 *                          separator is present, the whole string is treated as the input
 *                          name and the device name is empty.
 * @param deviceTypeOut Required out parameter: receives the parsed device type. The
 *                      function returns false immediately when this is nullptr.
 * @param keyboardInputOut Optional out parameter filled for keyboard bindings.
 * @param mouseInputOut Optional out parameter filled for mouse bindings.
 * @param gamepadInputOut Optional out parameter filled for gamepad bindings.
 * @param deviceIdOut Optional out parameter: receives the device id ("" when absent).
 * @return true when the device type was recognized, the matching out parameter was
 *         provided, and the input name did not map to the type's eCount sentinel;
 *         false otherwise.
 */
inline bool getDeviceInputFromString(const char* deviceInputString,
                                     DeviceType* deviceTypeOut,
                                     KeyboardInput* keyboardInputOut,
                                     MouseInput* mouseInputOut,
                                     GamepadInput* gamepadInputOut,
                                     std::string* deviceIdOut = nullptr)
{
    if (!deviceTypeOut)
        return false;
    const char* deviceInputStringTrimmed = deviceInputString;
    // Skip initial spaces
    while (*deviceInputStringTrimmed == ' ')
        ++deviceInputStringTrimmed;
    // Skip device name
    const char* inputNameString = strstr(deviceInputStringTrimmed, kDeviceNameSeparator);
    std::string deviceName;
    // No device name specified - fall back
    if (!inputNameString)
        inputNameString = deviceInputStringTrimmed;
    else
    {
        deviceName = std::string(deviceInputStringTrimmed, inputNameString - deviceInputStringTrimmed);
        const size_t kDeviceNameSeparatorLen = strlen(kDeviceNameSeparator);
        inputNameString += kDeviceNameSeparatorLen;
    }
    parseDeviceNameString(deviceName.c_str(), deviceTypeOut, deviceIdOut);
    if ((*deviceTypeOut == DeviceType::eKeyboard) && keyboardInputOut)
    {
        KeyboardInput keyboardInput = getKeyboardInputFromString(inputNameString);
        *keyboardInputOut = keyboardInput;
        // NOTE(review): getKeyboardInputFromString reports failure as eUnknown, not
        // eCount, so unrecognized key names still return true here -- confirm intended.
        return (keyboardInput != KeyboardInput::eCount);
    }
    if ((*deviceTypeOut == DeviceType::eMouse) && mouseInputOut)
    {
        MouseInput mouseInput = getMouseInputFromString(inputNameString);
        *mouseInputOut = mouseInput;
        return (mouseInput != MouseInput::eCount);
    }
    if ((*deviceTypeOut == DeviceType::eGamepad) && gamepadInputOut)
    {
        GamepadInput gamepadInput = getGamepadInputFromString(inputNameString);
        *gamepadInputOut = gamepadInput;
        return (gamepadInput != GamepadInput::eCount);
    }
    return false;
}
/**
 * Parses a full hotkey string (e.g. "Ctrl + Shift + Keyboard::A") into an ActionMappingDesc.
 *
 * Tokens separated by " + " are first tried as modifier names; any token that is not a
 * known modifier is parsed as the "Device::Input" part of the mapping (when several such
 * tokens are present, the last one wins).
 *
 * @param hotkeyString The hotkey string to parse.
 * @param deviceId Optional out parameter: receives the device id parsed from the device token.
 * @return The populated mapping description; the device pointers are always set to nullptr.
 */
inline ActionMappingDesc getActionMappingDescFromString(const char* hotkeyString, std::string* deviceId)
{
    const size_t kModifierSeparatorSize = strlen(kModifierSeparator);
    ActionMappingDesc actionMappingDesc;
    actionMappingDesc.keyboard = nullptr;
    actionMappingDesc.mouse = nullptr;
    actionMappingDesc.gamepad = nullptr;
    actionMappingDesc.modifiers = (KeyboardModifierFlags)0;
    std::string modifierNameString;
    const char* modifierName = hotkeyString;
    while (true)
    {
        // Cut out the next " + "-separated token.
        const char* modifierNameEnd = strstr(modifierName, kModifierSeparator);
        if (modifierNameEnd)
        {
            modifierNameString = std::string(modifierName, modifierNameEnd - modifierName);
        }
        else
        {
            modifierNameString = std::string(modifierName);
        }
        KeyboardModifierFlags mod = getModifierFlagFromString(modifierNameString.c_str());
        if (mod)
        {
            actionMappingDesc.modifiers = (KeyboardModifierFlags)((uint32_t)actionMappingDesc.modifiers | (uint32_t)mod);
        }
        else
        {
            // Not a modifier name: treat the token as "Device::Input".
            getDeviceInputFromString(modifierNameString.c_str(), &actionMappingDesc.deviceType,
                                     &actionMappingDesc.keyboardInput, &actionMappingDesc.mouseInput,
                                     &actionMappingDesc.gamepadInput, deviceId);
        }
        if (!modifierNameEnd)
        {
            break;
        }
        modifierName = modifierNameEnd;
        modifierName += kModifierSeparatorSize;
    }
    return actionMappingDesc;
}
/// Produces the textual form of an action mapping: "[Mods + ]Device::Input".
/// When @p deviceName is provided it is used verbatim for the device part; otherwise
/// the device type's display name is used.
inline std::string getStringFromActionMappingDesc(const ActionMappingDesc& actionMappingDesc,
                                                  const char* deviceName = nullptr)
{
    std::string text = getModifierFlagsString(actionMappingDesc.modifiers);
    if (!text.empty())
        text.append(kModifierSeparator);
    text.append(deviceName ? deviceName : getDeviceTypeString(actionMappingDesc.deviceType));
    text.append(kDeviceNameSeparator);
    switch (actionMappingDesc.deviceType)
    {
        case DeviceType::eKeyboard:
            text.append(getKeyboardInputString(actionMappingDesc.keyboardInput));
            break;
        case DeviceType::eMouse:
            text.append(getMouseInputString(actionMappingDesc.mouseInput));
            break;
        case DeviceType::eGamepad:
            text.append(getGamepadInputString(actionMappingDesc.gamepadInput));
            break;
        default:
            break;
    }
    return text;
}
/// Adds @p desc as the mapping for @p actionName only when the action currently has no
/// mappings at all.
///
/// @return true when the mapping was added; false when the action already had mappings.
inline bool setDefaultActionMapping(IInput* input,
                                    ActionMappingSet* actionMappingSet,
                                    const char* actionName,
                                    const ActionMappingDesc& desc)
{
    if (input->getActionMappingCount(actionMappingSet, actionName) != 0)
    {
        return false;
    }
    input->addActionMapping(actionMappingSet, actionName, desc);
    return true;
}
/**
 * Subscribes to the keyboard event stream for a specified keyboard.
 *
 * @param input A pointer to input interface.
 * @param keyboard A pointer to Logical keyboard, or nullptr if subscription to events from all keyboards is desired.
 * @param functor A universal reference to function-like callable object to be called on each keyboard event.
 *                Only the address of the referenced callable is stored, so it must outlive the subscription;
 *                passing a temporary leaves a dangling pointer once the full expression ends.
 * @return Subscription identifier.
 */
template <typename Functor>
inline SubscriptionId subscribeToKeyboardEvents(IInput* input, Keyboard* keyboard, Functor&& functor)
{
    // Decay the deduced type: with an lvalue argument Functor deduces to F&, and
    // static_cast<Functor*> would form the ill-formed "pointer to reference".
    // This matches the pattern already used by filterBufferedEvents below.
    using Func = std::decay_t<Functor>;
    return input->subscribeToKeyboardEvents(
        keyboard, [](const KeyboardEvent& evt, void* userData) -> bool { return (*static_cast<Func*>(userData))(evt); },
        &functor);
}
/**
 * Subscribes to the mouse event stream for a specified mouse.
 *
 * @param input A pointer to input interface.
 * @param mouse A pointer to Logical mouse, or nullptr if subscription to events from all mice is desired.
 * @param functor A universal reference to function-like callable object to be called on each mouse event.
 *                Only the address of the referenced callable is stored, so it must outlive the subscription;
 *                passing a temporary leaves a dangling pointer once the full expression ends.
 * @return Subscription identifier.
 */
template <typename Functor>
inline SubscriptionId subscribeToMouseEvents(IInput* input, Mouse* mouse, Functor&& functor)
{
    // Decay the deduced type: with an lvalue argument Functor deduces to F&, and
    // static_cast<Functor*> would form the ill-formed "pointer to reference".
    using Func = std::decay_t<Functor>;
    return input->subscribeToMouseEvents(
        mouse, [](const MouseEvent& evt, void* userData) -> bool { return (*static_cast<Func*>(userData))(evt); },
        &functor);
}
/**
 * Subscribes to the gamepad event stream for a specified gamepad.
 *
 * @param input A pointer to input interface.
 * @param gamepad A pointer to Logical gamepad, or nullptr if subscription to events from all gamepads is desired.
 * @param functor A universal reference to function-like callable object to be called on each gamepad event.
 *                Only the address of the referenced callable is stored, so it must outlive the subscription;
 *                passing a temporary leaves a dangling pointer once the full expression ends.
 * @return Subscription identifier.
 */
template <typename Functor>
inline SubscriptionId subscribeToGamepadEvents(IInput* input, Gamepad* gamepad, Functor&& functor)
{
    // Decay the deduced type: with an lvalue argument Functor deduces to F&, and
    // static_cast<Functor*> would form the ill-formed "pointer to reference".
    using Func = std::decay_t<Functor>;
    return input->subscribeToGamepadEvents(
        gamepad, [](const GamepadEvent& evt, void* userData) -> bool { return (*static_cast<Func*>(userData))(evt); },
        &functor);
}
/**
 * Subscribes to the gamepad connection event stream.
 * Once subscribed callback is called for all previously created gamepads.
 *
 * @param input A pointer to input interface.
 * @param functor A universal reference to function-like callable object to be called on each gamepad connection event.
 *                Only the address of the referenced callable is stored, so it must outlive the subscription;
 *                passing a temporary leaves a dangling pointer once the full expression ends.
 * @return Subscription identifier.
 */
template <typename Functor>
inline SubscriptionId subscribeToGamepadConnectionEvents(IInput* input, Functor&& functor)
{
    // Decay the deduced type: with an lvalue argument Functor deduces to F&, and
    // static_cast<Functor*> would form the ill-formed "pointer to reference".
    using Func = std::decay_t<Functor>;
    return input->subscribeToGamepadConnectionEvents(
        [](const GamepadConnectionEvent& evt, void* userData) { (*static_cast<Func*>(userData))(evt); }, &functor);
}
/**
 * Subscribes to the action event stream for a specified action.
 * Event is triggered on any action value change.
 *
 * @param input A pointer to input interface.
 * @param actionMappingSet A pointer to action mapping set
 * @param actionName A pointer to action string identifier.
 * @param functor A universal reference to function-like callable object to be called on the action event.
 *                Only the address of the referenced callable is stored, so it must outlive the subscription;
 *                passing a temporary leaves a dangling pointer once the full expression ends.
 * @return Subscription identifier.
 */
template <typename Functor>
inline SubscriptionId subscribeToActionEvents(IInput* input,
                                              ActionMappingSet* actionMappingSet,
                                              const char* actionName,
                                              Functor&& functor)
{
    // Decay the deduced type: with an lvalue argument Functor deduces to F&, and
    // static_cast<Functor*> would form the ill-formed "pointer to reference".
    using Func = std::decay_t<Functor>;
    return input->subscribeToActionEvents(
        actionMappingSet, actionName,
        [](const ActionEvent& evt, void* userData) -> bool { return (*static_cast<Func*>(userData))(evt); }, &functor);
}
/**
 * Filter and modify a unified input events in the event buffer.
 *
 * @param input A pointer to input interface.
 * @param callable A universal reference to function-like callable object to be called on each input event.
 *                 Only its address is passed through; the callable is used synchronously during this call.
 */
template <typename Callable>
inline void filterBufferedEvents(IInput* input, Callable&& callable)
{
    // Decayed type so the cast below is well-formed for both lvalue and rvalue callables.
    using CallableType = std::decay_t<Callable>;
    // Captureless trampoline converts implicitly to the C-style callback pointer.
    auto trampoline = [](InputEvent& evt, void* userData) {
        return (*static_cast<CallableType*>(userData))(evt);
    };
    input->filterBufferedEvents(trampoline, &callable);
}
} // namespace input
} // namespace carb
|
omniverse-code/kit/include/carb/input/IInput.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Interface.h"
#include "InputTypes.h"
namespace carb
{
namespace input
{
struct InputProvider;
class ActionMappingSet;
/**
 * Defines an input interface.
 *
 * Input plugin allows user to listen to the input devices, but it
 * is not intended to work with the input hardware. The input hardware processing
 * is delegated to the input providers, which should be implemented as separate
 * plugins.
 * Input providers create logical input devices. For example, a window may have a keyboard and mouse associated
 * with it, i.e. a physical keyboard state may be different from a logical
 * keyboard associated with a window, due to some physical key state changes
 * being sent to a different window.
 *
 * Everything to be used by input providers is put into the InputProvider struct in the separate file.
 * All the functions from Input.h are meant to be used by input consumers (end user).
 *
 * User can subscribe to the device events, as well as device connection events,
 * and upon subscribing to device connection events, user immediately receives
 * "connect" notifications for all already present devices of the kind. Similar is
 * true for unsubscribing - user will immediately get "disconnect" notifications
 * for all still present devices.
 *
 * One notable feature of device handling is that there is no logical difference
 * between a button(key) and an axis: both can be either polled by value, producing
 * floating-point value, or by button flags, which allow treating analog inputs
 * as buttons (one example is treat gamepad stick as discrete d-pad).
 *
 * The plugin also allows mapping actions to device inputs, allowing to
 * set up multiple slots per action mapping. Those actions could be polled in
 * a similar manner (i.e. by value or as button flags).
 */
struct IInput
{
    CARB_PLUGIN_INTERFACE("carb::input::IInput", 1, 1)

    /**
     * Gets the input provider's part of the input interface.
     *
     * @return Input provider interface.
     */
    InputProvider*(CARB_ABI* getInputProvider)();

    /**
     * Start processing input.
     */
    void(CARB_ABI* startup)();

    /**
     * Shutdown and stop processing input.
     */
    void(CARB_ABI* shutdown)();

    /**
     * Get keyboard logical device name.
     *
     * @param keyboard Logical keyboard.
     * @return Specified keyboard logical device name string.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use getDeviceName instead.
     @endrst
     */
    const char*(CARB_ABI* getKeyboardName)(Keyboard* keyboard);

    /**
     * Subscribes plugin user to the keyboard event stream for a specified keyboard.
     *
     * @param keyboard Logical keyboard, or nullptr if subscription to events from all keyboards is desired.
     * @param fn Callback function to be called on received event.
     * @param userData Pointer to the user data to be passed into the callback.
     * @return Subscription identifier.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use subscribeToInputEvents instead.
     @endrst
     */
    SubscriptionId(CARB_ABI* subscribeToKeyboardEvents)(Keyboard* keyboard, OnKeyboardEventFn fn, void* userData);

    /**
     * Unsubscribes plugin user from the keyboard event stream for a specified keyboard.
     *
     * @param keyboard Logical keyboard.
     * @param id Subscription identifier.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use unsubscribeToInputEvents instead.
     @endrst
     */
    void(CARB_ABI* unsubscribeToKeyboardEvents)(Keyboard* keyboard, SubscriptionId id);

    /**
     * Gets the value for the specified keyboard input kind or number of keys that are pressed.
     *
     * @param keyboard Logical keyboard or nullptr to test all keyboards.
     * @param input Keyboard input kind (key) to test, or KeyboardInput::eCount to count the number of keys down.
     * @return Specified keyboard input value when key is specific, or count of keys pressed for KeyboardInput::eCount.
     */
    float(CARB_ABI* getKeyboardValue)(Keyboard* keyboard, KeyboardInput input);

    /**
     * Gets the button flag for the specified keyboard input kind.
     * Each input is treated as button, based on the press threshold.
     *
     * @param keyboard Logical keyboard.
     * @param input Keyboard input kind (key).
     * @return Specified keyboard input as button flags.
     */
    ButtonFlags(CARB_ABI* getKeyboardButtonFlags)(Keyboard* keyboard, KeyboardInput input);

    /**
     * Get mouse logical device name.
     *
     * @param mouse Logical mouse.
     * @return Specified mouse logical device name string.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use getDeviceName instead.
     @endrst
     */
    const char*(CARB_ABI* getMouseName)(Mouse* mouse);

    /**
     * Gets the value for the specified mouse input kind.
     *
     * @param mouse Logical mouse.
     * @param input Mouse input kind (button/axis).
     * @return Specified mouse input value.
     */
    float(CARB_ABI* getMouseValue)(Mouse* mouse, MouseInput input);

    /**
     * Gets the button flag for the specified mouse input kind.
     * Each input is treated as button, based on the press threshold.
     *
     * @param mouse Logical mouse.
     * @param input Mouse input kind (button/axis).
     * @return Specified mouse input as button flags.
     */
    ButtonFlags(CARB_ABI* getMouseButtonFlags)(Mouse* mouse, MouseInput input);

    /**
     * Gets the mouse coordinates for the specified mouse, normalized by the associated window size.
     *
     * @param mouse Logical mouse.
     * @return Coordinates.
     */
    Float2(CARB_ABI* getMouseCoordsNormalized)(Mouse* mouse);

    /**
     * Gets the absolute mouse coordinates for the specified mouse.
     *
     * @param mouse Logical mouse.
     * @return Coordinates.
     */
    Float2(CARB_ABI* getMouseCoordsPixel)(Mouse* mouse);

    /**
     * Subscribes plugin user to the mouse event stream for a specified mouse.
     *
     * @param mouse Logical mouse, or nullptr if subscription to events from all mice is desired.
     * @param fn Callback function to be called on received event.
     * @param userData Pointer to the user data to be passed into the callback.
     * @return Subscription identifier.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use subscribeToInputEvents instead.
     @endrst
     */
    SubscriptionId(CARB_ABI* subscribeToMouseEvents)(Mouse* mouse, OnMouseEventFn fn, void* userData);

    /**
     * Unsubscribes plugin user from the mouse event stream for a specified mouse.
     *
     * @param mouse Logical mouse.
     * @param id Subscription identifier.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use unsubscribeToInputEvents instead.
     @endrst
     */
    void(CARB_ABI* unsubscribeToMouseEvents)(Mouse* mouse, SubscriptionId id);

    /**
     * Get gamepad logical device name.
     *
     * @param gamepad Logical gamepad.
     * @return Specified gamepad logical device name string.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use getDeviceName instead.
     @endrst
     */
    const char*(CARB_ABI* getGamepadName)(Gamepad* gamepad);

    /**
     * Get gamepad GUID.
     *
     * @param gamepad Logical gamepad.
     * @return Specified gamepad logical device GUID.
     */
    const char*(CARB_ABI* getGamepadGuid)(Gamepad* gamepad);

    /**
     * Gets the value for the specified gamepad input kind.
     *
     * @param gamepad Logical gamepad.
     * @param input Gamepad input kind (button/axis).
     * @return Specified gamepad input value.
     */
    float(CARB_ABI* getGamepadValue)(Gamepad* gamepad, GamepadInput input);

    /**
     * Gets the button flag for the specified gamepad input kind.
     * Each input is treated as button, based on the press threshold.
     *
     * @param gamepad Logical gamepad.
     * @param input Gamepad input kind (button/axis).
     * @return Specified gamepad input as button flags.
     */
    ButtonFlags(CARB_ABI* getGamepadButtonFlags)(Gamepad* gamepad, GamepadInput input);

    /**
     * Subscribes plugin user to the gamepad event stream for a specified gamepad.
     *
     * @param gamepad Logical gamepad, or nullptr if subscription to events from all gamepads is desired.
     * @param fn Callback function to be called on received event.
     * @param userData Pointer to the user data to be passed into the callback.
     * @return Subscription identifier.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use subscribeToInputEvents instead.
     @endrst
     */
    SubscriptionId(CARB_ABI* subscribeToGamepadEvents)(Gamepad* gamepad, OnGamepadEventFn fn, void* userData);

    /**
     * Unsubscribes plugin user from the gamepad event stream for a specified gamepad.
     *
     * @param gamepad Logical gamepad.
     * @param id Subscription identifier.
     *
     * @rst
     .. deprecated:: 100.1
     This method is deprecated and will be removed soon, please use unsubscribeToInputEvents instead.
     @endrst
     */
    void(CARB_ABI* unsubscribeToGamepadEvents)(Gamepad* gamepad, SubscriptionId id);

    /**
     * Subscribes plugin user to the gamepad connection event stream.
     * Once subscribed callback is called for all previously created gamepads.
     *
     * @param fn Callback function to be called on received event.
     * @param userData Pointer to the user data to be passed into the callback.
     * @return Subscription identifier.
     */
    SubscriptionId(CARB_ABI* subscribeToGamepadConnectionEvents)(OnGamepadConnectionEventFn fn, void* userData);

    /**
     * Unsubscribes plugin user from the gamepad connection event stream.
     * Unsubscription triggers callback to be called with all devices left as being destroyed.
     *
     * @param id Subscription identifier.
     */
    void(CARB_ABI* unsubscribeToGamepadConnectionEvents)(SubscriptionId id);

    /**
     * Processes buffered events queue and sends unconsumed events as device events, action mapping events, and
     * updates device states. Clears buffered events queues.
     */
    void(CARB_ABI* distributeBufferedEvents)();

    /**
     * Create action mapping set - a place in settings where named action mappings are stored.
     *
     * @param settingsPath Path in settings where the set mappings are stored.
     * @return Opaque pointer to the action mapping set.
     */
    ActionMappingSet*(CARB_ABI* createActionMappingSet)(const char* settingsPath);

    /**
     * Get existing action mapping set from the settings path provided.
     *
     * @param settingsPath Path in settings where the set mappings are stored.
     * @return Opaque pointer to the action mapping set.
     */
    ActionMappingSet*(CARB_ABI* getActionMappingSetByPath)(const char* settingsPath);

    /**
     * Destroy action mapping set.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     */
    void(CARB_ABI* destroyActionMappingSet)(ActionMappingSet* actionMappingSet);

    /**
     * Get total action count registered in the plugin with 1 or more action mapping.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @return The number of the actions.
     */
    size_t(CARB_ABI* getActionCount)(ActionMappingSet* actionMappingSet);

    /**
     * Get array of all actions.
     * The size of an array is equal to the Input::getActionCount().
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @return The array of actions.
     */
    const char* const*(CARB_ABI* getActions)(ActionMappingSet* actionMappingSet);

    /**
     * Adds action mapping to the specified action.
     * Each action keeps a list of mappings. This function push mapping to the end of the list.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @param desc Action mapping description.
     * @return The index of added mapping.
     */
    size_t(CARB_ABI* addActionMapping)(ActionMappingSet* actionMappingSet,
                                       const char* actionName,
                                       const ActionMappingDesc& desc);

    /**
     * Sets and overrides the indexed action mapping for the specified action.
     * Each action keeps a list of mappings. This function sets list item according by the index.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @param index The index of mapping to override. It should be in range [0, mapping count).
     * @param desc Action mapping description.
     */
    void(CARB_ABI* setActionMapping)(ActionMappingSet* actionMappingSet,
                                     const char* actionName,
                                     size_t index,
                                     const ActionMappingDesc& desc);

    /**
     * Remove indexed action mapping for the specified action.
     * Each action keeps a list of mappings. This function removes list item by the index.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @param index The index of mapping to remove. It should be in range [0, mapping count).
     */
    void(CARB_ABI* removeActionMapping)(ActionMappingSet* actionMappingSet, const char* actionName, size_t index);

    /**
     * Clears and removes all mappings associated with the action.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     */
    void(CARB_ABI* clearActionMappings)(ActionMappingSet* actionMappingSet, const char* actionName);

    /**
     * Get mappings count associated with the action.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @return The number of the mapping in the list for an action.
     */
    size_t(CARB_ABI* getActionMappingCount)(ActionMappingSet* actionMappingSet, const char* actionName);

    /**
     * Get array of mappings associated with the action.
     * The size of an array is equal to the Input::getMappingCount().
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @return The array of mappings for an action.
     */
    const ActionMappingDesc*(CARB_ABI* getActionMappings)(ActionMappingSet* actionMappingSet, const char* actionName);

    /**
     * Gets the value for the specified action.
     * If multiple mapping are associated with the action the biggest value is returned.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @return Specified action value.
     */
    float(CARB_ABI* getActionValue)(ActionMappingSet* actionMappingSet, const char* actionName);

    /**
     * Gets the button flag for the specified action.
     * Each mapping is treated as button, based on the press threshold.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @return Specified action value as button flags.
     */
    ButtonFlags(CARB_ABI* getActionButtonFlags)(ActionMappingSet* actionMappingSet, const char* actionName);

    /**
     * Subscribes plugin user to the action event stream for a specified action.
     * Event is triggered on any action value change.
     *
     * @param actionMappingSet Opaque pointer to the action mapping set.
     * @param actionName Action string identifier.
     * @param fn Callback function to be called on received event.
     * @param userData Pointer to the user data to be passed into the callback.
     * @return Subscription identifier.
     */
    SubscriptionId(CARB_ABI* subscribeToActionEvents)(ActionMappingSet* actionMappingSet,
                                                      const char* actionName,
                                                      OnActionEventFn fn,
                                                      void* userData);

    /**
     * Unsubscribes plugin user from the action event stream for a specified action.
     *
     * @param id Subscription identifier.
     */
    void(CARB_ABI* unsubscribeToActionEvents)(SubscriptionId id);

    /**
     * Filters all buffered events by calling the specified filter function on each event.
     *
     * The given @p fn may modify events in-place and/or may add additional events via the InputProvider obtained from
     * getInputProvider(). Any additional events that are added during a call to filterBufferedEvents() will not be
     * passed to @p fn during that call. However, future calls to filterBufferedEvents() will pass the events to @p fn.
     * Any new buffered events added by InputProvider during @p fn will be added to the end of the event list. Events
     * modified during @p fn remain in their relative position in the event list.
     *
     * The outcome of an event is based on what @p fn returns for that event. If FilterResult::eConsume is returned, the
     * event is considered processed and is removed from the list of buffered events. Future calls to
     * filterBufferedEvents() will not receive the event and it will not be sent when distributeBufferedEvents() is
     * called. If FilterResult::eRetain is returned, the (possibly modified) event remains in the list of buffered
     * events. Future calls to filterBufferedEvents() will receive the event and it will be sent when
     * distributeBufferedEvents() is called.
     *
     * This function may be called multiple times to re-filter events. For instance, the given @p fn may be interested
     * in only certain types of events.
     *
     * The remaining buffered events are sent when distributeBufferedEvents() is called, at which point the list of
     * buffered events is cleared.
     *
     * @warning Calling filterBufferedEvents() or distributeBufferedEvents() from @p fn is expressly disallowed.
     *
     * @thread_safety An internal lock is held while @p fn is called on all events, which synchronizes-with
     * distributeBufferedEvents() and the various InputProvider functions to buffer events. Although the lock provides
     * thread safety to synchronize these operations, if buffered events are added from other threads it is conceivable
     * that events could be added between filterBufferedEvents() and distributeBufferedEvents(), causing them to be sent
     * before being filtered. If this is a cause for concern, use of an external lock is recommended.
     *
     * @param fn A pointer to a callback function to be called on each input event.
     * @param userData A pointer to the user data to be passed into the callback.
     */
    void(CARB_ABI* filterBufferedEvents)(InputEventFilterFn fn, void* userData);

    /**
     * Get input device name.
     *
     * @param device Input device.
     * @return Specified input device name string.
     */
    const char*(CARB_ABI* getDeviceName)(InputDevice* device);

    /**
     * Get input device type.
     *
     * @param device Input device.
     * @return Specified input device type or DeviceType::eUnknown.
     */
    DeviceType(CARB_ABI* getDeviceType)(InputDevice* device);

    /**
     * Subscribes plugin user to the input event stream for a specified device.
     *
     * @param device Input device, or nullptr if subscription to events from all devices is desired.
     * @param events A bit mask to event types to subscribe to. Currently kEventTypeAll is only supported.
     * @param fn Callback function to be called on received event.
     * @param userData Pointer to the user data to be passed into the callback.
     * @param order Subscriber position hint [0..N-1] from the beginning, [-1, -N] from the end (-1 is default).
     * @return Subscription identifier.
     */
    SubscriptionId(CARB_ABI* subscribeToInputEvents)(
        InputDevice* device, EventTypeMask events, OnInputEventFn fn, void* userData, SubscriptionOrder order);

    /**
     * Unsubscribes plugin user from the input event stream for a specified device.
     *
     * @param id Subscription identifier.
     */
    void(CARB_ABI* unsubscribeToInputEvents)(SubscriptionId id);

    /**
     * Gets the modifier state on specific devices and/or device-types.
     *
     * @param modifierFlags The modifiers to check against, or 0 to check against all modifiers.
     * @param devices An array of InputDevice pointers.
     * @param nDevices The number of devices in the devices array.
     * @param deviceTypes An array of device-types to check against.
     * @param nDeviceTypes The number of device-types in the deviceTypes array.
     * @param mouseButtons An array of mouse buttons to test against.
     * @param numMouseButtons The number of buttons in the mouseButtons array.
     * @return KeyboardModifierFlags for all devices queried.
     */
    KeyboardModifierFlags(CARB_ABI* getModifierFlags)(KeyboardModifierFlags modifierFlags,
                                                      const InputDevice* const* devices,
                                                      size_t nDevices,
                                                      const DeviceType* const deviceTypes,
                                                      size_t nDeviceTypes,
                                                      const MouseInput* mouseButtons,
                                                      size_t numMouseButtons);

    /**
     * Gets the modifier state on all known keyboard and mouse devices.
     *
     * @param modifierFlags The modifiers to check against, or 0 to check against all modifiers.
     * @param mouseButtons An array of mouse buttons to test against (providing nullptr will test against all mouse
     * buttons).
     * @param numMouseButtons The number of buttons in the mouseButtons array.
     * @return KeyboardModifierFlags for all devices queried.
     */
    KeyboardModifierFlags(CARB_ABI* getGlobalModifierFlags)(KeyboardModifierFlags modifierFlags,
                                                            const MouseInput* mouseButtons,
                                                            size_t numMouseButtons);
};
} // namespace input
} // namespace carb
|
omniverse-code/kit/include/carb/delegate/Delegate.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Carbonite Delegate implementation.
#pragma once
#include "../Defines.h"
#include "../Strong.h"
#include "../container/IntrusiveList.h"
#include "../cpp/Tuple.h"
#include "../thread/Mutex.h"
#include "../thread/Util.h"
#include <type_traits>
#include <vector>
#include <memory>
namespace carb
{
//! Namespace for Carbonite delegate implementation.
namespace delegate
{
template <class T>
class Delegate;
template <class T>
class DelegateRef;
/**
* Implements a thread-safe callback system that can have multiple subscribers.
*
* A delegate is a weak-coupling callback system. Essentially, a system uses Delegate to have a callback that can
* be received by multiple subscribers.
*
* Delegate has two ways to uniquely identify a bound callback: \ref Bind() will output a \ref Handle, or the
* caller can provide a key of any type with BindWithKey(). Either the \ref Handle or the given key can be passed to
* \ref Unbind() in order to remove a callback.
*
* Delegate can call all bound callbacks with the Call() function. Recursive calling is allowed with caveats listed
* below.
*
* Delegate is thread-safe for all operations. Call() can occur simultaneously in multiple threads. An Unbind()
* will wait if the bound callback is currently executing in another thread.
*
* Delegate can be destroyed from a binding (during \ref Call()) as the internal state is not disposed of
* until all active calls have been completed. See ~Delegate().
*
* Delegate does not hold any internal locks while calling bound callbacks. It is strongly recommended to avoid
* holding locks when invoking Delegate's \ref Call() function.
*
* These tenets make up the basis of Carbonite's Basic Callback Hygiene as described in the @rstdoc{../../../../CODING}.
*/
template <class... Args>
class Delegate<void(Args...)>
{
public:
//! A type representing the function type
using FunctionType = void(Args...);
/**
* A quasi-unique identifier outputted from Bind()
*
* \ref Handle is unique as long as it has not rolled over.
*/
CARB_STRONGTYPE(Handle, size_t);
CARB_DOC_CONSTEXPR static Handle kInvalidHandle{ 0 }; //!< A value representing an invalid \ref Handle value
/**
* Constructs an empty delegate
*/
Delegate() = default;
/**
* Move constructor.
*
* @param other The Delegate to move from. This Delegate will be left in a valid but empty state.
*/
Delegate(Delegate&& other);
/**
* Move-assign operator
*
* @param other The Delegate to move-assign from. Will be swapped with `*this`.
*
* @returns `*this`
*/
Delegate& operator=(Delegate&& other);
/**
* Destructor.
*
* The destructor unbinds all bindings and follows the waiting paradigm explained by \ref UnbindAll(). As the
* internal state of the delegate is held until all active calls have completed, it is valid to destroy Delegate
* from a callback.
*/
~Delegate();
/**
* Binds a callable (with optional additional arguments) to the delegate.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* \note This function can be done from within a callback. If done during a callback, the newly bound callable will
* not be available to be called until \ref Call() returns, at which point the callback can be called by other
* threads or outer \ref Call() calls (in the case of recursive calls to \ref Call()).
*
* @param hOut An optional pointer that receives a \ref Handle representing the binding to \c Callable. This can
* be \c nullptr to ignore the \ref Handle. The same \ref Handle is also returned. In a multi-threaded environment,
* it is possible for \p func to be called before \ref Bind() returns, but \p hOut will have already been assigned.
* @param func A callable object, such as lambda, functor or [member-]function. Return values are ignored. The
* callable must take as parameters \p args followed by the \c Args declared in the delegate template signature.
* @param args Additional optional arguments to bind with \p func. If \p func is a member function pointer the
* first argument must be the \c this pointer to call the member function with.
* @return The \ref Handle also passed to \p hOut.
*/
template <class Callable, class... BindArgs>
Handle Bind(Handle* hOut, Callable&& func, BindArgs&&... args);
/**
* Binds a callable (with optional additional arguments) to the delegate with a user-defined key.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* \note This function can be done from within a callback. If done during a callback, the newly bound callable will
* not be available to be called until \ref Call() returns, at which point the callback can be called by other
* threads or outer \ref Call() calls (in the case of recursive calls to \ref Call()).
*
* @param key A user-defined key of any type that supports equality (==) to identify this binding. Although multiple
* bindings can be referenced by the same key, Unbind() will only remove a single binding.
* @param func A callable object, such as lambda, functor or [member-]function. Return values are ignored. The
* callable must take as parameters \p args followed by the \c Args declared in the delegate template signature.
* @param args Additional optional arguments to bind with \p func. If \p func is a member function pointer the
* first argument must be the \c this pointer to call the member function with.
*/
template <class KeyType, class Callable, class... BindArgs>
void BindWithKey(KeyType&& key, Callable&& func, BindArgs&&... args);
/**
* Unbinds any single binding referenced by the given key.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* This function can be done from within a callback. If the referenced binding is currently executing in
* another thread, Unbind() will not return until it has finished. Any binding can be safely unbound during a
* callback. If a binding un-binds itself, the captured arguments and callable object will not be destroyed
* until just before \ref Call() returns.
*
* \note It is guaranteed that when \ref Unbind() returns, the callback is not running and will never run in any
* threads.
*
* @param key A \ref Handle or user-defined key previously passed to \ref BindWithKey().
* @return \c true if a binding was un-bound; \c false if no binding matching key was found.
*/
template <class KeyType>
bool Unbind(KeyType&& key);
/**
* Indicates if a binding exists in `*this` with the given key or Handle.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
* However, without external synchronization, it is possible for the result of this function to be incorrect by the
* time it is used.
*
* @param key A \ref Handle or user-defined key previously passed to \ref BindWithKey().
* @returns \c true if a binding exists with the given \p key; \c false if no binding matching key was found.
*/
template <class KeyType>
bool HasKey(KeyType&& key) const noexcept;
/**
* Unbinds the currently executing callback without needing an identifying key.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* \note If not done within the context of a callback, this function has no effect.
*
* @return \c true if a binding was un-bound; \c false if there is no current binding.
*/
bool UnbindCurrent();
/**
* Unbinds all bound callbacks, possibly waiting for active calls to complete.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* Unbinds all currently bound callbacks. This function will wait to return until bindings that it unbinds have
* completed all calls in other threads. It is safe to perform this operation from within a callback.
*/
void UnbindAll();
/**
* Returns the number of active bound callbacks.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* \note This function returns the count of \a active bound callbacks only. Pending callbacks (that were added with
* \ref Bind() during \ref Call()) are not counted. Use \ref HasPending() to determine if pending bindings exist.
*
* @returns the number of active bound callbacks.
*/
size_t Count() const noexcept;
/**
* Checks whether the Delegate has any pending bindings.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
* The nature of this function is such that the result may be stale by the time it is read in the calling thread,
* unless the calling thread has at least one pending binding.
*
* \note This function returns \c true if any \a pending bound callbacks exist. This will only ever be non-zero if
* one or more threads are currently in the \ref Call() function.
*
* @returns \c true if any pending bindings exist; \c false otherwise.
*/
bool HasPending() const noexcept;
/**
* Checks whether the Delegate contains no pending or active bound callbacks.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
* However, without external synchronization, it is possible for the result of this function to be incorrect by the
* time it is used.
*
* @returns \c true if there are no active or pending callbacks present in `*this`; \c false otherwise.
*/
bool IsEmpty() const noexcept;
/**
* Given a type, returns a \c std::vector containing a copy of all keys used for bindings.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* \note This function can be done from within a callback. Pending callbacks (that were added with \ref Bind()
* during \ref Call()) are included, even if they are pending in other threads. Note that in a multi-threaded
* environment, the actual keys in use by Delegate may change after this function returns; in such cases, an
* external mutex is recommended. \c KeyType must be Copyable in order for this function to compile.
*
* @tparam KeyType \ref Handle or a type previously passed to \ref BindWithKey()
* @return a \c std::vector of copies of keys of the given type in use by this Delegate.
*/
template <class KeyType>
std::vector<std::decay_t<KeyType>> GetKeysByType() const;
/**
* Calls all bound callbacks for this Delegate.
*
* \thread_safety: Thread-safe with respect to other Delegate operations except for construction and destruction.
*
* \note This function can be done concurrently in multiple threads simultaneously. Recursive calls to \ref Call()
* are allowed but the caller must take care to avoid endless recursion. Callbacks are free to call \ref Bind(),
* \ref Unbind() or any other Delegate function. No internal locks are held while callbacks are called.
*
* @param args The arguments to pass to the callbacks.
*/
void Call(Args... args);
/**
* Syntactic sugar for \ref Call()
*/
void operator()(Args... args);
/**
* Swaps with another Delegate.
*
* @param other The Delegate to swap with.
*/
void swap(Delegate& other);
CARB_PREVENT_COPY(Delegate);
private:
template <class U>
friend class DelegateRef;
struct BaseBinding;
template <class Key>
struct KeyedBinding;
using Container = carb::container::IntrusiveList<BaseBinding, &BaseBinding::link>;
struct ActiveCall;
using ActiveCallList = carb::container::IntrusiveList<ActiveCall, &ActiveCall::link>;
struct Impl : public std::enable_shared_from_this<Impl>
{
mutable carb::thread::mutex m_mutex;
Container m_entries;
ActiveCallList m_activeCalls;
~Impl();
};
constexpr Delegate(std::nullptr_t);
Delegate(std::shared_ptr<Impl> pImpl);
ActiveCall* lastCurrentThreadCall();
const ActiveCall* lastCurrentThreadCall() const;
void UnbindInternal(std::unique_lock<carb::thread::mutex>& g, typename Container::iterator iter);
static size_t nextHandle();
std::shared_ptr<Impl> m_impl{ std::make_shared<Impl>() };
};
/**
* Holds a reference to a Delegate.
*
* Though Delegate is non-copyable, \c DelegateRef can be thought of as a `std::shared_ptr` for Delegate.
* This allows a Delegate's bindings to remain active even though the original Delegate has been destroyed, which can
* allow calls in progress to complete, or a mutex protecting the original Delegate to be unlocked.
*/
template <class... Args>
class DelegateRef<void(Args...)>
{
public:
    //! The Delegate type that is referenced.
    using DelegateType = Delegate<void(Args...)>;
    /**
     * Default constructor.
     *
     * Creates an empty DelegateRef such that `bool(*this)` would be `false`.
     */
    constexpr DelegateRef() noexcept;
    /**
     * Constructor.
     *
     * Constructs a DelegateRef that holds a strong reference to \p delegate.
     * @param delegate The Delegate object to hold a reference to.
     */
    explicit DelegateRef(DelegateType& delegate);
    /**
     * Copy constructor.
     *
     * References the same underlying Delegate that \p other references. If \p other is empty, `*this` will also be
     * empty.
     * @param other A DelegateRef to copy.
     */
    DelegateRef(const DelegateRef& other);
    /**
     * Move constructor.
     *
     * Moves the reference from \p other to `*this`. If \p other is empty, `*this` will also be empty. \p other is left
     * in a valid but empty state.
     * @param other A DelegateRef to move.
     */
    DelegateRef(DelegateRef&& other) = default;
    /**
     * Destructor
     */
    ~DelegateRef();
    /**
     * Copy-assign.
     *
     * References the same underlying Delegate that \p other references and releases any existing reference. The order
     * of these operations is unspecified, so assignment from `*this` is undefined behavior.
     * @param other A DelegateRef to copy.
     * @returns `*this`.
     */
    DelegateRef& operator=(const DelegateRef& other);
    /**
     * Move-assign.
     *
     * Moves the reference from \p other to `*this` and releases any existing reference. The order of these operations
     * is unspecified, so assignment from `*this` is undefined behavior. If \p other is empty, `*this` will also be
     * empty. \p other is left in a valid but empty state.
     * @param other A DelegateRef to move.
     * @returns `*this`.
     */
    DelegateRef& operator=(DelegateRef&& other) = default;
    /**
     * Checks whether the DelegateRef holds a valid reference.
     * @returns `true` if `*this` holds a valid reference; `false` otherwise.
     */
    explicit operator bool() const noexcept;
    /**
     * Clears the DelegateRef to an empty reference.
     *
     * Postcondition: `bool(*this)` will be `false`.
     */
    void reset();
    /**
     * References a different Delegate and releases any existing reference.
     * @param delegate The Delegate to reference.
     */
    void reset(DelegateType& delegate);
    /**
     * Swaps the reference with another DelegateRef.
     * @param other A DelegateRef to swap with.
     */
    void swap(DelegateRef& other);
    /**
     * Retrieves the underlying DelegateType.
     * @returns a pointer to the referenced Delegate, or `nullptr` if `bool(*this)` would return false.
     */
    DelegateType* get() const noexcept;
    /**
     * Dereferences *this.
     * @returns a reference to the referenced Delegate. If `bool(*this)` would return false, behavior is undefined.
     */
    DelegateType& operator*() const noexcept;
    /**
     * Dereferences *this.
     * @returns a pointer to the referenced Delegate. If `bool(*this)` would return false, behavior is undefined.
     */
    DelegateType* operator->() const noexcept;

private:
    // Wraps the shared Impl directly: an "empty" DelegateRef is simply a Delegate whose
    // m_impl is nullptr, constructed via Delegate's private nullptr_t constructor
    // (DelegateRef is declared a friend of Delegate). The DelegateRef destructor resets
    // m_impl rather than letting ~Delegate() run UnbindAll().
    DelegateType m_delegate;
};
//! A helper class for determining the type of a \ref carb::delegate::DelegateRef based on a
//! \ref carb::delegate::Delegate. @tparam Del a \ref carb::delegate::Delegate
template <class Del>
struct RefFromDelegate
{
    //! The type of \ref DelegateRef that should be used for a \ref Delegate of type `Del`
    //! (e.g. `DelegateRef<void(int)>` for `Delegate<void(int)>`).
    using type = DelegateRef<typename Del::FunctionType>;
};
//! Definition helper for `RefFromDelegate<Del>::type`
template <class Del>
using RefFromDelegate_t = typename RefFromDelegate<Del>::type;
} // namespace delegate
} // namespace carb
#include "DelegateImpl.inl"
CARB_INCLUDE_PURIFY_TEST({
    // Compile-coverage ("purify") test: instantiates every public member of Delegate and
    // DelegateRef at least once so template errors surface in this translation unit.
    using namespace carb::delegate;
    Delegate<void()> d, d2{ Delegate<void()>{} }, d3 = Delegate<void()>();
    auto b = d.Bind(nullptr, [] {});
    d.Bind(nullptr, [](bool) {}, true);
    d.BindWithKey(0, [] {});
    d.BindWithKey(1, [](bool) {}, false);
    d.Unbind(1);
    d.Unbind(b);
    d.HasKey(0);
    d.UnbindCurrent();
    d2.UnbindAll();
    d.Count();
    d.HasPending();
    d.IsEmpty();
    d.GetKeysByType<int>();
    d.Call();
    d();
    d.swap(d3);
    // DelegateRef: construction, copy/move, reset, and dereference coverage.
    DelegateRef<void()> dr(d), dr2{};
    DelegateRef<void()> dr3(dr);
    DelegateRef<void()> dr4(std::move(dr2));
    DelegateRef<void()> dr5 = std::move(dr4);
    DelegateRef<void()> dr6 = dr5;
    CARB_UNUSED(bool(dr6));
    dr6.reset();
    dr5.reset(d);
    dr5.get();
    (*dr).Call();
    dr->Call();
});
|
omniverse-code/kit/include/carb/delegate/DelegateImpl.inl | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
CARB_IGNOREWARNING_CLANG_WITH_PUSH("-Wunused-local-typedef") // unused type alias 'Tuple'
namespace carb
{
namespace delegate
{
#ifndef DOXYGEN_BUILD
namespace detail
{
template <class F, class Tuple, size_t... I, class... Args>
constexpr decltype(auto) applyExtraImpl(F&& f, Tuple&& t, std::index_sequence<I...>, Args&&... args)
{
    // Expands the tuple elements as the leading arguments, followed by args...
    return carb::cpp::invoke(std::forward<F>(f), std::get<I>(std::forward<Tuple>(t))..., std::forward<Args>(args)...);
}
// like std::apply() but allows additional args
template <class F, class Tuple, class... Args>
constexpr decltype(auto) applyExtra(F&& f, Tuple&& t, Args&&... args)
{
    // Used by the bindings: bound args come from the tuple, call args are appended after.
    return applyExtraImpl(std::forward<F>(f), std::forward<Tuple>(t),
                          std::make_index_sequence<std::tuple_size<std::remove_reference_t<Tuple>>::value>{},
                          std::forward<Args>(args)...);
}
// Returns a unique identifier for a type that doesn't require RTTI like type_info
template <class T>
size_t typeId()
{
    // Each instantiation of this template owns its own static, so the static's address
    // serves as a process-unique identifier for T.
    static const bool token{ false };
    return reinterpret_cast<size_t>(&token);
}
} // namespace detail
#endif
template <class... Args>
Delegate<void(Args...)>::Impl::~Impl()
{
    // Cannot be destroyed while in an active call!
    // Call() holds its own shared_ptr to Impl for the duration of the call and removes its
    // ActiveCall frame before releasing that reference, so a non-empty list here is a bug.
    CARB_CHECK(m_activeCalls.empty());
}
// Move construction: take other's implementation and leave `other` with a fresh, empty
// Impl so it remains fully usable afterwards.
template <class... Args>
Delegate<void(Args...)>::Delegate(Delegate&& other) : m_impl(std::exchange(other.m_impl, std::make_shared<Impl>()))
{
}
// Private: constructs a Delegate with *no* Impl at all; used only by DelegateRef to
// represent an empty reference.
template <class... Args>
constexpr Delegate<void(Args...)>::Delegate(std::nullptr_t) : m_impl{}
{
}
// Private: adopts an existing Impl; used by DelegateRef to share a delegate's state.
template <class... Args>
Delegate<void(Args...)>::Delegate(std::shared_ptr<Impl> pImpl) : m_impl(std::move(pImpl))
{
}
template <class... Args>
auto Delegate<void(Args...)>::operator=(Delegate&& other) -> Delegate&
{
    // Implemented as a swap; our previous state is unbound when `other` is destroyed.
    swap(other);
    return *this;
}
template <class... Args>
void Delegate<void(Args...)>::swap(Delegate& other)
{
    // Exchange the underlying shared implementation pointers.
    m_impl.swap(other.m_impl);
}
template <class... Args>
Delegate<void(Args...)>::~Delegate()
{
    // The only time that m_impl is allowed to be `nullptr` is an empty DelegateRef.
    if (m_impl)
    {
        // UnbindAll() will wait for calls in progress by unbound callbacks to complete before returning, but will allow
        // calls from the current thread to remain.
        // Note: any Call() still in progress keeps Impl alive through its own shared_ptr.
        UnbindAll();
    }
}
template <class... Args>
template <class Callable, class... BindArgs>
auto Delegate<void(Args...)>::Bind(Handle* hOut, Callable&& func, BindArgs&&... args) -> Handle
{
    // Binding type local to this instantiation: stores the decayed callable plus any
    // bound arguments, which are prepended to the call arguments when invoked.
    using Base = KeyedBinding<Handle>;
    struct Binding : Base
    {
        using Tuple = std::tuple<std::decay_t<BindArgs>...>;
        std::decay_t<Callable> func;
        Tuple bindArgs;
        Binding(Handle h, Callable&& func_, BindArgs&&... args)
            : Base(h), func(std::forward<Callable>(func_)), bindArgs(std::forward<BindArgs>(args)...)
        {
        }
        void Call(Args... args) override
        {
            detail::applyExtra(func, bindArgs, args...);
        }
    };
    // Allocate the unique handle up front and report it via hOut (if provided) so the
    // caller can observe it even if the callback fires immediately in another thread.
    Handle h(nextHandle());
    if (hOut)
    {
        *hOut = h;
    }
    Binding* b = new Binding(h, std::forward<Callable>(func), std::forward<BindArgs>(args)...);
    std::lock_guard<carb::thread::mutex> g(m_impl->m_mutex);
    ActiveCall* call = lastCurrentThreadCall();
    if (!call)
    {
        // Not inside a Call() on this thread: the binding becomes active immediately.
        m_impl->m_entries.push_back(*b);
    }
    else
    {
        // Bound from within a callback on this thread: queue as "pending"; Call() splices
        // these into the active list after the current iteration finishes.
        call->newEntries.push_back(*b);
    }
    return h;
}
template <class... Args>
template <class KeyType, class Callable, class... BindArgs>
void Delegate<void(Args...)>::BindWithKey(KeyType&& key, Callable&& func, BindArgs&&... args)
{
    // Handle and nullptr_t are reserved for Bind(), which generates the key itself.
    static_assert(!std::is_same<Handle, std::decay_t<KeyType>>::value, "Handle not allowed as a key type (use Bind())");
    static_assert(
        !std::is_null_pointer<std::decay_t<KeyType>>::value, "nullptr_t not allowed as a key type (use Bind())");
    // Binding type local to this instantiation: stores the user key, the decayed callable
    // and any bound arguments, which are prepended to the call arguments when invoked.
    using Base = KeyedBinding<std::decay_t<KeyType>>;
    struct Binding : Base
    {
        using Tuple = std::tuple<std::decay_t<BindArgs>...>;
        std::decay_t<Callable> func;
        Tuple bindArgs;
        Binding(KeyType&& key, Callable&& func_, BindArgs&&... args)
            : Base(std::forward<KeyType>(key)),
              func(std::forward<Callable>(func_)),
              bindArgs(std::forward<BindArgs>(args)...)
        {
        }
        void Call(Args... args) override
        {
            detail::applyExtra(func, bindArgs, args...);
        }
    };
    Binding* b = new Binding(std::forward<KeyType>(key), std::forward<Callable>(func), std::forward<BindArgs>(args)...);
    std::lock_guard<carb::thread::mutex> g(m_impl->m_mutex);
    ActiveCall* call = lastCurrentThreadCall();
    if (!call)
    {
        // Not inside a Call() on this thread: the binding becomes active immediately.
        m_impl->m_entries.push_back(*b);
    }
    else
    {
        // Bound from within a callback on this thread: queue as "pending"; Call() splices
        // these into the active list after the current iteration finishes.
        call->newEntries.push_back(*b);
    }
}
template <class... Args>
template <class KeyType>
bool Delegate<void(Args...)>::Unbind(KeyType&& key)
{
    using BindingType = KeyedBinding<std::decay_t<KeyType>>;
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // First search the active bindings. The cheap per-type id (detail::typeId) gates the
    // static_cast so only bindings of the matching key type are compared.
    for (auto iter = m_impl->m_entries.begin(); iter != m_impl->m_entries.end(); ++iter)
    {
        if (iter->getKeyType() == BindingType::keyType())
        {
            BindingType& binding = static_cast<BindingType&>(*iter);
            if (binding.key == key)
            {
                // UnbindInternal() unlocks `g` and may wait for callbacks in other threads.
                UnbindInternal(g, iter);
                return true;
            }
        }
    }
    // Not an active binding; if present it must be pending in the newEntries list of a
    // Call() in progress on some thread. Find it and remove it.
    for (auto& active : m_impl->m_activeCalls)
    {
        for (auto& entry : active.newEntries)
        {
            if (entry.getKeyType() == BindingType::keyType())
            {
                BindingType& binding = static_cast<BindingType&>(entry);
                if (binding.key == key)
                {
                    // A pending binding has never been called, so the initial reference is
                    // the only one; release() destroys it immediately.
                    active.newEntries.remove(entry);
                    entry.release();
                    return true;
                }
            }
        }
    }
    return false;
}
template <class... Args>
template <class KeyType>
bool Delegate<void(Args...)>::HasKey(KeyType&& key) const noexcept
{
    using BindingType = KeyedBinding<std::decay_t<KeyType>>;
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // Check active bindings first; the per-type id gates the static_cast.
    for (auto iter = m_impl->m_entries.begin(); iter != m_impl->m_entries.end(); ++iter)
    {
        if (iter->getKeyType() == BindingType::keyType())
        {
            BindingType& binding = static_cast<BindingType&>(*iter);
            if (binding.key == key)
            {
                return true;
            }
        }
    }
    // May belong to a specific list (i.e. pending in the newEntries of a Call() in
    // progress on some thread).
    for (auto& active : m_impl->m_activeCalls)
    {
        for (auto& entry : active.newEntries)
        {
            if (entry.getKeyType() == BindingType::keyType())
            {
                BindingType& binding = static_cast<BindingType&>(entry);
                if (binding.key == key)
                {
                    return true;
                }
            }
        }
    }
    return false;
}
template <class... Args>
bool Delegate<void(Args...)>::UnbindCurrent()
{
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // Only meaningful from inside a callback: find this thread's innermost Call() frame.
    ActiveCall* ac = lastCurrentThreadCall();
    // `cur == end()` means the currently-executing binding was already unbound by another
    // thread (UnbindInternal() clears `cur` to signal that it is waiting on the binding).
    if (ac && ac->cur != m_impl->m_entries.end())
    {
        UnbindInternal(g, ac->cur);
        return true;
    }
    return false;
}
template <class... Args>
void Delegate<void(Args...)>::UnbindAll()
{
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // Unbind active entries back-to-front; each UnbindInternal() may block waiting for
    // callbacks running in other threads to finish with the entry.
    while (m_impl->m_entries.rbegin() != m_impl->m_entries.rend())
    {
        UnbindInternal(g, --m_impl->m_entries.rbegin().base());
        // UnbindInternal() unlocks, so re-lock here
        g.lock();
    }
    // Also discard any pending entries queued by in-progress Call()s; these have never
    // been invoked, so their only reference is the initial one and release() deletes them.
    for (auto& active : m_impl->m_activeCalls)
    {
        while (!active.newEntries.empty())
        {
            active.newEntries.remove(active.newEntries.front()).release();
        }
    }
}
template <class... Args>
size_t Delegate<void(Args...)>::Count() const noexcept
{
    // Guard the entries list with the same mutex every other accessor uses (HasPending(),
    // IsEmpty(), HasKey(), ...); reading the container's size unsynchronized while another
    // thread binds or unbinds would be a data race, contradicting the documented
    // thread-safety of this function. Pending bindings (added via Bind() during an active
    // Call()) are intentionally not counted, as documented.
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    return m_impl->m_entries.size();
}
template <class... Args>
bool Delegate<void(Args...)>::HasPending() const noexcept
{
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // Pending bindings live on the ActiveCall frames of threads currently in Call().
    for (auto& active : m_impl->m_activeCalls)
    {
        if (!active.newEntries.empty())
            return true;
    }
    return false;
}
template <class... Args>
bool Delegate<void(Args...)>::IsEmpty() const noexcept
{
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // Empty only if there are neither active entries nor pending entries on any frame.
    if (!m_impl->m_entries.empty())
        return false;
    for (auto& active : m_impl->m_activeCalls)
    {
        if (!active.newEntries.empty())
            return false;
    }
    return true;
}
template <class... Args>
template <class KeyType>
std::vector<std::decay_t<KeyType>> Delegate<void(Args...)>::GetKeysByType() const
{
    std::vector<std::decay_t<KeyType>> vec;
    using BindingType = KeyedBinding<std::decay_t<KeyType>>;
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // Collect keys from active bindings of the matching key type...
    for (auto& entry : m_impl->m_entries)
    {
        if (entry.getKeyType() == BindingType::keyType())
        {
            const BindingType& binding = static_cast<const BindingType&>(entry);
            vec.emplace_back(binding.key);
        }
    }
    // ...and from pending bindings queued by Call()s in progress on any thread.
    for (auto& active : m_impl->m_activeCalls)
    {
        for (auto& entry : active.newEntries)
        {
            if (entry.getKeyType() == BindingType::keyType())
            {
                const BindingType& binding = static_cast<const BindingType&>(entry);
                vec.emplace_back(binding.key);
            }
        }
    }
    return vec;
}
template <class... Args>
void Delegate<void(Args...)>::Call(Args... args)
{
    std::shared_ptr<Impl> impl;
    std::unique_lock<carb::thread::mutex> g(m_impl->m_mutex);
    // Early out if there is nothing to do
    if (m_impl->m_entries.empty())
    {
        return;
    }
    impl = m_impl; // Hold a reference while calling
    // Link a stack-allocated frame into the active-call list; it records our iteration
    // position so Bind()/Unbind() (from callbacks or other threads) can coordinate with us.
    ActiveCall activeCall;
    impl->m_activeCalls.push_back(activeCall);
    activeCall.next = activeCall.cur = impl->m_entries.begin();
    ++activeCall.next;
    for (;;)
    {
        BaseBinding& cc = *activeCall.cur;
        // Pin the binding so it survives while the mutex is dropped for the callback.
        cc.addRef();
        g.unlock();
        // NOTE(review): callbacks are assumed not to throw; an exception here would leave
        // `activeCall` linked in m_activeCalls past this frame's lifetime -- confirm.
        cc.Call(args...);
        g.lock();
        // If `cur` was reset to end() by UnbindInternal(), another thread is waiting for
        // this binding's last reference; passing true makes release() notify that waiter.
        cc.release(activeCall.cur == impl->m_entries.end());
        activeCall.cur = activeCall.next;
        if (activeCall.cur == impl->m_entries.end())
        {
            break;
        }
        ++activeCall.next;
    }
    impl->m_activeCalls.remove(activeCall);
    if (!activeCall.newEntries.empty())
    {
        // Add the new entries to the end of the list
        BaseBinding& cc = activeCall.newEntries.front();
        impl->m_entries.splice(impl->m_entries.end(), activeCall.newEntries);
        auto newiter = impl->m_entries.iter_from_value(cc);
        // Add the new entries to any active calls in progress
        for (auto& call : impl->m_activeCalls)
        {
            if (call.next == impl->m_entries.end())
            {
                call.next = newiter;
            }
        }
    }
}
template <class... Args>
void Delegate<void(Args...)>::operator()(Args... args)
{
    // Syntactic sugar for Call(); forward preserves reference-typed Args parameters.
    Call(std::forward<Args>(args)...);
}
template <class... Args>
struct Delegate<void(Args...)>::BaseBinding
{
    constexpr BaseBinding() = default;
    virtual ~BaseBinding() = default;
    //! Cheap RTTI-free identifier of the binding's key type (see detail::typeId()).
    virtual size_t getKeyType() const = 0;
    //! Invokes the stored callback with the delegate's call arguments.
    virtual void Call(Args...) = 0;
    //! Adds a reference; see release() for the release/acquire pairing on destruction.
    void addRef() noexcept
    {
        auto old = refCount.fetch_add(1, std::memory_order_relaxed);
        CARB_UNUSED(old);
        CARB_ASSERT(old != 0); // no resurrection
        CARB_ASSERT((old + 1) != 0); // no rollover
    }
    //! Adds `count` references at once (used by UnbindInternal() to restore references
    //! previously released for the current thread).
    void addRefs(size_t count) noexcept
    {
        auto old = refCount.fetch_add(count, std::memory_order_relaxed);
        CARB_UNUSED(old);
        CARB_ASSERT(old != 0); // no resurrection
        CARB_ASSERT((old + count) >= old); // no rollover or negative
    }
    //! Releases a reference; deletes `this` when the last one is dropped. If
    //! `notifyLastRef` is true and the count drops to 1, wakes a waitForLastRef() waiter.
    void release(bool notifyLastRef = false)
    {
        auto const current = (refCount.fetch_sub(1, std::memory_order_release) - 1);
        CARB_ASSERT((current + 1) != 0); // no rollover/double-release
        if (current == 1 && notifyLastRef)
        {
            refCount.notify_one();
        }
        if (current == 0)
        {
            // Acquire fence pairs with the release decrement above so all prior accesses
            // to the binding happen-before its destruction.
            std::atomic_thread_fence(std::memory_order_acquire);
            delete this;
        }
    }
    //! Blocks until the caller holds the only remaining reference.
    void waitForLastRef()
    {
        auto ref = refCount.load(std::memory_order_relaxed);
        while (ref != 1)
        {
            refCount.wait(ref, std::memory_order_relaxed);
            ref = refCount.load(std::memory_order_relaxed);
        }
    }
    carb::cpp::atomic_size_t refCount{ 1 }; // starts at 1: the entries list's implicit ref
    carb::container::IntrusiveListLink<BaseBinding> link; // hook for the entries lists
};
template <class... Args>
template <class KeyType>
struct Delegate<void(Args...)>::KeyedBinding : BaseBinding
{
    // Ensure that KeyType is a decayed type
    static_assert(std::is_same<std::decay_t<KeyType>, KeyType>::value, "Must be decayed!");
    //! Process-unique id for KeyType without RTTI (address of a per-type static).
    static size_t keyType()
    {
        return detail::typeId<KeyType>();
    }
    virtual size_t getKeyType() const override
    {
        return keyType();
    }
    KeyType const key; //!< The user key (or Handle) identifying this binding.
    template <class Key>
    constexpr KeyedBinding(Key&& key_) : BaseBinding(), key(std::forward<Key>(key_))
    {
    }
};
//! Stack frame linked into Impl::m_activeCalls for the duration of one Call().
template <class... Args>
struct Delegate<void(Args...)>::ActiveCall
{
    typename Container::iterator cur; //!< Binding currently executing; end() if it was unbound.
    typename Container::iterator next; //!< The binding to execute after `cur`.
    uint32_t threadId{ carb::this_thread::getId() }; //!< Thread performing this Call().
    Container newEntries; //!< Bindings added during this Call(); spliced in afterwards.
    carb::container::IntrusiveListLink<ActiveCall> link;
};
template <class... Args>
auto Delegate<void(Args...)>::lastCurrentThreadCall() -> ActiveCall*
{
    // Scan the active-call stack from the most recent frame backwards and return the
    // innermost frame begun by the calling thread, or nullptr if there is none.
    if (m_impl->m_activeCalls.empty())
        return nullptr;
    const uint32_t self = this_thread::getId();
    auto rit = m_impl->m_activeCalls.rbegin();
    const auto stop = m_impl->m_activeCalls.rend();
    while (rit != stop)
    {
        if (rit->threadId == self)
            return std::addressof(*rit);
        ++rit;
    }
    return nullptr;
}
template <class... Args>
auto Delegate<void(Args...)>::lastCurrentThreadCall() const -> const ActiveCall*
{
    // Const counterpart of the non-const overload: same reverse scan for the innermost
    // Call() frame started by the calling thread.
    if (m_impl->m_activeCalls.empty())
        return nullptr;
    const uint32_t self = this_thread::getId();
    const auto stop = m_impl->m_activeCalls.rend();
    for (auto it = m_impl->m_activeCalls.rbegin(); it != stop; ++it)
    {
        if (it->threadId == self)
            return std::addressof(*it);
    }
    return nullptr;
}
// Removes *iter from the entries list and destroys it once safe.
// Precondition: `g` is locked. This function unlocks `g` before returning and may block
// waiting for callbacks in other threads that are currently executing *iter.
template <class... Args>
void Delegate<void(Args...)>::UnbindInternal(std::unique_lock<carb::thread::mutex>& g, typename Container::iterator iter)
{
    // Found it. See if it's involved in any active calls and wait.
    bool hasActiveInOtherThreads = false;
    size_t thisThreadActiveCount = 0;
    if (!m_impl->m_activeCalls.empty())
    {
        const uint32_t thisThread = this_thread::getId();
        for (auto& active : m_impl->m_activeCalls)
        {
            // Skip if next
            if (active.next == iter)
            {
                ++active.next;
            }
            // Handle if current
            if (active.cur == iter)
            {
                // Clear the current entry. This signals to the ActiveCall that we're waiting on it.
                active.cur = m_impl->m_entries.end();
                if (active.threadId != thisThread)
                {
                    hasActiveInOtherThreads = true;
                }
                else
                {
                    // Since we want to wait for the last ref, release any references that the current thread has on it.
                    ++thisThreadActiveCount;
                    iter->release();
                }
            }
        }
    }
    BaseBinding& cc = *iter;
    // Unlink the entry, then unlock so callbacks in other threads can finish.
    m_impl->m_entries.remove(iter);
    g.unlock();
    if (hasActiveInOtherThreads)
    {
        // Block until ours is the only remaining reference; Call() passes notifyLastRef
        // when it observes cur == end(), which wakes this wait.
        cc.waitForLastRef();
    }
    if (thisThreadActiveCount)
    {
        // Add back any references that the current thread was holding
        cc.addRefs(thisThreadActiveCount);
    }
    // Release the implicit ref. If the current thread was holding any references, the earliest Call() will be the one
    // to actually release it.
    cc.release();
}
template <class... Args>
size_t Delegate<void(Args...)>::nextHandle()
{
    // Process-wide monotonic counter for Handle values. Zero is reserved as "invalid",
    // so if the counter ever wraps around, skip that value and try again.
    static std::atomic_size_t counter{ 1 };
    for (;;)
    {
        const size_t value = counter.fetch_add(1, std::memory_order_relaxed);
        if (CARB_UNLIKELY(value == 0))
            continue;
        return value;
    }
}
template <class... Args>
constexpr DelegateRef<void(Args...)>::DelegateRef() noexcept : m_delegate{ nullptr }
{
    // Uses Delegate's private nullptr_t constructor: m_delegate holds no Impl at all.
}
template <class... Args>
DelegateRef<void(Args...)>::DelegateRef(DelegateType& delegate) : m_delegate{ delegate.m_impl }
{
    // Shares `delegate`'s Impl via Delegate's private shared_ptr constructor.
}
template <class... Args>
DelegateRef<void(Args...)>::DelegateRef(const DelegateRef& other) : m_delegate{ other.m_delegate.m_impl }
{
}
template <class... Args>
DelegateRef<void(Args...)>::~DelegateRef()
{
    // The Delegate destructor calls UnbindAll(), which we definitely don't want. So just reset our reference.
    m_delegate.m_impl.reset();
}
template <class... Args>
auto DelegateRef<void(Args...)>::operator=(const DelegateRef& other) -> DelegateRef&
{
    // Plain shared_ptr copy-assignment of the underlying Impl reference.
    m_delegate.m_impl = other.m_delegate.m_impl;
    return *this;
}
template <class... Args>
DelegateRef<void(Args...)>::operator bool() const noexcept
{
    // True when this reference currently shares a delegate implementation.
    return static_cast<bool>(m_delegate.m_impl);
}
template <class... Args>
void DelegateRef<void(Args...)>::reset()
{
    // Drop our shared reference; the underlying Impl is destroyed once no Delegate,
    // DelegateRef, or in-progress Call() still references it.
    m_delegate.m_impl.reset();
}
template <class... Args>
void DelegateRef<void(Args...)>::reset(DelegateType& delegate)
{
    // Re-point at `delegate`'s Impl, releasing any previously held reference.
    m_delegate.m_impl = delegate.m_impl;
}
template <class... Args>
void DelegateRef<void(Args...)>::swap(DelegateRef& other)
{
    m_delegate.swap(other.m_delegate);
}
template <class... Args>
auto DelegateRef<void(Args...)>::get() const noexcept -> DelegateType*
{
    // Empty reference yields nullptr; otherwise expose the logically-mutable delegate.
    if (!m_delegate.m_impl)
        return nullptr;
    return const_cast<DelegateType*>(std::addressof(m_delegate));
}
template <class... Args>
auto DelegateRef<void(Args...)>::operator*() const noexcept -> DelegateType&
{
    CARB_ASSERT(*this); // dereferencing an empty DelegateRef is undefined behavior
    // const_cast is safe here: m_delegate is only logically const; callers receive the
    // same mutable interface that get() exposes.
    return const_cast<DelegateType&>(m_delegate);
}
template <class... Args>
auto DelegateRef<void(Args...)>::operator->() const noexcept -> DelegateType*
{
    CARB_ASSERT(*this); // dereferencing an empty DelegateRef is undefined behavior
    return const_cast<DelegateType*>(&m_delegate);
}
} // namespace delegate
} // namespace carb
CARB_IGNOREWARNING_CLANG_POP
|
omniverse-code/kit/include/carb/launcher/ILauncher.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
/** @file
* @brief Simple external process launcher helper interface.
*/
#pragma once
#include "../Interface.h"
#if CARB_PLATFORM_LINUX
# include <sys/prctl.h>
# include <sys/signal.h>
#endif
namespace carb
{
/** Namespace for the Carbonite process launch helper interface. */
namespace launcher
{
// ****************************** structs, enums, and constants ***********************************
/** Opaque object used to represent a process that has been launched using the ILauncher
 * interface. A value of `nullptr` indicates an invalid process object.
 */
struct Process;
/** Base type for a process exit code. Process exit codes differ between Windows and Linux - on
 * Windows a process exit code is a DWORD, while on Linux it is an int. This type should be able
 * to successfully hold any value from either platform.
 */
using ExitCode = int64_t;
/** Base type for the identifier of a process. This does not conform directly to the local
 * definitions of a process ID for either Windows or Linux, but it should at least be large
 * enough to properly contain either.
 */
using ProcessId = uint64_t;
/** Format code to use for the @ref carb::launcher::ProcessId data type in printf() style format
 * strings.
 */
#define OMNI_ILauncher_PRIpid PRIu64
/** Special value to indicate a bad process identifier. This can be returned from
 * ILauncher::getProcessId() if the given child process is no longer running.
 */
constexpr ProcessId kBadId = ~0ull; // all bits set (0xFFFFFFFFFFFFFFFF)
/** Prototype for a stream read callback function.
*
* @param[in] data The buffer of data that was read from the child process's stream. This
* will never be `nullptr`. Only the first @p bytes bytes of data in this
* buffer will contain valid data. Any data beyond that should be considered
* undefined and not accessed.
* @param[in] bytes The number of bytes of valid data in the @p data buffer. This will never
* be 0 as long as the connection to the child process is active. When the
* child process exits and the read thread has read all of the data from the
* child process, one final callback will be performed passing 0 for the byte
* count to indicate the end of the stream. This count will not exceed the
* buffer size specified in the original call to ILauncher::openProcess().
* @param[in] context The context value that was originally passed to ILauncher::openProcess()
* when the child process was created.
* @returns No return value.
*
* @remarks This callback will be performed any time data is successfully read from one of the
* child process's output streams (ie: `stdout` or `stderr`). The call will be performed
* on a worker thread that was created specifically for the child process. This
* callback will be performed as soon after reading the data as possible. The reader
* thread will remain in an efficient wait state while there is no data read to be
* read. It is the callback's responsibility to ensure any shared resources that are
* accessed in the callback are appropriately protected from race conditions and
* general thread safety issues.
*
* @remarks When reading from one of the child process' output streams, every effort will be
* taken to ensure the contents of at least one 'message' is delivered to the callback
* at a time. A message can be thought of as the unit of data that was last written
* to the stream on the child process's side - for example, the output of a single call
 * to fwrite() or fprintf(). However, there are some caveats to this behavior that
* the callback and its owner need to be able to handle:
* * It is possible that depending on the size and arrival times of messages, multiple
* messages may be concatenated into a single callback call. The callback needs to
* be able to handle this by being able to identify expected message ends and
* properly parse them out if needed.
* * If the current message or set of messages fills up the read buffer, the buffer
* as it is will be delivered to the callback with the last message truncated. The
* remainder of the message will be sent in the next callback. The callback needs
* to be able to handle this by either using a buffer size appropriate for the
* expected output of the child process, or by having the callback simply concatenate
* incoming data onto a data queue that is then processed elsewhere.
*
* @remarks This callback should attempt to complete its task as quickly as possible to avoid
* blocking the read thread. If the callback blocks or takes a long time to process
* it may result in blocking the child process's attempts to write to the stream. The
* child process' thread will be effectively stopped until buffer space is freed up
* on the parent's read side. It is best practice to have the callback simply queue
* up new data for later consumption on another thread in the parent process or to do
* a few simple string or data checks if searching for a specific incoming data message.
*
* @remarks When the stream for this callback ends due to either the child or parent process
* closing it, one final callback will be performed. The last callback will always
* have a @p bytes value of 0 in this case. All other callbacks during the stream
* will have a non-zero @p bytes value. Even in this final callback case however,
* a non-`nullptr` @p data buffer will still be provided. Once the zero sized buffer
* has been delivered, the parent process can safely assume that the child process
* is done transmitting any data to the parent.
*/
// Signature for the stream-read callback; full contract documented in the comment above.
using OnProcessReadFn = void (*)(const void* data, size_t bytes, void* context);
/** A default buffer size to use for reading from a child process's `stdout` or `stderr` streams. */
constexpr size_t kDefaultProcessBufferSize = 1ull << 17; // 128 KiB
/** Launcher flags
 * @{
 */
/** Base type for flags to the @ref carb::launcher::ILauncher::launchProcess function. Valid flags for this
 * type are the carb::launcher::fLaunchFlag* flags.
 */
using LauncherFlags = uint32_t;
/** Flag to indicate that the stdin stream for the child process should be opened and accessible
 * on the side of the parent process. If this flag is not present, any attempts to call
 * ILauncher::writeProcessStdin() will fail immediately. If this flag is present, the parent
 * process may write information to the child process through its stdin stream. The child
 * process will be able to poll its stdin stream for input and read it. If this is used, the
 * child process will only be able to read input from the parent process. If not used, the
 * child process should assume that stdin cannot be read (though the actual behavior may
 * differ by platform following native stdin inheritance rules).
 */
constexpr LauncherFlags fLaunchFlagOpenStdin = 0x00000001;
/** Flag to indicate that the new child process should be killed when the calling parent process
 * exits. If this flag is not present, the child process will only exit when it naturally exits
 * on its own or is explicitly killed by another process. If this flag is present, if the parent
 * process exits in any way (ie: ends naturally, crashes, is killed, etc), the child process
 * will also be killed. Note that the child process will be killed without warning or any
 * chance to clean up. Any state in the child process that was not already saved to persistent
 * storage will be lost. Also, if the child process is in the middle of modifying persistent
 * storage when it is killed, that resource may be left in an undefined state.
 *
 * @note This flag is not supported on Mac. It will be ignored if used on Mac and the child
 * process(es) must be manually terminated by the parent process if necessary.
 */
constexpr LauncherFlags fLaunchFlagKillOnParentExit = 0x00000002;
/** When the @ref fLaunchFlagKillOnParentExit flag is also used, this indicates that the child
 * process should be forcibly terminated instead of just being asked to exit when the parent
 * process dies. This flag is only used on Linux where there is the possibility of a child
 * process catching and handling a SIGTERM signal. If the child process generally installs
 * a SIGTERM handler and doesn't exit as a result, this flag should be used to allow a SIGKILL
 * to be sent instead (which can neither be caught nor ignored). Generally, sending a SIGTERM
 * is considered the 'correct' or 'appropriate' way to kill a process on Linux. This flag is
 * ignored on Windows.
 */
constexpr LauncherFlags fLaunchFlagForce = 0x00000004;
/** Flag to indicate that reading from the `stdout` or `stderr` streams of the child process should
 * be handled as a byte stream. Data will be delivered to the stream callback as soon as it is
 * available. The delivered bytes may only be a small portion of a complete message sent from
 * the child process. At least one byte will be sent when in this mode. This is the default
 * mode for all child processes. This flag may not be combined with @ref fLaunchFlagMessageMode.
 * When using this read mode, it is the callback's responsibility to process the incoming data
 * and wait for any delimiters to arrive as is necessary for the task. The message mode may not
 * be changed after the child process has been launched.
 *
 * @note [Windows] This mode is the only mode that is currently supported on Windows. This may
 * be fixed in a future version of the interface.
 *
 * @note This flag has the value zero and is therefore the implied default; its presence
 * cannot be tested for with a bitwise AND against a flags value.
 */
constexpr LauncherFlags fLaunchFlagByteMode = 0x00000000;
/** Flag to indicate that reading from the `stdout` or `stderr` streams of the child process should
 * be handled as a message stream. Data will not be delivered to the stream callback until a
 * complete message has been received. A message is considered to be the contents of a single
 * write call on the child's process side (up to a reasonable limit). The data passed to the
 * callbacks may be split into multiple messages if a very large message is sent in a single
 * write call. On Linux at least, this message limit is 4KB. The message mode may not be
 * changed after the child process has been launched.
 *
 * @note [Windows] This mode is not currently supported on Windows. This may be fixed in a
 * future version of the interface.
 */
constexpr LauncherFlags fLaunchFlagMessageMode = 0x00000008;
/** Flag to indicate that the calling process's environment should not be inherited by the child
 * process in addition to the new environment variables specified in the launch descriptor. When
 * no environment block is given in the descriptor, the default behavior is for the child
 * process to inherit the parent's (ie: this calling process) environment block. Similarly for
 * when a non-empty environment block is specified in the launch descriptor - the environment
 * block of the calling process will be prepended to the environment variables given in the
 * launch descriptor. However, when this flag is used, that will indicate that the new child
 * process should only get the environment block that is explicitly given in the launch
 * descriptor.
 */
constexpr LauncherFlags fLaunchFlagNoInheritEnv = 0x00000010;
/** Flag to indicate that the child process should still continue to be launched even if the
 * environment block for it could not be created for any reason. This flag is ignored if
 * @ref LaunchDesc::env is `nullptr` or the environment block object is successfully
 * created. The most common cause for failing to create the environment block is an out of
 * memory situation or invalid UTF-8 codepoints being used in the given environment block.
 * This flag is useful when the additional environment variables for the child process are
 * optional to its functionality.
 */
constexpr LauncherFlags fLaunchFlagAllowBadEnv = 0x00000020;
/** Flag to indicate that the requested command should be launched as a script. An attempt
 * will be made to determine an appropriate command interpreter for it based on its file
 * extension if no interpreter command is explicitly provided in @ref LaunchDesc::interpreter.
 * When this flag is not present, the named command will be assumed to be a binary executable.
 *
 * @note [Linux] This flag is not necessary when launching a script that contains a shebang
 * on its first line. The shebang indicates the command interpreter to be used when
 * executing the script. In this case, the script will also need to have its executable
 * permission bit set for the current user. If the shebang is missing however, this
 * flag will be needed.
 */
constexpr LauncherFlags fLaunchFlagScript = 0x00000040;
/** Flags to indicate that the child process' standard output streams should be closed upon
 * launch. This is useful if the output from the child is not interesting or would otherwise
 * garble up the parent process' standard streams log. An alternative to this would be to
 * create a dummy read function for both `stdout` and `stderr` where the parent process just drops
 * all incoming messages from the child process. This however also has its drawbacks since it
 * would create one to two threads to listen for messages that would just otherwise be
 * ignored. If these flags are not used, the OS's default behavior of inheriting the parent
 * process' standard streams will be used. Each standard output stream can be disabled
 * individually or together if needed depending on which flag(s) are used.
 *
 * @note Any callbacks specified for @ref LaunchDesc::onReadStdout or
 * @ref LaunchDesc::onReadStderr will be ignored if the corresponding flag(s) to disable
 * those streams is also used.
 */
constexpr LauncherFlags fLaunchFlagNoStdOut = 0x00000080;
/** @copydoc fLaunchFlagNoStdOut */
constexpr LauncherFlags fLaunchFlagNoStdErr = 0x00000100;
/** @copydoc fLaunchFlagNoStdOut */
constexpr LauncherFlags fLaunchFlagNoStdStreams = fLaunchFlagNoStdOut | fLaunchFlagNoStdErr;
/** Flag to indicate that launching the child process should not fail if either of the log
 * files fails to open for write. This flag is ignored if the @ref LaunchDesc::stdoutLog
 * and @ref LaunchDesc::stderrLog log filenames are `nullptr` or empty strings. If either
 * log file fails to open, the child process will still be launched but the default behavior
 * of inheriting the parent's standard output streams will be used instead. If this flag
 * is not used, the default behavior is to fail the operation and not launch the child
 * process.
 */
constexpr LauncherFlags fLaunchFlagAllowBadLog = 0x00000200;
/** Flag to indicate that the child process should be launched fully detached from the launching
 * (ie: parent) process. This flag only has an effect on Linux and Mac OS. On Windows this
 * flag is simply ignored. This intentionally orphans the child process so that the terminal
 * session or @a initd becomes the one responsible for waiting on the orphaned child process
 * instead of the launching parent process. This allows the child process to be a fully fire-
 * and-forget process on all platforms. Note that using this flag may make communication
 * with the child process difficult or impossible. This flag should generally not be used
 * in combination with child processes that need to read stdout and stderr output.
 */
constexpr LauncherFlags fLaunchFlagLaunchDetached = 0x00000400;
/** @} */
/** Stream waiting flags
 * @{
 */
/** Base type for flags to the @ref carb::launcher::ILauncher::waitForStreamEnd() function.
 * Valid flags for this type are the carb::launcher::fWaitFlag* flags.
 */
using WaitFlags = uint32_t;
/** Flag to indicate that the `stdout` stream should be waited on. The stream will be signaled
 * when it is closed by either the child process and all of the data on the stream has been
 * consumed, or by using the @ref fWaitFlagCloseStdOutStream flag.
 */
constexpr WaitFlags fWaitFlagStdOutStream = 0x00000001;
/** Flag to indicate that the `stderr` stream should be waited on. The stream will be signaled
 * when it is closed by either the child process and all of the data on the stream has been
 * consumed, or by using the @ref fWaitFlagCloseStdErrStream flag.
 */
constexpr WaitFlags fWaitFlagStdErrStream = 0x00000002;
/** Flag to indicate that the `stdout` stream for a child should be closed before waiting on it.
 * Note that doing so will truncate any remaining incoming data on the stream. This is useful
 * for closing the stream and exiting the reader thread when the parent process is no longer
 * interested in the output of the child process (ie: the parent only wanted to wait on a
 * successful startup signal from the child). This flag has no effect if no read callback was
 * provided for the `stdout` stream when the child process was launched.
 *
 * @note [Linux] If this is used on a child process to close the parent process's end of the
 * `stdout` stream, the child process will be terminated with SIGPIPE if it ever tries
 * to write to `stdout` again. This is the default handling of writing to a broken
 * pipe or socket on Linux. The only way around this default behavior is to ensure
 * that the child process ignores SIGPIPE signals. Alternatively, the parent could
 * just wait for the child process to exit before destroying its process handle or
 * closing the stream.
 */
constexpr WaitFlags fWaitFlagCloseStdOutStream = 0x00000004;
/** Flag to indicate that the `stderr` stream for a child should be closed before waiting on it.
 * Note that doing so will truncate any remaining incoming data on the stream. This is useful
 * for closing the stream and exiting the reader thread when the parent process is no longer
 * interested in the output of the child process (ie: the parent only wanted to wait on a
 * successful startup signal from the child). This flag has no effect if no read callback was
 * provided for the `stderr` stream when the child process was launched.
 *
 * @note [Linux] If this is used on a child process to close the parent process's end of the
 * `stderr` stream, the child process will be terminated with SIGPIPE if it ever tries
 * to write to `stderr` again. This is the default handling of writing to a broken
 * pipe or socket on Linux. The only way around this default behavior is to ensure
 * that the child process ignores SIGPIPE signals. Alternatively, the parent could
 * just wait for the child process to exit before destroying its process handle or
 * closing the stream.
 */
constexpr WaitFlags fWaitFlagCloseStdErrStream = 0x00000008;
/** Flag to indicate that the wait should succeed when any of the flagged streams have been
 * successfully waited on. The default behavior is to wait for all flagged streams to be
 * completed before returning or timing out. Note that this flag only makes a difference
 * when more than one stream is flagged to be waited on.
 */
constexpr WaitFlags fWaitFlagAnyStream = 0x00000010;
/** @} */
/** Process kill flags.
 * @{
 */
/** Base type for flags to the @ref carb::launcher::ILauncher::killProcess() function. Valid flags for this
 * type are the carb::launcher::fKillFlag* flags.
 */
using KillFlags = uint32_t;
/** Flag to indicate that any direct child processes of the process being terminated should
 * also be terminated. Note that this will not cause the full hierarchy of the process's
 * ancestors to be terminated as well. The caller should manage its process tree directly
 * if multiple generations are to be terminated as well.
 */
constexpr KillFlags fKillFlagKillChildProcesses = 0x00000001;
/** Flag to indicate that a child process should be force killed. This only has an effect
 * on Linux where a SIGKILL signal will be sent to the process instead of SIGTERM. This
 * flag is ignored on Windows. The potential issue with SIGTERM is that a process can
 * trap and handle that signal in a manner other than terminating the process. The SIGKILL
 * signal however cannot be trapped and will always terminate the process.
 */
constexpr KillFlags fKillFlagForce = 0x00000002;
/** Flag to indicate that ILauncher::killProcess() or ILauncher::killProcessWithTimeout() calls
 * should simply fail if a debugger is currently attached to the child process being terminated.
 * The default behavior is to still attempt to kill the child process and wait for it to exit.
 * On Linux, this will work as intended. On Windows however, a process being debugged cannot
 * be terminated without first detaching the debugger. The attempt to terminate the child
 * process will be queued for after the debugger has been detached.
 */
constexpr KillFlags fKillFlagFailOnDebugger = 0x00000004;
/** Flag to indicate that the ILauncher::killProcess() or ILauncher::killProcessWithTimeout()
 * calls should not wait for the child process to fully exit before returning. This allows
 * calls to return more quickly, but could result in other functions such as
 * ILauncher::isProcessActive() and ILauncher::getProcessExitCode() returning false results
 * for a short period after ILauncher::killProcess() returns. This period is usually only a
 * few milliseconds, but may be inconsistent due to other system behavior and load.
 */
constexpr KillFlags fKillFlagSkipWait = 0x00000008;
/** @} */
/** Special value that can be passed to ILauncher::writeProcessStdin() for the @a bytes parameter
 * to indicate that the input is a null terminated UTF-8 string. When this special length value
 * is used, it is the caller's responsibility to ensure the input string is actually null
 * terminated. This value has all bits set (ie: equivalent to `SIZE_MAX`).
 */
constexpr size_t kNullTerminated = ~0ull;
/** Special exit code to indicate that the process is still running and has not exited yet.
 * This can be returned from ILauncher::waitProcessExit() and ILauncher::getProcessExitCode().
 * The value has only the most significant bit of a 64-bit integer set so that it is unlikely
 * to collide with any real process exit code.
 */
constexpr ExitCode kStillActive = 0x8000000000000000ll;
/** Indicates an infinite timeout for use in the ILauncher::waitProcessExit() function in its
 * @a timeout parameter. The call will block until the requested process ends. This value is
 * equivalent to `UINT64_MAX`.
 */
constexpr uint64_t kInfiniteTimeout = ~0ull;
/** Return statuses for ILauncher::killProcessWithTimeout(). These indicate how the termination
 * attempt completed.
 */
enum class KillStatus
{
    /** The child process was successfully terminated. It will also have been confirmed to
     * have exited fully if the @ref fKillFlagSkipWait flag was not used. If the
     * @ref fKillFlagSkipWait flag is used in the call, this status only indicates that the
     * signal to terminate the child process was successfully sent. The child process will
     * exit at some point in the near future. If a very short timeout was used in a call to
     * ILauncher::killProcessWithTimeout(), and the child process had exited within that
     * period, this will be returned; otherwise @ref KillStatus::eWaitFailed will be returned.
     * In most situations, the time period between termination and when the child process fully
     * exits will only be a few milliseconds. However, within that time period, calls to
     * functions such as ILauncher::isProcessActive() or ILauncher::getProcessExitCode() may
     * return false results.
     */
    eSuccess,

    /** A debugger was attached to the child process at the time the termination attempt was
     * made. This will only be returned if the @ref fKillFlagFailOnDebugger flag is used
     * and a debugger is attached to the child process. No attempt to terminate the process
     * will be made in this case. A future call to ILauncher::killProcess() or
     * ILauncher::killProcessWithTimeout() (once the debugger has been detached) will be
     * needed to actually terminate the child process.
     */
    eDebuggerAttached,

    /** A debugger was attached to the child process and that prevented it from being terminated.
     * This will only be returned on Windows. A similar situation can still occur on Linux,
     * except that on Linux the child process will be terminated successfully. In that case,
     * the debugger process will just be left in an invalid state where the only course of action
     * is to detach from the terminated process. Note that when this value is returned, the
     * child process will be marked for termination by the system, but it will not actually be
     * terminated until the debugger is detached from it.
     */
    eDebuggerFail,

    /** The attempt to signal the child process to terminate failed. This can occur if the
     * child process's handle is invalid or there is a permission problem. This will not
     * happen in most common situations.
     */
    eTerminateFailed,

    /** Waiting for the child process to exit failed or timed out. When this is returned, the
     * child process has still been successfully signaled to exit, but it didn't fully exit
     * before the timeout expired. This may still be viewed as a successful result however.
     * This status code can be suppressed in successful cases with the @ref fKillFlagSkipWait
     * flag. That flag is especially useful when a zero timeout is desired but a successful
     * result should still be returned. If this value is returned, the caller is responsible
     * for ensuring the child process successfully exits. This state can be verified with
     * calls such as ILauncher::isProcessActive(), ILauncher::getProcessExitCode(), and
     * ILauncher::waitProcessExit().
     */
    eWaitFailed,

    /** An invalid parameter was passed into ILauncher::killProcessWithTimeout(). */
    eInvalidParameter,
};
/** Standard command interpreters for Windows and Linux. These can be used in the launch
 * descriptor's @ref LaunchDesc::interpreter value to override some default interpreter
 * detection functionality.
 *
 * @note These interpreter names are just 'safe' interpreters. If a caller has additional
 * knowledge of the functional requirements of a script (ie: requires python 3.6+,
 * requires a specific install of python, requires additional options for the interpreter,
 * etc), it is the caller's responsibility to ensure an appropriate interpreter path and
 * command is provided in @ref LaunchDesc::interpreter. If no interpreter path is given
 * in the launch descriptor, one of these interpreters will be chosen based on the
 * extension of the script file.
 *
 * @note If you need to use `cmd /C`, you must use @ref kInterpreterShellScript so that ILauncher
 * can properly quote your arguments, since `cmd /C` does not interpret a command argument
 * in the way that almost every other interpreter does.
 */
#if CARB_PLATFORM_WINDOWS
constexpr const char* const kInterpreterShellScript = "cmd /C";
/** @copydoc kInterpreterShellScript
 * @note On Windows both shell script interpreter constants refer to `cmd /C`.
 */
constexpr const char* const kInterpreterShellScript2 = "cmd /C";
#else
constexpr const char* const kInterpreterShellScript = "sh";
/** @copydoc kInterpreterShellScript */
constexpr const char* const kInterpreterShellScript2 = "bash";
#endif
/** Interpreter names for python scripts. Using this command assumes that at least one version
 * of python is installed locally on the system and is available through the system PATH
 * variable. It is the caller's responsibility to ensure that a global python instance is
 * installed on the calling system before either using this interpreter string in a launch
 * descriptor or attempting to run a python script with a `nullptr` interpreter.
 *
 * To check if the global python interpreter is installed on the calling system, a call to
 * ILauncher::launchProcess() with a @ref kInterpreterPythonCommand as the interpreter and
 * the simple script "quit" can be used. If ILauncher::launchProcess() succeeds, the global
 * python interpreter is installed. If it fails, it is not installed. In the latter case,
 * it is the caller's responsibility to either find or install an appropriate python interpreter
 * before attempting to launch a python script.
 */
constexpr const char* const kInterpreterPythonScript = "python";
/** @copydoc kInterpreterPythonScript */
constexpr const char* const kInterpreterPythonCommand = "python -c";
/** Descriptor of the new process to be launched by ILauncher::launchProcess(). This contains
 * all the information needed to launch and communicate with the new child process.
 */
struct LaunchDesc
{
    /** A vector of command line arguments for the new process to launch. This may not be
     * `nullptr`. The first argument in the vector must be the path to the executable to run.
     * This may be a relative or absolute path. All following arguments will be passed to the
     * executable as command line arguments. Each string must be UTF-8 encoded. Note that if
     * a relative path is used for the first argument, it must be given relative to the current
     * working directory for the parent (ie: calling) process, not the path given by @ref path.
     * The @ref path value will become the current working directory for the child process, not
     * the path to find the executable image at.
     */
    const char* const* argv = nullptr;

    /** The total number of arguments in the @ref argv vector. */
    size_t argc = 0;

    /** The optional initial working directory of the new process. If not specified, this will
     * default to the calling process's current working directory. Once the child process has
     * been successfully started, its current working directory will be set to this path. This
     * will neither affect the current working directory of the parent process nor will it be
     * used as the path to find the child process' executable.
     */
    const char* path = nullptr;

    /** Callback to be performed whenever data is successfully read from the child process's
     * `stdout` or `stderr` streams. These may be `nullptr` if nothing needs to be read from the
     * child process's `stdout` or `stderr` streams. Note that providing a callback here will
     * spawn a new thread per callback to service read requests on the child process's `stdout`
     * or `stderr` streams. See @ref OnProcessReadFn for more information on how these callbacks
     * should be implemented and what their responsibilities are.
     *
     * @note [Linux] If this is non-`nullptr` and the parent process destroys its process handle
     * for the child process before the child process exits, the child process will be
     * terminated with SIGPIPE if it ever tries to write to the corresponding stream again.
     * This is the default Linux kernel behavior for writing to a broken pipe or socket.
     * It is best practice to first wait for the child process to exit before destroying
     * the process handle in the parent process.
     */
    OnProcessReadFn onReadStdout = nullptr;

    /** @copydoc onReadStdout */
    OnProcessReadFn onReadStderr = nullptr;

    /** The opaque context value to be passed to the read callbacks when they are performed. This
     * will never be accessed directly by the process object, just passed along to the callbacks.
     * It is the responsibility of the callbacks to know how to appropriately interpret these
     * values. These values are ignored if both @ref onReadStdout and @ref onReadStderr are
     * `nullptr`.
     */
    void* readStdoutContext = nullptr;

    /** @copydoc readStdoutContext */
    void* readStderrContext = nullptr;

    /** Flags to control the behavior of the new child process. This is zero or more of the
     * @ref LauncherFlags flags.
     */
    LauncherFlags flags = 0;

    /** A hint for the size of the buffer to use when reading from the child process's `stdout`
     * and `stderr` streams. This represents the maximum amount of data that can be read at
     * once and returned through the onRead*() callbacks. This is ignored if both
     * @ref onReadStdout and @ref onReadStderr are `nullptr`.
     *
     * Note that this buffer size is only a hint and may be adjusted internally to meet a
     * reasonable minimum read size for the platform.
     */
    size_t bufferSize = kDefaultProcessBufferSize;

    /** A block of environment variables to pass to the child process. If the @ref flags
     * include the @ref fLaunchFlagNoInheritEnv flag, this environment block will be used
     * explicitly if non-`nullptr`. If that flag is not used, the calling process's current
     * environment will be prepended to the block specified here. Any environment variables
     * specified in here will replace any variables in the calling process's environment
     * block. Each string in this block must be UTF-8 encoded. If this is `nullptr`, the
     * default behavior is for the child process to inherit the entire environment of the
     * parent process. If an empty non-`nullptr` environment block is specified here, the
     * child process will be launched without any environment.
     */
    const char* const* env = nullptr;

    /** The number of environment variables specified in the environment block @ref env. This
     * may only be 0 if the environment block is empty or `nullptr`.
     */
    size_t envCount = 0;

    /** An optional command interpreter name to use when launching the new process. This can be
     * used when launching a script file (ie: shell script, python script, etc). This must be
     * `nullptr` if the command being launched is a binary executable. If this is `nullptr`
     * and a script file is being executed, an attempt will be made to determine the appropriate
     * command interpreter based on the file extension of the first argument in @ref argv.
     * The ILauncher::launchProcess() call may fail if this is `nullptr`, an appropriate
     * interpreter could not be found for the named script, and the named script could not
     * be launched directly on the calling platform (ie: a script using a shebang on its
     * first line will internally specify its own command interpreter). This value is ignored
     * if the @ref fLaunchFlagScript flag is not present in @ref flags.
     *
     * This can be one of the kInterpreter* names to use a standard command interpreter that
     * is [presumably] installed on the system. If a specific command interpreter is to be
     * used instead, it is the caller's responsibility to set this appropriately.
     *
     * @note This interpreter process will be launched with a search on the system's 'PATH'
     * variable. On Windows this is always the behavior for launching any process.
     * However, on Linux a process must always be identified by its path (relative or
     * absolute) except in the case of launching a command interpreter.
     */
    const char* interpreter = nullptr;

    /** Optional names of log files to redirect `stdout` and `stderr` output from the child process
     * to in lieu of a callback. The output from these streams won't be visible to the parent
     * process (unless it is also reading from the log file). If either of these are
     * non-`nullptr` and not an empty string, and the callbacks (@ref onReadStdout or
     * @ref onReadStderr) are also `nullptr`, the corresponding stream from the child process
     * will be redirected to these files(s) on disk. It is the caller's responsibility to ensure
     * the requested filename is valid and writable. If the log fails to open, launching the
     * child process will be silently ignored or fail depending on whether the
     * @ref fLaunchFlagAllowBadLog flag is used or not. These logs will be ignored if any
     * of the @ref fLaunchFlagNoStdStreams flags are used. The log file corresponding to the
     * flag(s) used will be disabled.
     *
     * When a log is requested, it will be opened in a sharing mode. The log will be opened
     * with shared read and write permissions. If the named log file already exists, it will
     * always be truncated when opened. It is the caller's responsibility to ensure the previous
     * file is moved or renamed if its contents need to be preserved. The log will also be
     * opened in 'append' mode (ie: as if "a+" had been passed to fopen()) so that all new
     * messages are written at the end of the file. It is the child process's responsibility
     * to ensure that all messages written to the log files are synchronized so that messages
     * do not get incorrectly interleaved. If both log files are given the same name, it is
     * also the child process's responsibility to ensure writes to `stdout` and `stderr` are
     * appropriately serialized.
     *
     * Setting log file names here will behave roughly like shell redirection to a file. The
     * two streams can be independently specified (as in "> out.txt & err.txt"), or they can
     * both be redirected to the same file (as in "&> log.txt" or "> log.txt & log.txt"). The
     * files will behave as expected in the child process - writing to `stdout` will be buffered
     * while writing to `stderr` will be unbuffered. If the named log file already exists, it
     * will always be overwritten and truncated.
     *
     * This filename can be either an absolute or relative path. Relative paths will be opened
     * relative to the current working directory at the time. The caller is responsible for
     * ensuring all path components leading up to the log filename exist before calling. Note
     * that the log file(s) will be created before the child process is launched. If the child
     * process fails to launch, it will be the caller's responsibility to clean up the log file
     * if necessary.
     *
     * @note if a non-`nullptr` callback is given for either `stdout` or `stderr`, that will
     * override the log file named here. Each log file will only be created if its
     * corresponding callback is not specified.
     */
    const char* stdoutLog = nullptr;

    /** @copydoc stdoutLog */
    const char* stderrLog = nullptr;

    /** Reserved for future expansion. This must be `nullptr`. */
    void* ext = nullptr;
};
// ********************************** interface declaration ***************************************
/** A simple process launcher helper interface. This is responsible for creating child processes,
* tracking their lifetime, and communicating with the child processes. All operations on this
* interface are thread safe within themselves. It is the caller's responsibility to manage the
* lifetime of and any multi-threaded access to each process handle object that is returned from
* launchProcess(). For example, destroying a process handle object while another thread is
* operating on it will result in undefined behavior.
*
* Linux notes:
* * If any other component of the software using this plugin sets the SIGCHLD handler back to
* default behavior (ie: SIG_DFL) or installs a SIGCHLD handler function, any child process
* that exits while the parent is still running will become a zombie process. The only way
* to remove a zombie process in this case is to wait on it in the parent process with
* waitProcessExit() or getProcessExitCode() at some point after the child process has
* exited. Explicitly killing the child process with killProcess() will also avoid creating
* a zombie process.
* * Setting the SIGCHLD handler to be ignored (ie: SIG_IGN) will completely break the ability
* to wait on the exit of any child process for this entire process. Any call to wait on a
* child process such as waitProcessExit() or getProcessExitCode() will fail immediately.
* There is no way to detect or work around this situation on the side of this interface
* since signal handlers are process-wide and can be installed at any time by any component
* of the process.
 *     * The problem with leaving a zombie process hanging around is that it continues to take
* up a slot in the kernel's process table. If this process table fills up, the kernel
* will not be able to launch any new processes (or threads). All of the zombie children
* of a process will be automatically cleaned up when the parent exits however.
* * If a read callback is provided for either `stdout` or `stderr` of the child process and
* the parent process destroys its process handle for the child process before the child
* process exits, the child process will be terminated with SIGPIPE if it ever tries to
* write to either stream again. This is the default Linux kernel behavior for writing
* to a broken pipe or socket. The only portable way around this is to ensure the child
* process will ignore SIGPIPE signals. It is generally best practice to ensure the parent
* process waits on all child processes before destroying its process handle. This also
* prevents the appearance of zombie processes as mentioned above.
*
* Windows notes:
* * Reading from a child process's `stdout` or `stderr` streams will not necessarily be
* aligned to the end of a single 'message' (ie: the contents of a single write call on the
* child process's end of the stream). This means that a partial message may be received in
* the callbacks registered during the launch of a child process. It is left up to the
* caller to accumulate input from these streams until an appropriate delimiter has been
* reached and the received data can be fully parsed. This may be fixed in a future version
* of this interface.
*/
struct ILauncher
{
    CARB_PLUGIN_INTERFACE("carb::launcher::ILauncher", 1, 3)

    /** Launches a new child process.
     *
     * @param[in] desc A descriptor of the child process to launch. At least the
     *                 @ref LaunchDesc::argc and @ref LaunchDesc::argv members must be
     *                 filled in. Default values on all other members are sufficient.
     * @returns A new process object if the child process is successfully launched. This must
     *          be destroyed with destroyProcessHandle() when it is no longer needed by the
     *          caller. Note that closing the process object will not terminate the child
     *          process. It simply means that the calling process can no longer communicate
     *          with or kill the child process. If the child process needs to be killed first,
     *          it is the caller's responsibility to call kill() before destroying the handle.
     * @returns `nullptr` if the new process could not be launched for any reason. This may
     *          include insufficient permissions, failed memory or resource allocations, etc.
     *
     * @remarks This attempts to launch a new child process. The new process will be created
     *          and start to run before successful return here. Depending on what flags and
     *          callbacks are provided in the launch descriptor, it may be possible for this
     *          process to communicate with the new child process.
     *
     * @note On Linux, the child process's executable should not have the set-user-ID or set-
     *       group-ID capabilities set on it when using the @ref carb::launcher::fLaunchFlagKillOnParentExit
     *       flag. These executables will clear the setting that allows the child process
     *       to receive a signal when the parent process exits for any reason (ie: exits
     *       gracefully, crashes, is externally terminated, etc). Setting these capabilities
     *       on an executable file requires running something along the lines of:
     *         * @code{.sh}
     *             sudo setcap cap_setuid+ep <filename>
     *             sudo setcap cap_setgid+ep <filename>
     *           @endcode
     *
     * @note Any built executable will, by default, not have any of these capabilities set.
     *       They will have to be explicitly set by a user, installer, or script. These
     *       executables can be identified in an "ls -laF" output by either being highlighted
     *       in red (default bash color scheme), or by having the '-rwsr-xr-x' permissions
     *       (or similar) instead of '-rwxr-xr-x', or by passing the executable path to
     *       'capsh --print' to see what its effective or permitted capabilities are.
     *
     * @note If a set-user-ID or set-group-ID executable needs to be launched as a child process
     *       and the @ref fLaunchFlagKillOnParentExit flag is desired for it, the child process
     *       should call @code{.cpp}prctl(PR_SET_PDEATHSIG, SIGTERM)@endcode early in its main()
     *       function to regain this behavior. The restoreParentDeathSignal() helper function
     *       is also offered here to make that call easier in Carbonite apps. Note however that
     *       there will be a possible race condition in this case - if the parent exits before
     *       the child process calls prctl(), the child process will not be signaled. Also note
     *       that unconditionally calling one of these functions in a child process will result
     *       in the @ref fLaunchFlagKillOnParentExit behavior being imposed on that child
     *       process regardless of whether the flag was used by the parent process.
     *
     * @note Since we can neither guarantee the child process will be a Carbonite app nor that
     *       it will even be source under control of the same developer (ie: a command line
     *       tool), resetting this death signal is unfortunately not handled as a task in this
     *       interface.
     */
    Process*(CARB_ABI* launchProcess)(LaunchDesc& desc);

    /** Launches a detached child process.
     *
     * @param[in] desc A descriptor of the child process to launch. At least the
     *                 @ref LaunchDesc::argc and @ref LaunchDesc::argv members must be
     *                 filled in. Default values on all other members are sufficient.
     *                 Note that setting a read callback for `stdout` or `stderr` is
     *                 not allowed in a detached process. The @ref LaunchDesc::onReadStderr
     *                 and @ref LaunchDesc::onReadStdout parameters will be cleared to
     *                 `nullptr` before launching the child process. Callers may still
     *                 redirect `stdout` and `stderr` to log files however. The
     *                 @ref fLaunchFlagLaunchDetached flag will also be added to the
     *                 descriptor so that zombie processes are not inadvertently created
     *                 on Unix based platforms.
     * @returns The process ID of the new child process if successfully launched.
     * @retval kBadId if the new process could not be launched for any reason. This may
     *         include insufficient permissions, failed memory or resource allocations, etc.
     *
     * @remarks This is a convenience version of launchProcess() that launches a child process
     *          but does not return the process handle. Instead the operating system's process
     *          ID for the new child process is returned. This is intended to be used for
     *          situations where the parent process neither needs to communicate with the child
     *          process nor know when it has exited. Using this should be reserved for child
     *          processes that manage their own lifetime and communication with the parent in
     *          another prearranged manner. The returned process ID may be used in OS level
     *          process management functions, but is not useful to pass into any other ILauncher
     *          functions.
     */
    ProcessId launchProcessDetached(LaunchDesc& desc)
    {
        ProcessId id;
        Process* proc;

        // registering read callbacks is not allowed in a detached process since we'll be
        // immediately destroying the handle before return. If these were set, the child
        // process would be killed on Linux if it ever tried to write to one of its streams.
        // On both Windows and Linux having these as non-`nullptr` would also cause a lot
        // of unnecessary additional work to be done both during and after the launch.
        desc.onReadStderr = nullptr;
        desc.onReadStdout = nullptr;

        // add the 'launch detached' flag to ensure we don't need to wait on the child process
        // on Linux. This is to avoid leaving a zombie process lying around.
        desc.flags |= fLaunchFlagLaunchDetached;

        proc = launchProcess(desc);

        if (proc == nullptr)
            return kBadId;

        id = getProcessId(proc);
        destroyProcessHandle(proc);
        return id;
    }

    /** Destroys a process handle when it is no longer needed.
     *
     * @param[in] process The process handle to destroy. This will no longer be valid upon
     *                    return. This call will be silently ignored if `nullptr` is passed in.
     * @returns No return value.
     *
     * @remarks This destroys a process handle object that was previously returned by a call
     *          to launchProcess(). The process handle will no longer be valid upon return.
     *
     * @note Calling this does *not* kill the child process the process handle refers to.
     *       It simply recovers the resources taken up by the process handle object. In order
     *       to kill the child process, the caller must call killProcess() first.
     *
     * @note [Linux] If the child process was started with a read callback for one or both of
     *       its standard streams, destroying the handle here will cause the child process to
     *       be terminated with SIGPIPE if it ever tries to write to any of the standard streams
     *       that were redirected to the read callbacks. This is the default Linux kernel
     *       behavior for attempting to write to a broken pipe or socket. The only portable
     *       way to work around this is to ensure the child process ignores SIGPIPE signals.
     *       However, the general best practice is for the parent process to wait for the
     *       child process to exit (or explicitly kill it) before destroying the process handle.
     */
    void(CARB_ABI* destroyProcessHandle)(Process* process);

    /** Retrieves the process identifier for a child process.
     *
     * @param[in] process The process handle object representing the child process to retrieve
     *                    the identifier for. This may not be `nullptr`.
     * @returns The integer identifier of the child process if it is still running.
     * @retval kBadId if the child process if it is not running.
     *
     * @remarks This retrieves the process identifier for a child process. This can be used
     *          to gain more advanced platform specific access to the child process if needed,
     *          or simply for debugging or logging identification purposes.
     */
    ProcessId(CARB_ABI* getProcessId)(Process* process);

    /** Waits for a child process to exit.
     *
     * @param[in] process The process handle object representing the child process to wait
     *                    for. This may not be `nullptr`. This is returned from a previous
     *                    call to launchProcess().
     * @param[in] timeout The maximum time in milliseconds to wait for the child process to
     *                    exit. This may be @ref kInfiniteTimeout to specify that this should
     *                    block until the child process exits. This may be 0 to simply poll
     *                    whether the child process has exited or not.
     * @returns The exit code of the child process if it successfully exited in some manner.
     * @retval kStillActive if the wait timed out and the child process had not exited yet.
     * @returns `EXIT_FAILURE` if @p process is `nullptr`.
     *
     * @remarks This waits for a child process to exit and retrieves its exit code if the exit
     *          has occurred. Note that this does not in any way signal the child process to
     *          exit. That is left up to the caller since that method would be different for
     *          each child process. This simply waits for the exit to occur. If the child
     *          process doesn't exit within the allotted timeout period, it will remain running
     *          and the special @ref kStillActive exit code will be returned.
     *
     * @note Despite the timeout value being a 64-bit value, on Windows this will only wait
     *       for up to ~49.7 days at a time (ie: 32 bits) for the child process to exit. This
     *       is due to all the underlying timing functions only taking a 32-bit timeout value.
     *       If a longer wait is required, the wait will be repeated with the remaining time.
     *       However, if this process is blocking for that long waiting for the child process,
     *       that might not be the most desirable behavior and a redesign might be warranted.
     *
     * @note [Linux] if the calling process sets a SIG_IGN signal handler for the SIGCHLD signal,
     *       this call will fail immediately regardless of the child process's current running
     *       state. If a handler function is installed for SIGCHLD or that signal's handler is
     *       still set to its default behavior (ie: SIG_DFL), the child process will become a
     *       zombie process once it exits. This will continue to occupy a slot in the kernel's
     *       process table until the child process is waited on by the parent process. If the
     *       kernel's process table fills up with zombie processes, the system will no longer
     *       be able to create new processes or threads.
     *
     * @note [Linux] It is considered a programming error to allow a child process to be leaked
     *       or to destroy the process handle without first waiting on the child process. If
     *       the handle is destroyed without waiting for the child process to [successfully] exit
     *       first, a zombie process will be created. The only exception to this is if the
     *       parent process exits before the child process. In this case, any zombie child
     *       processes will be inherited by the init (1) system process which will wait
     *       on them and recover their resources.
     *
     * @note In general it is best practice to always wait on all child processes before
     *       destroying the process handle object. This guarantees that any zombie children
     *       will be removed and that all resources will be cleaned up from the child process.
     */
    ExitCode(CARB_ABI* waitProcessExit)(Process* process, uint64_t timeout);

    /** Attempts to retrieve the exit code for a child process.
     *
     * @param[in] process The process handle object representing the child process to retrieve
     *                    the exit code for. The child process may or may not still be running.
     *                    This may not be `nullptr`.
     * @returns The exit code of the child process if it has already exited in some manner.
     * @retval kStillActive if the child process is still running.
     * @returns `EXIT_FAILURE` if @p process is `nullptr`.
     *
     * @remarks This attempts to retrieve the exit code for a child process if it has exited.
     *          The exit code isn't set until the child process exits, is killed, or crashes.
     *          The special exit code @ref kStillActive will be returned if the child process
     *          is still running.
     *
     * @note [Linux] see the notes about zombie processes in waitProcessExit(). These also
     *       apply to this function since it will also perform a short wait to see if the
     *       child process has exited. If it has exited already, calling this will also
     *       effectively clean up the child zombie process. However, if another component
     *       of this process has set the SIGCHLD signal handler to be ignored (ie: SIG_IGN),
     *       this call will also fail immediately.
     */
    ExitCode(CARB_ABI* getProcessExitCode)(Process* process);

    /** Writes a buffer of data to the stdin stream of a child process.
     *
     * @param[in] process The process handle object representing the child process to write
     *                    the data to. This may not be `nullptr`. The child process must
     *                    have been opened using the @ref fLaunchFlagOpenStdin flag in
     *                    order for this to succeed.
     * @param[in] data    The buffer of data to write to the child process.
     * @param[in] bytes   The total number of bytes to write to the child process or the
     *                    special value @ref kNullTerminated to indicate that the buffer
     *                    @p data is a null-terminated UTF-8 string. If the latter value
     *                    is used, it is the caller's responsibility to ensure the data
     *                    buffer is indeed null terminated.
     * @returns `true` if the buffer of data is successfully written to the child process's
     *          stdin stream.
     * @returns `false` if the entire data buffer could not be written to the child process's
     *          stdin stream. In this case, the child process's stdin stream will be left
     *          in an unknown state. It is the responsibility of the caller and the child
     *          process to negotiate a way to resynchronize the stdin stream.
     * @returns `false` if @p process is `nullptr`.
     *
     * @remarks This attempts to write a buffer of data to a child process's stdin stream.
     *          This call will be ignored if the child process was not launched using the
     *          @ref fLaunchFlagOpenStdin flag or if the stdin handle for the process has
     *          since been closed with closeProcessStdin(). The entire buffer will be
     *          written to the child process's stdin stream. If even one byte cannot be
     *          successfully written, this call will fail. This can handle data buffers
     *          larger than 4GB if needed. This will block until all data is written.
     *
     * @remarks When the @ref kNullTerminated value is used in @p bytes, the data buffer is
     *          expected to contain a null terminated UTF-8 string. The caller is responsible
     *          for ensuring this. In the case of a null terminated string, all of the string
     *          up to but not including the null terminator will be sent to the child process.
     *          If the null terminator is to be sent as well, the caller must specify an
     *          explicit byte count.
     */
    bool(CARB_ABI* writeProcessStdin)(Process* process, const void* data, size_t bytes);

    /** Closes the stdin stream of a child process.
     *
     * @param[in] process The process handle object representing the child process to close
     *                    the stdin stream for. This may not be `nullptr`.
     * @returns No return value.
     *
     * @remarks This closes the stdin stream of a child process. This call will be ignored if
     *          the stdin stream for the child process either was never opened or if it has
     *          already been closed by a previous call to closeProcessStdin(). Closing the
     *          stdin stream only closes the side of the stream that is owned by the calling
     *          process. It will not close the actual stream owned by the child process itself.
     *          Other processes may still retrieve the child process's stdin stream handle in
     *          other ways if needed to communicate with it.
     */
    void(CARB_ABI* closeProcessStdin)(Process* process);

    /** Kills a running child process.
     *
     * @param[in] process The process handle object representing the child process to kill.
     *                    This may not be `nullptr`.
     * @param[in] flags   Flags to affect the behavior of this call. This may be zero or
     *                    more of the @ref KillFlags flags.
     * @returns No return value.
     *
     * @remarks This kills a running child process. If the process has already exited on its
     *          own, this call will be ignored. The child process will be terminated without
     *          being given any chance for it to clean up any of its state or save any
     *          information to persistent storage. This could result in corrupted data if
     *          the child process is in the middle of writing to persistent storage when it
     *          is terminated. It is the caller's responsibility to ensure the child process
     *          is as safe as possible to terminate before calling.
     *
     * @remarks On both Windows and Linux, this will set an exit code of either 137 if the
     *          @ref fKillFlagForce flag is used or 143 otherwise. These values are the
     *          exit codes that are set in Linux by default for a terminated process. They
     *          are 128 plus the signal code used to terminate the process. In the case of
     *          a forced kill, this sends SIGKILL (9). In the case of a normal kill, this
     *          sends SIGTERM (15). On Windows, this behavior is simply mimicked.
     *
     * @note On Linux, the @ref fKillFlagForce flag is available to cause the child process to
     *       be sent a SIGKILL signal instead of the default SIGTERM. The difference between
     *       the two signals is that SIGTERM can be caught and handled by the process whereas
     *       SIGKILL cannot. Having SIGTERM be sent to end the child process on Linux does
     *       allow for the possibility of a graceful shutdown for apps that handle the signal.
     *       On Linux, it is recommended that if a child process needs to be killed, it is
     *       first sent SIGTERM (by not using the @ref fKillFlagForce flag), then if after a
     *       short time (ie: 2-5 seconds, depending on the app's shutdown behavior) has not
     *       exited, kill it again using the @ref fKillFlagForce flag.
     */
    void(CARB_ABI* killProcess)(Process* process, KillFlags flags);

    /** Tests whether a process is still running.
     *
     * @param[in] process The process handle object representing the child process to query.
     *                    This may not be `nullptr`.
     * @returns `true` if the given child process is still running.
     * @returns `false` if the child process has exited.
     */
    inline bool isProcessActive(Process* process)
    {
        return getProcessExitCode(process) == kStillActive;
    }

    /** @copydoc killProcess().
     *
     * @param[in] timeout The time in milliseconds to wait for the child process to fully
     *                    exit. This may be 0 to indicate that no wait should occur after
     *                    signaling the child process to terminate. This may also be
     *                    @ref kInfiniteTimeout to wait indefinitely for the child process
     *                    to exit.
     * @retval KillStatus::eSuccess if the child process is successfully terminated
     *         and was confirmed to have exited or the @ref fKillFlagSkipWait flag is used
     *         and the child process is successfully signaled to exit, or if the child process
     *         had already terminated on its own or was otherwise killed before this call.
     * @retval KillStatus::eWaitFailed if the child process was successfully signaled to
     *         exit but the timeout for the wait for it to fully exit expired before it exited.
     *         This may still indicate successful termination, but it is left up to the caller
     *         to determine that.
     * @returns Another @ref KillStatus code if the termination failed in any way.
     *
     * @remarks This variant of killProcess() can be used to have more control over how the
     *          child process is terminated and whether it was successful or not. The timeout
     *          allows the caller control over how long the call should wait for the child
     *          process to exit. This can also be used in combination with other functions
     *          such as ILauncher::isDebuggerAttached() to figure out how and when a child
     *          process should be terminated.
     */
    KillStatus(CARB_ABI* killProcessWithTimeout)(Process* process, KillFlags flags, uint64_t timeout);

    /** Tests whether a debugger is currently attached to a child process.
     *
     * @param[in] process The child process to check the debugger status for. This may not be
     *                    `nullptr`. This may be a handle to a child process that has already
     *                    exited.
     * @returns `true` if there is a debugger currently attached to the given child process.
     * @returns `false` if the child process has exited or no debugger is currently attached to
     *          it.
     * @returns `false` if @p process is `nullptr`.
     *
     * @remarks This tests whether a debugger is currently attached to the given child process.
     *          On Windows at least, having a debugger attached to a child process will prevent
     *          it from being terminated with killProcess(). This can be queried to see if the
     *          debugger task has completed before attempting to kill it.
     */
    bool(CARB_ABI* isDebuggerAttached)(Process* process);

    /** Waits for one or more standard streams from a child process to end.
     *
     * @param[in] process The child process to wait on the standard streams for. This may not
     *                    be `nullptr`. This may be a handle to a child process that has
     *                    already exited. This call will be ignored if the flagged stream(s)
     *                    were not opened for the given child process.
     * @param[in] flags   Flags to control how the operation occurs and which stream(s) to wait
     *                    on. At least one flag must be specified. This may not be 0. It is
     *                    not an error to specify flags for a stream that was not opened by the
     *                    child process. In this case, the corresponding flag(s) will simply be
     *                    ignored.
     * @param[in] timeout The maximum amount of time in milliseconds to wait for the flagged
     *                    streams to end. This may be @ref kInfiniteTimeout to wait infinitely
     *                    for the stream(s) to end. A stream ends when it is closed by either
     *                    the child process (by it exiting or explicitly closing its standard
     *                    streams) or by the parent process closing it by using one of the
     *                    @ref fWaitFlagCloseStdOutStream or @ref fWaitFlagCloseStdErrStream
     *                    flags. When a stream ends due to the child process closing it, all
     *                    of the pending data will have been consumed by the reader thread and
     *                    already delivered to the read callback function for that stream.
     * @returns `true` if the wait operation specified by @p flags completed successfully. This
     *          means that at least one, possibly both, streams have ended and been fully
     *          consumed.
     * @returns `false` if the flagged streams did not end within the timeout period.
     *
     * @remarks This is used to wait on one or more of the standard streams (`stdout` or `stderr`)
     *          from the child process to end. This ensures that all data coming from the
     *          child process has been consumed and delivered to the reader callbacks.
     *
     * @note This does not allow for waiting on the standard streams of child processes that
     *       are writing directly to log files (ie: using the @ref LaunchDesc::stdoutLog and
     *       @ref LaunchDesc::stderrLog parameters). This only affects child processes that
     *       were launched with a read callback specified for `stdout`, `stderr`, or both.
     */
    bool(CARB_ABI* waitForStreamEnd)(Process* process, WaitFlags flags, uint64_t timeout);
};
/** Restores the parent death signal on set-user-ID and set-group-ID images.
*
 *  @param[in] flags    Flags to indicate which signal should be sent on parent process
* exit. This may be 0 to cause the child process to be asked to
* gracefully terminate or @ref fKillFlagForce to forcibly terminate
* the child process. In the latter case, the child process will not
* get any chance to clean up or release resources.
* @returns No return value.
*
* @remarks This restores the parent death signal on Linux after executing an image file
* that has a set-user-ID or set-group-ID capability set on it. Unfortunately,
* executing one of these image files has the result of clearing the parent
* death signal attribute that is set when using the @ref fLaunchFlagKillOnParentExit
* flag. These capabilities are set on the image file with the "setcap" command
* line tool and require super user access. The capabilities of an executable image
* file can be discovered with the "capsh --print" command line tool. See the notes
 *           on ILauncher::launchProcess() for more information.
*
* @note This only needs to be called when a child process's image file has a set-user-ID
* or set-group-ID capability set on it on Linux and the @ref fLaunchFlagKillOnParentExit
* flag was used when launching the child process. This is still safe to call on
* Windows and will simply do nothing. Note that calling this when the child was not
* launched with the @ref fLaunchFlagKillOnParentExit flag will cause that behavior
* to be imposed on the child process. Communicating the need for this call is left
* as an exercise for both the child and parent processes.
*/
inline void restoreParentDeathSignal(KillFlags flags = 0)
{
    // on non-Linux platforms this helper intentionally does nothing; the parameter is
    // only consumed inside the Linux-specific block below.
    CARB_UNUSED(flags);
#if CARB_PLATFORM_LINUX
    // choose the signal to be delivered to this process when its parent exits.  A
    // forced kill requests SIGKILL (cannot be caught); otherwise SIGTERM is used so
    // the process still gets a chance to shut down gracefully.
    int signal = SIGTERM;

    if ((flags & fKillFlagForce) != 0)
        signal = SIGKILL;

    prctl(PR_SET_PDEATHSIG, signal);
#endif
}
} // namespace launcher
} // namespace carb
|
omniverse-code/kit/include/carb/launcher/LauncherUtils.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
/** @file
* @brief Helper classes and functions for the ILauncher interface.
*/
#pragma once
#include "../Defines.h"
#include "../extras/StringSafe.h"
#include "../../omni/extras/PathMap.h"
#include "../settings/SettingsUtils.h"
#include <string>
#include <unordered_map>
#include <vector>
#include <sstream>
#if CARB_PLATFORM_LINUX
# include <unistd.h>
#elif CARB_PLATFORM_MACOS
# include <crt_externs.h>
#else
# include "../CarbWindows.h"
# include "../extras/Unicode.h"
#endif
namespace carb
{
/** Namespace for the Carbonite process launch helper interface. */
namespace launcher
{
/** Base type for the flags used when adding a settings tree to an argument collector object. */
using SettingsEnumFlags = uint32_t;

/** Flag to indicate that the settings in the requested tree should be added recursively to
 *  the argument collector. If this flag is not present, only the settings directly in the
 *  named path will be added to the object.
 */
constexpr SettingsEnumFlags fSettingsEnumFlagRecursive = 0x01;

/** Prototype for a callback function used to check if a setting should be added.
 *
 *  @param[in] path     The full path to the setting being queried. This will never be `nullptr`.
 *  @param[in] context  A caller specified context value that is passed to the callback.
 *  @returns `true` if the setting should be added to the argument collector. Returns `false`
 *           if the setting should not be added to the argument collector.
 *
 *  @remarks Prototype for a callback predicate function that is used to allow the caller to
 *           determine if a particular setting value should be added to the argument collector.
 *           This is called for each value setting (ie: for `int`, `bool`, `float` and `string`
 *           items). The return value gives the caller an opportunity to provide more fine
 *           grained control over which settings are added to the argument collector than just
 *           the full tree.
 */
using AddSettingPredicateFn = bool (*)(const char* path, void* context);
/** A simple child process argument collector helper class. This allows arguments of different
 * types to be accumulated into a list that can then later be retrieved as a Unix style argument
 * list that can be passed to ILauncher::launchProcess() in its @ref LaunchDesc::argv descriptor
 * member. This allows for string arguments and various integer type arguments to be trivially
 * added to the argument list without needing to locally convert all of them to strings. The
 * argument count is also tracked as the arguments are collected. Once all arguments have been
 * collected, the final Unix style argument list can be retrieved with getArgs() and the count
 * with getCount(). All collected arguments will remain in the order they are originally
 * added in.
 *
 * The basic usage of this is to create a new object, add one or more arguments of various
 * types to it using the '+=' operators, then retrieve the Unix style argument list with
 * getArgs() to assign to @ref LaunchDesc::argv and getCount() to assign to
 * @ref LaunchDesc::argc before calling ILauncher::launchProcess(). Copy and move operators
 * and constructors are also provided to make it easier to assign other argument lists to
 * another object to facilitate more advanced multiple process launches (ie: use a set of
 * base arguments for each child process then add other child specific arguments to each
 * one before launching).
 *
 * This helper class is not thread safe. It is the caller's responsibility to ensure thread
 * safe access to objects of this class if needed.
 */
class ArgCollector
{
public:
    ArgCollector() = default;

    /** Copy constructor: copies another argument collector object into this one.
     *
     * @param[in] rhs The argument collector object to copy from. This will be left unmodified.
     */
    ArgCollector(const ArgCollector& rhs)
    {
        *this = rhs;
    }

    /** Move constructor: moves the contents of another argument collector object into this one.
     *
     * @param[in] rhs The argument collector object to move from. This object will be reset to
     *                an empty state.
     */
    ArgCollector(ArgCollector&& rhs)
    {
        *this = std::move(rhs);
    }

    ~ArgCollector()
    {
        clear();
    }

    /** Clears out this object and resets it back to its initially constructed state.
     *
     * @returns No return value.
     *
     * @remarks This clears out all content collected into this object so far. This object
     *          will be reset back to its original constructed state and will be suitable
     *          for reuse.
     */
    void clear()
    {
        m_argList.reset();
        m_args.clear();
        m_allocCount = 0;
    }

    /** Retrieves the final argument list as a Unix style null terminated list.
     *
     * @param[out] argCountOut Optionally receives the number of arguments as by via getCount().
     * @returns A Unix style argument list. This list will always be terminated by a `nullptr`
     *          entry so that it can be self-counted if needed. This returned argument list
     *          object is owned by this object and should not be deleted or freed. See the
     *          remarks below for more information on the lifetime and use of this object.
     * @returns `nullptr` if the buffer for the argument list could not be allocated.
     *
     * @remarks This retrieves the final argument list for this object. The list object is
     *          owned by this object and should not be freed or deleted. The returned list
     *          will be valid until this object is destroyed or until getArgs() is called
     *          again after adding new arguments. If the caller needs to keep a copy of the
     *          returned argument list, the caller must perform a deep copy on the returned
     *          object. This task is out of the scope of this object and is left as an
     *          exercise for the caller.
     */
    const char* const* getArgs(size_t* argCountOut = nullptr)
    {
        if (argCountOut)
        {
            *argCountOut = m_args.size();
        }

        if (m_args.empty())
        {
            return emptyArgList();
        }

        // only reallocate the cached pointer table if the previous one is too small.
        if (m_allocCount < m_args.size())
        {
            m_allocCount = m_args.size();
            m_argList.reset(new (std::nothrow) const char*[m_args.size() + 1]);

            if (CARB_UNLIKELY(m_argList == nullptr))
            {
                if (argCountOut)
                {
                    *argCountOut = 0;
                }

                m_allocCount = 0;
                return nullptr;
            }
        }

        for (size_t i = 0; i < m_args.size(); i++)
        {
            m_argList[i] = m_args[i].c_str();
        }

        // null terminate the list since some platforms and apps expect that behavior.
        m_argList[m_args.size()] = nullptr;
        return m_argList.get();
    }

    /** Retrieves the argument count for this object.
     *
     * @returns The number of arguments that have been collected into this object so far. This
     *          is incremented each time the '+=' operator is used.
     */
    size_t getCount() const
    {
        return m_args.size();
    }

    /** Copy assignment operator.
     *
     * @param[in] rhs The argument collector object to copy from. This object will receive a
     *                copy of all the arguments currently listed in the @p rhs argument
     *                collector object. The @p rhs object will not be modified.
     * @returns A reference to this object suitable for chaining other operators or calls.
     */
    ArgCollector& operator=(const ArgCollector& rhs)
    {
        if (this == &rhs)
            return *this;

        clear();

        if (rhs.m_args.size() == 0)
            return *this;

        m_args = rhs.m_args;
        return *this;
    }

    /** Move assignment operator.
     *
     * @param[inout] rhs The argument collector object to move from. This object will
     *                   steal all arguments from @p rhs and will clear out the other
     *                   object before returning.
     * @returns A reference to this object suitable for chaining other operators or calls.
     */
    ArgCollector& operator=(ArgCollector&& rhs)
    {
        if (this == &rhs)
            return *this;

        clear();
        m_args = std::move(rhs.m_args);

        // explicitly reset the moved-from object: a moved-from std::vector is only left in a
        // valid-but-unspecified state, and rhs would otherwise keep its stale cached pointer
        // table.  The documented contract is that rhs is cleared out by this operation.
        rhs.clear();
        return *this;
    }

    /** Compare this object to another argument collector object for equality.
     *
     * @param[in] rhs The argument collector object to compare to this one.
     * @returns `true` if the two objects contain the same list of arguments. Note that each
     *          object must contain the same arguments in the same order in order for them
     *          to match.
     * @returns `false` if the argument lists in the two objects differ.
     */
    bool operator==(const ArgCollector& rhs) const
    {
        size_t count = m_args.size();

        if (&rhs == this)
            return true;

        if (count != rhs.m_args.size())
            return false;

        for (size_t i = 0; i < count; i++)
        {
            if (m_args[i] != rhs.m_args[i])
                return false;
        }

        return true;
    }

    /** Compare this object to another argument collector object for inequality.
     *
     * @param[in] rhs The argument collector object to compare to this one.
     * @returns `true` if the two objects contain a different list of arguments. Note that each
     *          object must contain the same arguments in the same order in order for them
     *          to match. If either the argument count differs or the order of the arguments
     *          differs, this will succeed.
     * @returns `false` if the argument lists in the two objects match.
     */
    bool operator!=(const ArgCollector& rhs) const
    {
        return !(*this == rhs);
    }

    /** Tests whether this argument collector is empty.
     *
     * @returns `true` if this argument collector object is empty.
     * @returns `false` if argument collector has at least one argument in its list.
     */
    bool operator!() const
    {
        return m_args.size() == 0;
    }

    /** Tests whether this argument collector is non-empty.
     *
     * @returns `true` if argument collector has at least one argument in its list.
     * @returns `false` if this argument collector object is empty.
     */
    explicit operator bool() const
    {
        return !m_args.empty();
    }

    /** Adds a formatted string as an argument.
     *
     * @param[in] fmt The printf-style format string to use to create the new argument. This
     *                may not be `nullptr`.
     * @param[in] ... The arguments required by the format string.
     * @returns A references to this object suitable for chaining other operators or calls.
     */
    ArgCollector& format(const char* fmt, ...) CARB_PRINTF_FUNCTION(2, 3)
    {
        CARB_FORMATTED(fmt, [&](const char* p) { add(p); });
        return *this;
    }

    /** Adds a new argument or set of arguments to the end of the list.
     *
     * @param[in] value The value to add as a new argument. This may be a string or any
     *                  primitive integer or floating point type value. For integer and
     *                  floating point values, they will be converted to a string before
     *                  being added to the argument list. For the variants that add
     *                  another argument collector list or a `nullptr` terminated string
     *                  list, or a vector of strings to this one, all arguments in the
     *                  other object will be copied into this one in the same order. All
     *                  new argument(s) will always be added at the end of the list.
     * @returns A reference to this object suitable for chaining other operators or calls.
     */
    ArgCollector& add(const char* value)
    {
        m_args.push_back(value);
        return *this;
    }

    /** @copydoc add(const char*) */
    ArgCollector& add(const std::string& value)
    {
        m_args.push_back(value);
        return *this;
    }

    /** @copydoc add(const char*) */
    ArgCollector& add(const ArgCollector& value)
    {
        for (const auto& arg : value.m_args)
            m_args.push_back(arg);

        return *this;
    }

    /** @copydoc add(const char*) */
    ArgCollector& add(const char* const* value)
    {
        for (const char* const* arg = value; arg[0] != nullptr; arg++)
            m_args.push_back(arg[0]);

        return *this;
    }

    /** @copydoc add(const char*) */
    ArgCollector& add(const std::vector<const char*>& value)
    {
        for (auto& v : value)
            add(v);

        return *this;
    }

    /** @copydoc add(const char*) */
    ArgCollector& add(const std::vector<std::string>& value)
    {
        for (auto& v : value)
            add(v);

        return *this;
    }

    /** @copydoc add(const char*) */
    ArgCollector& operator+=(const char* value)
    {
        return add(value);
    }

    /** @copydoc add(const char*) */
    ArgCollector& operator+=(const std::string& value)
    {
        return add(value);
    }

    /** @copydoc add(const char*) */
    ArgCollector& operator+=(const ArgCollector& value)
    {
        return add(value);
    }

    /** @copydoc add(const char*) */
    ArgCollector& operator+=(const char* const* value)
    {
        return add(value);
    }

    /** @copydoc add(const char*) */
    ArgCollector& operator+=(const std::vector<const char*>& value)
    {
        return add(value);
    }

    /** @copydoc add(const char*) */
    ArgCollector& operator+=(const std::vector<std::string>& value)
    {
        return add(value);
    }

/** Macro to add other [almost identical] variants of the add() and operator+=() functions.
 *  Note that this unfortunately can't be a template because a 'const char*' value is not
 *  allowed as a template argument since it doesn't lead to a unique instantiation. This
 *  is similar to the reasoning for a float value not being allowed as a template argument.
 *  Using a macro here saves 140+ lines of code (mostly duplicated) and documentation.
 *  @private
 */
#define ADD_PRIMITIVE_HANDLER(type, fmt)                                                                               \
    /** Adds a new primitive value to this argument collector object.                                                  \
        @param[in] value    The primitive numerical value to add to this argument collector                            \
                            object.  This will be converted to a string before adding to the                           \
                            collector.                                                                                 \
        @returns A reference to this argument collector object.                                                        \
     */                                                                                                                \
    ArgCollector& add(type value)                                                                                      \
    {                                                                                                                  \
        char buffer[128];                                                                                              \
        carb::extras::formatString(buffer, CARB_COUNTOF(buffer), fmt, value);                                          \
        m_args.push_back(buffer);                                                                                      \
        return *this;                                                                                                  \
    }                                                                                                                  \
    /** @copydoc add(type) */                                                                                          \
    ArgCollector& operator+=(type value)                                                                               \
    {                                                                                                                  \
        return add(value);                                                                                             \
    }

    // unsigned integer handlers.
    ADD_PRIMITIVE_HANDLER(unsigned char, "%u")
    ADD_PRIMITIVE_HANDLER(unsigned short, "%u")
    ADD_PRIMITIVE_HANDLER(unsigned int, "%u")
    ADD_PRIMITIVE_HANDLER(unsigned long, "%lu")
    ADD_PRIMITIVE_HANDLER(unsigned long long, "%llu")

    // signed integer handlers.
    ADD_PRIMITIVE_HANDLER(char, "%d")
    ADD_PRIMITIVE_HANDLER(short, "%d")
    ADD_PRIMITIVE_HANDLER(int, "%d")
    ADD_PRIMITIVE_HANDLER(long, "%ld")
    ADD_PRIMITIVE_HANDLER(long long, "%lld")

    // other numerical handlers. Note that some of these can be trivially implicitly cast to
    // other primitive types so we can't define them again. Specifically the size_t,
    // intmax_t, and uintmax_t types often match other types with handlers defined above.
    // Which handler each of these matches to will differ by platform however.
    ADD_PRIMITIVE_HANDLER(float, "%.10f")
    ADD_PRIMITIVE_HANDLER(double, "%.20f")
#undef ADD_PRIMITIVE_HANDLER

    /** Adds all settings under a branch in the settings registry to this object.
     *
     * @param[in] root      The root of the settings tree to copy into this argument
     *                      collector. This may be `nullptr` or an empty string to
     *                      add all settings starting from the root of the settings
     *                      registry. This string should start with a '/' so that it is
     *                      always an absolute settings path.
     * @param[in] prefix    The prefix to add to each option before adding it to this
     *                      argument collector. This may be `nullptr` or an empty string
     *                      to not use any prefix.
     * @param[in] flags     Flags to control the behavior of this operation. This may be
     *                      zero or more of the @ref SettingsEnumFlags flags. This defaults
     *                      to 0.
     * @param[in] predicate A predicate function that will be called for each value to give
     *                      the caller a chance to decide whether it should be added to this
     *                      object or not. This may be `nullptr` if all settings under the
     *                      given root should always be added. This defaults to `nullptr`.
     * @param[in] context   An opaque context value that will be passed to the predicate
     *                      function @p predicate. This will not be accessed or used in
     *                      any way except to pass to the predicate function. This defaults
     *                      to `nullptr`.
     * @returns A reference to this argument collector object.
     *
     * @remarks This adds echoes of all settings under a given root branch as arguments in this
     *          argument collector. Each setting that is found is given the prefix @p prefix
     *          (typically something like "--/"). This is useful for passing along certain
     *          subsets of a parent process's settings tree to a child process.
     *
     * @note It is the caller's responsibility to ensure that only expected settings are added
     *       to this argument collector. A predicate function can be provided to allow per-item
     *       control over which settings get added. By default, the search is not recursive.
     *       This is intentional since adding a full tree could potentially add a lot of new
     *       arguments to this object.
     */
    ArgCollector& add(const char* root,
                      const char* prefix,
                      SettingsEnumFlags flags = 0,
                      AddSettingPredicateFn predicate = nullptr,
                      void* context = nullptr)
    {
        dictionary::IDictionary* dictionary = getCachedInterface<dictionary::IDictionary>();
        settings::ISettings* settings = getCachedInterface<settings::ISettings>();
        std::string rootPath;

        // note: the last parameter is intentionally named 'ctx' (not 'context') so that it
        // does not shadow the outer 'context' parameter captured above.  It carries the same
        // user value, passed through by walkSettings() below.
        auto addSetting = [&](const char* path, int32_t elementData, void* ctx) -> int32_t {
            dictionary::ItemType type;

            type = settings->getItemType(path);

            // skip dictionaries since we're only interested in leaves here.
            if (type == dictionary::ItemType::eDictionary)
                return elementData + 1;

            if ((flags & fSettingsEnumFlagRecursive) == 0 && elementData > 1)
                return elementData;

            // verify that the caller wants this setting added.
            if (predicate != nullptr && !predicate(path, ctx))
                return elementData + 1;

            switch (type)
            {
                case dictionary::ItemType::eBool:
                    format("%s%s=%s", prefix, &path[1], settings->getAsBool(path) ? "true" : "false");
                    break;

                case dictionary::ItemType::eInt:
                    format("%s%s=%" PRId64, prefix, &path[1], settings->getAsInt64(path));
                    break;

                case dictionary::ItemType::eFloat:
                    format("%s%s=%g", prefix, &path[1], settings->getAsFloat64(path));
                    break;

                case dictionary::ItemType::eString:
                    format("%s%s=\"%s\"", prefix, &path[1], settings->getStringBuffer(path));
                    break;

                default:
                    break;
            }

            return elementData;
        };

        // no root or an empty root given => default to the root of the settings registry.
        if (root == nullptr || root[0] == 0)
            root = "/";

        // avoid a `nullptr` check later.
        if (prefix == nullptr)
            prefix = "";

        // make sure to strip off any trailing separators since that would break the lookups.
        rootPath = root;

        if (rootPath.size() > 1 && rootPath[rootPath.size() - 1] == '/')
            rootPath = rootPath.substr(0, rootPath.size() - 1);

        // walk the settings tree to collect all the requested settings.
        settings::walkSettings(
            dictionary, settings, dictionary::WalkerMode::eIncludeRoot, rootPath.c_str(), 0, addSetting, context);
        return *this;
    }

    /** Retrieves the argument string at a given index.
     *
     * @param[in] index The zero based index of the argument to retrieve. This must be
     *                  strictly less than the number of arguments in the list as returned
     *                  by getCount(). If this index is out of range, an empty string
     *                  will be returned instead.
     * @returns A reference to the string contained in the requested argument in the list.
     *          This returned string should not be modified by the caller.
     *
     * @remarks This retrieves the argument string stored at the given index in the argument
     *          list. This string will be the one stored in the list itself and should not
     *          be modified.
     */
    const std::string& at(size_t index) const
    {
        if (index >= m_args.size())
            return m_empty;

        return m_args[index];
    }

    /** @copydoc at() */
    const std::string& operator[](size_t index) const
    {
        return at(index);
    }

    /** Removes the last argument from the list.
     *
     * @returns No return value.
     *
     * @remarks This removes the last argument from the list. If this is called, any previous
     *          returned object from getArgs() will no longer be valid. The updated list object
     *          must be retrieved again with another call to getArgs().
     */
    void pop()
    {
        if (m_args.empty())
            return;

        m_args.pop_back();
    }

    /** Removes an argument from the list by its index.
     *
     * @returns `true` if the item is successfully removed.
     * @returns `false` if the given index is out of range of the argument list's size.
     *
     * @remarks This removes an argument from the list. If this is called, any previous returned
     *          object from getArgs() will no longer be valid. The updated list object must be
     *          retrieved again with another call to getArgs().
     */
    bool erase(size_t index)
    {
        if (index >= m_args.size())
            return false;

        m_args.erase(m_args.begin() + index);
        return true;
    }

    /** Returns a string of all arguments for debugging purposes.
     *  @returns A `std::string` of all arguments concatenated. This is for debugging/logging purposes only.
     */
    std::string toString() const
    {
        std::ostringstream stream;

        for (auto& arg : m_args)
        {
            size_t index = size_t(-1);

            for (;;)
            {
                size_t start = index + 1;
                index = arg.find_first_of("\\\" '", start); // Characters that must be escaped
                stream << arg.substr(start, index - start);

                if (index == std::string::npos)
                    break;

                stream << '\\' << arg[index];
            }

            // Always add a trailing space. It will be popped before return.
            stream << ' ';
        }

        std::string out = stream.str();

        if (!out.empty())
            out.pop_back(); // Remove the extra space

        return out;
    }

private:
    static const char* const* emptyArgList()
    {
        static const char* const empty{ nullptr };
        return ∅
    }

    /** An empty string to be retrieved from operator[] in the event of an out of range index.
     *  This is used to avoid the undesirable behavior of having an exception thrown by the
     *  underlying implementation of std::vector<>.
     */
    std::string m_empty;

    /** The vector of collected arguments. This represents the most current list of arguments
     *  for this object. The list in @p m_argList may be out of date if new arguments are
     *  added or existing ones removed. The argument count is always taken from the size of
     *  this vector.
     */
    std::vector<std::string> m_args;

    /** The Unix style list of arguments as last retrieved by getArgs(). This is only updated
     *  when getArgs() is called. This object may be destroyed and recreated in subsequent
     *  calls to getArgs() if the argument list is modified.
     */
    std::unique_ptr<const char*[]> m_argList;

    /** The last allocation size of the @p m_argList object in items. This is used to avoid
     *  reallocating the previous object if it is already large enough for a new call to
     *  getArgs().
     */
    size_t m_allocCount = 0;
};
/** A simple environment variable collector helper class. This provides a way to collect a set of
* environment variables and their values for use in ILauncher::launchProcess(). Each variable
* in the table will be unique. Attempting to add a variable multiple times will simply replace
* any previous value. Specifying a variable without a value will remove it from the table.
* Values for variables may be specified in any primitive integer or floating point type as
* well as string values. Once all desired variables have been collected into the object, a
 * Unix style environment table can be retrieved with getEnv() and the count with getCount().
* The order of the variables in the environment block will be undefined.
*
* The basic usage of this is to create a new object, add one or more variables and their
* values (of various types) using the various add() or '+=' operators, the retrieve the Unix
* style environment block with getEnv() to assign to @ref LaunchDesc::env, and getCount() to
* assign to @ref LaunchDesc::envCount. This environment block is then passed through the
* launch descriptor to ILauncher::launchProcess(). Copy and move constructors are also
* provided to make it easier to assign and combine other environment lists.
*
* The calling process's current environment can also be added using the add() function without
* any arguments. This can be used to allow a child process to explicitly inherit the parent
* process's environment block while also adding other variables or changing existing ones.
*
* On Windows all environment variable names used in this object are treated as case insensitive.
 * All values set for the variables will be case preserving. This matches Windows' native
* behavior in handling environment variables. If multiple casings are used in specifying the
* variable name when modifying any given variable, the variable's name will always keep the
* casing from when it was first added. Later changes to that variable regardless of the casing
* will only modify the variable's value.
*
* On Linux, all environment variable names used in this object are treated as case sensitive.
* All values set for the variables will be case preserving. This matches Linux's native
* behavior in handling environment variables. It is the caller's responsibility to ensure
* the proper casing is used when adding or modifying variables. Failure to match the case
* of an existing variable for example will result in two variables with the same name but
* different casing being added. This can be problematic for example when attempting to
* modify a standard variable such as "PATH" or "LD_LIBRARY_PATH".
*
* Also note that using this class does not affect or modify the calling process's environment
* variables in any way. This only collects variables and their values in a format suitable
* for setting as a child process's new environment.
*
* This helper class is not thread safe. It is the caller's responsibility to ensure thread
* safe access to objects of this class if needed.
*/
class EnvCollector
{
public:
EnvCollector() = default;
/** Copy constructor: copies another environment collector object into this one.
*
* @param[in] rhs The other environment collector object whose contents will be copied
* into this one. This other object will not be modified.
*/
EnvCollector(const EnvCollector& rhs)
{
*this = rhs;
}
/** Move constructor: moves the contents of an environment collector object into this one.
*
* @param[in] rhs The other environment collector object whose contents will be moved
* into this one. Upon return, this other object will be reset to an
* empty state.
*/
EnvCollector(EnvCollector&& rhs)
{
*this = std::move(rhs);
}
~EnvCollector()
{
clear();
}
/** Clears out this environment block object.
*
* @returns No return value.
*
* @remarks This clears out this environment block object. Any existing variables and their
* values will be lost and the object will be reset to its default constructed
* state for reuse.
*/
void clear()
{
m_env.clear();
m_args.clear();
}
/** Retrieves the Unix style environment block representing the variables in this object.
*
* @returns A Unix style environment block. This will be an array of string pointers.
* The last entry in the array will always be a `nullptr` string. This can be
* used to count the length of the environment block without needing to explicitly
* pass in its size as well.
* @returns `nullptr` if the buffer for the environment block could not be allocated.
*
* @remarks This retrieves the Unix style environment block for this object. The environment
* block object is owned by this object and should not be freed or deleted. The
* returned block will be valid until this object is destroyed or until getEnv()
* is called again. If the caller needs to keep a copy of the returned environment
* block object, the caller must perform a deep copy on the returned object. This
* task is out of the scope of this object and is left as an exercise for the caller.
*/
const char* const* getEnv()
{
m_args.clear();
for (auto& var : m_env)
m_args.format("%s=%s", var.first.c_str(), var.second.c_str());
return m_args.getArgs();
}
/** Retrieves the number of unique variables in the environment block.
*
* @returns The total number of unique variables in this environment block.
* @returns `0` if this environment block object is empty.
*/
size_t getCount() const
{
return m_env.size();
}
/** Copy assignment operator.
*
* @param[in] rhs The environment collector object to copy from. This object will receive
* a copy of all the variables currently listed in the @p rhs environment
* collector object. The @p rhs object will not be modified.
* @returns A reference to this object suitable for chaining other operators or calls.
*/
EnvCollector& operator=(const EnvCollector& rhs)
{
if (this == &rhs)
return *this;
clear();
if (rhs.m_env.size() == 0)
return *this;
m_env = rhs.m_env;
return *this;
}
/** Move assignment operator.
*
* @param[inout] rhs The argument collector object to move from. This object will
* steal all arguments from @p rhs and will clear out the other
* object before returning.
* @returns A reference to this object suitable for chaining other operators or calls.
*/
EnvCollector& operator=(EnvCollector&& rhs)
{
if (this == &rhs)
return *this;
clear();
m_env = std::move(rhs.m_env);
return *this;
}
/** Compare this object to another argument collector object for equality.
*
* @param[in] rhs The argument collector object to compare to this one.
* @returns `true` if the two objects contain the same list of arguments. Note that each
* object must contain the same arguments in the same order in order for them
* to match.
* @returns `false` if the argument lists in the two objects differ.
*/
bool operator==(const EnvCollector& rhs)
{
size_t count = m_env.size();
if (&rhs == this)
return true;
if (count != rhs.m_env.size())
return false;
for (auto var : m_env)
{
auto other = rhs.m_env.find(var.first);
if (other == rhs.m_env.end())
return false;
if (other->second != var.second)
return false;
}
return true;
}
/** Compare this object to another argument collector object for inequality.
*
* @param[in] rhs The argument collector object to compare to this one.
* @returns `true` if the two objects contain a different list of arguments. Note that each
* object must contain the same arguments in the same order in order for them
* to match. If either the argument count differs or the order of the arguments
* differs, this will succeed.
* @returns `false` if the argument lists in the two objects match.
*/
bool operator!=(const EnvCollector& rhs)
{
return !(*this == rhs);
}
/** Tests whether this argument collector is empty.
*
* @returns `true` if this argument collector object is empty.
* @returns `false` if argument collector has at least one argument in its list.
*/
bool operator!()
{
return m_env.size() == 0;
}
/** Tests whether this argument collector is non-empty.
*
* @returns `true` if argument collector has at least one argument in its list.
* @returns `false` if this argument collector object is empty.
*/
operator bool()
{
return !m_env.empty();
}
/** Adds a new environment variable by name and value.
*
* @param[in] name The name of the environment variable to add or replace. This may
* not be an empty string or `nullptr`, and should not contain an '='
* except as the first character.
* @param[in] value The value to assign to the variable @p name. This may be `nullptr`
* or an empty string to add a variable with no value.
* @returns A reference to this object suitable for chaining multiple calls.
*
* @remarks These functions allow various combinations of name and value types to be used
* to add new environment variables to this object. Values may be set as strings,
* integers, or floating point values.
*/
EnvCollector& add(const char* name, const char* value)
{
m_env[name] = value == nullptr ? "" : value;
return *this;
}
/** @copydoc add(const char*,const char*) */
EnvCollector& add(const std::string& name, const std::string& value)
{
m_env[name] = value;
return *this;
}
/** @copydoc add(const char*,const char*) */
EnvCollector& add(const std::string& name, const char* value)
{
m_env[name] = value == nullptr ? "" : value;
return *this;
}
/** @copydoc add(const char*,const char*) */
EnvCollector& add(const char* name, const std::string& value)
{
m_env[name] = value;
return *this;
}
/** Macro to declare the numerical variants of the add() functions.  Note that this can't
 *  be a template because a 'const char*' format string is not allowed as a template
 *  argument (it doesn't lead to a unique instantiation).  Using a macro here avoids a
 *  large amount of nearly duplicated code and documentation.
 *  @private
 */
#define ADD_PRIMITIVE_HANDLER(type, fmt) \
/** Adds a new name and primitive value to this environment collector object. \
@param[in] name The name of the new environment variable to be added. This may \
not be `nullptr` or an empty string. \
@param[in] value The primitive numerical value to set as the environment variable's \
value. This will be converted to a string before adding to the \
collector. \
@returns A reference to this environment collector object. \
*/ \
EnvCollector& add(const char* name, type value) \
{ \
char buffer[128]; \
carb::extras::formatString(buffer, CARB_COUNTOF(buffer), fmt, value); \
return add(name, buffer); \
} \
/** @copydoc add(const char*,type) */ \
EnvCollector& add(const std::string& name, type value) \
{ \
return add(name.c_str(), value); \
}
// unsigned integer handlers.
ADD_PRIMITIVE_HANDLER(uint8_t, "%" PRIu8)
ADD_PRIMITIVE_HANDLER(uint16_t, "%" PRIu16)
ADD_PRIMITIVE_HANDLER(uint32_t, "%" PRIu32)
ADD_PRIMITIVE_HANDLER(uint64_t, "%" PRIu64)
// signed integer handlers.
ADD_PRIMITIVE_HANDLER(int8_t, "%" PRId8)
ADD_PRIMITIVE_HANDLER(int16_t, "%" PRId16)
ADD_PRIMITIVE_HANDLER(int32_t, "%" PRId32)
ADD_PRIMITIVE_HANDLER(int64_t, "%" PRId64)
// other numerical handlers. Note that some of these can be trivially implicitly cast to
// other primitive types so we can't define them again. Specifically the size_t,
// intmax_t, and uintmax_t types often match other types with handlers defined above.
// Which handler each of these matches to will differ by platform however.
ADD_PRIMITIVE_HANDLER(float, "%.10f")
ADD_PRIMITIVE_HANDLER(double, "%.20f")
#undef ADD_PRIMITIVE_HANDLER
/** Adds or replaces a variable specified in a single string.
 *
 * @param[in] var The variable name and value to set. This may not be `nullptr` or an empty
 *                string. This must be in the format `<name>=<value>`. There should not
 *                be any spaces between the name, '=' and value portions of the string.
 *                If the '=' is missing or no value is given after the '=', the value of
 *                the named variable will be cleared out, but the variable will still
 *                remain valid.
 * @returns A reference to this object suitable for chaining multiple calls.
 */
EnvCollector& add(const char* var)
{
const char* sep;
#if CARB_PLATFORM_WINDOWS
// Windows' environment sets variables such as "=C:=C:\". We need to handle this case.
// Here the variable's name is "=C:" and its value is "C:\". This similar behavior is
// not allowed on Linux however.  Skip the leading '=' so the search below finds the
// separator instead of the name's first character.
if (var[0] == '=')
sep = strchr(var + 1, '=');
else
#endif
sep = strchr(var, '=');
// no assignment in the string => clear out that variable (it stays in the table with
// an empty value rather than being removed).
if (sep == nullptr)
{
m_env[var] = "";
return *this;
}
// split at the first '=': everything before it is the name, everything after the value.
m_env[std::string(var, sep - var)] = sep + 1;
return *this;
}
/** @copydoc add(const char*) */
EnvCollector& add(const std::string& var)
{
return add(var.c_str());
}
/** @copydoc add(const char*) */
EnvCollector& operator+=(const char* var)
{
return add(var);
}
/** @copydoc add(const char*) */
EnvCollector& operator+=(const std::string& var)
{
return add(var.c_str());
}
/** Adds a set of environment variables to this object.
*
* @param[in] vars The set of variables to add to this object. This may not be
* `nullptr`.
* The string table variant is intended to be a Unix style `nullptr`
* terminated string list. The other variants add the full contents
* of another environment collector object.
* @returns A reference to this object suitable for chaining multiple calls.
*/
EnvCollector& add(const char* const* vars)
{
for (const char* const* var = vars; var[0] != nullptr; var++)
add(var[0]);
return *this;
}
/** @copydoc add(const char* const*) */
EnvCollector& add(const std::vector<const char*>& vars)
{
for (auto& v : vars)
add(v);
return *this;
}
/** @copydoc add(const char* const*) */
EnvCollector& add(const std::vector<std::string>& vars)
{
for (auto& v : vars)
add(v);
return *this;
}
/** @copydoc add(const char* const*) */
EnvCollector& add(const EnvCollector& vars)
{
for (auto& var : vars.m_env)
m_env[var.first] = var.second;
return *this;
}
/** @copydoc add(const char* const*) */
EnvCollector& operator+=(const char* const* vars)
{
return add(vars);
}
/** @copydoc add(const char* const*) */
EnvCollector& operator+=(const std::vector<const char*>& vars)
{
return add(vars);
}
/** @copydoc add(const char* const*) */
EnvCollector& operator+=(const std::vector<std::string>& vars)
{
return add(vars);
}
/** @copydoc add(const char* const*) */
EnvCollector& operator+=(const EnvCollector& vars)
{
return add(vars);
}
/** Adds the environment variables from the calling process.
 *
 * @returns A reference to this object suitable for chaining multiple calls.
 *
 * @remarks This adds all of the current environment variables of the calling process to
 *          this environment block. Any variables with the same name that already existed
 *          in this object will be replaced. This is suitable for inheriting the calling
 *          process's current environment when launching a child process while still allowing
 *          changes or additions before launch.
 */
#if CARB_POSIX
EnvCollector& add()
{
# if CARB_PLATFORM_MACOS
// macOS does not reliably expose a global `environ` symbol to dynamic libraries, so
// the table must be fetched through _NSGetEnviron() instead.
char*** tmp = _NSGetEnviron(); // see man 7 environ
if (tmp == nullptr)
{
CARB_LOG_ERROR("_NSGetEnviron() returned nullptr");
return *this;
}
char** environ = *tmp; // local shadow of the process environment table.
# endif
// each entry is a single "name=value" string; add() splits it on the '='.
for (char** env = environ; env[0] != nullptr; env++)
add(env[0]);
return *this;
}
#else
EnvCollector& add()
{
LPWCH origEnv = GetEnvironmentStringsW();
LPWCH env = origEnv;
std::string var;
size_t len;
// walk the environment strings table and add each variable to this object.  The table
// is a packed sequence of null terminated UTF-16 "name=value" strings ending with an
// empty string; each entry is converted to UTF-8 before being added.
for (len = wcslen(env); env[0] != 0; env += len + 1, len = wcslen(env))
{
var = extras::convertWideToUtf8(env);
add(var);
}
FreeEnvironmentStringsW(origEnv);
return *this;
}
#endif
/** Removes a variable and its value from this object.
*
* @param[in] name The name of the variable to be removed. This may not be `nullptr` or
* an empty string. The named variable will no longer be present in
* this object upon return and its value will be lost.
* @returns A reference to this object suitable for chaining multiple calls.
*/
EnvCollector& remove(const char* name)
{
m_env.erase(name);
return *this;
}
/** @copydoc remove(const char*) */
EnvCollector& remove(const std::string& name)
{
return remove(name.c_str());
}
/** @copydoc remove(const char*) */
EnvCollector& operator-=(const char* name)
{
return remove(name);
}
/** @copydoc remove(const char*) */
EnvCollector& operator-=(const std::string& name)
{
return remove(name.c_str());
}
/** Retrieves the value for a named variable in this environment block object.
*
* @param[in] name The name of the variable to retrieve the value of. This may not be
* `nullptr` or an empty string. The value of the variable will not be
* modified by this lookup nor can its value be changed by assigning
* a new string to the returned string. To change the value, one of
* the add() functions or '+=()' operators should be used instead.
* @returns The value of the named variable if present in this environment block.
* @returns An empty string if the variable is not present in this environment block.
*/
const std::string& at(const char* name)
{
auto var = m_env.find(name);
if (var == m_env.end())
return m_empty;
return var->second;
}
/** @copydoc at(const char*) */
const std::string& at(const std::string& name)
{
return at(name.c_str());
}
/** @copydoc at(const char*) */
const std::string& operator[](const char* name)
{
return at(name);
}
/** @copydoc at(const char*) */
const std::string& operator[](const std::string& name)
{
return at(name.c_str());
}
private:
/** The table of argument names and values. This behaves differently in terms of case
* sensitivity depending on the platform. On Windows, environment variable names are
* treated as case insensitive to match local OS behavior. In contrast, on Linux
* environment variable names are treated as case sensitive.
*/
omni::extras::UnorderedPathMap<std::string> m_env;
/** The argument collector used to generate the environment block for getEnv(). */
ArgCollector m_args;
/** An empty string to return in case a variable is not found in the table. */
std::string m_empty;
};
} // namespace launcher
} // namespace carb
|
omniverse-code/kit/include/carb/cpp17/Tuple.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "Functional.h"
#include "../cpp/Tuple.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::apply;
}
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({ carb::cpp17::apply([](int) {}, std::make_tuple(5)); });
|
omniverse-code/kit/include/carb/cpp17/Functional.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "TypeTraits.h"
#include "../cpp/Functional.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::invoke;
using ::carb::cpp::invoke_r;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
extern int foo();
carb::cpp17::invoke(foo);
carb::cpp17::invoke_r<int>(foo);
});
|
omniverse-code/kit/include/carb/cpp17/Utility.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "../cpp/Utility.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::in_place;
using ::carb::cpp::in_place_index;
using ::carb::cpp::in_place_index_t;
using ::carb::cpp::in_place_t;
using ::carb::cpp::in_place_type;
using ::carb::cpp::in_place_type_t;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
static_assert(sizeof(carb::cpp17::in_place), "");
static_assert(sizeof(carb::cpp17::in_place_index<0>), "");
static_assert(sizeof(carb::cpp17::in_place_index_t<0>), "");
static_assert(sizeof(carb::cpp17::in_place_t), "");
static_assert(sizeof(carb::cpp17::in_place_type<int>), "");
static_assert(sizeof(carb::cpp17::in_place_type_t<int>), "");
});
|
omniverse-code/kit/include/carb/cpp17/StringView.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "../cpp20/TypeTraits.h"
#include "../cpp/StringView.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::basic_string_view;
using ::carb::cpp::string_view;
using ::carb::cpp::wstring_view;
#if CARB_HAS_CPP20
using ::carb::cpp::u8string_view;
#endif
using ::carb::cpp::u16string_view;
using ::carb::cpp::u32string_view;
using ::carb::cpp::operator""_sv;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
static_assert(sizeof(carb::cpp17::basic_string_view<char>), "");
static_assert(sizeof(carb::cpp17::string_view), "");
static_assert(sizeof(carb::cpp17::wstring_view), "");
static_assert(sizeof(carb::cpp17::u16string_view), "");
static_assert(sizeof(carb::cpp17::u32string_view), "");
});
|
omniverse-code/kit/include/carb/cpp17/Memory.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "../cpp/Memory.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::addressof;
using ::carb::cpp::destroy_at;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
extern int* foo();
carb::cpp17::addressof(foo);
carb::cpp17::destroy_at(foo());
});
|
omniverse-code/kit/include/carb/cpp17/TypeTraits.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "../cpp/TypeTraits.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
//! (Deprecated) Namespace for C++17 features using C++14 semantics. Use \ref carb::cpp instead.
namespace cpp17
{
using ::carb::cpp::bool_constant;
using ::carb::cpp::conjunction;
using ::carb::cpp::disjunction;
using ::carb::cpp::invoke_result;
using ::carb::cpp::invoke_result_t;
using ::carb::cpp::is_convertible;
using ::carb::cpp::is_invocable;
using ::carb::cpp::is_invocable_r;
using ::carb::cpp::is_nothrow_invocable;
using ::carb::cpp::is_nothrow_invocable_r;
using ::carb::cpp::is_nothrow_swappable;
using ::carb::cpp::is_nothrow_swappable_with;
using ::carb::cpp::is_swappable;
using ::carb::cpp::is_swappable_with;
using ::carb::cpp::is_void;
using ::carb::cpp::negation;
using ::carb::cpp::void_t;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
using True = carb::cpp17::bool_constant<true>;
using False = carb::cpp17::bool_constant<false>;
static_assert(carb::cpp17::conjunction<True, True>::value, "1");
static_assert(carb::cpp17::disjunction<True, False>::value, "2");
auto foo = []() noexcept { return true; };
static_assert(std::is_same<carb::cpp17::invoke_result<decltype(foo)>::type, bool>::value, "3");
static_assert(std::is_same<carb::cpp17::invoke_result_t<decltype(foo)>, bool>::value, "4");
static_assert(carb::cpp17::is_convertible<bool, int>::value, "5");
static_assert(carb::cpp17::is_invocable<decltype(foo)>::value, "6");
static_assert(carb::cpp17::is_invocable_r<bool, decltype(foo)>::value, "7");
static_assert(carb::cpp17::is_nothrow_invocable<decltype(foo)>::value, "9");
static_assert(carb::cpp17::is_nothrow_invocable_r<bool, decltype(foo)>::value, "10");
static_assert(std::is_class<carb::cpp17::is_nothrow_swappable<int>>::value, "11");
static_assert(std::is_class<carb::cpp17::is_nothrow_swappable_with<int, int>>::value, "12");
static_assert(std::is_class<carb::cpp17::is_swappable<int>>::value, "13");
static_assert(std::is_class<carb::cpp17::is_swappable_with<int, int>>::value, "14");
static_assert(carb::cpp17::is_void<void>::value, "15");
static_assert(carb::cpp17::negation<False>::value, "16");
});
|
omniverse-code/kit/include/carb/cpp17/Optional.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "Utility.h"
#include "../cpp/Optional.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::bad_optional_access;
using ::carb::cpp::make_optional;
using ::carb::cpp::nullopt;
using ::carb::cpp::nullopt_t;
using ::carb::cpp::optional;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
carb::cpp17::optional<int> opt{ carb::cpp17::nullopt };
carb::cpp17::optional<int> opt2 = carb::cpp17::make_optional<int>(5);
CARB_UNUSED(opt, opt2);
static_assert(sizeof(carb::cpp17::nullopt_t), "");
static_assert(sizeof(carb::cpp17::bad_optional_access), "");
});
|
omniverse-code/kit/include/carb/cpp17/Variant.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "TypeTraits.h"
#include "Utility.h"
#include "../cpp/Variant.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::bad_variant_access;
using ::carb::cpp::get;
using ::carb::cpp::get_if;
using ::carb::cpp::holds_alternative;
using ::carb::cpp::monostate;
using ::carb::cpp::variant;
using ::carb::cpp::variant_alternative;
using ::carb::cpp::variant_alternative_t;
using ::carb::cpp::variant_npos;
using ::carb::cpp::visit;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
using my_variant = carb::cpp17::variant<int, float>;
static_assert(std::is_same<int, carb::cpp17::variant_alternative<0, my_variant>::type>::value, "1");
static_assert(std::is_same<float, carb::cpp17::variant_alternative_t<1, my_variant>>::value, "2");
static_assert(carb::cpp17::variant_npos == size_t(-1), "3");
static_assert(sizeof(carb::cpp17::bad_variant_access), "4");
static_assert(!std::is_same<void, decltype(carb::cpp17::get<int>(std::declval<my_variant>()))>::value, "5");
static_assert(!std::is_same<void, decltype(carb::cpp17::get<0>(std::declval<my_variant>()))>::value, "6");
static_assert(!std::is_same<void, decltype(carb::cpp17::get_if<int>(std::declval<my_variant*>()))>::value, "7");
static_assert(!std::is_same<void, decltype(carb::cpp17::get_if<0>(std::declval<my_variant*>()))>::value, "8");
static_assert(
std::is_same<bool, decltype(carb::cpp17::holds_alternative<int>(std::declval<my_variant>()))>::value, "9");
static_assert(std::is_class<carb::cpp17::monostate>::value, "10");
struct my_visitor
{
void operator()(int)
{
}
void operator()(float)
{
}
};
static_assert(std::is_function<decltype(carb::cpp17::visit<my_visitor, my_variant>)>::value, "11");
});
|
omniverse-code/kit/include/carb/cpp17/StdDef.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "TypeTraits.h"
#include "../cpp/StdDef.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::byte;
using ::carb::cpp::to_integer;
} // namespace cpp17
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({
static_assert(sizeof(carb::cpp17::byte), "");
int i = carb::cpp17::to_integer<int>(carb::cpp17::byte(1));
CARB_UNUSED(i);
});
|
omniverse-code/kit/include/carb/cpp17/Exception.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Redirection for backwards compatibility
#pragma once
#include "../cpp/Exception.h"
CARB_FILE_DEPRECATED_MSG("Use carb/cpp include path and carb::cpp namespace instead")
namespace carb
{
namespace cpp17
{
using ::carb::cpp::uncaught_exceptions;
}
} // namespace carb
CARB_INCLUDE_PURIFY_TEST({ carb::cpp17::uncaught_exceptions(); });
|
omniverse-code/kit/include/carb/profiler/IProfileMonitor.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Monitor interface for carb.profiler.
#pragma once
#include "../Defines.h"
#include "../Types.h"
namespace carb
{
namespace profiler
{
/**
 * A struct describing a specific profiling event.
 */
struct ProfileEvent
{
    //! A human-readable name for the event.
    //! NOTE(review): lifetime is presumably tied to the owning ProfileEvents snapshot
    //! (released via IProfileMonitor::releaseLastProfileEvents()) -- do not cache -- verify.
    const char* eventName;
    //! The thread ID that recorded this event. Comparable with `GetCurrentThreadID()` on Windows or `gettid()` on
    //! Linux.
    uint64_t threadId;
    //! The start timestamp for this event. Based on 10 nanoseconds units since IProfiler::startup() was called.
    uint64_t startTime;
    //! The total time in milliseconds elapsed for this event.
    float timeInMs;
    //! The stack depth for this event.
    uint16_t level;
};
//! An opaque pointer used by IProfileMonitor. Represents one frame's snapshot of profiling data.
using ProfileEvents = struct ProfileEventsImpl*;
/**
 * Defines an interface to monitor profiling events.
 *
 * Typical usage: call markFrameEnd() once per frame, then getLastProfileEvents() to obtain
 * a snapshot of the previous frame, query it, and release it with releaseLastProfileEvents().
 */
struct IProfileMonitor
{
    CARB_PLUGIN_INTERFACE("carb::profiler::IProfileMonitor", 1, 1)
    /**
     * Returns the profiled events for the previous frame (up to the previous \ref markFrameEnd() call).
     *
     * @returns an opaque pointer that refers to the previous frame's profiling information. This pointer must be
     * released with releaseLastProfileEvents() when the caller is finished with it.
     */
    ProfileEvents(CARB_ABI* getLastProfileEvents)();
    /**
     * Returns the number of profiling events for a ProfileEvents instance.
     *
     * @param events The ProfileEvents instance returned from getLastProfileEvents().
     * @returns The number of profiling events in the array returned by getLastProfileEventsData().
     */
    size_t(CARB_ABI* getLastProfileEventCount)(ProfileEvents events);
    /**
     * Returns an array of profiling events for a ProfileEvents instance.
     *
     * @param events The ProfileEvents instance returned from getLastProfileEvents().
     * @returns An array of ProfileEvent objects. The size of the array is returned by getLastProfileEventCount().
     */
    ProfileEvent*(CARB_ABI* getLastProfileEventsData)(ProfileEvents events);
    /**
     * Returns the number of thread IDs known to the ProfileEvents instance.
     *
     * @param events The ProfileEvents instance returned from getLastProfileEvents().
     * @returns The number of thread IDs in the array returned by getProfileThreadIds().
     */
    uint32_t(CARB_ABI* getProfileThreadCount)(ProfileEvents events);
    /**
     * Returns an array of thread IDs known to a ProfileEvents instance.
     *
     * @param events The ProfileEvents instance returned from getLastProfileEvents().
     * @returns An array of thread IDs. The size of the array is returned by getProfileThreadCount(). The thread IDs
     * are comparable with `GetCurrentThreadID()` on Windows and `gettid()` on Linux.
     */
    uint64_t const*(CARB_ABI* getProfileThreadIds)(ProfileEvents events);
    /**
     * Destroys a ProfileEvents instance.
     *
     * The data returned getLastProfileEventsData() and getProfileThreadIds() must not be referenced after this function
     * is called.
     *
     * @param events The ProfileEvents instance returned from getLastProfileEvents().
     */
    void(CARB_ABI* releaseLastProfileEvents)(ProfileEvents events);
    /**
     * Returns the thread ID that called \ref markFrameEnd() event.
     *
     * @param events The ProfileEvents instance returned from getLastProfileEvents().
     * @returns The thread ID that last called \ref markFrameEnd(). This thread ID is comparable with
     * `GetCurrentThreadID()` on Windows and `gettid()` on Linux.
     */
    uint64_t(CARB_ABI* getMainThreadId)(ProfileEvents events);
    /**
     * Marks the end of a frame's profile events.
     *
     * After this call, the previous frame's profile events are available via \ref getLastProfileEvents().
     */
    void(CARB_ABI* markFrameEnd)();
};
} // namespace profiler
} // namespace carb
|
omniverse-code/kit/include/carb/profiler/ProfilerBindingsPython.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "../Framework.h"
#include "IProfileMonitor.h"
#include <memory>
#include <string>
#include <vector>
using namespace pybind11::literals;
namespace carb
{
namespace profiler
{
namespace
{
/** RAII holder for one frame's profile-event snapshot.
 *
 *  Acquires the snapshot via IProfileMonitor::getLastProfileEvents() on construction and
 *  releases it via IProfileMonitor::releaseLastProfileEvents() on destruction. Move-only.
 *
 *  Fix: move constructor and move assignment are now marked `noexcept` (they only shuffle
 *  raw pointers); without it, standard containers would refuse to move this type during
 *  reallocation (clang-tidy performance-noexcept-move-constructor).
 */
class ScopedProfileEvents
{
public:
    /** Acquires the previous frame's events from @p profileMonitor (may not be `nullptr`). */
    ScopedProfileEvents(const IProfileMonitor* profileMonitor) : m_profileMonitor(profileMonitor)
    {
        m_profileEvents = m_profileMonitor->getLastProfileEvents();
    }
    //! Move constructor: takes over ownership of the snapshot from @p rhs.
    ScopedProfileEvents(ScopedProfileEvents&& rhs) noexcept
        : m_profileEvents(rhs.m_profileEvents), m_profileMonitor(rhs.m_profileMonitor)
    {
        rhs.m_profileEvents = nullptr;
    }
    //! Releases the snapshot back to the monitor, if still owned.
    ~ScopedProfileEvents()
    {
        if (m_profileEvents)
            m_profileMonitor->releaseLastProfileEvents(m_profileEvents);
    }
    //! Move assignment: exchanges ownership with @p rhs, so any snapshot previously held
    //! by `*this` is released when @p rhs is destroyed.
    ScopedProfileEvents& operator=(ScopedProfileEvents&& rhs) noexcept
    {
        std::swap(m_profileEvents, rhs.m_profileEvents);
        std::swap(m_profileMonitor, rhs.m_profileMonitor);
        return *this;
    }
    CARB_PREVENT_COPY(ScopedProfileEvents);
    //! The monitor interface this snapshot was taken from.
    const IProfileMonitor* mon() const
    {
        return m_profileMonitor;
    }
    //! Implicit conversion so this object can be passed directly to IProfileMonitor functions.
    operator ProfileEvents() const
    {
        return m_profileEvents;
    }
private:
    ProfileEvents m_profileEvents; //!< Owned snapshot; `nullptr` once moved-from.
    const IProfileMonitor* m_profileMonitor; //!< Producer of @ref m_profileEvents.
};
// Registers the carb.profiler python bindings on pybind11 module `m`.
// Pattern used throughout: calls into the profiler release the GIL
// (py::gil_scoped_release); python object construction happens with the GIL held.
inline void definePythonModule(py::module& m)
{
    using namespace carb::profiler;
    m.doc() = "pybind11 carb.profiler bindings";
    py::enum_<InstantType>(m, "InstantType").value("THREAD", InstantType::Thread).value("PROCESS", InstantType::Process)
        /**/;
    py::enum_<FlowType>(m, "FlowType").value("BEGIN", FlowType::Begin).value("END", FlowType::End)
        /**/;
    // True when a profiler backend has been registered (g_carbProfiler non-null).
    m.def("is_profiler_active", []() -> bool { return (g_carbProfiler != nullptr); },
          py::call_guard<py::gil_scoped_release>());
    m.def("supports_dynamic_source_locations",
          []() -> bool { return (g_carbProfiler && g_carbProfiler->supportsDynamicSourceLocations()); },
          py::call_guard<py::gil_scoped_release>());
    // This is an extended helper with location, the shorter `begin` method is defined in __init__.py
    m.def("begin_with_location",
          [](uint64_t mask, std::string name, std::string functionStr, std::string filepathStr, uint32_t line) {
              if (g_carbProfiler)
              {
                  // Fallback source location used when the backend cannot take dynamic strings.
                  static carb::profiler::StaticStringType sfunction{ g_carbProfiler->registerStaticString("Py::func") };
                  static carb::profiler::StaticStringType sfilepath{ g_carbProfiler->registerStaticString("Py::code") };
                  auto function = sfunction, filepath = sfilepath;
                  if (g_carbProfiler->supportsDynamicSourceLocations())
                  {
                      // NOTE(review): StaticStringType here wraps the pointer of a std::string
                      // parameter that only lives for this call; presumably backends that report
                      // supportsDynamicSourceLocations() copy the string immediately -- verify.
                      if (!functionStr.empty())
                      {
                          function = carb::profiler::StaticStringType(functionStr.c_str());
                      }
                      if (!filepathStr.empty())
                      {
                          filepath = carb::profiler::StaticStringType(filepathStr.c_str());
                      }
                  }
                  uint32_t linenumber = line;
                  g_carbProfiler->beginDynamic(mask, function, filepath, linenumber, "%s", name.c_str());
              }
          },
          py::arg("mask"), py::arg("name"), py::arg("function") = "", py::arg("filepath") = "", py::arg("lineno") = 0,
          py::call_guard<py::gil_scoped_release>());
    m.def("end",
          [](uint64_t mask) {
              if (g_carbProfiler)
              {
                  g_carbProfiler->end(mask);
              }
          },
          py::arg("mask"), py::call_guard<py::gil_scoped_release>());
    defineInterfaceClass<IProfiler>(m, "IProfiler", "acquire_profiler_interface")
        // .def("get_item", wrapInterfaceFunction(&IDictionary::getItem), py::arg("base_item"), py::arg("path") = "",
        // py::return_value_policy::reference)
        .def("startup", [](IProfiler* self) { self->startup(); }, py::call_guard<py::gil_scoped_release>())
        .def("shutdown", [](IProfiler* self) { self->shutdown(); }, py::call_guard<py::gil_scoped_release>())
        .def("set_capture_mask", [](IProfiler* self, uint64_t mask) { return self->setCaptureMask(mask); },
             py::arg("mask"), py::call_guard<py::gil_scoped_release>())
        .def("get_capture_mask", [](IProfiler* self) { return self->getCaptureMask(); },
             py::call_guard<py::gil_scoped_release>())
        .def("begin",
             [](IProfiler* self, uint64_t mask, std::string name) {
                 // Zones opened from python all share one static "pyfunc"/"python" location.
                 static auto function = self->registerStaticString("pyfunc");
                 static auto file = self->registerStaticString("python");
                 self->beginDynamic(mask, function, file, 1, "%s", name.c_str());
             },
             py::arg("mask"), py::arg("name"), py::call_guard<py::gil_scoped_release>())
        .def("frame", [](IProfiler* self, uint64_t mask, const char* name) { self->frameDynamic(mask, "%s", name); },
             py::call_guard<py::gil_scoped_release>())
        .def("end", [](IProfiler* self, uint64_t mask) { self->end(mask); }, py::arg("mask"),
             py::call_guard<py::gil_scoped_release>())
        .def("value_float",
             [](IProfiler* self, uint64_t mask, float value, std::string name) {
                 self->valueFloatDynamic(mask, value, "%s", name.c_str());
             },
             py::arg("mask"), py::arg("value"), py::arg("name"), py::call_guard<py::gil_scoped_release>())
        .def("value_int",
             [](IProfiler* self, uint64_t mask, int32_t value, std::string name) {
                 self->valueIntDynamic(mask, value, "%s", name.c_str());
             },
             py::arg("mask"), py::arg("value"), py::arg("name"), py::call_guard<py::gil_scoped_release>())
        .def("value_uint",
             [](IProfiler* self, uint64_t mask, uint32_t value, std::string name) {
                 self->valueUIntDynamic(mask, value, "%s", name.c_str());
             },
             py::arg("mask"), py::arg("value"), py::arg("name"), py::call_guard<py::gil_scoped_release>())
        .def("instant",
             [](IProfiler* self, uint64_t mask, InstantType type, const char* name) {
                 static auto function = self->registerStaticString("pyfunc");
                 static auto file = self->registerStaticString("python");
                 self->emitInstantDynamic(mask, function, file, 1, type, "%s", name);
             },
             py::call_guard<py::gil_scoped_release>())
        .def("flow",
             [](IProfiler* self, uint64_t mask, FlowType type, uint64_t id, const char* name) {
                 static auto function = self->registerStaticString("pyfunc");
                 static auto file = self->registerStaticString("python");
                 self->emitFlowDynamic(mask, function, file, 1, type, id, "%s", name);
             },
             py::call_guard<py::gil_scoped_release>());
    /**/;
    py::class_<ScopedProfileEvents>(m, "ProfileEvents", R"(Profile Events holder)")
        .def("get_main_thread_id",
             [](const ScopedProfileEvents& events) { return events.mon()->getMainThreadId(events); },
             py::call_guard<py::gil_scoped_release>())
        .def("get_profile_thread_ids",
             [](const ScopedProfileEvents& profileEvents) {
                 size_t threadCount;
                 const uint64_t* ids;
                 {
                     // Query the monitor without the GIL; re-acquire it (scope exit) before
                     // building the python tuple below.
                     py::gil_scoped_release nogil;
                     const IProfileMonitor* monitor = profileEvents.mon();
                     threadCount = monitor->getProfileThreadCount(profileEvents);
                     ids = monitor->getProfileThreadIds(profileEvents);
                 }
                 py::tuple threadIds(threadCount);
                 for (size_t i = 0; i < threadCount; i++)
                 {
                     threadIds[i] = ids[i];
                 }
                 return threadIds;
             })
        .def("get_profile_events",
             [](const ScopedProfileEvents& profileEvents, uint64_t threadId) {
                 size_t eventCount;
                 ProfileEvent* events;
                 uint32_t validEventCount{ 0 };
                 {
                     py::gil_scoped_release nogil;
                     const IProfileMonitor* monitor = profileEvents.mon();
                     eventCount = monitor->getLastProfileEventCount(profileEvents);
                     events = monitor->getLastProfileEventsData(profileEvents);
                     // thread_id == 0 means "all threads"; otherwise pre-count matching events
                     // so the result tuple can be sized up front.
                     if (threadId == 0)
                     {
                         validEventCount = static_cast<uint32_t>(eventCount);
                     }
                     else
                     {
                         for (size_t i = 0; i < eventCount; i++)
                         {
                             if (events[i].threadId == threadId)
                                 validEventCount++;
                         }
                     }
                 }
                 py::tuple nodeList(validEventCount);
                 validEventCount = 0;
                 for (size_t i = 0; i < eventCount; i++)
                 {
                     if (threadId != 0 && events[i].threadId != threadId)
                         continue;
                     nodeList[validEventCount++] =
                         py::dict("name"_a = events[i].eventName, "thread_id"_a = events[i].threadId,
                                  "start_time"_a = events[i].startTime, "duration"_a = events[i].timeInMs,
                                  "indent"_a = events[i].level);
                 }
                 return nodeList;
             },
             py::arg("thread_id") = 0)
        /**/;
    defineInterfaceClass<IProfileMonitor>(m, "IProfileMonitor", "acquire_profile_monitor_interface")
        .def("get_last_profile_events", [](const IProfileMonitor* monitor) { return ScopedProfileEvents(monitor); },
             py::call_guard<py::gil_scoped_release>())
        .def("mark_frame_end", [](const IProfileMonitor* monitor) { return monitor->markFrameEnd(); },
             py::call_guard<py::gil_scoped_release>())
        /**/;
}
} // namespace
} // namespace profiler
} // namespace carb
|
omniverse-code/kit/include/carb/profiler/IProfiler.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.profiler interface definition file.
#pragma once
#include "../Interface.h"
#include <atomic>
#include <cstdint>
namespace carb
{
//! Namespace for *carb.profiler* and related utilities.
namespace profiler
{
constexpr uint64_t kCaptureMaskNone = 0; ///< Captures no events, effectively disabling the profiler.
constexpr uint64_t kCaptureMaskAll = (uint64_t)-1; ///< Captures all events.
constexpr uint64_t kCaptureMaskDefault = uint64_t(1); ///< If zero is provided to an event function, it becomes this.
constexpr uint64_t kCaptureMaskProfiler = uint64_t(1) << 63; ///< The mask used by the profiler for profiling itself.
/// A type representing a static string returned by IProfiler::registerStaticString().
using StaticStringType = size_t;
/// Returned as an error by IProfiler::registerStaticString() if the string could not be registered.
constexpr StaticStringType kInvalidStaticString = StaticStringType(0);
/// An opaque ID returned by IProfiler::beginStatic() / IProfiler::beginDynamic() that should be returned in
/// IProfiler::endEx() to validate that the zone was closed properly.
using ZoneId = size_t;
/// A marker that is returned by IProfiler::beginStatic() / IProfiler::beginDynamic() on error and can be passed to
/// IProfiler::endEx() to prevent zone validation checking.
constexpr ZoneId kUnknownZoneId = 0;
/// A marker returned by IProfiler::beginStatic() / IProfiler::beginDynamic() to indicate that the zone
/// should be discarded, typically because it doesn't match the current capture mask.
constexpr ZoneId kNoZoneId = ZoneId(-1);
/// The type of flow event passed to IProfiler::emitFlowStatic() / IProfiler::emitFlowDynamic(). Typically used only by
/// profiler macros.
enum class FlowType : uint8_t
{
    Begin, ///< A flow begin point.
    End ///< A flow end point.
};
/// The type of instant event passed to IProfiler::emitInstantStatic() / IProfiler::emitInstantDynamic().
enum class InstantType : uint8_t
{
    Thread, ///< Draws a vertical line through the entire process.
    Process ///< Similar to a thread profile zone with zero duration.
};
/// ID for a GPU context created with IProfiler::createGpuContext
using GpuContextId = uint8_t;
/// Special value to indicate that a GPU context ID is invalid.
constexpr uint8_t kInvalidGpuContextId = (uint8_t)-1;
/// ID for a Lockable context created with IProfiler::createLockable
using LockableId = uint32_t;
/// Special value to indicate that a LockableId is invalid.
constexpr uint32_t kInvalidLockableId = (uint32_t)-1;
/// The type of lockable operation event.
enum class LockableOperationType : uint8_t
{
    BeforeLock, ///< This notes on the timeline immediately before locking a non shared lock
    AfterLock, ///< This notes on the timeline immediately after locking a non shared lock
    AfterUnlock, ///< This notes on the timeline immediately after unlocking a non shared lock
    AfterSuccessfulTryLock, ///< This notes on the timeline immediately after successfully try locking a non shared lock
    BeforeLockShared, ///< This notes on the timeline immediately before locking a shared lock
    AfterLockShared, ///< This notes on the timeline immediately after locking a shared lock
    AfterUnlockShared, ///< This notes on the timeline immediately after unlocking a shared lock
    AfterSuccessfulTryLockShared, ///< This notes on the timeline immediately after successfully try locking a shared
                                  ///< lock
};
//! A callback used for \ref IProfiler::setMaskCallback(). Typically handled automatically by
//! \ref carb::profiler::registerProfilerForClient().
using MaskCallbackFn = void (*)(uint64_t);
/**
* Defines the profiler system that is associated with the Framework.
*
* It is not recommended to use this interface directly, rather use macros provided in Profile.h, such as
* @ref CARB_PROFILE_ZONE().
*
* Event names are specified as string which can have formatting, which provides string behavior hints, but whether
* to use those hints is up to the profiler backend implementation.
*/
struct IProfiler
{
    CARB_PLUGIN_INTERFACE("carb::profiler::IProfiler", 1, 4)
    /**
     * Starts up the profiler for use.
     */
    void(CARB_ABI* startup)();
    /**
     * Shuts down the profiler and cleans up resources.
     */
    void(CARB_ABI* shutdown)();
    /**
     * Set capture mask. Capture mask provides a way to filter out certain profiling events.
     * Condition (eventMask & captureMask) == eventMask is evaluated, and if true, event
     * is recorded. The default capture mask is kCaptureMaskAll
     *
     * @note Calling from multiple threads is not recommended as threads will overwrite each other's values. Calls to
     * this function should be serialized.
     *
     * @warning Changing the capture mask after the profiler has been started causes undefined behavior.
     *
     * @param mask Capture mask.
     * @returns the previous Capture mask.
     */
    uint64_t(CARB_ABI* setCaptureMask)(const uint64_t mask);
    /**
     * Gets the current capture mask
     *
     * @returns The current capture mask
     */
    uint64_t(CARB_ABI* getCaptureMask)();
    /**
     * Starts the profiling event. This event could be a fiber-based event (i.e. could yield and resume on
     * another thread) if tasking scheduler provides proper `startFiber`/`stopFiber` calls.
     *
     * @param mask Event capture mask.
     * @param function Static string (see @ref registerStaticString()) of the function where the profile zone is located
     * (usually `__func__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param file Static string (see @ref registerStaticString()) of the filename where the profile zone was started
     * (usually `__FILE__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param line Line number in the file where the profile zone was started (usually __LINE__).
     * @param nameFmt The event name format, followed by args. For beginStatic() this must be a \ref StaticStringType
     * from registerStaticString().
     * @returns An opaque ZoneId that should be passed to endEx().
     */
    ZoneId(CARB_ABI* beginStatic)(
        const uint64_t mask, StaticStringType function, StaticStringType file, int line, StaticStringType nameFmt);
    //! @copydoc IProfiler::beginStatic()
    ZoneId(CARB_ABI* beginDynamic)(
        const uint64_t mask, StaticStringType function, StaticStringType file, int line, const char* nameFmt, ...)
        CARB_PRINTF_FUNCTION(5, 6);
    /**
     * Stops the profiling event. This event could be a fiber-based event (i.e. could yield and resume on
     * another thread) if tasking scheduler provides proper `startFiber`/`stopFiber` calls.
     *
     * @warning This call is deprecated. Please use endEx() instead. This function will not be removed
     * but should also not be called in new code.
     *
     * @param mask Event capture mask.
     */
    void(CARB_ABI* end)(const uint64_t mask);
    /**
     * Inserts a frame marker for the calling thread in the profiling output, for profilers that support frame markers.
     *
     * @note The name provided below must be the same for each set of frames, and called each time from the same thread.
     * For example you might have main thread frames that all are named "frame" and GPU frames that are named "GPU
     * frame". Some profilers (i.e. profiler-cpu to Tracy conversion) require that the name contain the word "frame."
     *
     * @param mask Deprecated and ignored for frame events.
     * @param nameFmt The frame set name format, followed by args. For frameStatic() this must be a
     * \ref StaticStringType from registerStaticString().
     */
    void(CARB_ABI* frameStatic)(const uint64_t mask, StaticStringType nameFmt);
    /// @copydoc frameStatic
    void(CARB_ABI* frameDynamic)(const uint64_t mask, const char* nameFmt, ...) CARB_PRINTF_FUNCTION(2, 3);
    /**
     * Send floating point value to the profiler.
     *
     * @param mask Value capture mask.
     * @param value Value.
     * @param valueFmt The value name format, followed by args. For valueFloatStatic() this must be a
     * \ref StaticStringType
     * from registerStaticString().
     */
    void(CARB_ABI* valueFloatStatic)(const uint64_t mask, float value, StaticStringType valueFmt);
    /// @copydoc valueFloatStatic
    void(CARB_ABI* valueFloatDynamic)(const uint64_t mask, float value, const char* valueFmt, ...)
        CARB_PRINTF_FUNCTION(3, 4);
    /**
     * Send signed integer value to the profiler.
     *
     * @param mask Value capture mask.
     * @param value Value.
     * @param valueFmt The value name format, followed by args. For valueIntStatic() this must be a
     * \ref StaticStringType from registerStaticString().
     */
    void(CARB_ABI* valueIntStatic)(const uint64_t mask, int32_t value, StaticStringType valueFmt);
    /// @copydoc valueIntStatic
    void(CARB_ABI* valueIntDynamic)(const uint64_t mask, int32_t value, const char* valueFmt, ...)
        CARB_PRINTF_FUNCTION(3, 4);
    /**
     * Send unsigned integer value to the profiler.
     *
     * @param mask Value capture mask.
     * @param value Value.
     * @param valueFmt The value name format, followed by args. For valueUIntStatic() this must be a \ref
     * StaticStringType from registerStaticString().
     */
    void(CARB_ABI* valueUIntStatic)(const uint64_t mask, uint32_t value, StaticStringType valueFmt);
    /// @copydoc valueUIntStatic
    void(CARB_ABI* valueUIntDynamic)(const uint64_t mask, uint32_t value, const char* valueFmt, ...)
        CARB_PRINTF_FUNCTION(3, 4);
    /**
     * Sets a threads name.
     *
     * @param tidOrZero The thread ID to name, or 0 to name the current thread.
     * @param threadName The thread name format, followed by args. For nameThreadStatic() this must be a
     * \ref StaticStringType from registerStaticString().
     */
    void(CARB_ABI* nameThreadStatic)(uint64_t tidOrZero, StaticStringType threadName);
    /// @copydoc nameThreadStatic
    void(CARB_ABI* nameThreadDynamic)(uint64_t tidOrZero, const char* threadName, ...) CARB_PRINTF_FUNCTION(2, 3);
    /**
     * Checks if the profiler supports dynamic source locations.
     *
     * Dynamic source locations allow the `file` and `func` parameters to functions such as beginStatic() and
     * beginDynamic() to be a transient non-literal string on the heap or stack.
     *
     * @returns `true` if dynamic source locations are supported; `false` if they are not supported.
     */
    bool(CARB_ABI* supportsDynamicSourceLocations)();
    /**
     * Helper functions to send a arbitrary type to the profiler.
     *
     * @tparam T The type of the parameter to send to the profiler.
     * @param mask Value capture mask.
     * @param value Value.
     * @param valueFmt The value name format, followed by args. For valueStatic() this must be a \ref StaticStringType
     * from registerStaticString().
     */
    template <typename T>
    void valueStatic(uint64_t mask, T value, StaticStringType valueFmt);
    /// @copydoc valueStatic
    /// @param args Additional arguments that correspond to printf-style format string @p valueFmt.
    template <typename T, typename... Args>
    void valueDynamic(uint64_t mask, T value, const char* valueFmt, Args&&... args);
    /**
     * Helper function for registering a static string.
     *
     * @note The profiler must copy all strings. By registering a static string, you are making a contract with the
     * profiler that the string at the provided address will never change. This allows the string to be passed by
     * pointer as an optimization without needing to copy the string.
     *
     * @note This function should be called only once per string. The return value should be captured in a variable and
     * passed to the static function such as beginStatic(), frameStatic(), valueStatic(), etc.
     *
     * @param string The static string to register. This must be a string literal or otherwise a string whose address
     * will never change.
     * @returns A \ref StaticStringType that represents the registered static string. If the string could not be
     * registered, kInvalidStaticString is returned.
     */
    StaticStringType(CARB_ABI* registerStaticString)(const char* string);
    /**
     * Send memory allocation event to the profiler for custom pools.
     *
     * @param mask Value capture mask.
     * @param ptr Memory address.
     * @param size Amount of bytes allocated.
     * @param name Static or formatted string which contains the name of the pool.
     */
    void(CARB_ABI* allocNamedStatic)(const uint64_t mask, const void* ptr, uint64_t size, StaticStringType name);
    /// @copydoc allocNamedStatic
    void(CARB_ABI* allocNamedDynamic)(const uint64_t mask, const void* ptr, uint64_t size, const char* nameFmt, ...)
        CARB_PRINTF_FUNCTION(4, 5);
    /**
     * Send memory free event to the profiler for custom pools.
     *
     * @param mask Value capture mask.
     * @param ptr Memory address.
     * @param valueFmt Static or formatted string which contains the name of the pool.
     */
    void(CARB_ABI* freeNamedStatic)(const uint64_t mask, const void* ptr, StaticStringType valueFmt);
    /// @copydoc freeNamedStatic
    void(CARB_ABI* freeNamedDynamic)(const uint64_t mask, const void* ptr, const char* nameFmt, ...)
        CARB_PRINTF_FUNCTION(3, 4);
    /**
     * Send memory allocation event to the profiler on the default pool.
     *
     * @param mask Value capture mask.
     * @param ptr Memory address.
     * @param size Amount of bytes allocated.
     */
    void(CARB_ABI* allocStatic)(const uint64_t mask, const void* ptr, uint64_t size);
    /**
     * Send memory free event to the profiler on the default pool.
     *
     * @param mask Value capture mask.
     * @param ptr Memory address.
     */
    void(CARB_ABI* freeStatic)(const uint64_t mask, const void* ptr);
    /**
     * Stops the profiling event that was initiated by beginStatic() or beginDynamic().
     *
     * @param mask Event capture mask.
     * @param zoneId The ZoneId returned from beginStatic() or beginDynamic().
     */
    void(CARB_ABI* endEx)(const uint64_t mask, ZoneId zoneId);
    /**
     * Records an instant event on a thread's timeline at the current time. Generally not used directly; instead use the
     * @ref CARB_PROFILE_EVENT() macro.
     *
     * @param mask Event capture mask.
     * @param function Static string (see @ref registerStaticString()) of the name of the function containing this event
     * (typically `__func__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param file Static string (see @ref registerStaticString()) of the name of the source file containing this event
     * (typically `__FILE__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param line The line number in @p file containing this event (typically `__LINE__`).
     * @param type The type of instant event.
     * @param nameFmt The name for the event.
     */
    void(CARB_ABI* emitInstantStatic)(const uint64_t mask,
                                      StaticStringType function,
                                      StaticStringType file,
                                      int line,
                                      InstantType type,
                                      StaticStringType nameFmt);
    /// @copydoc emitInstantStatic
    /// @note This function is slower than using emitInstantStatic().
    /// @param ... `printf`-style varargs for @p nameFmt.
    void(CARB_ABI* emitInstantDynamic)(const uint64_t mask,
                                       StaticStringType function,
                                       StaticStringType file,
                                       int line,
                                       InstantType type,
                                       const char* nameFmt,
                                       ...) CARB_PRINTF_FUNCTION(6, 7);
    /**
     * Puts a flow event on the timeline at the current line. Generally not used directly; instead use the
     * @ref CARB_PROFILE_FLOW_BEGIN() and @ref CARB_PROFILE_FLOW_END() macros.
     *
     * Flow events draw an arrow from one point (the @ref FlowType::Begin location) to another point (the @ref
     * FlowType::End location). These two points can be in different threads but must have a matching @p id field. Only
     * the @ref FlowType::Begin event must specify a @p name. The @p id field is meant to be unique across profiler
     * runs, but may be reused as long as the @p name field matches across all @ref FlowType::Begin events and events
     * occur on the global timeline as @ref FlowType::Begin followed by @ref FlowType::End.
     *
     * A call with @ref FlowType::Begin will automatically insert an instant event on the current thread's timeline.
     *
     * @param mask Event capture mask.
     * @param function Static string (see @ref registerStaticString()) of the name of the function containing this
     * event. If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref StaticStringType.
     * @param file Static string (see @ref registerStaticString()) of the name of the source file containing this event
     * (typically `__FILE__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param line The line number in @p file containing this event (typically `__LINE__`).
     * @param type The type of flow marker.
     * @param id A unique identifier to tie `Begin` and `End` events together.
     * @param name The name for the flow event. Only required for @ref FlowType::Begin; if specified for
     * @ref FlowType::End it must match exactly or be `nullptr`.
     */
    void(CARB_ABI* emitFlowStatic)(const uint64_t mask,
                                   StaticStringType function,
                                   StaticStringType file,
                                   int line,
                                   FlowType type,
                                   uint64_t id,
                                   StaticStringType name);
    /// @copydoc emitFlowStatic
    /// @note This function is slower than using emitFlowStatic().
    /// @param ... `printf`-style varargs for @p nameFmt.
    void(CARB_ABI* emitFlowDynamic)(const uint64_t mask,
                                    StaticStringType function,
                                    StaticStringType file,
                                    int line,
                                    FlowType type,
                                    uint64_t id,
                                    const char* name,
                                    ...) CARB_PRINTF_FUNCTION(7, 8);
    /**
     * Create a new GPU profiling context that allows injecting timestamps coming from a GPU in a deferred manner
     *
     * @param name name of the context
     * @param correlatedCpuTimestampNs correlated GPU clock timestamp (in nanoseconds)
     * @param correlatedGpuTimestamp correlated GPU clock timestamp (raw value)
     * @param gpuTimestampPeriodNs is the number of nanoseconds required for a GPU timestamp query to be incremented
     * by 1.
     * @param graphicApi string of graphic API used ['vulkan'/'d3d12']
     * @returns a valid ID or kInvalidGpuContextId if creation fails
     */
    GpuContextId(CARB_ABI* createGpuContext)(const char* name,
                                             int64_t correlatedCpuTimestampNs,
                                             int64_t correlatedGpuTimestamp,
                                             float gpuTimestampPeriodNs,
                                             const char* graphicApi);
    /**
     * Destroy a previously created GPU Context
     *
     * @param contextId id of the context, returned by createGpuContext
     */
    void(CARB_ABI* destroyGpuContext)(GpuContextId contextId);
    /**
     * Submit context calibration information that allows correlating CPU and GPU clocks
     *
     * @param contextId id of the context, returned by createGpuContext
     * @param correlatedCpuTimestampNs the new CPU timestamp at the time of correlation (in nanoseconds)
     * @param previousCorrelatedCpuTimestampNs the CPU timestamp at the time of previous correlation (in nanoseconds)
     * @param correlatedGpuTimestamp the new raw GPU timestamp at the time of correlation
     */
    bool(CARB_ABI* calibrateGpuContext)(GpuContextId contextId,
                                        int64_t correlatedCpuTimestampNs,
                                        int64_t previousCorrelatedCpuTimestampNs,
                                        int64_t correlatedGpuTimestamp);
    /**
     * Record the beginning of a new GPU timestamp query
     *
     * @param mask Event capture mask.
     * @param functionName Static string (see @ref registerStaticString()) of the name of the function containing this
     * event (typically `__func__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param fileName Static string (see @ref registerStaticString()) of the name of the source file containing this
     * event (typically `__FILE__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param line The line number in @p fileName containing this event (typically `__LINE__`).
     * @param contextId the id of the context as returned by @ref createGpuContext
     * @param queryId unique query id (for identification when passing to setGpuQueryValue)
     * @param name The name for the event.
     */
    void(CARB_ABI* beginGpuQueryStatic)(const uint64_t mask,
                                        StaticStringType functionName,
                                        StaticStringType fileName,
                                        int line,
                                        GpuContextId contextId,
                                        uint32_t queryId,
                                        StaticStringType name);
    //! @copydoc IProfiler::beginGpuQueryStatic()
    void(CARB_ABI* beginGpuQueryDynamic)(const uint64_t mask,
                                         StaticStringType functionName,
                                         StaticStringType fileName,
                                         int line,
                                         GpuContextId contextId,
                                         uint32_t queryId,
                                         const char* nameFmt,
                                         ...) CARB_PRINTF_FUNCTION(7, 8);
    /**
     * Record the end of a new GPU timestamp query
     *
     * @param mask Event capture mask.
     * @param contextId the id of the context as returned by @ref createGpuContext
     * @param queryId unique query id (for identification when passing to setGpuQueryValue)
     */
    void(CARB_ABI* endGpuQuery)(const uint64_t mask, GpuContextId contextId, uint32_t queryId);
    /**
     * Set the value we've received from the GPU for a query (begin or end) we've issued in the past
     *
     * @param mask Event capture mask.
     * @param contextId the id of the context as returned by @ref createGpuContext
     * @param queryId unique query id specified at begin/end time
     * @param gpuTimestamp raw GPU timestamp value
     */
    void(CARB_ABI* setGpuQueryValue)(const uint64_t mask, GpuContextId contextId, uint32_t queryId, int64_t gpuTimestamp);
    /**
     * Create a lockable context which we can use to tag lock operation
     *
     * @param mask Event capture mask. If the mask does not match the current capture mask, the lockable is not created
     * and \ref kInvalidLockableId is returned.
     * @param name context name
     * @param isSharedLock if this shared for a shared lock
     * @param functionName Static string (see @ref registerStaticString()) of the name of the function containing this
     * event (typically `__func__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param fileName Static string (see @ref registerStaticString()) of the name of the source file containing this
     * event (typically `__FILE__`). If supportsDynamicSourceLocations(), this may be a `const char*` cast to @ref
     * StaticStringType.
     * @param line The line number in @p fileName containing this event (typically `__LINE__`).
     */
    LockableId(CARB_ABI* createLockable)(const uint64_t mask,
                                         const char* name,
                                         const bool isSharedLock,
                                         StaticStringType functionName,
                                         StaticStringType fileName,
                                         int line);
    /**
     * Destroy a lockable context
     *
     * @param lockableId the id of the lockable as returned by @ref createLockable
     */
    void(CARB_ABI* destroyLockable)(LockableId lockableId);
    /**
     * Record a lockable operation
     *
     * @param lockableId the id of the lockable as returned by @ref createLockable
     * @param operation which lock operation to tag
     */
    void(CARB_ABI* lockableOperation)(LockableId lockableId, LockableOperationType operation);
    /**
     * Used by \ref carb::profiler::registerProfilerForClient() and \ref carb::profiler::deregisterProfilerForClient()
     * to register a callback for keeping the profiler mask up to date.
     *
     * @param func The callback function to register.
     * @param enabled \c true to register the callback, \c false to unregister the callback.
     * @returns The current profiler mask.
     */
    uint64_t(CARB_ABI* setMaskCallback)(MaskCallbackFn func, bool enabled);
};
#ifndef DOXYGEN_BUILD
namespace detail
{
//! Trait that maps a value type T to the matching typed IProfiler entry point
//! (valueFloat*/valueInt*/valueUInt*). Only specialized for float, int32_t and
//! uint32_t; using any other type with IProfiler::valueStatic()/valueDynamic()
//! fails to compile because this primary template is declared but not defined.
template <typename T>
class ValueInvoker;
//! float values are routed to IProfiler::valueFloatStatic()/valueFloatDynamic().
template <>
class ValueInvoker<float>
{
public:
    //! Emits a value event whose name is a registered static string.
    static void invokeStatic(IProfiler& profiler, uint64_t mask, float value, StaticStringType valueFmt)
    {
        profiler.valueFloatStatic(mask, value, valueFmt);
    }
    //! Emits a value event with a printf-style formatted name.
    template <typename... Args>
    static void invokeDynamic(IProfiler& profiler, uint64_t mask, float value, const char* valueFmt, Args&&... args)
    {
        profiler.valueFloatDynamic(mask, value, valueFmt, std::forward<Args>(args)...);
    }
};
//! int32_t values are routed to IProfiler::valueIntStatic()/valueIntDynamic().
template <>
class ValueInvoker<int32_t>
{
public:
    //! Emits a value event whose name is a registered static string.
    static void invokeStatic(IProfiler& profiler, uint64_t mask, int32_t value, StaticStringType valueFmt)
    {
        profiler.valueIntStatic(mask, value, valueFmt);
    }
    //! Emits a value event with a printf-style formatted name.
    template <typename... Args>
    static void invokeDynamic(IProfiler& profiler, uint64_t mask, int32_t value, const char* valueFmt, Args&&... args)
    {
        profiler.valueIntDynamic(mask, value, valueFmt, std::forward<Args>(args)...);
    }
};
//! uint32_t values are routed to IProfiler::valueUIntStatic()/valueUIntDynamic().
template <>
class ValueInvoker<uint32_t>
{
public:
    //! Emits a value event whose name is a registered static string.
    static void invokeStatic(IProfiler& profiler, uint64_t mask, uint32_t value, StaticStringType valueFmt)
    {
        profiler.valueUIntStatic(mask, value, valueFmt);
    }
    //! Emits a value event with a printf-style formatted name.
    template <typename... Args>
    static void invokeDynamic(IProfiler& profiler, uint64_t mask, uint32_t value, const char* valueFmt, Args&&... args)
    {
        profiler.valueUIntDynamic(mask, value, valueFmt, std::forward<Args>(args)...);
    }
};
} // namespace detail
#endif
template <typename T>
inline void IProfiler::valueStatic(uint64_t mask, T value, StaticStringType valueFmt)
{
using ValueInvoker = typename detail::ValueInvoker<T>;
ValueInvoker::invokeStatic(*this, mask, value, valueFmt);
}
template <typename T, typename... Args>
inline void IProfiler::valueDynamic(uint64_t mask, T value, const char* valueFmt, Args&&... args)
{
using ValueInvoker = typename detail::ValueInvoker<T>;
ValueInvoker::invokeDynamic(*this, mask, value, valueFmt, std::forward<Args>(args)...);
}
} // namespace profiler
} // namespace carb
/**
 * Global pointer used to store the @ref carb::profiler::IProfiler interface.
 *
 * A copy of this pointer is stored in each Carbonite client (i.e. plugin/app). For applications, this pointer is
 * declared by @ref OMNI_APP_GLOBALS. For plugins, this pointer is declared by @ref CARB_PROFILER_GLOBALS via @ref
 * OMNI_MODULE_GLOBALS.
 *
 * This pointer is an implementation detail transparent to users. However, a linker error pointing to this variable
 * usually indicates that one of the `_GLOBALS` macros mentioned above was not called.
 */
CARB_WEAKLINK carb::profiler::IProfiler* g_carbProfiler;
/**
 * A global variable used as a cache for the result of \ref carb::profiler::IProfiler::getCaptureMask().
 *
 * \ref carb::profiler::registerProfilerForClient() will register a callback function with the profiler (if supported)
 * that will keep this variable updated. This variable can be checked inline before calling into the IProfiler
 * interface, avoiding the interface call when the mask does not match.
 */
CARB_WEAKLINK std::atomic_uint64_t g_carbProfilerMask;
|
omniverse-code/kit/include/carb/profiler/ProfilerUtils.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.profiler helper utilities.
#pragma once
#include "IProfiler.h"
#include "../cpp/Atomic.h"
#include "../settings/ISettings.h"
#include "../InterfaceUtils.h"
namespace carb
{
namespace profiler
{
//! \cond DEV
namespace detail
{
struct String2
{
carb::profiler::StaticStringType first;
carb::profiler::StaticStringType second;
constexpr String2(StaticStringType first, StaticStringType second) noexcept : first(first), second(second)
{
}
};
constexpr String2 makeString2(StaticStringType first, StaticStringType second) noexcept
{
return String2{ first, second };
}
struct String3
{
carb::profiler::StaticStringType first;
carb::profiler::StaticStringType second;
carb::profiler::StaticStringType third;
constexpr String3(StaticStringType first, StaticStringType second, StaticStringType third) noexcept
: first(first), second(second), third(third)
{
}
};
constexpr String3 makeString3(StaticStringType first, StaticStringType second, StaticStringType third) noexcept
{
return String3{ first, second, third };
}
} // namespace detail
//! \endcond
/**
* Profiler channel which can be configured via \ref carb::settings::ISettings.
*
* @warning Do not use this class directly. Instead, use \ref CARB_PROFILE_DECLARE_CHANNEL().
*/
class Channel final
{
    uint64_t m_mask; // current capture mask; written atomically via setMask()
    bool m_enabled; // whether this channel currently emits events; written atomically via setEnabled()
    const char* m_name; // literal string; used as the key under /profiler/channels
    Channel* m_next; // intrusive singly-linked list of all channels in this module
    // Per-module bookkeeping shared by every Channel instance in this module.
    struct ModuleData
    {
        Channel* head{ nullptr }; // head of the intrusive channel list
        LoadHookHandle onSettingsLoadHandle{ kInvalidLoadHook }; // set while waiting for ISettings to become available
        dictionary::SubscriptionId* changeSubscription{ nullptr }; // change subscription on /profiler/channels
#if CARB_ASSERT_ENABLED
        ~ModuleData()
        {
            // If these weren't unregistered we could crash later
            CARB_ASSERT(onSettingsLoadHandle == kInvalidLoadHook);
            CARB_ASSERT(changeSubscription == nullptr);
        }
#endif
    };
    // Accessor for the per-module static data; a function-local static avoids
    // static-initialization-order issues with file-scope Channel instances.
    static ModuleData& moduleData()
    {
        static ModuleData s_moduleData;
        return s_moduleData;
    }
    // Load hook invoked by the framework when ISettings becomes available; applies settings
    // and then removes itself.
    static void onSettingsLoad(const PluginDesc&, void*)
    {
        // DO NOT USE getCachedInterface here! This is called by a load hook, which can be triggered by
        // getCachedInterface in this module. This means if we were to recursively call getCachedInterface() here, we
        // could hang indefinitely as this thread is the thread responsible for loading the cached interface.
        if (loadSettings(getFramework()->tryAcquireInterface<settings::ISettings>(), true))
        {
            g_carbFramework->removeLoadHook(moduleData().onSettingsLoadHandle);
            moduleData().onSettingsLoadHandle = kInvalidLoadHook;
        }
    }
    // Release hook invoked when ISettings is unloaded; drops the (now dead) subscription handle.
    static void onSettingsUnload(void*, void*)
    {
        // Settings was unloaded. Make sure we no longer have a subscription callback.
        moduleData().changeSubscription = nullptr;
    }
    // Change-subscription callback for the /profiler/channels tree; re-reads the settings of
    // the channel whose "enabled" or "mask" key changed.
    static void onSettingsChange(const dictionary::Item*,
                                 const dictionary::Item* changedItem,
                                 dictionary::ChangeEventType eventType,
                                 void*)
    {
        if (eventType == dictionary::ChangeEventType::eDestroyed)
            return;
        auto dict = getCachedInterface<dictionary::IDictionary>();
        // Only care about elements that can change at runtime.
        const char* name = dict->getItemName(changedItem);
        if (strcmp(name, "enabled") != 0 && strcmp(name, "mask") != 0)
            return;
        // The parent item's name is the channel name.
        loadSettings(
            getCachedInterface<settings::ISettings>(), false, dict->getItemName(dict->getItemParent(changedItem)));
    }
    // Applies /profiler/channels/<name>/{enabled,mask} settings to all channels in this module,
    // or only to @p channelName if given. On @p initial, also installs the change subscription
    // and the ISettings release hook. Returns false if ISettings/IDictionary are unavailable.
    static bool loadSettings(settings::ISettings* settings, bool initial, const char* channelName = nullptr)
    {
        // Only proceed if settings is already initialized
        if (!settings)
            return false;
        auto dict = carb::getCachedInterface<dictionary::IDictionary>();
        if (!dict)
            return false;
        auto root = settings->getSettingsDictionary("/profiler/channels");
        if (root)
        {
            for (Channel* c = moduleData().head; c; c = c->m_next)
            {
                if (channelName && strcmp(c->m_name, channelName) != 0)
                    continue;
                auto channelRoot = dict->getItem(root, c->m_name);
                if (!channelRoot)
                    continue;
                auto enabled = dict->getItem(channelRoot, "enabled");
                if (enabled)
                {
                    c->setEnabled(dict->getAsBool(enabled));
                }
                auto mask = dict->getItem(channelRoot, "mask");
                if (mask)
                {
                    c->setMask(uint64_t(dict->getAsInt64(mask)));
                }
            }
        }
        // Register a change subscription on initial setup if we have any channels.
        if (initial && !moduleData().changeSubscription && moduleData().head)
        {
            moduleData().changeSubscription =
                settings->subscribeToTreeChangeEvents("/profiler/channels", onSettingsChange, nullptr);
            ::g_carbFramework->addReleaseHook(settings, onSettingsUnload, nullptr);
        }
        return true;
    }
public:
    /**
     * Constructor
     *
     * @warning Do not call this directly. Instead use \ref CARB_PROFILE_DECLARE_CHANNEL().
     *
     * @warning Instances of this class must have static storage and module-lifetime, therefore they may only exist at
     * file-level scope, class-level (static) scope, or namespace-level scope only. Anything else is undefined behavior.
     *
     * @param mask The default profiler mask for this channel.
     * @param enabled Whether this channel is enabled by default.
     * @param name A literal string that is used to look up settings keys.
     */
    Channel(uint64_t mask, bool enabled, const char* name) : m_mask(mask), m_enabled(enabled), m_name(name)
    {
        // Add ourselves to the list of channels for this module
        auto& head = moduleData().head;
        m_next = head;
        head = this;
    }
    /**
     * Returns the name of this channel.
     * @returns the channel name.
     */
    const char* getName() const noexcept
    {
        return m_name;
    }
    /**
     * Returns the current mask for this channel.
     * @returns the current mask.
     */
    uint64_t getMask() const noexcept
    {
        return m_mask;
    }
    /**
     * Sets the mask value for *this.
     * @param mask The new profiler mask value.
     */
    void setMask(uint64_t mask) noexcept
    {
        // atomic_ref: may be called from the settings-change callback while other threads read
        cpp::atomic_ref<uint64_t>(m_mask).store(mask, std::memory_order_release);
    }
    /**
     * Returns whether this channel is enabled.
     * @returns \c true if this channel is enabled; \c false otherwise.
     */
    bool isEnabled() const noexcept
    {
        return m_enabled;
    }
    /**
     * Sets *this to enabled or disabled.
     *
     * @param enabled Whether to enable (\c true) or disable (\c false) the channel.
     */
    void setEnabled(bool enabled) noexcept
    {
        // atomic_ref: may be called from the settings-change callback while other threads read
        cpp::atomic_ref<bool>(m_enabled).store(enabled, std::memory_order_release);
    }
    /**
     * Called by profiler::registerProfilerForClient() to initialize all channels.
     *
     * If ISettings is available, it is queried for this module's channel's settings, and a subscription is installed to
     * be notified when settings change. If ISettings is not available, a load hook is installed with the framework in
     * order to be notified if and when ISettings becomes available.
     */
    static void onProfilerRegistered()
    {
        // example-begin acquire-without-init
        // Don't try to load settings, but if it's already available we will load settings from it.
        auto settings = g_carbFramework->tryAcquireExistingInterface<settings::ISettings>();
        // example-end acquire-without-init
        if (!loadSettings(settings, true))
        {
            // If settings isn't available, wait for it to load.
            moduleData().onSettingsLoadHandle =
                g_carbFramework->addLoadHook<settings::ISettings>(nullptr, onSettingsLoad, nullptr);
        }
    }
    /**
     * Called by profiler::deregisterProfilerForClient() to uninitialize all channels.
     *
     * Any load hooks and subscriptions installed with ISettings are removed.
     */
    static void onProfilerUnregistered()
    {
        if (moduleData().onSettingsLoadHandle != kInvalidLoadHook)
        {
            g_carbFramework->removeLoadHook(moduleData().onSettingsLoadHandle);
            moduleData().onSettingsLoadHandle = kInvalidLoadHook;
        }
        if (moduleData().changeSubscription)
        {
            // Don't re-initialize settings if it's already been unloaded (though in this case we should've gotten a
            // callback)
            auto settings = g_carbFramework->tryAcquireExistingInterface<settings::ISettings>();
            CARB_ASSERT(settings);
            if (settings)
            {
                settings->unsubscribeToChangeEvents(moduleData().changeSubscription);
                g_carbFramework->removeReleaseHook(settings, onSettingsUnload, nullptr);
            }
            moduleData().changeSubscription = nullptr;
        }
    }
};
/**
* Helper class that allows to automatically stop profiling upon leaving block.
* @note Typically this is not used by an application. It is generated automatically by the CARB_PROFILE_ZONE() macro.
*/
class ProfileZoneStatic final
{
    const uint64_t m_mask; //!< Capture mask the zone was opened with; passed back to endEx().
    ZoneId m_zoneId; //!< Zone handle, or kNoZoneId when no zone was opened.

public:
    /**
     * Constructor.
     *
     * @param mask Profiling bitmask.
     * @param tup A `String3` of registered static strings for `__func__`, `__FILE__` and event name.
     * @param line Line number in the file where the profile zone was started (usually `__LINE__`).
     */
    ProfileZoneStatic(const uint64_t mask, const ::carb::profiler::detail::String3& tup, int line)
        : m_mask(mask), m_zoneId(kNoZoneId)
    {
        // A zero mask is treated as kCaptureMaskDefault; the zone only opens when the
        // profiler exists and the (effective) mask intersects the cached capture mask.
        const uint64_t effective = mask ? mask : kCaptureMaskDefault;
        if (g_carbProfiler && (effective & g_carbProfilerMask.load(std::memory_order_acquire)) != 0)
        {
            m_zoneId = g_carbProfiler->beginStatic(m_mask, tup.first, tup.second, line, tup.third);
        }
    }
    /**
     * Constructor.
     *
     * @param channel A profiling channel.
     * @param tup A `String3` of registered static strings for `__func__`, `__FILE__` and event name.
     * @param line Line number in the file where the profile zone was started (usually `__LINE__`).
     */
    ProfileZoneStatic(const Channel& channel, const ::carb::profiler::detail::String3& tup, int line)
        : m_mask(channel.getMask()), m_zoneId(kNoZoneId)
    {
        if (g_carbProfiler && channel.isEnabled())
        {
            m_zoneId = g_carbProfiler->beginStatic(m_mask, tup.first, tup.second, line, tup.third);
        }
    }
    /**
     * Destructor. Closes the zone if one was opened.
     */
    ~ProfileZoneStatic()
    {
        if (m_zoneId != kNoZoneId && g_carbProfiler)
            g_carbProfiler->endEx(m_mask, m_zoneId);
    }
};
//! @copydoc ProfileZoneStatic
class ProfileZoneDynamic final
{
    const uint64_t m_mask;
    ZoneId m_zoneId;

public:
    /**
     * Constructor.
     *
     * @param mask Profiling bitmask.
     * @param tup A `String2` of registered static strings for `__func__` and `__FILE__`.
     * @param line Line number in the file where the profile zone was started (usually `__LINE__`).
     * @param nameFmt Profile zone name with printf-style formatting followed by arguments
     * @param args Printf-style arguments used with @p nameFmt.
     */
    template <typename... Args>
    ProfileZoneDynamic(
        const uint64_t mask, const ::carb::profiler::detail::String2& tup, int line, const char* nameFmt, Args&&... args)
        : m_mask(mask)
    {
        // Begin only when a profiler is registered and the (defaulted) mask passes the global capture mask.
        const bool report = g_carbProfiler &&
                            ((mask ? mask : kCaptureMaskDefault) & g_carbProfilerMask.load(std::memory_order_acquire));
        m_zoneId = report ? g_carbProfiler->beginDynamic(
                                m_mask, tup.first, tup.second, line, nameFmt, std::forward<Args>(args)...) :
                            kNoZoneId;
    }

    /**
     * Constructor.
     *
     * @param channel A profiling channel.
     * @param tup A `String2` of registered static strings for `__func__` and `__FILE__`.
     * @param line Line number in the file where the profile zone was started (usually `__LINE__`).
     * @param nameFmt Profile zone name with printf-style formatting followed by arguments
     * @param args Printf-style arguments used with @p nameFmt.
     */
    template <typename... Args>
    ProfileZoneDynamic(const Channel& channel,
                       const ::carb::profiler::detail::String2& tup,
                       int line,
                       const char* nameFmt,
                       Args&&... args)
        : m_mask(channel.getMask())
    {
        // Begin only when a profiler is registered and the channel is enabled.
        const bool report = g_carbProfiler && channel.isEnabled();
        m_zoneId = report ? g_carbProfiler->beginDynamic(
                                m_mask, tup.first, tup.second, line, nameFmt, std::forward<Args>(args)...) :
                            kNoZoneId;
    }

    /**
     * Destructor; ends the zone if one was successfully begun in the constructor.
     */
    ~ProfileZoneDynamic()
    {
        if (m_zoneId != kNoZoneId && g_carbProfiler)
            g_carbProfiler->endEx(m_mask, m_zoneId);
    }
};
} // namespace profiler
} // namespace carb
|
omniverse-code/kit/include/carb/profiler/Profile.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.profiler macros and helpers
#pragma once
#include "../Defines.h"
#include "../Framework.h"
#include "../cpp/Atomic.h"
#include "IProfiler.h"
#include <cstdarg>
#include <cstdio>
#include "ProfilerUtils.h"
/**
* Declares a channel that can be used with the profiler.
*
* Channels can be used in place of a mask for macros such as \ref CARB_PROFILE_ZONE. Channels allow enabling and
* disabling at runtime, or based on a settings configuration.
*
* Channels must have static storage and module lifetime, therefore this macro should be used at file-level, class-level
* or namespace-level scope only. Any other use is undefined behavior.
*
* Channels must be declared in exactly one compilation unit for a given module. References to the channel can be
* accomplished with \ref CARB_PROFILE_EXTERN_CHANNEL for other compilation units that desire to reference the channel.
*
* Channel settings are located under `/profiler/channels/<name>` and may have the following values:
* - `enabled` - (bool) whether this channel is enabled (reports to the profiler) or not
* - `mask` - (uint64_t) the mask used with the profiler
*
* @param name_ A string name for this channel. This is used to look up settings keys for this channel.
* @param defaultMask_ The profiler works with the concept of masks. The profiler must have the capture mask enabled for
* this channel to report to the profiler. A typical value for this could be
* \ref carb::profiler::kCaptureMaskDefault.
* @param defaultEnabled_ Whether this channel is enabled to report to the profiler by default.
* @param symbol_ The symbol name that code would refer to this channel by.
*/
#define CARB_PROFILE_DECLARE_CHANNEL(name_, defaultMask_, defaultEnabled_, symbol_) \
::carb::profiler::Channel symbol_((defaultMask_), (defaultEnabled_), "" name_)
/**
* References a channel declared in another compilation unit.
*
* @param symbol_ The symbol name given to the \ref CARB_PROFILE_DECLARE_CHANNEL
*/
#define CARB_PROFILE_EXTERN_CHANNEL(symbol_) extern ::carb::profiler::Channel symbol_
#if CARB_PROFILING || defined(DOXYGEN_BUILD)
/**
* @defgroup Profiler Helper Macros
*
* All of the following macros do nothing if @ref g_carbProfiler is `nullptr` (i.e.
* carb::profiler::registerProfilerForClient() has not been called).
* @{
*/
# ifndef DOXYGEN_BUILD
// The following are helper macros for the profiler.
# define CARB_PROFILE_IF(cond, true_case, false_case) CARB_PROFILE_IF_HELPER(cond, true_case, false_case)
// Note: CARB_PROFILE_HAS_VARARGS only supports up to 10 args now. If more are desired, increase the sequences below
// and add test cases to TestProfiler.cpp
// This trick is from https://stackoverflow.com/a/36015150/1450686
# if CARB_COMPILER_MSC
# define CARB_PROFILE_HAS_VARARGS(x, ...) CARB_PROFILE_EXPAND_ARGS(CARB_PROFILE_AUGMENT_ARGS(__VA_ARGS__))
# elif CARB_COMPILER_GNUC
# define CARB_PROFILE_HAS_VARARGS(...) \
CARB_PROFILE_ARGCHK_PRIVATE2(0, ##__VA_ARGS__, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)
# else
# error Unsupported Compiler!
# endif
// The following are implementation helpers not intended to be used
# define CARB_PROFILE_IF_HELPER(cond, true_case, false_case) \
CARB_JOIN(CARB_PROFILE_IF_HELPER_, cond)(true_case, false_case)
# define CARB_PROFILE_IF_HELPER_0(true_case, false_case) false_case
# define CARB_PROFILE_IF_HELPER_1(true_case, false_case) true_case
# define CARB_PROFILE_AUGMENT_ARGS(...) unused, __VA_ARGS__
# define CARB_PROFILE_EXPAND_ARGS(...) \
CARB_PROFILE_EXPAND(CARB_PROFILE_ARGCHK_PRIVATE(__VA_ARGS__, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0))
# define CARB_PROFILE_EXPAND(x) x
# define CARB_PROFILE_ARGCHK_PRIVATE(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, count, ...) count
# define CARB_PROFILE_ARGCHK_PRIVATE2(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, count, ...) count
# define CARB_PROFILE_UFUNCFILE(func) \
[](const char* pfunc) -> const auto& \
{ \
static auto tup = ::carb::profiler::detail::makeString2( \
::g_carbProfiler->registerStaticString(pfunc), ::g_carbProfiler->registerStaticString(__FILE__)); \
return tup; \
} \
(func)
# define CARB_PROFILE_UFUNCFILESTR(func, str) \
[](const char* pfunc, const char* pstr) -> const auto& \
{ \
static auto tup = ::carb::profiler::detail::makeString3( \
::g_carbProfiler->registerStaticString(pfunc), ::g_carbProfiler->registerStaticString(__FILE__), \
::g_carbProfiler->registerStaticString(pstr)); \
return tup; \
} \
(func, str)
# define CARB_PROFILE_FUNCFILE(func) \
[](const char* pfunc) -> const auto& \
{ \
if (::g_carbProfiler) \
{ \
static auto tup = \
::carb::profiler::detail::makeString2(::g_carbProfiler->registerStaticString(pfunc), \
::g_carbProfiler->registerStaticString(__FILE__)); \
return tup; \
} \
return ::carb::profiler::detail::emptyTuple2(); \
} \
(func)
# define CARB_PROFILE_FUNCFILESTR(func, str) \
[](const char* pfunc, const char* pstr) -> const auto& \
{ \
if (::g_carbProfiler) \
{ \
static auto tup = \
::carb::profiler::detail::makeString3(::g_carbProfiler->registerStaticString(pfunc), \
::g_carbProfiler->registerStaticString(__FILE__), \
::g_carbProfiler->registerStaticString(pstr)); \
return tup; \
} \
return ::carb::profiler::detail::emptyTuple3(); \
} \
(func, str)
# define CARB_PROFILE_CHECKMASK(mask) \
(((mask) ? (mask) : carb::profiler::kCaptureMaskDefault) & \
g_carbProfilerMask.load(std::memory_order_acquire))
namespace carb
{
namespace profiler
{
namespace detail
{
// Helper functions for begin that take the tuples created by CARB_PROFILE_UFUNCFILE and CARB_PROFILE_UFUNCFILESTR
// Begins a dynamic (printf-formatted) zone for CARB_PROFILE_BEGIN when a raw mask is given.
// Returns kNoZoneId without calling into the profiler when the mask is filtered out by the
// global capture mask (CARB_PROFILE_CHECKMASK). Callers guarantee ::g_carbProfiler is non-null.
template <class... Args>
carb::profiler::ZoneId beginDynamicHelper(
    const uint64_t mask, const ::carb::profiler::detail::String2& tup, int line, const char* fmt, Args&&... args)
{
    if (!CARB_PROFILE_CHECKMASK(mask))
        return kNoZoneId;
    return ::g_carbProfiler->beginDynamic(mask, tup.first, tup.second, line, fmt, std::forward<Args>(args)...);
}
// Begins a dynamic (printf-formatted) zone for CARB_PROFILE_BEGIN when a profiling Channel is given.
// Returns kNoZoneId without calling into the profiler when the channel is disabled.
// Callers guarantee ::g_carbProfiler is non-null.
template <class... Args>
carb::profiler::ZoneId beginDynamicHelper(const carb::profiler::Channel& channel,
                                          const ::carb::profiler::detail::String2& tup,
                                          int line,
                                          const char* fmt,
                                          Args&&... args)
{
    if (!channel.isEnabled())
        return kNoZoneId;
    return ::g_carbProfiler->beginDynamic(
        channel.getMask(), tup.first, tup.second, line, fmt, std::forward<Args>(args)...);
}
// Begins a static-string zone for CARB_PROFILE_BEGIN when a raw mask is given; the event name is
// the pre-registered string in tup.third. Returns kNoZoneId when the mask fails the capture mask.
inline carb::profiler::ZoneId beginStaticHelper(const uint64_t mask, const ::carb::profiler::detail::String3& tup, int line)
{
    if (!CARB_PROFILE_CHECKMASK(mask))
        return kNoZoneId;
    return ::g_carbProfiler->beginStatic(mask, tup.first, tup.second, line, tup.third);
}
// Begins a static-string zone for CARB_PROFILE_BEGIN when a profiling Channel is given.
// Returns kNoZoneId when the channel is disabled.
inline carb::profiler::ZoneId beginStaticHelper(const carb::profiler::Channel& channel,
                                                const ::carb::profiler::detail::String3& tup,
                                                int line)
{
    if (!channel.isEnabled())
        return kNoZoneId;
    return ::g_carbProfiler->beginStatic(channel.getMask(), tup.first, tup.second, line, tup.third);
}
// Normalizes the `maskOrChannel` macro argument to a raw mask: a plain mask passes through
// unchanged...
inline uint64_t maskHelper(uint64_t mask)
{
    return mask;
}
// ...while a profiling Channel contributes its configured mask.
inline uint64_t maskHelper(const carb::profiler::Channel& channel)
{
    return channel.getMask();
}
// Tells whether a `maskOrChannel` macro argument should report events: a raw mask is tested
// against the global capture mask...
inline bool enabled(uint64_t mask)
{
    return CARB_PROFILE_CHECKMASK(mask);
}
// ...while a profiling Channel uses its own enabled flag.
inline bool enabled(const carb::profiler::Channel& channel)
{
    return channel.isEnabled();
}
// Fallback tuple returned by CARB_PROFILE_FUNCFILE when no profiler is registered:
// both entries are kInvalidStaticString.
inline const ::carb::profiler::detail::String2& emptyTuple2()
{
    static constexpr auto tup = ::carb::profiler::detail::makeString2(kInvalidStaticString, kInvalidStaticString);
    return tup;
}
// Fallback tuple returned by CARB_PROFILE_FUNCFILESTR when no profiler is registered:
// all three entries are kInvalidStaticString.
inline const ::carb::profiler::detail::String3& emptyTuple3()
{
    static constexpr auto tup =
        ::carb::profiler::detail::makeString3(kInvalidStaticString, kInvalidStaticString, kInvalidStaticString);
    return tup;
}
} // namespace detail
} // namespace profiler
} // namespace carb
# endif
/**
* Starts the profiler that has been registered with carb::profiler::registerProfilerForClient().
*
* When finished with the profiler it should be stopped with CARB_PROFILE_SHUTDOWN().
*
* @note This is typically done immediately after carb::profiler::registerProfilerForClient().
*/
# define CARB_PROFILE_STARTUP() \
do \
{ \
if (::g_carbProfiler) \
{ \
::g_carbProfiler->startup(); \
} \
} while (0)
/**
* Shuts down the profiler that has been registered with carb::profiler::registerProfilerForClient() and previously
* started with CARB_PROFILE_STARTUP().
*
* @note This is typically done immediately before carb::profiler::deregisterProfilerForClient().
*/
# define CARB_PROFILE_SHUTDOWN() \
do \
{ \
if (::g_carbProfiler) \
{ \
::g_carbProfiler->shutdown(); \
} \
} while (0)
/**
* Registers a static string for use with the profiler.
*
* The profiler works by capturing events very quickly in the thread of execution that they happen in, and then
* processing them later in a background thread. Since static/literal strings are contained in memory that may be
* invalid once the module unloads, these static/literal strings are registered and copied by the profiler and this
* macro returns a handle to the string that can be passed to the "static" function such as
* @ref carb::profiler::IProfiler::beginStatic().
*
* @note This macro is used by other helper macros and is typically not used by applications.
*
* @warning Undefined behavior occurs if the given string is not a literal or static string.
*
* @returns A handle to the static string registered with the profiler. There is no need to unregister this string.
*/
# define CARB_PROFILE_REGISTER_STRING(str) \
[](const char* pstr) { \
if (::g_carbProfiler) \
{ \
static ::carb::profiler::StaticStringType p = ::g_carbProfiler->registerStaticString(pstr); \
return p; \
} \
return ::carb::profiler::kInvalidStaticString; \
}(str)
/**
* A helper to set the capture mask.
*
* The capture mask is a set of 64 bits. Each profiling zone is *bitwise-and*'d with the capture mask. If the operation
* matches the profiling zone mask then the event is included in the profiling output. Otherwise, the event is ignored.
*
* The default capture mask is profiler-specific, but typically has all bits set (i.e. includes everything).
* @see carb::profiler::IProfiler::setCaptureMask()
*
* @warning Changing the capture mask after the profiler has been started causes undefined behavior.
*/
# define CARB_PROFILE_SET_CAPTURE_MASK(mask) \
do \
{ \
if (::g_carbProfiler) \
{ \
::g_carbProfiler->setCaptureMask(mask); \
} \
} while (0)
/**
* Marks the beginning of a profiling zone.
*
* To end the profiling zone, use CARB_PROFILE_END().
*
* @warning Consider using CARB_PROFILE_ZONE() to automatically profile a scope. Manual begin and end sections can cause
* programming errors and confuse the profiler if an end is skipped.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a channel symbol name.
* @param eventName The name of the profiling zone. This must be either a literal string or a printf-style format
* string. Literal strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p eventName.
* @returns A carb::profiler::ZoneId that is unique to this zone and should be passed to CARB_PROFILE_END().
*/
# define CARB_PROFILE_BEGIN(maskOrChannel, eventName, ...) \
::g_carbProfiler ? \
CARB_PROFILE_IF(CARB_PROFILE_HAS_VARARGS(eventName, ##__VA_ARGS__), \
::carb::profiler::detail::beginDynamicHelper( \
maskOrChannel, CARB_PROFILE_UFUNCFILE(__func__), __LINE__, eventName, ##__VA_ARGS__), \
::carb::profiler::detail::beginStaticHelper( \
maskOrChannel, CARB_PROFILE_UFUNCFILESTR(__func__, eventName), __LINE__)) : \
(0 ? /*compiler validate*/ printf(eventName, ##__VA_ARGS__) : 0)
/**
* Marks the end of a profiling zone previously started with CARB_PROFILE_BEGIN().
*
* @warning Consider using CARB_PROFILE_ZONE() to automatically profile a scope. Manual begin and end sections can cause
* programming errors and confuse the profiler if an end is skipped.
*
* @param maskOrChannel The event mask or a channel symbol. This should match the value passed to CARB_PROFILE_BEGIN().
* @param ... The carb::profiler::ZoneId returned from CARB_PROFILE_BEGIN(), if known. This will help the profiler to
* validate that the proper zone was ended.
*/
# define CARB_PROFILE_END(maskOrChannel, ...) \
do \
{ \
if (::g_carbProfiler) \
{ \
::g_carbProfiler->CARB_PROFILE_IF( \
CARB_PROFILE_HAS_VARARGS(maskOrChannel, ##__VA_ARGS__), \
endEx(::carb::profiler::detail::maskHelper(maskOrChannel), ##__VA_ARGS__), \
end(::carb::profiler::detail::maskHelper(maskOrChannel))); \
} \
} while (0)
/**
* Inserts a frame marker for the calling thread in the profiling output, for profilers that support frame markers.
*
* @note The name provided below must be the same for each set of frames, and called each time from the same thread.
* For example you might have main thread frames that all are named "frame" and GPU frames that are named "GPU
* frame". Some profilers (i.e. profiler-cpu to Tracy conversion) require that the name contain the word "frame."
*
* @param mask Deprecated and ignored for frame events.
* @param frameName A name for the frame. This must either be a literal string or a printf-style format string. Literal
* strings are far more efficient. See the note above about frame names.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p frameName.
*/
# define CARB_PROFILE_FRAME(mask, frameName, ...) \
do \
{ \
/* Use printf to validate the format string */ \
if (0) \
{ \
printf(frameName, ##__VA_ARGS__); \
} \
if (::g_carbProfiler) \
{ \
CARB_PROFILE_IF(CARB_PROFILE_HAS_VARARGS(frameName, ##__VA_ARGS__), \
::g_carbProfiler->frameDynamic(mask, frameName, ##__VA_ARGS__), \
::g_carbProfiler->frameStatic(mask, []() { \
static ::carb::profiler::StaticStringType p = \
::g_carbProfiler->registerStaticString("" frameName); \
return p; \
}())); \
} \
} while (0)
/**
* Creates a profiling zone over a scope.
*
* This macro creates a temporary object on the stack that automatically begins a profiling zone at the point where this
* macro is used, and automatically ends the profiling zone when it goes out of scope.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a channel symbol.
* @param zoneName The name of the profiling zone. This must be either a literal string or a printf-style format string.
* Literal strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p zoneName.
*/
# define CARB_PROFILE_ZONE(maskOrChannel, zoneName, ...) \
CARB_PROFILE_IF(CARB_PROFILE_HAS_VARARGS(zoneName, ##__VA_ARGS__), \
::carb::profiler::ProfileZoneDynamic CARB_JOIN(_carbZone, __LINE__)( \
(maskOrChannel), CARB_PROFILE_FUNCFILE(__func__), __LINE__, zoneName, ##__VA_ARGS__), \
::carb::profiler::ProfileZoneStatic CARB_JOIN(_carbZone, __LINE__)( \
(maskOrChannel), CARB_PROFILE_FUNCFILESTR(__func__, zoneName), __LINE__))
/**
* A helper for CARB_PROFILE_ZONE() that automatically uses the function name as from `CARB_PRETTY_FUNCTION`.
*
* Equivalent, but faster than: `CARB_PROFILE_ZONE(mask, "%s", CARB_PRETTY_FUNCTION)`.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel.
*/
# define CARB_PROFILE_FUNCTION(maskOrChannel) \
::carb::profiler::ProfileZoneStatic CARB_JOIN(_carbZoneFunction, __LINE__)( \
(maskOrChannel), CARB_PROFILE_FUNCFILESTR(__func__, CARB_PRETTY_FUNCTION), __LINE__)
/**
* Writes a named numeric value to the profiling output for profilers that support them.
*
* @note Supported types for @p value are `float`, `uint32_t` and `int32_t`.
*
* @param value The value to record.
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel.
* @param valueName The name of the value. This must be either a literal string or a printf-style format string. Literal
* strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p valueName.
*/
# define CARB_PROFILE_VALUE(value, maskOrChannel, valueName, ...) \
do \
{ \
/* Use printf to validate the format string */ \
if (0) \
{ \
printf(valueName, ##__VA_ARGS__); \
} \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
CARB_PROFILE_IF( \
CARB_PROFILE_HAS_VARARGS(valueName, ##__VA_ARGS__), \
::g_carbProfiler->valueDynamic( \
::carb::profiler::detail::maskHelper(maskOrChannel), value, valueName, ##__VA_ARGS__), \
::g_carbProfiler->valueStatic(::carb::profiler::detail::maskHelper(maskOrChannel), value, []() { \
static ::carb::profiler::StaticStringType p = \
::g_carbProfiler->registerStaticString("" valueName); \
return p; \
}())); \
} \
} while (0)
/**
* Records an allocation event for a named memory pool for profilers that support them.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel.
* @param ptr The memory address that was allocated.
* @param size The size of the memory region beginning at @p ptr.
* @param poolName The name of the memory pool. This must be either a literal string or a printf-style format string.
* Literal strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p poolName.
*/
# define CARB_PROFILE_ALLOC_NAMED(maskOrChannel, ptr, size, poolName, ...) \
do \
{ \
/* Use printf to validate the format string */ \
if (0) \
{ \
printf(poolName, ##__VA_ARGS__); \
} \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
CARB_PROFILE_IF(CARB_PROFILE_HAS_VARARGS(poolName, ##__VA_ARGS__), \
::g_carbProfiler->allocNamedDynamic(::carb::profiler::detail::maskHelper(maskOrChannel), \
ptr, size, poolName, ##__VA_ARGS__), \
::g_carbProfiler->allocNamedStatic( \
::carb::profiler::detail::maskHelper(maskOrChannel), ptr, size, []() { \
static ::carb::profiler::StaticStringType p = \
::g_carbProfiler->registerStaticString("" poolName); \
return p; \
}())); \
} \
} while (0)
/**
* Records a free event for a named memory pool for profilers that support them.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel. This should match
* the value passed to CARB_PROFILE_ALLOC_NAMED() for the same allocation.
* @param ptr The memory address that was freed.
* @param poolName The name of the memory pool. This must be either a literal string or a printf-style format string.
* Literal strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p poolName.
*/
# define CARB_PROFILE_FREE_NAMED(maskOrChannel, ptr, poolName, ...) \
do \
{ \
/* Use printf to validate the format string */ \
if (0) \
{ \
printf(poolName, ##__VA_ARGS__); \
} \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
CARB_PROFILE_IF( \
CARB_PROFILE_HAS_VARARGS(poolName, ##__VA_ARGS__), \
::g_carbProfiler->freeNamedDynamic( \
::carb::profiler::detail::maskHelper(maskOrChannel), ptr, poolName, ##__VA_ARGS__), \
::g_carbProfiler->freeNamedStatic(::carb::profiler::detail::maskHelper(maskOrChannel), ptr, []() { \
static ::carb::profiler::StaticStringType p = \
::g_carbProfiler->registerStaticString("" poolName); \
return p; \
}())); \
} \
} while (0)
/**
* Records an allocation event for profilers that support them.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel.
* @param ptr The memory address that was allocated.
* @param size The size of the memory region beginning at @p ptr.
*/
# define CARB_PROFILE_ALLOC(maskOrChannel, ptr, size) \
do \
{ \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
::g_carbProfiler->allocStatic(::carb::profiler::detail::maskHelper(maskOrChannel), ptr, size); \
} \
} while (0)
/**
* Records a free event for profilers that support them.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel.
* @param ptr The memory address that was freed.
*/
# define CARB_PROFILE_FREE(maskOrChannel, ptr) \
do \
{ \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
::g_carbProfiler->freeStatic(::carb::profiler::detail::maskHelper(maskOrChannel), ptr); \
} \
} while (0)
/**
* Records the name of a thread.
*
* @param tidOrZero The thread ID that is being named. A value of `0` indicates the current thread. Not all profilers
* support values other than `0`.
* @param threadName The name of the thread. This must be either a literal string or a printf-style format string.
* Literal strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p threadName.
*/
# define CARB_NAME_THREAD(tidOrZero, threadName, ...) \
do \
{ \
/* Use printf to validate the format string */ \
if (0) \
{ \
printf((threadName), ##__VA_ARGS__); \
} \
if (::g_carbProfiler) \
{ \
CARB_PROFILE_IF(CARB_PROFILE_HAS_VARARGS(threadName, ##__VA_ARGS__), \
::g_carbProfiler->nameThreadDynamic((tidOrZero), (threadName), ##__VA_ARGS__), \
::g_carbProfiler->nameThreadStatic((tidOrZero), []() { \
static ::carb::profiler::StaticStringType p = \
::g_carbProfiler->registerStaticString("" threadName); \
return p; \
}())); \
} \
} while (0)
/**
* Records an instant event on a thread's timeline at the current time.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel.
* @param type The type of the instant event that will be passed to carb::profiler::emitInstantStatic() or
* carb::profiler::emitInstantDynamic().
* @param name The name of the event. This must either be a literal string or a printf-style format string with variadic
* arguments. Literal strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p name.
*/
# define CARB_PROFILE_EVENT(maskOrChannel, type, name, ...) \
do \
{ \
if (0) \
printf((name), ##__VA_ARGS__); \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
CARB_PROFILE_IF( \
CARB_PROFILE_HAS_VARARGS(name, ##__VA_ARGS__), \
static auto tup = ::carb::profiler::detail::makeString2( \
::g_carbProfiler->registerStaticString(CARB_PRETTY_FUNCTION), \
::g_carbProfiler->registerStaticString(__FILE__)); \
::g_carbProfiler->emitInstantDynamic(::carb::profiler::detail::maskHelper(maskOrChannel), tup.first, \
tup.second, __LINE__, (type), (name), ##__VA_ARGS__), \
static auto tup = ::carb::profiler::detail::makeString3( \
::g_carbProfiler->registerStaticString(CARB_PRETTY_FUNCTION), \
::g_carbProfiler->registerStaticString(__FILE__), ::g_carbProfiler->registerStaticString(name)); \
::g_carbProfiler->emitInstantStatic(::carb::profiler::detail::maskHelper(maskOrChannel), \
tup.first, tup.second, __LINE__, (type), tup.third)); \
} \
} while (0)
/**
* Records the beginning of a flow event on the timeline at the current time for the current thread.
*
* Flow events draw an arrow from one point (the `BEGIN` location) to another point (the `END` location).
* These two points can be in different threads but must have a matching @p id field. The @p id field is meant to be
* unique across profiler runs, but may be reused as long as the @p name field matches across all `BEGIN` events and
* events occur on the global timeline as `BEGIN` followed by `END`.
*
* This macro will automatically insert an instant event on the current thread's timeline.
*
* @param maskOrChannel The event mask (see carb::profiler::setCaptureMask()) or a profiling channel.
* @param id A unique identifier that must also be passed to CARB_PROFILE_FLOW_END().
* @param name The name of the event. This must either be a literal string or a printf-style format string with variadic
* arguments. Literal strings are far more efficient.
* @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p name.
*/
# define CARB_PROFILE_FLOW_BEGIN(maskOrChannel, id, name, ...) \
do \
{ \
if (0) \
printf((name), ##__VA_ARGS__); \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
CARB_PROFILE_IF(CARB_PROFILE_HAS_VARARGS(name, ##__VA_ARGS__), \
static auto tup = ::carb::profiler::detail::makeString2( \
::g_carbProfiler->registerStaticString(CARB_PRETTY_FUNCTION), \
::g_carbProfiler->registerStaticString(__FILE__)); \
::g_carbProfiler->emitFlowDynamic( \
::carb::profiler::detail::maskHelper(maskOrChannel), tup.first, tup.second, \
__LINE__, ::carb::profiler::FlowType::Begin, (id), (name), ##__VA_ARGS__), \
static auto tup = ::carb::profiler::detail::makeString3( \
::g_carbProfiler->registerStaticString(CARB_PRETTY_FUNCTION), \
::g_carbProfiler->registerStaticString(__FILE__), \
::g_carbProfiler->registerStaticString("" name)); \
::g_carbProfiler->emitFlowStatic(::carb::profiler::detail::maskHelper(maskOrChannel), \
tup.first, tup.second, __LINE__, \
::carb::profiler::FlowType::Begin, (id), tup.third)); \
} \
} while (0)
/**
* Records the end of a flow event on the timeline at the current time for the current thread.
*
* @see CARB_PROFILE_FLOW_BEGIN()
*
* @param maskOrChannel The event mask or profiling channel. Must match the value given to CARB_PROFILE_FLOW_BEGIN().
 * @param id The unique identifier passed to CARB_PROFILE_FLOW_BEGIN().
*/
# define CARB_PROFILE_FLOW_END(maskOrChannel, id) \
do \
{ \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
static auto tup = \
::carb::profiler::detail::makeString2(::g_carbProfiler->registerStaticString(CARB_PRETTY_FUNCTION), \
::g_carbProfiler->registerStaticString(__FILE__)); \
::g_carbProfiler->emitFlowStatic(::carb::profiler::detail::maskHelper(maskOrChannel), tup.first, \
tup.second, __LINE__, ::carb::profiler::FlowType::End, (id), \
::carb::profiler::kInvalidStaticString); \
} \
} while (0)
/**
* Create a new GPU profiling context that allows injecting timestamps coming from a GPU in a deferred manner
*
* @param name name of the context
 * @param correlatedCpuTimestampNs correlated CPU clock timestamp (in nanoseconds)
* @param correlatedGpuTimestamp correlated GPU clock timestamp (raw value)
* @param gpuTimestampPeriodNs is the number of nanoseconds required for a GPU timestamp query to be incremented
* by 1.
* @param graphicApi string of graphic API used ['vulkan'/'d3d12']
* @returns a valid ID or kInvalidGpuContextId if creation fails
*/
# define CARB_PROFILE_CREATE_GPU_CONTEXT( \
name, correlatedCpuTimestampNs, correlatedGpuTimestamp, gpuTimestampPeriodNs, graphicApi) \
(::g_carbProfiler ? ::g_carbProfiler->createGpuContext(name, correlatedCpuTimestampNs, correlatedGpuTimestamp, \
gpuTimestampPeriodNs, graphicApi) : \
carb::profiler::kInvalidGpuContextId)
/**
* Destroy a previously created GPU Context
*
* @param contextId ID of the context, returned by createGpuContext
*/
# define CARB_PROFILE_DESTROY_GPU_CONTEXT(contextId) \
do \
{ \
if (::g_carbProfiler) \
{ \
::g_carbProfiler->destroyGpuContext(contextId); \
} \
} while (0)
/**
* Submit context calibration information that allows correlating CPU and GPU clocks
*
* @param contextId ID of the context, returned by @ref carb::profiler::IProfiler::createGpuContext()
* @param correlatedCpuTimestampNs The new CPU timestamp at the time of correlation (in nanoseconds)
* @param previousCorrelatedCpuTimestampNs The CPU timestamp at the time of previous correlation (in nanoseconds)
* @param correlatedGpuTimestamp The new raw GPU timestamp at the time of correlation
*/
# define CARB_PROFILE_CALIBRATE_GPU_CONTEXT( \
contextId, correlatedCpuTimestampNs, previousCorrelatedCpuTimestampNs, correlatedGpuTimestamp) \
((::g_carbProfiler) ? \
(::g_carbProfiler->calibrateGpuContext( \
contextId, correlatedCpuTimestampNs, previousCorrelatedCpuTimestampNs, correlatedGpuTimestamp)) : \
false)
/**
* Record the beginning of a new GPU timestamp query
*
* @param maskOrChannel Event capture mask or profiling channel.
* @param contextId The id of the context as returned by @ref carb::profiler::IProfiler::createGpuContext()
* @param queryId Unique query id (for identification when passing to @ref
* carb::profiler::IProfiler::setGpuQueryValue())
 * @param eventName The name for the event. This must either be a literal string or a printf-style format string.
 * @param ... Optional printf-style variadic arguments corresponding to format specifiers in @p eventName.
*/
# define CARB_PROFILE_GPU_QUERY_BEGIN(maskOrChannel, contextId, queryId, eventName, ...) \
do \
{ \
if (0) \
printf((eventName), ##__VA_ARGS__); \
if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
{ \
CARB_PROFILE_IF(CARB_PROFILE_HAS_VARARGS(eventName, ##__VA_ARGS__), \
static auto tup = ::carb::profiler::detail::makeString2( \
::g_carbProfiler->registerStaticString(CARB_PRETTY_FUNCTION), \
::g_carbProfiler->registerStaticString(__FILE__)); \
::g_carbProfiler->beginGpuQueryDynamic( \
::carb::profiler::detail::maskHelper(maskOrChannel), tup.first, tup.second, \
__LINE__, contextId, queryId, eventName, ##__VA_ARGS__), \
static auto tup = ::carb::profiler::detail::makeString3( \
::g_carbProfiler->registerStaticString(CARB_PRETTY_FUNCTION), \
::g_carbProfiler->registerStaticString(__FILE__), \
::g_carbProfiler->registerStaticString("" eventName)); \
::g_carbProfiler->beginGpuQueryStatic( \
::carb::profiler::detail::maskHelper(maskOrChannel), tup.first, tup.second, \
__LINE__, contextId, queryId, tup.third)); \
} \
} while (0)
/**
 * Record the end of a new GPU timestamp query
 *
 * @note Expands to a full statement; does nothing if no profiler is attached or @p maskOrChannel is not
 * currently enabled.
 * @param maskOrChannel Event capture mask or profiling channel.
 * @param contextId The id of the context as returned by @ref carb::profiler::IProfiler::createGpuContext()
 * @param queryId Unique query id (for identification when passing to @ref
 * carb::profiler::IProfiler::setGpuQueryValue())
 */
# define CARB_PROFILE_GPU_QUERY_END(maskOrChannel, contextId, queryId) \
    do \
    { \
        if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
        { \
            ::g_carbProfiler->endGpuQuery(::carb::profiler::detail::maskHelper(maskOrChannel), contextId, queryId); \
        } \
    } while (0)
/**
 * Set the value we've received from the GPU for a query (begin or end) we've issued in the past
 *
 * @note Expands to a full statement; does nothing if no profiler is attached or @p maskOrChannel is not
 * currently enabled.
 * @param maskOrChannel Event capture mask or profiling channel
 * @param contextId The id of the context as returned by @ref carb::profiler::IProfiler::createGpuContext()
 * @param queryId Unique query id specified at begin/end time
 * @param gpuTimestamp Raw GPU timestamp value
 */
# define CARB_PROFILE_GPU_SET_QUERY_VALUE(maskOrChannel, contextId, queryId, gpuTimestamp) \
    do \
    { \
        if (::g_carbProfiler && ::carb::profiler::detail::enabled(maskOrChannel)) \
        { \
            ::g_carbProfiler->setGpuQueryValue( \
                ::carb::profiler::detail::maskHelper(maskOrChannel), contextId, queryId, gpuTimestamp); \
        } \
    } while (0)
/**
 * Create a lockable context which we can use to tag lock operation
 * @note Do not use this macro directly. Use \ref carb::profiler::ProfiledMutex or
 * \ref carb::profiler::ProfiledSharedMutex instead.
 * @note Expands to an expression evaluated via an immediately-invoked lambda; the registered static strings
 * for the current function and file are cached in a function-local static inside the lambda, so string
 * registration happens at most once per call site.
 * @param maskOrChannel Event capture mask or profiling channel
 * @param isSharedLock `true` if this lockable represents a shared lock (e.g. a shared mutex); `false` otherwise
 * @param name The lockable context name
 * @returns The id from @ref carb::profiler::IProfiler::createLockable(), or
 * @ref carb::profiler::kInvalidLockableId if no profiler is attached or @p maskOrChannel is not enabled.
 */
# define CARB_PROFILE_LOCKABLE_CREATE(maskOrChannel, isSharedLock, name) \
    [](bool enabled, const uint64_t maskParam, const bool isSharedLockParam, const char* nameParam, \
       const char* function) { \
        if (::g_carbProfiler && enabled) \
        { \
            static auto tup = ::carb::profiler::detail::makeString2( \
                ::g_carbProfiler->registerStaticString(function), ::g_carbProfiler->registerStaticString(__FILE__)); \
            return ::g_carbProfiler->createLockable( \
                maskParam, nameParam, isSharedLockParam, tup.first, tup.second, __LINE__); \
        } \
        return ::carb::profiler::kInvalidLockableId; \
    }(::carb::profiler::detail::enabled(maskOrChannel), ::carb::profiler::detail::maskHelper(maskOrChannel), \
      (isSharedLock), (name), CARB_PRETTY_FUNCTION)
/**
 * Destroy a lockable context
 * @note Do not use this macro directly. Use \ref carb::profiler::ProfiledMutex or
 * \ref carb::profiler::ProfiledSharedMutex instead.
 * @note Expands to a full statement; does nothing when no profiler is attached or @p lockableId is
 * @ref carb::profiler::kInvalidLockableId. The argument is parenthesized in the expansion so any
 * expression may be passed safely.
 * @param lockableId the id of the lockable as returned by @ref carb::profiler::IProfiler::createLockable()
 */
# define CARB_PROFILE_LOCKABLE_DESTROY(lockableId) \
    do \
    { \
        if (::g_carbProfiler && (lockableId) != ::carb::profiler::kInvalidLockableId) \
        { \
            ::g_carbProfiler->destroyLockable((lockableId)); \
        } \
    } while (0)
/**
 * Records a lockable operation on a thread's timeline at the current time.
 * @note Do not use this macro directly. Use \ref carb::profiler::ProfiledMutex or
 * \ref carb::profiler::ProfiledSharedMutex instead.
 * @note Expands to a full statement; does nothing when no profiler is attached or @p lockableId is
 * @ref carb::profiler::kInvalidLockableId. Both arguments are parenthesized in the expansion so any
 * expression may be passed safely.
 * @param lockableId the id of the lockable as returned by @ref carb::profiler::IProfiler::createLockable()
 * @param operation which lock operation to tag
 */
# define CARB_PROFILE_LOCKABLE_OPERATION(lockableId, operation) \
    do \
    { \
        if (::g_carbProfiler && (lockableId) != ::carb::profiler::kInvalidLockableId) \
        { \
            ::g_carbProfiler->lockableOperation((lockableId), (operation)); \
        } \
    } while (0)
/// @}
#else
// Profiling support compiled out: every macro below is a no-op stub. CARB_UNUSED presumably only
// references its arguments to silence unused-variable/parameter warnings (confirm in Defines.h);
// macros that yield a value in the enabled build yield the corresponding invalid/no-op constant here.
# define CARB_PROFILE_STARTUP() (void(0))
# define CARB_PROFILE_SHUTDOWN() (void(0))
# define CARB_PROFILE_REGISTER_STRING(str) (CARB_UNUSED(str), ::carb::profiler::kInvalidStaticString)
# define CARB_PROFILE_SET_CAPTURE_MASK(mask) CARB_UNUSED(mask)
# define CARB_PROFILE_BEGIN(maskOrChannel, eventName, ...) \
    (CARB_UNUSED((maskOrChannel), (eventName), ##__VA_ARGS__), ::carb::profiler::kNoZoneId)
# define CARB_PROFILE_END(maskOrChannel, ...) CARB_UNUSED((maskOrChannel), ##__VA_ARGS__)
# define CARB_PROFILE_FRAME(mask, frameName, ...) CARB_UNUSED((mask), (frameName), ##__VA_ARGS__)
# define CARB_PROFILE_ZONE(maskOrChannel, zoneName, ...) CARB_UNUSED((maskOrChannel), (zoneName), ##__VA_ARGS__)
# define CARB_PROFILE_FUNCTION(maskOrChannel) CARB_UNUSED(maskOrChannel)
# define CARB_PROFILE_VALUE(value, maskOrChannel, valueName, ...) \
    CARB_UNUSED((value), (maskOrChannel), (valueName), ##__VA_ARGS__)
# define CARB_PROFILE_ALLOC_NAMED(maskOrChannel, ptr, size, poolName, ...) \
    CARB_UNUSED((maskOrChannel), (ptr), (size), (poolName), ##__VA_ARGS__)
# define CARB_PROFILE_FREE_NAMED(maskOrChannel, ptr, poolName, ...) \
    CARB_UNUSED((maskOrChannel), (ptr), (poolName), ##__VA_ARGS__)
# define CARB_PROFILE_ALLOC(maskOrChannel, ptr, size) CARB_UNUSED((maskOrChannel), (ptr), (size))
# define CARB_PROFILE_FREE(maskOrChannel, ptr) CARB_UNUSED((maskOrChannel), (ptr))
# define CARB_NAME_THREAD(tidOrZero, threadName, ...) CARB_UNUSED((tidOrZero), (threadName), ##__VA_ARGS__)
# define CARB_PROFILE_EVENT(maskOrChannel, type, name, ...) \
    CARB_UNUSED((maskOrChannel), (type), (name), ##__VA_ARGS__)
# define CARB_PROFILE_FLOW_BEGIN(maskOrChannel, id, name, ...) \
    CARB_UNUSED((maskOrChannel), (id), (name), ##__VA_ARGS__)
# define CARB_PROFILE_FLOW_END(maskOrChannel, id) CARB_UNUSED((maskOrChannel), (id))
// No-op stub for the profiler-disabled build; yields kInvalidGpuContextId.
// Fix: the body previously referenced `graphicsApi`, but the macro parameter is spelled
// `graphicApi` — the mismatch made every expansion reference an undefined identifier and
// fail to compile in the disabled build.
# define CARB_PROFILE_CREATE_GPU_CONTEXT( \
    name, correlatedCpuTimestampNs, correlatedGpuTimestamp, gpuTimestampPeriodNs, graphicApi) \
    (CARB_UNUSED( \
         (name), (correlatedCpuTimestampNs), (correlatedGpuTimestamp), (gpuTimestampPeriodNs), (graphicApi)), \
     carb::profiler::kInvalidGpuContextId)
// No-op stubs (continued): GPU-context, GPU-query and lockable macros for the profiler-disabled build.
# define CARB_PROFILE_DESTROY_GPU_CONTEXT(contextId) CARB_UNUSED(contextId)
# define CARB_PROFILE_CALIBRATE_GPU_CONTEXT( \
    contextId, correlatedCpuTimestampNs, previousCorrelatedCpuTimestampNs, correlatedGpuTimestamp) \
    CARB_UNUSED( \
        (contextId), (correlatedCpuTimestampNs), (previousCorrelatedCpuTimestampNs), (correlatedGpuTimestamp))
# define CARB_PROFILE_GPU_QUERY_BEGIN(maskOrChannel, contextId, queryId, eventName, ...) \
    CARB_UNUSED((maskOrChannel), (contextId), (queryId), (eventName), ##__VA_ARGS__)
# define CARB_PROFILE_GPU_QUERY_END(maskOrChannel, contextId, queryId) \
    (CARB_UNUSED((maskOrChannel), (contextId), (queryId)))
# define CARB_PROFILE_GPU_SET_QUERY_VALUE(maskOrChannel, contextId, queryId, gpuTimestamp) \
    CARB_UNUSED((maskOrChannel), (contextId), (queryId), (gpuTimestamp))
# define CARB_PROFILE_LOCKABLE_CREATE(maskOrChannel, isSharedLock, name) \
    (CARB_UNUSED((maskOrChannel), (isSharedLock), (name)), ::carb::profiler::kInvalidLockableId)
# define CARB_PROFILE_LOCKABLE_DESTROY(lockableId) CARB_UNUSED(lockableId)
# define CARB_PROFILE_LOCKABLE_OPERATION(lockableId, operation) CARB_UNUSED((lockableId), (operation))
#endif
/**
 * Placeholder macro for any work that needs to be done at the global scope for the profiler.
 *
 * Currently expands to nothing.
 * @note This is typically not used as it is included in the @ref CARB_GLOBALS_EX macro.
 */
#define CARB_PROFILER_GLOBALS()
namespace carb
{
namespace profiler
{
/**
 * Mutex wrapper that reports lock and unlock activity to the profiler.
 *
 * Behaves exactly like the wrapped @p Mutex type; each operation additionally emits a
 * lockable-operation event through the CARB_PROFILE_LOCKABLE_* macros.
 */
template <class Mutex>
class ProfiledMutex
{
public:
    /**
     * Constructor.
     * @param profileMask The mask used to determine if events from this mutex are captured.
     * @param name The name of the mutex
     */
    ProfiledMutex(const uint64_t profileMask, const char* name) : ProfiledMutex(profileMask, false, name)
    {
    }

    /**
     * Constructor.
     * @param channel The profiling channel used to determine if events from this mutex are captured.
     * @param name The name of the mutex
     */
    ProfiledMutex(const carb::profiler::Channel& channel, const char* name) : ProfiledMutex(channel, false, name)
    {
    }

    /**
     * Destructor; releases the profiler's lockable context.
     */
    ~ProfiledMutex()
    {
        CARB_PROFILE_LOCKABLE_DESTROY(m_lockableId);
    }

    /**
     * Locks the wrapped mutex, reporting both the wait and the acquisition to the profiler.
     */
    void lock()
    {
        CARB_PROFILE_LOCKABLE_OPERATION(m_lockableId, LockableOperationType::BeforeLock);
        m_mutex.lock();
        CARB_PROFILE_LOCKABLE_OPERATION(m_lockableId, LockableOperationType::AfterLock);
    }

    /**
     * Attempts to lock the wrapped mutex; reports to the profiler only on success.
     * @returns \c true if the lock was acquired; \c false otherwise.
     */
    bool try_lock()
    {
        if (!m_mutex.try_lock())
            return false;
        CARB_PROFILE_LOCKABLE_OPERATION(m_lockableId, LockableOperationType::AfterSuccessfulTryLock);
        return true;
    }

    /**
     * Unlocks the wrapped mutex and reports the release to the profiler.
     */
    void unlock()
    {
        m_mutex.unlock();
        CARB_PROFILE_LOCKABLE_OPERATION(m_lockableId, LockableOperationType::AfterUnlock);
    }

    /**
     * Accessor for the wrapped mutex.
     * @returns a reference to the underlying mutex.
     */
    Mutex& getMutex()
    {
        return m_mutex;
    }

    /**
     * Accessor for the wrapped mutex (const overload).
     * @returns a const reference to the underlying mutex.
     */
    const Mutex& getMutex() const
    {
        return m_mutex;
    }

protected:
    /**
     * Protected Constructor.
     * @param profileMask The mask used to determine if events from this mutex are captured.
     * @param isSharedMutex A boolean representing whether `*this` represents a shared mutex.
     * @param name The name of the mutex
     */
    ProfiledMutex(const uint64_t profileMask, bool isSharedMutex, const char* name)
    {
        m_lockableId = CARB_PROFILE_LOCKABLE_CREATE(profileMask, isSharedMutex, name);
    }

    /**
     * Protected Constructor.
     * @param channel The channel used to determine if events from this mutex are captured.
     * @param isSharedMutex A boolean representing whether `*this` represents a shared mutex.
     * @param name The name of the mutex
     */
    ProfiledMutex(const carb::profiler::Channel& channel, bool isSharedMutex, const char* name)
    {
        m_lockableId = CARB_PROFILE_LOCKABLE_CREATE(channel, isSharedMutex, name);
    }

    //! The wrapped mutex instance
    Mutex m_mutex;
    //! Lockable ID as returned by \ref carb::profiler::IProfiler::createLockable()
    LockableId m_lockableId;
};
/**
 * Shared-mutex wrapper that reports shared lock and unlock activity to the profiler.
 *
 * Extends \ref ProfiledMutex with the shared-locking API; exclusive operations are inherited.
 */
template <class Mutex>
class ProfiledSharedMutex : public ProfiledMutex<Mutex>
{
    using Base = ProfiledMutex<Mutex>;

public:
    /**
     * Constructor.
     * @param profileMask The mask used to determine if events from this mutex are captured.
     * @param name The name of the mutex
     */
    ProfiledSharedMutex(const uint64_t profileMask, const char* name) : Base(profileMask, true, name)
    {
    }

    /**
     * Constructor.
     * @param channel The profiling channel used to determine if events from this mutex are captured.
     * @param name The name of the mutex
     */
    ProfiledSharedMutex(const carb::profiler::Channel& channel, const char* name) : Base(channel, true, name)
    {
    }

    /**
     * Destructor (the base class destroys the lockable context).
     */
    ~ProfiledSharedMutex() = default;

    /**
     * Acquires a shared lock, reporting both the wait and the acquisition to the profiler.
     */
    void lock_shared()
    {
        CARB_PROFILE_LOCKABLE_OPERATION(this->m_lockableId, LockableOperationType::BeforeLockShared);
        this->m_mutex.lock_shared();
        CARB_PROFILE_LOCKABLE_OPERATION(this->m_lockableId, LockableOperationType::AfterLockShared);
    }

    /**
     * Attempts a shared lock; reports to the profiler only on success.
     * @returns \c true if the shared lock was acquired; \c false otherwise.
     */
    bool try_lock_shared()
    {
        if (!this->m_mutex.try_lock_shared())
            return false;
        CARB_PROFILE_LOCKABLE_OPERATION(this->m_lockableId, LockableOperationType::AfterSuccessfulTryLockShared);
        return true;
    }

    /**
     * Releases a shared lock and reports the release to the profiler.
     */
    void unlock_shared()
    {
        this->m_mutex.unlock_shared();
        CARB_PROFILE_LOCKABLE_OPERATION(this->m_lockableId, LockableOperationType::AfterUnlockShared);
    }
};
void deregisterProfilerForClient() noexcept;
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
// Mask-change callback handed to IProfiler::setMaskCallback(); publishes the new capture mask to this
// module's g_carbProfilerMask global. Release ordering presumably pairs with acquire loads by the mask
// readers in detail::enabled() (not visible in this chunk) — confirm.
inline void updateMask(uint64_t mask)
{
    g_carbProfilerMask.store(mask, std::memory_order_release);
}
// Release hook installed on the profiler interface: clears the global profiler pointer when the
// interface is released, then removes itself from the Framework.
inline void releaseHook(void* iface, void*)
{
    cpp::atomic_ref<IProfiler*>(g_carbProfiler).store(nullptr); // sequentially consistent
    getFramework()->removeReleaseHook(iface, &releaseHook, nullptr);
}
// Release hook installed on the Framework itself (iface == nullptr at registration time).
inline void frameworkReleaseHook(void*, void*)
{
    // Framework is going away, so make sure we get fully deregistered.
    deregisterProfilerForClient();
}
// Load hook: invoked by the Framework when an IProfiler implementation is loaded after this module
// called registerProfilerForClient() while no profiler was present. Acquires the interface, seeds the
// capture mask, and publishes g_carbProfiler last so other threads only observe a fully set-up state.
inline void loadHook(const PluginDesc&, void*)
{
    if (!g_carbProfiler)
    {
        IProfiler* profiler = getFramework()->tryAcquireInterface<IProfiler>();
        if (profiler)
        {
            if (profiler->setMaskCallback)
            {
                // Relaxed semantics since we will shortly be synchronizing on g_carbProfiler.
                g_carbProfilerMask.store(profiler->setMaskCallback(updateMask, true), std::memory_order_relaxed);
            }
            else
            {
                g_carbProfilerMask.store(uint64_t(-1), std::memory_order_relaxed); // not supported; let everything
                                                                                   // through
            }
            getFramework()->addReleaseHook(profiler, &detail::releaseHook, nullptr);
            cpp::atomic_ref<IProfiler*>(g_carbProfiler).store(profiler, std::memory_order_seq_cst); // sequentially
                                                                                                    // consistent
        }
    }
}
// Per-module registration flag. A function-local static (instead of a namespace-scope global)
// sidesteps static-initialization-order issues; returns a mutable reference so callers can both
// query and flip the flag (see registerProfilerForClient/deregisterProfilerForClient).
inline bool& registered()
{
    static bool s_isRegistered{ false };
    return s_isRegistered;
}
// Per-module storage for the IProfiler load-hook handle; function-local static for the same
// initialization-order reasons as registered().
inline LoadHookHandle& loadHookHandle()
{
    static carb::LoadHookHandle s_handle{};
    return s_handle;
}
} // namespace detail
#endif
/**
 * Allows access to the @ref g_carbProfiler global variable previously registered with @ref registerProfilerForClient().
 * @note The returned pointer may be `nullptr` if no profiler implementation has been registered or loaded,
 * or if the profiler plugin has since been released (see the release hooks in this file).
 * @returns The value of @ref g_carbProfiler.
 */
inline IProfiler* getProfiler()
{
    return g_carbProfiler;
}
/**
 * Clears the @ref g_carbProfiler global variable and unregisters load and release hooks with the \ref Framework.
 * @note Safe to call multiple times; only the first call after a successful
 * @ref registerProfilerForClient() performs any work.
 */
inline void deregisterProfilerForClient() noexcept
{
    // std::exchange makes this idempotent: only the call that flips the flag proceeds.
    if (std::exchange(detail::registered(), false))
    {
        auto fw = getFramework();
        auto handle = std::exchange(detail::loadHookHandle(), kInvalidLoadHook);
        // Atomically take ownership of the profiler pointer so no caller observes it past this point.
        IProfiler* profiler = cpp::atomic_ref<IProfiler*>(g_carbProfiler).exchange(nullptr, std::memory_order_seq_cst);
        if (fw)
        {
            // verifyInterface presumably guards against a profiler whose plugin has already been
            // unloaded — confirm Framework semantics before relying on this elsewhere.
            if (profiler && fw->verifyInterface(profiler) && profiler->setMaskCallback)
            {
                // Uninstall our mask-change callback.
                profiler->setMaskCallback(detail::updateMask, false);
            }
            if (handle)
            {
                fw->removeLoadHook(handle);
            }
            fw->removeReleaseHook(nullptr, &detail::frameworkReleaseHook, nullptr);
            if (profiler)
            {
                fw->removeReleaseHook(profiler, &detail::releaseHook, nullptr);
            }

            // Unregister channels
            Channel::onProfilerUnregistered();
        }
    }
}
/**
 * Acquires the default IProfiler interface and assigns it to the @ref g_carbProfiler global variable.
 *
 * If a profiler is not yet loaded, a load hook is registered with the \ref Framework and when the profiler is loaded,
 * \ref g_carbProfiler will be automatically set for this module. If the profiler is unloaded, \ref g_carbProfiler will
 * be automatically set to `nullptr`.
 * @note Idempotent: only the first call after (re-)registration performs the work.
 */
inline void registerProfilerForClient() noexcept
{
    if (!std::exchange(detail::registered(), true))
    {
        auto fw = getFramework();
        // Ensure we fully deregister if the Framework itself is torn down first.
        fw->addReleaseHook(nullptr, &detail::frameworkReleaseHook, nullptr);
        IProfiler* profiler = fw->tryAcquireInterface<IProfiler>();
        if (profiler)
        {
            if (profiler->setMaskCallback)
            {
                // Relaxed semantics since we will shortly be synchronizing on g_carbProfiler.
                g_carbProfilerMask.store(profiler->setMaskCallback(detail::updateMask, true), std::memory_order_relaxed);
            }
            else
            {
                g_carbProfilerMask.store(uint64_t(-1), std::memory_order_relaxed); // let everything through
            }
            bool b = fw->addReleaseHook(profiler, &detail::releaseHook, nullptr);
            CARB_ASSERT(b);
            CARB_UNUSED(b);
        }
        // Publish the profiler pointer last so other threads only observe a fully initialized state.
        cpp::atomic_ref<IProfiler*>(g_carbProfiler).store(profiler, std::memory_order_seq_cst); // sequentially
                                                                                                // consistent
        detail::loadHookHandle() = fw->addLoadHook<IProfiler>(nullptr, &detail::loadHook, nullptr);

        // Register channels
        Channel::onProfilerRegistered();

        // Make sure this only happens once even if re-registered.
        static bool ensureDeregister = (atexit(&deregisterProfilerForClient), true);
        CARB_UNUSED(ensureDeregister);
    }
}
} // namespace profiler
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Tuple.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<tuple>` library.
#pragma once
#include <tuple>
#include "Functional.h"
namespace carb
{
namespace cpp
{
namespace detail
{
//! Helper for \ref carb::cpp::apply(): expands the tuple elements selected by the index
//! sequence into a single \ref carb::cpp::invoke() call.
template <class F, class Tuple, size_t... I>
constexpr decltype(auto) applyImpl(F&& f, Tuple&& t, std::index_sequence<I...>)
{
    return carb::cpp::invoke(std::forward<F>(f), std::get<I>(std::forward<Tuple>(t))...);
}
} // namespace detail
//! Invoke the callable \a f with the elements of tuple \a t as its arguments.
//!
//! C++14-compatible equivalent of C++17 `std::apply`.
//! @param f The callable to invoke.
//! @param t A tuple-like object (supporting `std::get` and `std::tuple_size`) whose elements become the arguments.
//! @returns The result of invoking \a f with the unpacked tuple elements.
template <class F, class Tuple>
constexpr decltype(auto) apply(F&& f, Tuple&& t)
{
    return detail::applyImpl(std::forward<F>(f), std::forward<Tuple>(t),
                             std::make_index_sequence<std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
}
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Semaphore.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<semaphore>` library.
#pragma once
#include "Atomic.h"
#include "../thread/Futex.h"
#include <algorithm>
#include <thread>
namespace carb
{
namespace cpp
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
// Upper bound for a semaphore's counter. Platform-specific, presumably to match the native
// futex/wait primitive's value type (LONG on Windows, int elsewhere) — confirm against Futex.h.
# if CARB_PLATFORM_WINDOWS
constexpr ptrdiff_t kSemaphoreValueMax = LONG_MAX;
# else
constexpr ptrdiff_t kSemaphoreValueMax = INT_MAX;
# endif
} // namespace detail
#endif
// Handle case where Windows.h may have defined 'max'
#pragma push_macro("max")
#undef max
/**
* C++20-compatible counting semaphore class.
*
* A counting_semaphore is a lightweight synchronization primitive that can control access to a shared resource. Unlike
* a mutex, a counting_semaphore allows more than one concurrent access to the same resource, for at least
* `least_max_value` concurrent accessors. The program is ill-formed if `least_max_value` is negative.
*
* This is a C++14-compatible implementation of std::counting_semaphore from C++20 draft spec dated 11/13/2019.
*
* @note `sizeof(counting_semaphore)` is 8 bytes for `least_max_value > 1`. A specialization exists for
* `least_max_value == 1` where the size is only 1 byte, also known as @ref binary_semaphore.
*
* @tparam least_max_value The maximum count value that this semaphore can reach. This
* indicates the number of threads or callers that can simultaneously
* successfully acquire this semaphore. May not be less than or equal to zero.
*
* @thread_safety This class is thread-safe. However, attempting to destruct before all threads have returned from any
* function (especially the wait functions) is malformed and will lead to undefined behavior.
*/
template <ptrdiff_t least_max_value = detail::kSemaphoreValueMax>
class CARB_VIZ counting_semaphore
{
    CARB_PREVENT_COPY_AND_MOVE(counting_semaphore);
public:
    /** Constructor: initializes a new semaphore object with a given count.
     *
     * @param[in] desired The initial count value for the semaphore. This must be a positive
     *                    value or zero. If set to zero, the semaphore will be 'unowned' on
     *                    creation. If set to any other value, the semaphore will only be able
     *                    to be acquired by at most @a least_max_value minus @p desired other
     *                    threads or callers until it is released @p desired times.
     */
    constexpr explicit counting_semaphore(ptrdiff_t desired) noexcept
        // Clamp the initial count into the valid range [0, least_max_value].
        : m_data(::carb_min(::carb_max(ptrdiff_t(0), desired), least_max_value))
    {
        static_assert(least_max_value >= 1, "semaphore needs a count of at least 1");
        static_assert(least_max_value <= detail::kSemaphoreValueMax, "semaphore count too high");
    }
    /**
     * Destructor
     *
     * On Linux, performs a `CARB_CHECK` to verify that no waiters are present when `*this` is destroyed.
     *
     * @note On Windows, `ExitProcess()` (or returning from `main()`) causes all threads to be terminated before
     * `atexit()` registered functions are called (and static objects are cleaned up). This has the unpleasant side
     * effect of potentially terminating threads that are waiting on a semaphore and will never get the chance to clean
     * up their waiting count. Therefore, this check is linux only.
     */
    ~counting_semaphore() noexcept
    {
#if CARB_PLATFORM_LINUX
        // Make sure we don't have any waiters when we are destroyed
        CARB_CHECK((m_data.load(std::memory_order_acquire) >> kWaitersShift) == 0, "Semaphore destroyed with waiters");
#endif
    }
    /** Retrieves the maximum count value this semaphore can reach.
     *
     * @returns The maximum count value for this semaphore. This will never be zero.
     *
     * @thread_safety This call is thread safe.
     */
    static constexpr ptrdiff_t max() noexcept
    {
        return least_max_value;
    }
    /** Releases references on this semaphore and potentially wakes another waiting thread.
     *
     * @param[in] update The number of references to atomically increment this semaphore's
     *                   counter by. This number of waiting threads will be woken as a
     *                   result.
     * @return No return value.
     *
     * @remarks This releases zero or more references on this semaphore. If a reference is
     *          released, another waiting thread could potentially be woken and acquire this
     *          semaphore again.
     *
     * @thread_safety This call is thread safe.
     */
    void release(ptrdiff_t update = 1) noexcept
    {
        CARB_ASSERT(update >= 0);
        uint64_t d = m_data.load(std::memory_order_relaxed), u;
        for (;;)
        {
            // The standard is somewhat unclear here. Preconditions are that update >= 0 is true and update <= max() -
            // counter is true. And it throws system_error when an exception is required. So I supposed that it's likely
            // that violating the precondition would cause a system_error exception which doesn't completely make sense
            // (I would think runtime_error would make more sense). However, throwing at all is inconvenient, as is
            // asserting/crashing/etc. Therefore, we clamp the update value here.
            u = ::carb_min(update, max() - ptrdiff_t(d & kValueMask));
            if (CARB_LIKELY(m_data.compare_exchange_weak(d, d + u, std::memory_order_release, std::memory_order_relaxed)))
                break;
        }
        // At this point, the Semaphore could be destroyed by another thread. Therefore, we shouldn't access any other
        // members (taking the address of m_data below is okay because that would not actually read any memory that
        // may be destroyed)
        // waiters with a value have been notified already by whatever thread added the value. Only wake threads that
        // haven't been woken yet.
        ptrdiff_t waiters = ptrdiff_t(d >> kWaitersShift);
        ptrdiff_t value = ptrdiff_t(d & kValueMask);
        ptrdiff_t wake = ::carb_min(ptrdiff_t(u), waiters - value);
        if (wake > 0)
        {
            // cpp::atomic only has notify_one() and notify_all(). Call the futex system directly to wake N.
            thread::futex::notify(m_data, unsigned(size_t(wake)), unsigned(size_t(waiters)));
        }
    }
    /** Acquires a reference to this semaphore.
     *
     * @returns No return value.
     *
     * @remarks This blocks until a reference to this semaphore can be successfully acquired.
     *          This is done by atomically decrementing the semaphore's counter if it is greater
     *          than zero. If the counter is zero, this will block until the counter is greater
     *          than zero. The counter is incremented by calling release().
     *
     * @thread_safety This call is thread safe.
     */
    void acquire() noexcept
    {
        // Fast path: uncontended decrement without touching the waiter count.
        if (CARB_LIKELY(fast_acquire(false)))
            return;
        // Register as a waiter
        uint64_t d =
            m_data.fetch_add(uint64_t(1) << kWaitersShift, std::memory_order_relaxed) + (uint64_t(1) << kWaitersShift);
        for (;;)
        {
            if ((d & kValueMask) == 0)
            {
                // Need to wait
                m_data.wait(d, std::memory_order_relaxed);
                // Reload
                d = m_data.load(std::memory_order_relaxed);
            }
            else
            {
                // Try to unregister as a waiter and grab a token at the same time
                if (CARB_LIKELY(m_data.compare_exchange_weak(d, d - 1 - (uint64_t(1) << kWaitersShift),
                                                             std::memory_order_acquire, std::memory_order_relaxed)))
                    return;
            }
        }
    }
    /** Attempts to acquire a reference to this semaphore.
     *
     * @returns `true` if the semaphore's counter was greater than zero and it was successfully
     *          atomically decremented. Returns `false` if the counter was zero and the
     *          semaphore could not be acquired. This will never block even if the semaphore
     *          could not be acquired.
     *
     * @thread_safety This call is thread safe.
     */
    bool try_acquire() noexcept
    {
        // `true`: a definitive answer is required (see fast_acquire()).
        return fast_acquire(true);
    }
    /** Attempts to acquire a reference to this semaphore for a specified relative time.
     *
     * @tparam Rep The representation primitive type for the duration value.
     * @tparam Period The duration's time scale value (ie: milliseconds, seconds, etc).
     * @param[in] duration The amount of time to try to acquire this semaphore for. This is
     *                     specified as a duration relative to the call time.
     * @returns `true` if the semaphore's counter was greater than zero and it was successfully
     *          atomically decremented within the specified time limit. Returns `false` if the
     *          counter was zero and the semaphore could not be acquired within the time limit.
     *          This will only block for up to approximately the specified time limit.
     *
     * @thread_safety This call is thread safe.
     */
    template <class Rep, class Period>
    bool try_acquire_for(const std::chrono::duration<Rep, Period>& duration) noexcept
    {
        if (CARB_LIKELY(fast_acquire(false)))
            return true;
        if (duration.count() <= 0)
            return false;
        // Register as a waiter
        uint64_t d =
            m_data.fetch_add(uint64_t(1) << kWaitersShift, std::memory_order_relaxed) + (uint64_t(1) << kWaitersShift);
        while ((d & kValueMask) != 0)
        {
            // Try to unregister as a waiter and grab a token at the same time
            if (CARB_LIKELY(m_data.compare_exchange_weak(
                    d, d - 1 - (uint64_t(1) << kWaitersShift), std::memory_order_acquire, std::memory_order_relaxed)))
                return true;
        }
        // Now we need to wait, but do it with absolute time so that we properly handle spurious futex wakeups
        auto time_point = std::chrono::steady_clock::now() + thread::detail::clampDuration(duration);
        for (;;)
        {
            if (!m_data.wait_until(d, time_point, std::memory_order_relaxed))
            {
                // Timed out. Unregister as a waiter
                m_data.fetch_sub(uint64_t(1) << kWaitersShift, std::memory_order_relaxed);
                return false;
            }
            // Reload after wait
            d = m_data.load(std::memory_order_relaxed);
            if ((d & kValueMask) != 0)
            {
                // Try to unreference as a waiter and grab a token at the same time
                if (CARB_LIKELY(m_data.compare_exchange_weak(d, d - 1 - (uint64_t(1) << kWaitersShift),
                                                             std::memory_order_acquire, std::memory_order_relaxed)))
                    return true;
            }
        }
    }
    /** Attempts to acquire a reference to this semaphore until a specified absolute time.
     *
     * @tparam Clock The clock to use as a time source to compare the time limit to.
     * @tparam Duration The duration type associated with the specified clock.
     * @param[in] time_point The absolute time to try to acquire this semaphore for. This is
     *                       specified as a time point from the given clock @a Clock.
     * @returns `true` if the semaphore's counter was greater than zero and it was successfully
     *          atomically decremented before the specified time limit. Returns `false` if the
     *          counter was zero and the semaphore could not be acquired before the time limit.
     *          This will only block up until approximately the specified time limit.
     *
     * @thread_safety This call is thread safe.
     */
    template <class Clock, class Duration>
    bool try_acquire_until(const std::chrono::time_point<Clock, Duration>& time_point) noexcept
    {
        if (CARB_LIKELY(fast_acquire(false)))
            return true;
        // Register as a waiter
        uint64_t d =
            m_data.fetch_add(uint64_t(1) << kWaitersShift, std::memory_order_relaxed) + (uint64_t(1) << kWaitersShift);
        for (;;)
        {
            if ((d & kValueMask) == 0)
            {
                // Need to wait
                if (!m_data.wait_until(d, time_point, std::memory_order_relaxed))
                {
                    // Timed out. Unregister as a waiter
                    m_data.fetch_sub(uint64_t(1) << kWaitersShift, std::memory_order_relaxed);
                    return false;
                }
                // Reload after wait
                d = m_data.load(std::memory_order_relaxed);
            }
            else
            {
                // Try to unregister as a waiter and grab a token at the same time
                if (CARB_LIKELY(m_data.compare_exchange_weak(d, d - 1 - (uint64_t(1) << kWaitersShift),
                                                             std::memory_order_acquire, std::memory_order_relaxed)))
                    return true;
            }
        }
    }
#ifndef DOXYGEN_SHOULD_SKIP_THIS
protected:
    // The 32 most significant bits are the waiters; the lower 32 bits is the value of the semaphore
    CARB_VIZ cpp::atomic_uint64_t m_data;
    constexpr static int kWaitersShift = 32;
    constexpr static unsigned kValueMask = 0xffffffff;
    // Fast path: attempt to decrement the value portion of m_data without registering as a waiter.
    // When `needResolution` is true (try_acquire), a definitive answer is required, so the CAS is
    // retried until it either succeeds or the count is observed to be zero. When false, a single
    // failed CAS returns false and the caller falls back to the slow (waiting) path.
    CARB_ALWAYS_INLINE bool fast_acquire(bool needResolution) noexcept
    {
        uint64_t d = m_data.load(needResolution ? std::memory_order_acquire : std::memory_order_relaxed);
        for (;;)
        {
            if (uint32_t(d & kValueMask) == 0)
                return false;
            if (CARB_LIKELY(m_data.compare_exchange_weak(d, d - 1, std::memory_order_acquire, std::memory_order_relaxed)))
                return true;
            if (!needResolution)
                return false;
        }
    }
#endif
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/** Specialization for the case of a semaphore with a maximum count of 1. This is treated as
 * a binary semaphore - it can only be acquired by one caller at a time.
 */
template <>
class CARB_VIZ counting_semaphore<1>
{
    CARB_PREVENT_COPY_AND_MOVE(counting_semaphore);
public:
    //! Maximum count: always 1 for a binary semaphore.
    static constexpr ptrdiff_t max() noexcept
    {
        return 1;
    }
    //! Constructor; the initial count is clamped into the range [0, 1].
    constexpr explicit counting_semaphore(ptrdiff_t desired) noexcept
        : m_val(uint8_t(size_t(::carb_min(::carb_max(ptrdiff_t(0), desired), max()))))
    {
    }
    //! Releases the semaphore; wakes one waiter if the state transitions 0 -> 1.
    void release(ptrdiff_t update = 1) noexcept
    {
        if (CARB_UNLIKELY(update <= 0))
            return;
        CARB_ASSERT(update == 1); // precondition failure
        // exchange() yields the prior state; only notify when a waiter could have been blocked (was 0).
        if (!m_val.exchange(1, std::memory_order_release))
            m_val.notify_one();
    }
    //! Blocks until the semaphore can be acquired (state transition 1 -> 0).
    void acquire() noexcept
    {
        for (;;)
        {
            uint8_t old = m_val.exchange(0, std::memory_order_acquire);
            if (CARB_LIKELY(old == 1))
                break;
            CARB_ASSERT(old == 0); // m_val can only be 0 or 1
            m_val.wait(0, std::memory_order_relaxed);
        }
    }
    //! Non-blocking acquire attempt; returns `true` if the semaphore was available (state was 1).
    bool try_acquire() noexcept
    {
        uint8_t old = m_val.exchange(0, std::memory_order_acquire);
        CARB_ASSERT(old <= 1); // m_val can only be 0 or 1
        return old == 1;
    }
    //! Timed acquire with a relative timeout; see try_acquire_until().
    template <class Rep, class Period>
    bool try_acquire_for(const std::chrono::duration<Rep, Period>& duration) noexcept
    {
        return try_acquire_until(std::chrono::steady_clock::now() + thread::detail::clampDuration(duration));
    }
    //! Timed acquire with an absolute deadline; returns `false` on timeout.
    template <class Clock, class Duration>
    bool try_acquire_until(const std::chrono::time_point<Clock, Duration>& time_point) noexcept
    {
        for (;;)
        {
            uint8_t old = m_val.exchange(0, std::memory_order_acquire);
            if (CARB_LIKELY(old == 1))
                return true;
            CARB_ASSERT(old == 0); // m_val can only be 0 or 1
            if (!m_val.wait_until(0, time_point, std::memory_order_relaxed))
                return false;
        }
    }
protected:
    //! Semaphore state: 1 = available, 0 = held/unavailable.
    CARB_VIZ cpp::atomic_uint8_t m_val;
};
#endif
/** Alias for a counting semaphore that can only be acquired by one caller at a time. */
using binary_semaphore = counting_semaphore<1>;
#pragma pop_macro("max")
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Functional.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<functional>` library.
#pragma once
#include "TypeTraits.h"
#include "detail/ImplInvoke.h"
#include "../detail/NoexceptType.h"
namespace carb
{
namespace cpp
{
CARB_DETAIL_PUSH_IGNORE_NOEXCEPT_TYPE()
//! Invoke the function \a f with the given \a args pack.
//!
//! This is equivalent to the C++20 \c std::invoke function. It was originally added in C++17, but is marked as
//! \c constexpr per the C++20 Standard.
template <typename Func, typename... TArgs>
constexpr invoke_result_t<Func, TArgs...> invoke(Func&& f,
                                                 TArgs&&... args) noexcept(is_nothrow_invocable<Func, TArgs...>::value)
{
    // Dispatch on the decayed callable type; detail::invoke_impl (see detail/ImplInvoke.h) selects the
    // correct call form for the callable and perfectly forwards the arguments.
    return detail::invoke_impl<std::decay_t<Func>>::eval(std::forward<Func>(f), std::forward<TArgs>(args)...);
}
//! \cond DEV
namespace detail
{
// This is needed to handle calling a function which returns a non-void when `R` is void. The only difference is the
// lack of a return statement.
// Overload selected when R is void: the callee's result (if any) is intentionally discarded.
template <typename R, typename Func, typename... TArgs>
constexpr std::enable_if_t<is_void<R>::value> invoke_r_impl(Func&& f, TArgs&&... args) noexcept(
    is_nothrow_invocable_r<R, Func, TArgs...>::value)
{
    // No return statement: a non-void result is simply dropped, matching `static_cast<void>(invoke(...))`.
    detail::invoke_impl<std::decay_t<Func>>::eval(std::forward<Func>(f), std::forward<TArgs>(args)...);
}
// Overload selected when R is non-void: the invocation result is converted to R as part of the return.
template <typename R, typename Func, typename... TArgs>
constexpr std::enable_if_t<!is_void<R>::value, R> invoke_r_impl(Func&& f, TArgs&&... args) noexcept(
    is_nothrow_invocable_r<R, Func, TArgs...>::value)
{
    return detail::invoke_impl<std::decay_t<Func>>::eval(std::forward<Func>(f), std::forward<TArgs>(args)...);
}
} // namespace detail
//! \endcond
//! Invoke the function with the given arguments with the explicit return type \c R. This follows the same rules as
//! \ref carb::cpp::invoke().
//!
//! This is equivalent to the C++23 \c std::invoke_r function. It lives here because people would expect an \c invoke_r
//! function to live next to an \c invoke function.
template <typename R, typename Func, typename... TArgs>
constexpr R invoke_r(Func&& f, TArgs&&... args) noexcept(is_nothrow_invocable_r<R, Func, TArgs...>::value)
{
    // The void/non-void distinction for R is handled by the detail::invoke_r_impl overload pair above.
    return detail::invoke_r_impl<R>(std::forward<Func>(f), std::forward<TArgs>(args)...);
}
CARB_DETAIL_POP_IGNORE_NOEXCEPT_TYPE()
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Utility.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<utility>` library.
#pragma once
#include "../Defines.h"
#include <utility>
namespace carb
{
namespace cpp
{
#if !CARB_HAS_CPP17
//! Tag type used to disambiguate in-place construction (pre-C++17 stand-in for `std::in_place_t`).
struct in_place_t
{
    explicit in_place_t() = default;
};
//! Tag value of type \ref in_place_t (stand-in for `std::in_place`).
static constexpr in_place_t in_place{};
//! Tag type used to disambiguate in-place construction of a specific type (stand-in for `std::in_place_type_t`).
template <class>
struct in_place_type_t
{
    explicit in_place_type_t() = default;
};
//! Tag value of type \ref in_place_type_t (stand-in for `std::in_place_type`).
template <class T>
static constexpr in_place_type_t<T> in_place_type{};
//! Tag type used to disambiguate in-place construction at a specific index (stand-in for `std::in_place_index_t`).
template <std::size_t I>
struct in_place_index_t
{
    explicit in_place_index_t() = default;
};
//! Tag value of type \ref in_place_index_t (stand-in for `std::in_place_index`).
template <std::size_t I>
static constexpr in_place_index_t<I> in_place_index{};
#else
using std::in_place;
using std::in_place_index;
using std::in_place_index_t;
using std::in_place_t;
using std::in_place_type;
using std::in_place_type_t;
#endif
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/StringView.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<string_view>` library.
#pragma once
#include "../Defines.h"
#include "../../omni/detail/PointerIterator.h"
#include "TypeTraits.h"
#include <algorithm>
#include <string>
#include <typeindex> // for std::hash
#include <utility>
namespace carb
{
namespace cpp
{
template <class CharT, class Traits>
class basic_string_view;
//! \cond DEV
namespace detail
{
// Detects whether a type is an instantiation of carb::cpp::basic_string_view (primary template: no).
template <class T>
struct IsBasicStringView : public std::false_type
{
};
// Specialization: any basic_string_view<T, Traits> matches.
template <class T, class Traits>
struct IsBasicStringView<basic_string_view<T, Traits>> : public std::true_type
{
};
// Detects whether T declares a conversion function `operator OpType()` (primary template: no).
template <class T, class OpType, class = void>
struct HasOperator : public std::false_type
{
};
// SFINAE specialization: matches when `declval<T>().operator OpType()` is a well-formed expression.
template <class T, class OpType>
struct HasOperator<T, OpType, void_t<decltype(std::declval<T>().operator OpType())>> : public std::true_type
{
};
// Detects whether T is a reference to a const CharT array, i.e. the type of a string-literal argument
// (primary template: no).
template <class CharT, class T>
struct IsCharArrayLiteral : public std::false_type
{
};
// Specialization: matches `const CharT (&)[N]`.
template <class CharT, size_t N>
struct IsCharArrayLiteral<CharT, const CharT (&)[N]> : public std::true_type
{
};
// GCC instantiates some functions always so they cannot use static_assert, but throwing an exception is allowed from a
// constexpr function (which will act as a compile failure if constexpr) so fall back to that.
#if CARB_EXCEPTIONS_ENABLED
# define CARB_ALWAYS_FAIL(msg) throw std::out_of_range(msg)
# define CARB_THROW_OR_CHECK(check, msg) \
if (!CARB_LIKELY(check)) \
throw std::out_of_range(msg)
#else
# define CARB_THROW_OR_CHECK(check, msg) CARB_CHECK((check), msg)
# if CARB_COMPILER_MSC
# define CARB_ALWAYS_FAIL(msg) static_assert(false, msg)
# else
# define CARB_ALWAYS_FAIL(msg) CARB_FATAL_UNLESS(false, msg)
# endif
#endif
} // namespace detail
//! \endcond DEV
/**
* The class template basic_string_view describes an object that can refer to a constant contiguous sequence of elements
* with the first element of the sequence at position zero.
*
* This implementation of \c string_view is a guaranteed @rstref{ABI- and interop-safe <abi-compatibility>} type.
*
* @see https://en.cppreference.com/w/cpp/string/basic_string_view
* @tparam CharT character type
* @tparam Traits A traits class specifying the operations on the character type. Like for `std::basic_string`,
* `Traits::char_type` must name the same type as `CharT` or the program is ill-formed.
*/
template <class CharT, class Traits = std::char_traits<CharT>>
class CARB_VIZ basic_string_view
{
public:
//! `Traits`
using traits_type = Traits;
//! `CharT`
using value_type = CharT;
//! `CharT*`
using pointer = CharT*;
//! `const CharT*`
using const_pointer = const CharT*;
//! `CharT&`
using reference = CharT&;
//! `const CharT&`
using const_reference = const CharT&;
//! implementation defined constant LegacyRandomAccessIterator and LegacyContiguousIterator whose value_type
//! is CharT.
using const_iterator = omni::detail::PointerIterator<const_pointer, basic_string_view>;
//! `const_iterator`
//! \note `iterator` and `const_iterator` are the same type because string views are views into constant character
//! sequences.
using iterator = const_iterator;
//! `std::reverse_iterator<const_iterator>`
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
//! `const_reverse_iterator`
using reverse_iterator = const_reverse_iterator;
//! `std::size_t`
using size_type = std::size_t;
//! `std::ptrdiff_t`
using difference_type = std::ptrdiff_t;
//! Special value. The exact meaning depends on the context.
static constexpr size_type npos = size_type(-1);
//! Constructs a basic_string_view.
//! \details Constructs an empty basic_string_view. After construction, \ref data() is equal to `nullptr` and
//! \ref size() is equal to `0`.
    constexpr basic_string_view() noexcept = default; // empty view: data() == nullptr, size() == 0
    //! Constructs a basic_string_view.
    //! \details Constructs a view of the same content as \p other. After construction, \ref data() is equal to
    //! `other.data()` and \ref size() is equal to `other.size()`.
    //! @param other The string view to copy.
    constexpr basic_string_view(const basic_string_view& other) noexcept = default; // shallow copy: pointer + length
    //! Constructs a basic_string_view.
    //! \details Constructs a view of the first count characters of the character array starting with the element
    //! pointed by \p s. \p s can contain null characters. The behavior is undefined if `[s, s + count)` is not a valid
    //! range (even though the constructor may not access any of the elements of this range). After construction,
    //! \ref data() is equal to \p s, and \ref size() is equal to \p count.
    //! @param s The character array to view.
    //! @param count The number of characters in \p s to view.
    constexpr basic_string_view(const CharT* s, size_type count) : m_data(s), m_count(count)
    {
        // Intentionally no validation: the validity of [s, s + count) is the caller's contract (see \details).
    }
//! Constructs a basic_string_view.
//! \details Constructs a view of the null-terminated character string pointed to by \p s, not including the
//! terminating null character. The length of the view is determined as if by `Traits::length(s)`. The behavior is
//! undefined if `[s, s + Traits::length(s))` is not a valid range. After construction, \ref data() is equal to
//! \p s, and \ref size() is equal to `Traits::length(s)`.
//! \note As a Carbonite extension, this constructor will not participate in overload resolution if \p s is a
//! literal `CharT` array. Instead, see \ref basic_string_view(const CharT(&literal)[N]).
//! @param s The null-terminated character string to view.
#ifdef DOXYGEN_BUILD
    constexpr basic_string_view(const CharT* s) : m_data(s), m_count(traits_type::length(s))
#else
    // SFINAE: excluded for literal CharT arrays (those pick the array constructor and skip the
    // Traits::length() scan) and enabled only for types convertible to const_pointer.
    template <class T,
              std::enable_if_t<!detail::IsCharArrayLiteral<CharT, T>::value && std::is_convertible<T, const_pointer>::value,
                               bool> = false>
    constexpr basic_string_view(T&& s) : m_data(s), m_count(traits_type::length(s))
#endif
    {
    }
//! Constructs a basic_string_view.
//! \details Constructs a view of the literal string \p literal, not including the terminating null character. The
//! length of the view is determined by `N - 1` as literal strings will have a terminating null character. After
//! construction, \ref data() is equal to \p literal, and \ref size() is equal to `N - 1`.
//! \note This constructor is a Carbonite extension and provided as an optimization. For `std::basic_string_view`,
//! the \ref basic_string_view(const CharT*) constructor would be invoked instead. This allows string views to be
//! constructed as a constant expression from string literals without using the `_sv` literal extension.
//! @param literal A string literal to view.
    template <size_t N>
    constexpr basic_string_view(const CharT (&literal)[N]) noexcept : m_data(literal), m_count(N - 1)
    {
        // N includes the terminating null character of a string literal, hence the stored length is N - 1.
    }
//! Constructs a basic_string_view.
//! \details Constructs a basic_string_view over the range `[first, last)`. The behavior is undefined if
//! `[first, last)` is not a valid range or if `It` is not a random-access iterator. This function differs
//! significantly from the C++20 definition since the concepts of `contiguous_iterator` and `sized_sentinel_for` are
//! not available. Since these concepts are not available until C++20, instead this function does not participate in
//! overload resolution unless
//! `std::iterator_traits<It>::iterator_category == std::random_access_iterator_tag`. Also \p first and \p last must
//! be a matching iterator type. After construction, \ref data() is equal to `std::to_address(first)`, and
//! \ref size() is equal to `last - first`.
//! @param first Iterator to the beginning of the view.
//! @param last Iterator to the end of the view (non-inclusive).
template <class It CARB_NO_DOC(
,
std::enable_if_t<std::is_same<typename std::iterator_traits<It>::iterator_category, std::random_access_iterator_tag>::value,
bool> = false)>
constexpr basic_string_view(It first, It last) : m_data(std::addressof(*first)), m_count(std::distance(first, last))
{
}
//! Constructs a basic_string_view.
//! \details Constructs a basic_string_view over the "range" \p r. After construction \ref data() is equal to
//! `r.data()` and \ref size() is equal to `r.size()`.
//! @tparam R A range type. Since this implementation is for pre-C++20 and ranges are not available, this is an
//! approximation of a `range`: This type must have `data()` and `size()` member functions that must be
//! convertible to \ref const_pointer and \ref size_type respectively. (e.g. a `std::vector`).
//! @param r The range to view. Behavior is undefined if `[range.data(), range.size())` is not a contiguous range or
//! cannot be borrowed (i.e. it is a temporary that will expire leaving a dangling pointer).
    template <class R CARB_NO_DOC(,
                      std::enable_if_t<cpp::detail::IsConvertibleRange<cpp::remove_cvref_t<R>, const_pointer>::value &&
                                           !detail::IsBasicStringView<cpp::remove_cvref_t<R>>::value &&
                                           !std::is_convertible<R, const char*>::value &&
                                           !detail::HasOperator<cpp::remove_cvref_t<R>, basic_string_view>::value,
                                       bool> = false)>
    constexpr explicit basic_string_view(R&& r) : m_data(r.data()), m_count(r.size())
    {
        // The SFINAE above excludes basic_string_view itself, pointer-convertible types, and types already
        // providing `operator basic_string_view()`, so this explicit overload never shadows those paths.
    }
//! Implicit conversion from `std::basic_string` to basic_string_view.
//!
//! \details Construct a basic_string_view from a `std::basic_string`. The construction is implicit to mimic the
//! behavior of <a
//! href="https://en.cppreference.com/w/cpp/string/basic_string/operator_basic_string_view">std::basic_string<CharT,Traits,Allocator>::operator
//! basic_string_view</a>.
//!
//! @tparam S An instance of `std::basic_string`.
    template <class S,
              class Allocator = typename S::allocator_type,
              std::enable_if_t<std::is_same<std::basic_string<CharT, Traits, Allocator>, std::decay_t<S>>::value, bool> = false>
    constexpr basic_string_view(const S& s) : m_data(s.data()), m_count(s.size())
    {
        // Implicit by design (mirrors std::basic_string::operator basic_string_view). The string `s` must
        // outlive this view; no copy of the characters is made.
    }
//! basic_string_view cannot be constructed from nullptr.
    constexpr basic_string_view(std::nullptr_t) = delete; // ill-formed; use the default constructor for an empty view
    //! Assigns a view.
    //! @param view The view to replace `*this` with.
    //! @returns `*this`
    constexpr basic_string_view& operator=(const basic_string_view& view) noexcept = default; // shallow copy
//! Returns an iterator to the beginning.
//! @returns A \ref const_iterator to the first character of the view.
    constexpr const_iterator begin() const noexcept
    {
        return const_iterator(m_data);
    }
    //! Returns an iterator to the beginning.
    //! @returns A \ref const_iterator to the first character of the view.
    constexpr const_iterator cbegin() const noexcept
    {
        return const_iterator(m_data);
    }
    //! Returns an iterator to the end.
    //! @returns A \ref const_iterator to the character following the last character of the view. This character acts as
    //! a placeholder, attempting to access it results in undefined behavior.
    constexpr const_iterator end() const noexcept
    {
        return const_iterator(m_data + m_count); // one past the last character
    }
    //! Returns an iterator to the end.
    //! @returns A \ref const_iterator to the character following the last character of the view. This character acts as
    //! a placeholder, attempting to access it results in undefined behavior.
    constexpr const_iterator cend() const noexcept
    {
        return const_iterator(m_data + m_count); // one past the last character
    }
    //! Returns a reverse iterator to the beginning.
    //! @returns A \ref const_reverse_iterator to the first character of the reversed view. It corresponds to the last
    //! character of the non-reversed view.
    constexpr const_reverse_iterator rbegin() const noexcept
    {
        return const_reverse_iterator(end()); // reverse iteration starts at the last character
    }
    //! Returns a reverse iterator to the beginning.
    //! @returns A \ref const_reverse_iterator to the first character of the reversed view. It corresponds to the last
    //! character of the non-reversed view.
    constexpr const_reverse_iterator crbegin() const noexcept
    {
        return const_reverse_iterator(cend());
    }
    //! Returns a reverse iterator to the end.
    //! @returns a \ref const_reverse_iterator to the character following the last character of the reversed view. It
    //! corresponds to the character preceding the first character of the non-reversed view. This character acts as a
    //! placeholder, attempting to access it results in undefined behavior.
    constexpr const_reverse_iterator rend() const noexcept
    {
        return const_reverse_iterator(begin());
    }
    //! Returns a reverse iterator to the end.
    //! @returns a \ref const_reverse_iterator to the character following the last character of the reversed view. It
    //! corresponds to the character preceding the first character of the non-reversed view. This character acts as a
    //! placeholder, attempting to access it results in undefined behavior.
    constexpr const_reverse_iterator crend() const noexcept
    {
        return const_reverse_iterator(cbegin());
    }
//! Accesses the specified character.
//! \details Returns a const reference to the character at the specified position. No bounds checking is performed:
//! the behavior is undefined if `pos >= size()`.
//! @param pos The position of the character to return.
//! @returns A \ref const_reference to the requested character.
    constexpr const_reference operator[](size_type pos) const noexcept /*strengthened*/
    {
        // Though the standard says no bounds checking we do assert
        CARB_ASSERT(pos < m_count);
        return m_data[pos];
    }
    //! Accesses the specified character with bounds checking.
    //! \details Returns a const reference to the character at the specified position. Bounds checking is performed.
    //! \throws std::out_of_range Invalid access: \p pos is at or after \ref size().
    //! @param pos The position of the character to return.
    //! @returns A \ref const_reference to the requested character
    constexpr const_reference at(size_type pos) const
    {
        // Checked variant of operator[]: throws, or falls back to CARB_CHECK when exceptions are disabled
        // (see the CARB_THROW_OR_CHECK definition above).
        CARB_THROW_OR_CHECK(pos < m_count, "pos >= size()");
        return m_data[pos];
    }
    //! Accesses the first character.
    //! @returns A \ref const_reference to the first character in the view. The behavior is undefined if empty() is
    //! `true`.
    constexpr const_reference front() const noexcept /*strengthened*/
    {
        CARB_ASSERT(!empty(), "Undefined since empty()");
        return *m_data;
    }
    //! Accesses the last character.
    //! @returns A \ref const_reference to the last character in the view. The behavior is undefined if empty() is
    //! `true`.
    constexpr const_reference back() const noexcept /*strengthened*/
    {
        CARB_ASSERT(!empty(), "Undefined since empty()");
        return m_data[m_count - 1];
    }
//! Returns a pointer to the first character of a view.
//! \details Returns a pointer to the underlying character array. The pointer is such that the range
//! `[data(), data() + size())` is valid and the values in it correspond to the values of the view.
//! \note Unlike `std::basic_string::data()` and string literals, this function returns a pointer to a buffer that
//! is not necessarily null-terminated, for example a substring view (e.g. from \ref remove_suffix()). Therefore,
//! it is typically a mistake to pass `data()` to a routine that takes just a `const CharT*` and expects a null-
//! terminated string.
//! @returns A \ref const_pointer to the underlying character array.
    constexpr const_pointer data() const noexcept
    {
        return m_data; // may be nullptr for a default-constructed view; NOT necessarily null-terminated
    }
    //! Returns the number of characters.
    //! \details Returns the number of `CharT` characters in the view, i.e. `std::distance(begin(), end())`.
    //! @returns The number of `CharT` elements in the view.
    constexpr size_type size() const noexcept
    {
        return m_count;
    }
    //! Returns the number of characters.
    //! \details Returns the number of `CharT` characters in the view, i.e. `std::distance(begin(), end())`.
    //! @returns The number of `CharT` elements in the view.
    constexpr size_type length() const noexcept
    {
        return m_count; // identical to size(); provided for parity with std::basic_string
    }
    //! Returns the maximum number of characters.
    //! \details The largest possible number of char-like objects that can be referred to by a basic_string_view.
    //! @returns Maximum number of characters.
    constexpr size_type max_size() const noexcept
    {
        return npos - 1; // npos itself is reserved as the "not found" sentinel
    }
    //! Checks whether the view is empty.
    //! \details Checks if the view has no characters, i.e. whether \ref size() is `0`.
    //! @returns `true` if the view is empty, `false` otherwise.
    CARB_NODISCARD constexpr bool empty() const noexcept
    {
        return m_count == 0;
    }
//! Shrinks the view by moving its start forward.
//! \details Moves the start of the view forward by \p n characters. The behavior is undefined if `n > size()`.
//! @param n Number of characters to remove from the start of the view.
    constexpr void remove_prefix(size_type n) noexcept /*strengthened*/
    {
        CARB_ASSERT(n <= size(), "Undefined since n > size()");
        // Only the view is narrowed; the underlying characters are untouched.
        m_data += n;
        m_count -= n;
    }
    //! Shrinks the view by moving its end backward.
    //! \details Moves the end of the view back by \p n characters. The behavior is undefined if `n > size()`.
    //! @param n Number of characters to remove from the end of the view.
    constexpr void remove_suffix(size_type n) noexcept /*strengthened*/
    {
        CARB_ASSERT(n <= size(), "Undefined since n > size()");
        m_count = m_count - n; // only the length changes; the data pointer stays put
    }
    //! Swaps the contents.
    //! \details Exchanges the view with that of \p v.
    //! @param v View to swap with.
    constexpr void swap(basic_string_view& v) noexcept
    {
        // NOTE(review): std::swap is not constexpr until C++20, so constant-evaluating swap() may not be
        // possible in earlier language modes — confirm whether constexpr here is intended for C++20 only.
        std::swap(m_data, v.m_data);
        std::swap(m_count, v.m_count);
    }
//! Copies characters.
//! \details Copies the substring `[pos, pos + rcount)` to the character array pointed to by \p dest, where `rcount`
//! is the smaller of \p count and `size() - pos`. Equivalent to `Traits::copy(dest, data() + pos, rcount)`.
//! \throws std::out_of_range if `pos > size()`.
//! @param dest Pointer to the destination character string.
//! @param count Requested substring length.
//! @param pos Position of first character.
//! @returns Number of characters copied.
    constexpr size_type copy(CharT* dest, size_type count, size_type pos = 0) const
    {
        CARB_THROW_OR_CHECK(pos <= size(), "pos > size()");
        // Copy at most the characters remaining after pos; the destination is NOT null-terminated.
        size_type rcount = ::carb_min(count, size() - pos);
        Traits::copy(dest, m_data + pos, rcount);
        return rcount;
    }
//! Returns a substring.
//! \details Returns a view of the substring `[pos, pos + rcount)`, where `rcount` is the smaller of \p count and
//! `size() - pos.`
//! \throws std::out_of_range if `pos > size()`.
//! @param pos Position of the first character.
//! @param count Requested length.
//! @returns View of the substring `[pos, pos + count)`.
constexpr basic_string_view substr(size_t pos, size_t count = npos) const
{
CARB_THROW_OR_CHECK(pos <= size(), "pos > size()");
size_t rcount = ::carb_min(count, size() - pos);
return { m_data + pos, rcount };
}
//! Compares two views.
//! \details The length rlen of the sequences to compare is the smaller of \ref size() and `v.size()`. The function
//! compares the two views by calling `traits::compare(data(), v.data(), rlen)`, and returns as follows:
//! * A value less than zero (`<0`) if:
//! * `Traits::compare(data(), v.data(), rlen) < 0`, or
//! * `Traits::compare(data(), v.data(), rlen) == 0` and `size() < v.size()`.
//! * A value of zero (`0`) if:
//! * `Traits::compare(data(), v.data(), rlen) == 0` and `size() == v.size()`.
//! * A value greater than zero (`>0`) if:
//! * `Traits::compare(data(), v.data(), rlen) > 0`, or
//! * `Traits::compare(data(), v.data(), rlen) == 0` and `size() > v.size()`.
//! @param v View to compare
//! @returns A negative value if `*this` is less than the other character sequence, `0` if both character sequences
//! are equal, positive value if `*this` is greater than the other character sequence. See above.
constexpr int compare(basic_string_view v) const noexcept
{
size_type rlen = ::carb_min(size(), v.size());
int result = traits_type::compare(data(), v.data(), rlen);
if (result != 0 || size() == v.size())
return result;
return (size() < v.size()) ? -1 : 1;
}
//! Compares two views.
//! \details Equivalent to `substr(pos1, count1).compare(v)`.
//! \see compare(basic_string_view) const noexcept, substr()
//! \throws std::out_of_range if `pos1 > size()`.
//! @param pos1 Position of the first character in this view to compare.
//! @param count1 Number of characters of this view to compare.
//! @param v View to compare.
//! @returns see \ref compare(basic_string_view) const noexcept
    constexpr int compare(size_type pos1, size_type count1, basic_string_view v) const
    {
        return substr(pos1, count1).compare(v); // substr() performs the pos1 bounds check (may throw)
    }
//! Compares two views.
//! \details Equivalent to `substr(pos1, count1).compare(v.substr(pos2, count2))`.
//! \see compare(basic_string_view) const noexcept, substr()
//! \throws std::out_of_range if `pos1 > size()` or if `pos2 > v.size()`.
//! @param pos1 Position of the first character in this view to compare.
//! @param count1 Number of characters of this view to compare.
//! @param v View to compare.
//! @param pos2 Position of the first character of the given view to compare.
//! @param count2 Number of characters of the given view to compare.
//! @returns see \ref compare(basic_string_view) const noexcept
    constexpr int compare(size_type pos1, size_type count1, basic_string_view v, size_type pos2, size_type count2) const
    {
        return substr(pos1, count1).compare(v.substr(pos2, count2)); // both substr() calls bounds-check (may throw)
    }
//! Compares two views.
//! \details Equivalent to `compare(basic_string_view(s))`.
//! \see compare(basic_string_view) const noexcept
//! @param s Pointer to the null-terminated character string to compare to.
//! @returns see \ref compare(basic_string_view) const noexcept
    constexpr int compare(const CharT* s) const
    {
        return compare(basic_string_view(s)); // `s` must be null-terminated; length via Traits::length
    }
//! Compares two views.
//! \details Equivalent to `substr(pos1, count1).compare(basic_string_view(s))`.
//! \see compare(basic_string_view) const noexcept, substr()
//! \throws std::out_of_range if `pos1 > size()`.
//! @param pos1 Position of the first character in this view to compare.
//! @param count1 Number of characters of this view to compare.
//! @param s Pointer to the null-terminated character string to compare to.
//! @returns see \ref compare(basic_string_view) const noexcept
    constexpr int compare(size_type pos1, size_type count1, const CharT* s) const
    {
        return substr(pos1, count1).compare(basic_string_view(s)); // `s` must be null-terminated
    }
//! Compares two views.
//! \details Equivalent to `substr(pos1, count1).compare(basic_string_view(s, count2))`. Behavior is undefined if
//! `[s, s+count2)` is not a valid contiguous range.
//! \see compare(basic_string_view) const noexcept, substr()
//! \throws std::out_of_range if `pos1 > size()`.
//! @param pos1 Position of the first character in this view to compare.
//! @param count1 Number of characters of this view to compare.
//! @param s Pointer to the character string to compare to.
//! @param count2 Number of characters of \p s to compare.
//! @returns see \ref compare(basic_string_view) const noexcept
    constexpr int compare(size_type pos1, size_type count1, const CharT* s, size_type count2) const
    {
        return substr(pos1, count1).compare(basic_string_view(s, count2)); // `s` may contain embedded nulls
    }
//! Checks if the string view starts with the given prefix.
//! \details Effectively returns `substr(0, sv.size()) == sv`.
//! @param sv A string view which may be a result of implicit conversion from `std::basic_string`.
//! @returns `true` if the string view begins with the provided prefix, `false` otherwise.
    constexpr bool starts_with(basic_string_view sv) const noexcept
    {
        // substr clamps to size(), so a prefix longer than the view simply compares unequal (no throw: pos is 0).
        return substr(0, sv.size()) == sv;
    }
//! Checks if the string view starts with the given prefix.
//! \details Effectively returns `!empty() && Traits::eq(front(), ch)`.
//! @param ch A single character.
//! @returns `true` if the string view begins with the provided prefix, `false` otherwise.
    constexpr bool starts_with(CharT ch) const noexcept
    {
        return !empty() && traits_type::eq(front(), ch); // single-character prefix; empty views never match
    }
//! Checks if the string view starts with the given prefix.
//! \details Effectively returns `starts_with(basic_string_view(s))`.
//! @param s A null-terminated character string.
//! @returns `true` if the string view begins with the provided prefix, `false` otherwise.
    constexpr bool starts_with(const CharT* s) const
    {
        return starts_with(basic_string_view(s)); // `s` must be null-terminated
    }
//! Checks if the string view ends with the given suffix.
//! \details Effectively returns `size() >= sv.size() && compare(size() - sv.size(), npos, sv) == 0`.
//! @param sv A string view which may be a result of implicit conversion from `std::basic_string`.
//! @returns `true` if the string view ends with the provided suffix, `false` otherwise.
    constexpr bool ends_with(basic_string_view sv) const noexcept
    {
        // The size() check both rejects too-long suffixes and keeps the subtraction below from wrapping.
        return size() >= sv.size() && compare(size() - sv.size(), npos, sv) == 0;
    }
//! Checks if the string view ends with the given suffix.
//! \details Effectively returns `!empty() && Traits::eq(back(), ch)`.
//! @param ch A single character.
//! @returns `true` if the string view ends with the provided suffix, `false` otherwise.
    constexpr bool ends_with(CharT ch) const noexcept
    {
        return !empty() && traits_type::eq(back(), ch); // single-character suffix; empty views never match
    }
//! Checks if the string view ends with the given suffix.
//! \details Effectively returns `ends_with(basic_string_view(s))`.
//! @param s A null-terminated character string.
//! @returns `true` if the string view ends with the provided suffix, `false` otherwise.
    constexpr bool ends_with(const CharT* s) const
    {
        return ends_with(basic_string_view(s)); // `s` must be null-terminated
    }
//! Checks if the string view contains the given substring or character.
//! \details Effectively `find(sv) != npos`.
//! @param sv A string view.
//! @returns `true` if the string view contains the provided substring, `false` otherwise.
    constexpr bool contains(basic_string_view sv) const noexcept
    {
        return find(sv) != npos; // existence test only; the match position is discarded
    }
//! Checks if the string view contains the given substring or character.
//! \details Effectively `find(c) != npos`.
//! @param c A single character.
//! @returns `true` if the string view contains the provided substring, `false` otherwise.
    constexpr bool contains(CharT c) const noexcept
    {
        return find(c) != npos; // existence test only; the match position is discarded
    }
//! Checks if the string view contains the given substring or character.
//! \details Effectively `find(s) != npos`.
//! @param s A null-terminated character string.
//! @returns `true` if the string view contains the provided substring, `false` otherwise.
    constexpr bool contains(const CharT* s) const
    {
        return find(s) != npos; // `s` must be null-terminated
    }
//! Find characters in the view.
//! \details Finds the first substring equal to the given character sequence. Complexity is `O(size() * v.size())`
//! at worst.
//! @param v View to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
constexpr size_type find(basic_string_view v, size_type pos = 0) const noexcept
{
// [strings.view.find] in the Standard.
size_type xpos = pos;
while (xpos + v.size() <= size())
{
if (traits_type::compare(v.data(), data() + xpos, v.size()) == 0)
{
return xpos;
}
xpos++;
}
return npos;
}
//! Find characters in the view.
//! \details Finds the first substring equal to the given character sequence. Equivalent to
//! `find(basic_string_view(std::addressof(ch), 1))`. Complexity is `O(size())` at worst.
//! @param ch Character to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
constexpr size_type find(CharT ch, size_type pos = 0) const noexcept
{
size_type xpos = pos;
while (xpos < size())
{
if (traits_type::eq(data()[xpos], ch))
{
return xpos;
}
xpos++;
}
return npos;
}
//! Find characters in the view.
//! \details Finds the first substring equal to the given character sequence. Equivalent to
//! `find(basic_string_view(s, count), pos)`. Complexity is `O(size() * count)` at worst.
//! @param s Pointer to a character string to search for.
//! @param pos Position at which to start the search.
//! @param count Length of substring to search for.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
    constexpr size_type find(const CharT* s, size_type pos, size_type count) const
    {
        return find(basic_string_view(s, count), pos); // explicit count: `s` may contain embedded nulls
    }
//! Find characters in the view.
//! \details Finds the first substring equal to the given character sequence. Equivalent to
//! `find(basic_string_view(s), pos)`. Complexity is `O(size() * Traits::length(s))` at worst.
//! @param s Pointer to a character string to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
constexpr size_type find(const CharT* s, size_type pos = 0) const
{
return find(basic_string_view(s), pos);
}
//! Find the last occurrence of a substring.
//! \details Finds the last substring equal to the given character sequence. Search begins at \p pos, i.e. the found
//! substring must not begin at a position following \p pos. If \ref npos or any value not smaller than `size()-1`
//! is passed as pos, the whole string will be searched. Complexity is `O(size() * v.size())` at worst.
//! @param v View to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
constexpr size_type rfind(basic_string_view v, size_type pos = npos) const noexcept
{
if (v.size() > size())
{
return npos;
}
// Clip the position to our string length.
for (size_type xpos = ::carb_min(pos, size() - v.size());; xpos--)
{
if (traits_type::compare(v.data(), data() + xpos, v.size()) == 0)
{
return xpos;
}
if (xpos == 0)
{
break;
}
}
return npos;
}
//! Find the last occurrence of a substring.
//! \details Finds the last substring equal to the given character sequence. Search begins at \p pos, i.e. the found
//! substring must not begin at a position following \p pos. If \ref npos or any value not smaller than `size()-1`
//! is passed as pos, the whole string will be searched. Equivalent to
//! `rfind(basic_string_view(std::addressof(ch), 1), pos)`. Complexity is `O(size())` at worst.
//! @param ch Character to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
constexpr size_type rfind(CharT ch, size_type pos = npos) const noexcept
{
if (empty())
{
return npos;
}
// Clip the position to our string length.
for (size_type xpos = ::carb_min(pos, size() - 1);; xpos--)
{
if (traits_type::eq(ch, data()[xpos]))
{
return xpos;
}
if (xpos == 0)
{
break;
}
}
return npos;
}
//! Find the last occurrence of a substring.
//! \details Finds the last substring equal to the given character sequence. Search begins at \p pos, i.e. the found
//! substring must not begin at a position following \p pos. If \ref npos or any value not smaller than `size()-1`
//! is passed as pos, the whole string will be searched. Equivalent to `rfind(basic_string_view(s, count), pos)`.
//! Complexity is `O(size() * count)` at worst.
//! @param s Pointer to a character string to search for.
//! @param pos Position at which to start the search.
//! @param count Length of substring to search for.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
constexpr size_type rfind(const CharT* s, size_type pos, size_type count) const
{
return rfind(basic_string_view(s, count), pos);
}
//! Find the last occurrence of a substring.
//! \details Finds the last substring equal to the given character sequence. Search begins at \p pos, i.e. the found
//! substring must not begin at a position following \p pos. If \ref npos or any value not smaller than `size()-1`
//! is passed as pos, the whole string will be searched. Equivalent to `rfind(basic_string_view(s), pos)`.
//! Complexity is `O(size() * Traits::length(s))` at worst.
//! @param s Pointer to a null-terminated character string to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character of the found substring, or \ref npos if no such substring is found.
constexpr size_type rfind(const CharT* s, size_type pos = npos) const
{
return rfind(basic_string_view(s), pos);
}
//! Find first occurrence of characters.
//! \details Finds the first occurrence of any of the characters of \p v in this view, starting as position \p pos.
//! Complexity is `O(size() * v.size())` at worst.
//! @param v View to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first occurrence of any character of the substring, or \ref npos if no such character
//! is found.
constexpr size_type find_first_of(basic_string_view v, size_type pos = 0) const noexcept
{
if (v.empty())
{
return npos;
}
size_type xpos = pos;
while (xpos < size())
{
if (v.find(m_data[xpos]) != npos)
{
return xpos;
}
xpos++;
}
return npos;
}
//! Find first occurrence of characters.
//! \details Equivalent to `find_first_of(basic_string_view(std::addressof(ch), 1), pos)`.
//! Complexity is `O(size())` at worst.
//! \see find_first_of(basic_string_view,size_type) const noexcept
//! @param ch Character to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first occurrence of any character of the substring, or \ref npos if no such character
//! is found.
    constexpr size_type find_first_of(CharT ch, size_type pos = 0) const noexcept
    {
        // A one-character set degenerates to a plain character search.
        return find(ch, pos);
    }
//! Find first occurrence of characters.
//! \details Equivalent to `find_first_of(basic_string_view(s, count), pos)`. Complexity is `O(size() * count)` at
//! worst.
//! \see find_first_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a string of characters to search for.
//! @param pos Position at which to start the search.
//! @param count Length of the string of characters to search for.
//! @returns Position of the first occurrence of any character of the substring, or \ref npos if no such character
//! is found.
constexpr size_type find_first_of(const CharT* s, size_type pos, size_type count) const
{
return find_first_of(basic_string_view(s, count), pos);
}
//! Find first occurrence of characters.
//! \details Equivalent to `find_first_of(basic_string_view(s, Traits::length(s)), pos)`. Complexity is
//! `O(size() * count)` at worst.
//! \see find_first_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a null-terminated string of characters to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first occurrence of any character of the substring, or \ref npos if no such character
//! is found.
constexpr size_type find_first_of(const CharT* s, size_type pos = 0) const
{
return find_first_of(basic_string_view(s), pos);
}
//! Find last occurrence of characters.
//! \details Finds the last character equal to one of characters in the given character sequence. Exact search
//! algorithm is not specified. The search considers only the interval `[0, pos]`. If the character is not present
//! in the interval, \ref npos will be returned. Complexity is `O(size() * v.size())` at worst.
//! @param v View to search for.
//! @param pos Position at which the search is to finish.
//! @returns Position of the last occurrence of any character of the substring, or \ref npos if no such character
//! is found.
constexpr size_type find_last_of(basic_string_view v, size_type pos = npos) const noexcept
{
if (v.empty() || empty())
{
return npos;
}
// Clip the position to our string length.
for (size_type xpos = ::carb_min(pos, size() - 1);; xpos--)
{
if (v.find(data()[xpos]) != npos)
{
return xpos;
}
if (xpos == 0)
{
break;
}
}
return npos;
}
//! Find last occurrence of characters.
//! \details Finds the last character equal to one of characters in the given character sequence. Exact search
//! algorithm is not specified. The search considers only the interval `[0, pos]`. If the character is not present
//! in the interval, \ref npos will be returned. Equivalent to
//! `find_last_of(basic_string_view(std::addressof(ch), 1), pos)`. Complexity is `O(size())` at worst.
//! \see find_last_of(basic_string_view,size_type) const noexcept
//! @param ch Character to search for.
//! @param pos Position at which the search is to finish.
//! @returns Position of the last occurrence of any character of the substring, or \ref npos if no such character
//! is found.
    constexpr size_type find_last_of(CharT ch, size_type pos = npos) const noexcept
    {
        // A one-character set degenerates to a plain reverse character search.
        return rfind(ch, pos);
    }
//! Find last occurrence of characters.
//! \details Finds the last character equal to one of characters in the given character sequence. Exact search
//! algorithm is not specified. The search considers only the interval `[0, pos]`. If the character is not present
//! in the interval, \ref npos will be returned. Equivalent to
//! `find_last_of(basic_string_view(s, count), pos)`. Complexity is `O(size() * count)` at worst.
//! \see find_last_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a string of characters to search for.
//! @param pos Position at which the search is to finish.
//! @param count Length of the string of characters to search for.
//! @returns Position of the last occurrence of any character of the substring, or \ref npos if no such character
//! is found.
constexpr size_type find_last_of(const CharT* s, size_type pos, size_type count) const
{
return find_last_of(basic_string_view(s, count), pos);
}
//! Find last occurrence of characters.
//! \details Finds the last character equal to one of characters in the given character sequence. Exact search
//! algorithm is not specified. The search considers only the interval `[0, pos]`. If the character is not present
//! in the interval, \ref npos will be returned. Equivalent to
//! `find_last_of(basic_string_view(s), pos)`. Complexity is `O(size() * Traits::length(s))` at worst.
//! \see find_last_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a null-terminated string of characters to search for.
//! @param pos Position at which the search is to finish.
//! @returns Position of the last occurrence of any character of the substring, or \ref npos if no such character
//! is found.
constexpr size_type find_last_of(const CharT* s, size_type pos = npos) const
{
return find_last_of(basic_string_view(s), pos);
}
//! Find first absence of characters.
//! \details Finds the first character not equal to any of the characters in the given character sequence.
//! Complexity is `O(size() * v.size())` at worst.
//! @param v View to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_first_not_of(basic_string_view v, size_type pos = 0) const noexcept
{
size_type xpos = pos;
while (xpos < size())
{
if (v.find(data()[xpos]) == npos)
{
return xpos;
}
xpos++;
}
return npos;
}
//! Find first absence of characters.
//! \details Finds the first character not equal to any of the characters in the given character sequence.
//! Equivalent to `find_first_not_of(basic_string_view(std::addressof(ch), 1), pos)`. Complexity is `O(size())` at
//! worst.
//! \see find_first_not_of(basic_string_view,size_type) const noexcept
//! @param ch Character to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_first_not_of(CharT ch, size_type pos = 0) const noexcept
{
size_type xpos = pos;
while (xpos < size())
{
if (!traits_type::eq(ch, m_data[xpos]))
{
return xpos;
}
xpos++;
}
return npos;
}
//! Find first absence of characters.
//! \details Finds the first character not equal to any of the characters in the given character sequence.
//! Equivalent to `find_first_not_of(basic_string_view(s, count), pos)`. Complexity is `O(size() * count)` at worst.
//! \see find_first_not_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a string of characters to search for.
//! @param pos Position at which to start the search.
//! @param count Length of the string of characters to compare.
//! @returns Position of the first character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_first_not_of(const CharT* s, size_type pos, size_type count) const
{
return find_first_not_of(basic_string_view(s, count), pos);
}
//! Find first absence of characters.
//! \details Finds the first character not equal to any of the characters in the given character sequence.
//! Equivalent to `find_first_not_of(basic_string_view(s), pos)`. Complexity is `O(size() * Traits::length(s))` at
//! worst.
//! \see find_first_not_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a null-terminated string of characters to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the first character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_first_not_of(const CharT* s, size_type pos = 0) const
{
return find_first_not_of(basic_string_view(s), pos);
}
//! Find last absence of characters.
//! \details Finds the last character not equal to any of the characters in the given character sequence. The search
//! considers only the interval `[0, pos]`. Complexity is `O(size() * v.size())` at worst.
//! @param v View to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the last character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_last_not_of(basic_string_view v, size_type pos = 0) const noexcept
{
if (empty())
{
return npos;
}
// Clip the position to our string length.
for (size_type xpos = ::carb_min(pos, size() - 1);; xpos--)
{
if (v.find(data()[xpos]) == npos)
{
return xpos;
}
if (xpos == 0)
{
break;
}
}
return npos;
}
//! Find last absence of characters.
//! \details Finds the last character not equal to any of the characters in the given character sequence. The search
//! considers only the interval `[0, pos]`. Equivalent to
//! `find_last_not_of(basic_string_view(std::addressof(ch), 1), pos)`, Complexity is `O(size())` at worst.
//! \see find_last_not_of(basic_string_view,size_type) const noexcept
//! @param ch Character to search for.
//! @param pos Position at which to start the search.
//! @returns Position of the last character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_last_not_of(CharT ch, size_type pos = 0) const noexcept
{
if (empty())
{
return npos;
}
// Clip the position to our string length.
for (size_type xpos = ::carb_min(pos, size() - 1);; xpos--)
{
if (!traits_type::eq(data()[xpos], ch))
{
return xpos;
}
if (xpos == 0)
{
break;
}
}
return npos;
}
//! Find last absence of characters.
//! \details Finds the last character not equal to any of the characters in the given character sequence. The search
//! considers only the interval `[0, pos]`. Equivalent to
//! `find_last_not_of(basic_string_view(s, count), pos)`, Complexity is `O(size() * count)` at worst.
//! \see find_last_not_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a string of characters to compare.
//! @param pos Position at which to start the search.
//! @param count Length of the string of characters to compare.
//! @returns Position of the last character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_last_not_of(const CharT* s, size_type pos, size_type count) const
{
return find_last_not_of(basic_string_view(s, count), pos);
}
//! Find last absence of characters.
//! \details Finds the last character not equal to any of the characters in the given character sequence. The search
//! considers only the interval `[0, pos]`. Equivalent to
//! `find_last_not_of(basic_string_view(s), pos)`, Complexity is `O(size() * Traits::length(s))` at worst.
//! \see find_last_not_of(basic_string_view,size_type) const noexcept
//! @param s Pointer to a null-terminated string of characters to compare.
//! @param pos Position at which to start the search.
//! @returns Position of the last character not equal to any of the characters in the given string, or \ref npos if
//! no such character is found.
constexpr size_type find_last_not_of(const CharT* s, size_type pos = 0) const
{
return find_last_not_of(basic_string_view(s), pos);
}
private:
CARB_VIZ const_pointer m_data = nullptr;
CARB_VIZ size_type m_count = 0;
};
//! basic_string_view<char>
//! @see basic_string_view
using string_view = basic_string_view<char>;
//! basic_string_view<wchar_t>
//! @see basic_string_view
using wstring_view = basic_string_view<wchar_t>;
#if CARB_HAS_CPP20
//! basic_string_view<char8_t>
//! @see basic_string_view
//! \note Only available in C++20 and later builds since `char8_t` is a C++20 type.
using u8string_view = basic_string_view<char8_t>;
#endif
//! basic_string_view<char16_t>
//! @see basic_string_view
using u16string_view = basic_string_view<char16_t>;
//! basic_string_view<char32_t>
//! @see basic_string_view
using u32string_view = basic_string_view<char32_t>;
// Ensure these for ABI safety
//! \cond SKIP
#define CARB_IMPL_ENSURE_ABI(cls) \
static_assert(std::is_standard_layout<cls>::value, #cls " must be standard layout"); \
static_assert(std::is_trivially_copyable<cls>::value, #cls " must be trivially copyable"); /* C++23 requirement */ \
static_assert(sizeof(cls) == (2 * sizeof(size_t)), #cls "ABI change violation")
CARB_IMPL_ENSURE_ABI(string_view);
CARB_IMPL_ENSURE_ABI(wstring_view);
CARB_IMPL_ENSURE_ABI(u16string_view);
CARB_IMPL_ENSURE_ABI(u32string_view);
#undef CARB_IMPL_ENSURE_ABI
//! \endcond SKIP
//! @defgroup string_view_compare Lexicographically compare two string views.
//! \details All comparisons are done via the \ref basic_string_view::compare() member function (which itself is
//! defined in terms of `Traits::compare()`):
//! * Two views are equal if both the size of \p a and \p b are equal and each character in \p a has an equivalent
//! character in \p b at the same position.
//! * The ordering comparisons are done lexicographically -- the comparison is performed by a function equivalent to
//! `std::lexicographical_compare`.
//! Complexity is linear in the size of the views.
//! @{
//! Lexicographically compare two string views for equality.
template <class CharT, class Traits = std::char_traits<CharT>>
bool operator==(const basic_string_view<CharT, Traits>& a, const basic_string_view<CharT, Traits>& b)
{
    const auto rel = a.compare(b);
    return rel == 0;
}
//! Lexicographically compare two string views for inequality.
template <class CharT, class Traits = std::char_traits<CharT>>
bool operator!=(const basic_string_view<CharT, Traits>& a, const basic_string_view<CharT, Traits>& b)
{
    const auto rel = a.compare(b);
    return rel != 0;
}
//! Lexicographically test whether one string view orders before another.
template <class CharT, class Traits = std::char_traits<CharT>>
bool operator<(const basic_string_view<CharT, Traits>& a, const basic_string_view<CharT, Traits>& b)
{
    const auto rel = a.compare(b);
    return rel < 0;
}
//! Lexicographically test whether one string view orders before or equal to another.
template <class CharT, class Traits = std::char_traits<CharT>>
bool operator<=(const basic_string_view<CharT, Traits>& a, const basic_string_view<CharT, Traits>& b)
{
    const auto rel = a.compare(b);
    return rel <= 0;
}
//! Lexicographically test whether one string view orders after another.
template <class CharT, class Traits = std::char_traits<CharT>>
bool operator>(const basic_string_view<CharT, Traits>& a, const basic_string_view<CharT, Traits>& b)
{
    const auto rel = a.compare(b);
    return rel > 0;
}
//! Lexicographically test whether one string view orders after or equal to another.
template <class CharT, class Traits = std::char_traits<CharT>>
bool operator>=(const basic_string_view<CharT, Traits>& a, const basic_string_view<CharT, Traits>& b)
{
    const auto rel = a.compare(b);
    return rel >= 0;
}
// [string.view.comparison]
// Mixed `const char*` / string_view comparisons; each converts the raw string to a view and
// compares via basic_string_view::compare().
//! Lexicographically compare two string views.
inline bool operator==(const char* t, string_view sv)
{
    return string_view(t).compare(sv) == 0;
}
//! Lexicographically compare two string views.
inline bool operator==(string_view sv, const char* t)
{
    return sv.compare(string_view(t)) == 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(const char* t, string_view sv)
{
    return string_view(t).compare(sv) != 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(string_view sv, const char* t)
{
    return sv.compare(string_view(t)) != 0;
}
//! Lexicographically compare two string views.
inline bool operator<(const char* t, string_view sv)
{
    return string_view(t).compare(sv) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<(string_view sv, const char* t)
{
    return sv.compare(string_view(t)) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(const char* t, string_view sv)
{
    return string_view(t).compare(sv) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(string_view sv, const char* t)
{
    return sv.compare(string_view(t)) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator>(const char* t, string_view sv)
{
    return string_view(t).compare(sv) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>(string_view sv, const char* t)
{
    return sv.compare(string_view(t)) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(const char* t, string_view sv)
{
    return string_view(t).compare(sv) >= 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(string_view sv, const char* t)
{
    return sv.compare(string_view(t)) >= 0;
}
// Mixed `const wchar_t*` / wstring_view comparisons; each converts the raw string to a view
// and compares via basic_string_view::compare().
//! Lexicographically compare two string views.
inline bool operator==(const wchar_t* t, wstring_view sv)
{
    return wstring_view(t).compare(sv) == 0;
}
//! Lexicographically compare two string views.
inline bool operator==(wstring_view sv, const wchar_t* t)
{
    return sv.compare(wstring_view(t)) == 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(const wchar_t* t, wstring_view sv)
{
    return wstring_view(t).compare(sv) != 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(wstring_view sv, const wchar_t* t)
{
    return sv.compare(wstring_view(t)) != 0;
}
//! Lexicographically compare two string views.
inline bool operator<(const wchar_t* t, wstring_view sv)
{
    return wstring_view(t).compare(sv) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<(wstring_view sv, const wchar_t* t)
{
    return sv.compare(wstring_view(t)) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(const wchar_t* t, wstring_view sv)
{
    return wstring_view(t).compare(sv) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(wstring_view sv, const wchar_t* t)
{
    return sv.compare(wstring_view(t)) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator>(const wchar_t* t, wstring_view sv)
{
    return wstring_view(t).compare(sv) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>(wstring_view sv, const wchar_t* t)
{
    return sv.compare(wstring_view(t)) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(const wchar_t* t, wstring_view sv)
{
    return wstring_view(t).compare(sv) >= 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(wstring_view sv, const wchar_t* t)
{
    return sv.compare(wstring_view(t)) >= 0;
}
// Mixed `const char16_t*` / u16string_view comparisons; each converts the raw string to a view
// and compares via basic_string_view::compare().
//! Lexicographically compare two string views.
inline bool operator==(const char16_t* t, u16string_view sv)
{
    return u16string_view(t).compare(sv) == 0;
}
//! Lexicographically compare two string views.
inline bool operator==(u16string_view sv, const char16_t* t)
{
    return sv.compare(u16string_view(t)) == 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(const char16_t* t, u16string_view sv)
{
    return u16string_view(t).compare(sv) != 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(u16string_view sv, const char16_t* t)
{
    return sv.compare(u16string_view(t)) != 0;
}
//! Lexicographically compare two string views.
inline bool operator<(const char16_t* t, u16string_view sv)
{
    return u16string_view(t).compare(sv) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<(u16string_view sv, const char16_t* t)
{
    return sv.compare(u16string_view(t)) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(const char16_t* t, u16string_view sv)
{
    return u16string_view(t).compare(sv) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(u16string_view sv, const char16_t* t)
{
    return sv.compare(u16string_view(t)) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator>(const char16_t* t, u16string_view sv)
{
    return u16string_view(t).compare(sv) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>(u16string_view sv, const char16_t* t)
{
    return sv.compare(u16string_view(t)) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(const char16_t* t, u16string_view sv)
{
    return u16string_view(t).compare(sv) >= 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(u16string_view sv, const char16_t* t)
{
    return sv.compare(u16string_view(t)) >= 0;
}
// Mixed `const char32_t*` / u32string_view comparisons; each converts the raw string to a view
// and compares via basic_string_view::compare().
//! Lexicographically compare two string views.
inline bool operator==(const char32_t* t, u32string_view sv)
{
    return u32string_view(t).compare(sv) == 0;
}
//! Lexicographically compare two string views.
inline bool operator==(u32string_view sv, const char32_t* t)
{
    return sv.compare(u32string_view(t)) == 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(const char32_t* t, u32string_view sv)
{
    return u32string_view(t).compare(sv) != 0;
}
//! Lexicographically compare two string views.
inline bool operator!=(u32string_view sv, const char32_t* t)
{
    return sv.compare(u32string_view(t)) != 0;
}
//! Lexicographically compare two string views.
inline bool operator<(const char32_t* t, u32string_view sv)
{
    return u32string_view(t).compare(sv) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<(u32string_view sv, const char32_t* t)
{
    return sv.compare(u32string_view(t)) < 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(const char32_t* t, u32string_view sv)
{
    return u32string_view(t).compare(sv) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator<=(u32string_view sv, const char32_t* t)
{
    return sv.compare(u32string_view(t)) <= 0;
}
//! Lexicographically compare two string views.
inline bool operator>(const char32_t* t, u32string_view sv)
{
    return u32string_view(t).compare(sv) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>(u32string_view sv, const char32_t* t)
{
    return sv.compare(u32string_view(t)) > 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(const char32_t* t, u32string_view sv)
{
    return u32string_view(t).compare(sv) >= 0;
}
//! Lexicographically compare two string views.
inline bool operator>=(u32string_view sv, const char32_t* t)
{
    return sv.compare(u32string_view(t)) >= 0;
}
//! @}
// Note that literal suffixes that don't start with _ are reserved, in addition we probably don't want to compete with
// the C++17 suffix either.
//! Creates a string view of a character array literal.
//! \note The literal suffix operator `sv` is reserved by C++, so this is `_sv`.
//! @param str Pointer to the beginning of the raw character array literal.
//! @param len Length of the raw character array literal.
//! @returns The \ref basic_string_view literal.
constexpr string_view operator""_sv(const char* str, std::size_t len) noexcept
{
    return string_view{ str, len };
}
// C++ 20 and above have char8_t.
#if CARB_HAS_CPP20
//! \copydoc operator""_sv(const char*,std::size_t) noexcept
constexpr u8string_view operator""_sv(const char8_t* str, std::size_t len) noexcept
{
    return u8string_view{ str, len };
}
#endif
//! \copydoc operator""_sv(const char*,std::size_t) noexcept
constexpr u16string_view operator""_sv(const char16_t* str, std::size_t len) noexcept
{
    return u16string_view{ str, len };
}
//! \copydoc operator""_sv(const char*,std::size_t) noexcept
constexpr u32string_view operator""_sv(const char32_t* str, std::size_t len) noexcept
{
    return u32string_view{ str, len };
}
//! \copydoc operator""_sv(const char*,std::size_t) noexcept
constexpr wstring_view operator""_sv(const wchar_t* str, std::size_t len) noexcept
{
    return wstring_view{ str, len };
}
// Non-standard function?
//! Swaps two string views.
//! @param a String view to swap.
//! @param b String view to swap.
template <class CharT, class Traits>
constexpr void swap(carb::cpp::basic_string_view<CharT, Traits>& a, carb::cpp::basic_string_view<CharT, Traits>& b) noexcept
{
    // Member swap exchanges the data/length pair; either side may initiate the exchange.
    b.swap(a);
}
} // namespace cpp
} // namespace carb
//! Hash support for string views.
template <class CharT, class Traits>
struct std::hash<carb::cpp::basic_string_view<CharT, Traits>>
{
    //! @private
    size_t operator()(const carb::cpp::basic_string_view<CharT, Traits>& v) const
    {
        // Hash the raw character storage; the byte count of the viewed range is
        // size() * sizeof(CharT).
        return carb::hashBuffer(v.data(), v.size() * sizeof(CharT));
    }
};
#undef CARB_ALWAYS_FAIL
#undef CARB_THROW_OR_CHECK
|
omniverse-code/kit/include/carb/cpp/Memory.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<memory>` library.
#pragma once
#include "../Defines.h"
#include <memory>
#include <type_traits>
namespace carb
{
namespace cpp
{
#if !CARB_HAS_CPP17 || defined DOXYGEN_BUILD
//! Get the address of \a arg, even if \c operator& is overloaded. This is generally only useful in memory management
//! functions -- \c to_address is almost always preferable.
//!
//! \note
//! This function is \c constexpr even in C++14 mode.
//! @param arg The object to take the address of.
//! @returns The true address of \a arg, bypassing any user-provided \c operator&.
template <typename T>
constexpr T* addressof(T& arg) noexcept
{
    // Compiler intrinsic; correct even when T overloads unary operator&.
    return __builtin_addressof(arg);
}
//! Taking the address of a \c const rvalue is never correct.
template <typename T>
T const* addressof(T const&&) = delete;
#else
// C++17 and later: std::addressof is already constexpr, so simply re-export it.
using std::addressof;
#endif
//! Call the destructor of \a p.
//! \tparam T A non-array type (array types are handled by a separate overload).
//! @param p Pointer to the object to destroy. Only the destructor runs; the underlying storage is not released.
template <typename T>
constexpr std::enable_if_t<!std::is_array<T>::value> destroy_at(T* const p) noexcept(std::is_nothrow_destructible<T>::value)
{
    p->~T();
}
//! Call the destructor of all \a array elements.
//!
//! \tparam T The element type of the array must have a \c noexcept destructor. There is no mechanism to safely use
//!     this function if an element throws. This is a departure from C++20, wherein an exception thrown from a
//!     destructor will result in either \c std::terminate or an "implementation defined manner." Instead, we force
//!     you to handle potential exceptions by disallowing it.
//! @param array Pointer to the array whose elements should be destroyed, in order.
template <typename T, std::size_t N>
constexpr void destroy_at(T (*array)[N]) noexcept
{
    static_assert(noexcept(carb::cpp::destroy_at(array[0])),
                  "destroy_at for array requires elements to have noexcept destructor");
    // Destroy each element in sequence via an explicit index.
    for (std::size_t i = 0; i < N; ++i)
    {
        carb::cpp::destroy_at(carb::cpp::addressof((*array)[i]));
    }
}
#if !CARB_HAS_CPP20 || defined DOXYGEN_BUILD
//! Construct a \c T in \a place using the provided \a args.
//!
//! \note
//! This differs from the C++20 definition by not being \c constexpr, since placement new is not \c constexpr before
//! C++20. When C++20 is enabled, this function disappears in favor of \c std::construct_at.
//! @param place Pointer to raw storage suitably sized and aligned to hold a \c T.
//! @param args Arguments forwarded to \c T's constructor.
//! @returns A pointer to the newly constructed object (the same address as \a place).
template <typename T, typename... TArgs>
T* construct_at(T* place, TArgs&&... args) noexcept
CARB_NO_DOC((noexcept(::new (static_cast<void*>(place)) T(std::forward<TArgs>(args)...))))
{
    // The static_cast to void* selects the non-allocating placement form of operator new.
    return ::new (static_cast<void*>(place)) T(std::forward<TArgs>(args)...);
}
#else
using std::construct_at;
#endif
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/TypeTraits.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<type_traits>` library.
#pragma once
#include "../Defines.h"
#include "../detail/NoexceptType.h"
#include "detail/ImplInvoke.h"
#include <functional>
#include <type_traits>
namespace carb
{
namespace cpp
{
CARB_DETAIL_PUSH_IGNORE_NOEXCEPT_TYPE()
//! An integral constant with \c bool type and value \c B.
template <bool B>
using bool_constant = std::integral_constant<bool, B>;

//! \cond DEV
namespace detail
{
// Implementation of carb::cpp::conjunction, below.
template <typename... B>
struct conjunction_impl;

// Base case: an empty list is true (the identity element for logical-and).
template <>
struct conjunction_impl<> : std::true_type
{
};

// Single trait: inherit from the trait itself so its exact type is preserved, as the standard requires.
template <typename B>
struct conjunction_impl<B> : B
{
};

// Recursive case: only instantiate the rest of the list when the head is true; otherwise the head itself is the
// result. This selective instantiation is what makes conjunction short-circuiting.
template <typename BHead, typename... BRest>
struct conjunction_impl<BHead, BRest...> : std::conditional_t<bool(BHead::value), conjunction_impl<BRest...>, BHead>
{
};
} // namespace detail
//! \endcond
//! A conjunction is the logical \e and of all \c B traits.
//!
//! An empty list results in a \c true value. This meta-function is short-circuiting.
//!
//! \tparam B The series of traits to evaluate the \c value member of. Each \c B must have a member constant \c value
//! which is convertible to \c bool. Use of carb::cpp::bool_constant is helpful here.
template <typename... B>
struct conjunction : detail::conjunction_impl<B...>
{
};

//! \cond DEV
namespace detail
{
// Implementation of carb::cpp::disjunction, below.
template <typename... B>
struct disjunction_impl;

// Base case: an empty list is false (the identity element for logical-or).
template <>
struct disjunction_impl<> : std::false_type
{
};

// Single trait: inherit from the trait itself so its exact type is preserved, as the standard requires.
template <typename B>
struct disjunction_impl<B> : B
{
};

// Recursive case: only instantiate the rest of the list when the head is false; otherwise the head itself is the
// result. This selective instantiation is what makes disjunction short-circuiting.
template <typename BHead, typename... BRest>
struct disjunction_impl<BHead, BRest...> : std::conditional_t<bool(BHead::value), BHead, disjunction_impl<BRest...>>
{
};
} // namespace detail
//! \endcond
//! A disjunction is the logical \e or of all \c B traits.
//!
//! An empty list results in a \c false value. This metafunction is short-circuiting.
//!
//! \tparam B The series of traits to evaluate the \c value member of. Each \c B must have a member constant \c value
//! which is convertible to \c bool. Use of \ref cpp::bool_constant is helpful here.
template <typename... B>
struct disjunction : detail::disjunction_impl<B...>
{
};

//! A logical \e not of \c B trait.
//!
//! \tparam B The trait to evaluate the \c value member of. This must have a member constant \c value which is
//! convertible to \c bool. Use of \ref cpp::bool_constant is helpful here.
template <typename B>
struct negation : bool_constant<!bool(B::value)>
{
};

//! Void type
//!
//! Maps any well-formed sequence of types to \c void; used to detect ill-formed types in SFINAE contexts
//! (equivalent to C++17 \c std::void_t).
template <class...>
using void_t = void;

// These C++14 standard traits are usable as-is; import them here so carb::cpp code can refer to all traits uniformly.
using std::is_convertible;
using std::is_void;
//! \cond DEV
namespace detail
{
// Base case matches where either `To` or `From` is void or if `is_convertible<From, To>` is false. The conversion in
// this case is only non-throwing if both `From` and `To` are `void`.
template <typename From, typename To, typename = void>
struct is_nothrow_convertible_impl : conjunction<is_void<From>, is_void<To>>
{
};

// If neither `From` nor `To` are void and `From` is convertible to `To`, then we test that such a conversion is
// non-throwing.
template <typename From, typename To>
struct is_nothrow_convertible_impl<
    From,
    To,
    std::enable_if_t<conjunction<negation<is_void<From>>, negation<is_void<To>>, is_convertible<From, To>>::value>>
{
    // Copy-initializing test's parameter performs the implicit conversion; `value` reports whether doing so from a
    // `From` value can throw.
    static void test(To) noexcept;
    static constexpr bool value = noexcept(test(std::declval<From>()));
};
} // namespace detail
//! \endcond

//! Determine if \c From can be implicitly-converted to \c To without throwing an exception.
//!
//! This is equivalent to the C++20 \c std::is_nothrow_convertible meta query. While added in C++20, it is required for
//! the C++17 \ref is_nothrow_invocable_r meta query.
template <typename From, typename To>
struct is_nothrow_convertible : bool_constant<detail::is_nothrow_convertible_impl<From, To>::value>
{
};
//! \cond DEV
namespace detail
{
template <class T>
struct IsSwappable;

template <class T>
struct IsNothrowSwappable;

// Bring std::swap into scope *before* the detection helpers below. The unqualified, dependent `swap` calls in those
// helpers find their candidates via ordinary lookup at this (definition) context plus ADL at instantiation. If this
// using-declaration appeared after the helpers, types with no ADL-discoverable swap (e.g. fundamental types such as
// `int`) would incorrectly be reported as non-swappable on compilers with conforming two-phase name lookup.
using std::swap; // enable ADL

// Detects whether the expression `swap(declval<T>(), declval<U>())` is well-formed.
template <class T, class U, class = void>
struct SwappableWithHelper : std::false_type
{
};

template <class T, class U>
struct SwappableWithHelper<T, U, void_t<decltype(swap(std::declval<T>(), std::declval<U>()))>> : std::true_type
{
};

// T and U are swappable with each other only if the swap call is valid in both argument orders.
template <class T, class U>
struct IsSwappableWith : bool_constant<conjunction<SwappableWithHelper<T, U>, SwappableWithHelper<U, T>>::value>
{
};

template <class T>
struct IsSwappable : IsSwappableWith<std::add_lvalue_reference_t<T>, std::add_lvalue_reference_t<T>>::type
{
};

// True only when the swap call is noexcept in both argument orders.
template <class T, class U>
struct SwapCannotThrow : bool_constant<noexcept(swap(std::declval<T>(), std::declval<U>()))&& noexcept(
                             swap(std::declval<U>(), std::declval<T>()))>
{
};

template <class T, class U>
struct IsNothrowSwappableWith : bool_constant<conjunction<IsSwappableWith<T, U>, SwapCannotThrow<T, U>>::value>
{
};

template <class T>
struct IsNothrowSwappable : IsNothrowSwappableWith<std::add_lvalue_reference_t<T>, std::add_lvalue_reference_t<T>>::type
{
};
} // namespace detail
//! \endcond

//! Determines whether `swap(declval<T>(), declval<U>())` and `swap(declval<U>(), declval<T>())` are both well-formed
//! after `using std::swap`. Equivalent to the C++17 \c std::is_swappable_with meta query.
template <class T, class U>
struct is_swappable_with : detail::IsSwappableWith<T, U>::type
{
};

//! Determines whether lvalues of type \c T are swappable. Equivalent to the C++17 \c std::is_swappable meta query.
template <class T>
struct is_swappable : detail::IsSwappable<T>::type
{
};

//! Determines whether \c T and \c U are swappable with one another and both swap expressions are \c noexcept.
//! Equivalent to the C++17 \c std::is_nothrow_swappable_with meta query.
template <class T, class U>
struct is_nothrow_swappable_with : detail::IsNothrowSwappableWith<T, U>::type
{
};

//! Determines whether lvalues of type \c T are nothrow-swappable. Equivalent to the C++17 \c std::is_nothrow_swappable
//! meta query.
template <class T>
struct is_nothrow_swappable : detail::IsNothrowSwappable<T>::type
{
};
//! \cond DEV
namespace detail
{
// The base case is matched in cases where `invoke_uneval` is an invalid expression. The `Qualify` is always set to void
// by users.
template <typename Qualify, typename Func, typename... TArgs>
struct invoke_result_impl
{
    // Intentionally empty: no `type` member, so invoke_result_t use SFINAEs away rather than hard-erroring.
};

// Matched when `invoke_uneval(func, args...)` is well-formed: `void(...)` collapses the deduced expression to `void`
// so it equals the user-supplied `Qualify` and this partial specialization is preferred over the base case.
template <typename Func, typename... TArgs>
struct invoke_result_impl<decltype(void(invoke_uneval(std::declval<Func>(), std::declval<TArgs>()...))), Func, TArgs...>
{
    using type = decltype(invoke_uneval(std::declval<Func>(), std::declval<TArgs>()...));
};

// False unless the specialization below matches.
template <typename Qualify, typename Func, typename... TArgs>
struct is_invocable_impl : std::false_type
{
};

// Matched when invoke_result_impl has a `type` member, i.e. when Func is invocable with TArgs.
template <typename Func, typename... TArgs>
struct is_invocable_impl<void_t<typename invoke_result_impl<void, Func, TArgs...>::type>, Func, TArgs...> : std::true_type
{
};

// False unless the specialization below matches.
template <typename Qualify, typename Func, typename... TArgs>
struct is_nothrow_invocable_impl : std::false_type
{
};

// Matched when Func is invocable with TArgs; the resulting value is whether that invocation is noexcept.
template <typename Func, typename... TArgs>
struct is_nothrow_invocable_impl<void_t<typename invoke_result_impl<void, Func, TArgs...>::type>, Func, TArgs...>
    : bool_constant<noexcept(invoke_uneval(std::declval<Func>(), std::declval<TArgs>()...))>
{
};
} // namespace detail
//! \endcond
//! Get the result type of calling \c Func with the \c TArgs pack.
//!
//! If \c Func is callable with the given \c TArgs pack, then this structure has a member typedef named \c type with the
//! return of that call. If \c Func is not callable, then the member typedef does not exist.
//!
//! \code
//! static_assert(std::is_same<int, typename invoke_result<int(*)(char), char>::type>::value);
//! \endcode
//!
//! This is equivalent to the C++17 \c std::invoke_result meta transformation.
//!
//! \note Like the standard facility, this is SFINAE-friendly: a non-callable \c Func simply has no \c type member
//!       rather than producing a hard error.
//!
//! \see carb::cpp::invoke_result_t
template <typename Func, typename... TArgs>
struct invoke_result : detail::invoke_result_impl<void, Func, TArgs...>
{
};

//! Helper for \ref carb::cpp::invoke_result which accesses the \c type member.
//!
//! \code
//! // Get the proper return type and SFINAE-safe disqualify `foo` when `f(10)` is not valid.
//! template <typename Func>
//! invoke_result_t<Func, int> foo(Func&& f)
//! {
//!     return invoke(std::forward<Func>(f), 10);
//! }
//! \endcode
//!
//! This is equivalent to the C++ \c std::invoke_result_t helper typedef.
template <typename Func, typename... TArgs>
using invoke_result_t = typename invoke_result<Func, TArgs...>::type;

//! Check if the \c Func is invocable with the \c TArgs pack.
//!
//! If \c Func is callable with the given \c TArgs pack, then this structure will derive from \c true_type; otherwise,
//! it will be \c false_type. If \c value is \c true, then `invoke(func, args...)` is a valid expression.
//!
//! \code
//! static_assert(is_invocable<void(*)()>::value);
//! static_assert(!is_invocable<void(*)(int)>::value);
//! static_assert(is_invocable<void(*)(int), int>::value);
//! static_assert(is_invocable<void(*)(long), int>::value);
//! \endcode
//!
//! This is equivalent to the C++20 \c std::is_invocable meta query. The query was added in C++17, but this additionally
//! supports invoking a pointer to a `const&` member function on an rvalue reference.
//!
//! \see carb::cpp::is_nothrow_invocable carb::cpp::invoke_result
template <typename Func, typename... TArgs>
struct is_invocable : detail::is_invocable_impl<void, Func, TArgs...>
{
};

//! Check if invoking \c Func with the \c TArgs pack will not throw.
//!
//! If \c Func called with the given \c TArgs pack is callable and marked \c noexcept, then this structure will derive
//! from \c true_type; otherwise, it will be \c false_type. If \c Func is not callable at all, then this will also be
//! \c false_type.
//!
//! This is equivalent to the C++17 \c is_nothrow_invocable meta query.
//!
//! \see carb::cpp::is_invocable
template <typename Func, typename... TArgs>
struct is_nothrow_invocable : detail::is_nothrow_invocable_impl<void, Func, TArgs...>
{
};
//! \cond DEV
namespace detail
{
// Base case: Func is not invocable with TArgs at all, so it is neither invocable-r nor nothrow-invocable-r.
template <typename Qualify, typename R, typename Func, typename... TArgs>
struct invocable_r_impl
{
    using invocable_t = std::false_type;
    using invocable_nothrow_t = std::false_type;
};

// R = void: any invocable matches, since any result type may be discarded. The nothrow variant then depends only on
// whether the invocation itself can throw.
template <typename Func, typename... TArgs>
struct invocable_r_impl<std::enable_if_t<is_invocable<Func, TArgs...>::value>, void, Func, TArgs...>
{
    using invocable_t = std::true_type;
    using invocable_nothrow_t = is_nothrow_invocable<Func, TArgs...>;
};

// The is_void as part of the qualifier is to workaround an MSVC issue where it thinks this partial specialization and
// the one which explicitly lists `R` as void are equally-qualified.
template <typename R, typename Func, typename... TArgs>
struct invocable_r_impl<std::enable_if_t<is_invocable<Func, TArgs...>::value && !is_void<R>::value>, R, Func, TArgs...>
{
private:
    // Can't use declval for conversion checks, as it adds an rvalue ref to the type. We want to make sure the result of
    // a returned function can be converted.
    static invoke_result_t<Func, TArgs...> get_val() noexcept;

    // Checks convertibility by copy-initializing a parameter of type Target; this also rejects conversions that would
    // bind a reference to a temporary, matching the C++23 specification.
    template <typename Target>
    static void convert_to(Target) noexcept;

    // Overload pair: test(int) is preferred and is viable only when the conversion above is well-formed; otherwise
    // overload resolution falls back to test(...).
    template <typename TR, typename = decltype(convert_to<TR>(get_val()))>
    static std::true_type test(int) noexcept;

    template <typename TR>
    static std::false_type test(...) noexcept;

    // Same idiom, but the reported value is whether the conversion is additionally noexcept.
    template <typename TR, typename = decltype(convert_to<TR>(get_val()))>
    static bool_constant<noexcept(convert_to<TR>(get_val()))> test_nothrow(int) noexcept;

    template <typename TR>
    static std::false_type test_nothrow(...) noexcept;

public:
    using invocable_t = decltype(test<R>(0));
    using invocable_nothrow_t = decltype(test_nothrow<R>(0));
};
} // namespace detail
//! \endcond
//! Check if invoking \c Func with the \c TArgs pack will return \c R.
//!
//! Similar to \ref is_invocable, but additionally checks that the result type is convertible to \c R and that the
//! conversion does not bind a reference to a temporary object. If \c R is \c void, the result can be any type (as any
//! type can be converted to \c void by discarding it). If \c value is \c true, then `invoke_r<R>(func, args...)` is a
//! valid expression.
//!
//! This is equivalent to the C++23 definition of \c is_invocable_r. The function was originally added in C++17, but the
//! specification was altered in C++23 to avoid undefined behavior.
//!
//! \see carb::cpp::is_nothrow_invocable_r
template <typename R, typename Func, typename... TArgs>
struct is_invocable_r : detail::invocable_r_impl<void, R, Func, TArgs...>::invocable_t
{
};

//! Check that invoking \c Func with the \c TArgs pack and converting it to \c R will not throw.
//!
//! This is equivalent to the C++23 definition of \c is_nothrow_invocable_r. The function was originally added in C++17,
//! but the specification was altered in C++23 to avoid undefined behavior.
//!
//! \see carb::cpp::is_invocable_r
template <typename R, typename Func, typename... TArgs>
struct is_nothrow_invocable_r : detail::invocable_r_impl<void, R, Func, TArgs...>::invocable_nothrow_t
{
};
CARB_DETAIL_POP_IGNORE_NOEXCEPT_TYPE()
//! Provides the member typedef type that names T (i.e. the identity transformation).
//! This can be used to establish non-deduced contexts in template argument deduction.
//! Equivalent to the C++20 \c std::type_identity meta transformation.
template <class T>
struct type_identity
{
    //! The identity transformation.
    using type = T;
};

//! Helper type for \ref type_identity
template <class T>
using type_identity_t = typename type_identity<T>::type;

//! If the type T is a reference type, provides the member typedef type which is the type referred to by T with its
//! topmost cv-qualifiers removed. Otherwise type is T with its topmost cv-qualifiers removed.
//! Equivalent to the C++20 \c std::remove_cvref meta transformation.
template <class T>
struct remove_cvref
{
    //! The type `T` or referred to by `T` (in the case of a reference) with topmost cv-qualifiers removed.
    using type = std::remove_cv_t<std::remove_reference_t<T>>;
};

//! Helper type for \ref remove_cvref
template <class T>
using remove_cvref_t = typename remove_cvref<T>::type;
//! \cond DEV
namespace detail
{
// Detects whether T behaves as a contiguous-range-like type whose elements can be viewed through pointer type P:
// that is, `t.size()` is a valid expression and the pointer produced by `t.data()` is convertible to P. Used to
// accept vector/span-like containers generically.
template <class T, class P, class = void>
struct IsConvertibleRange : public std::false_type
{
};

template <class T, class P>
struct IsConvertibleRange<T,
                          P,
                          cpp::void_t<decltype(std::declval<T>().size()),
                                      std::enable_if_t<std::is_convertible<decltype(std::declval<T>().data()), P>::value>>>
    : public std::true_type
{
};
} // namespace detail
//! \endcond DEV
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Barrier.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<barrier>` library.
#pragma once
#include "Atomic.h"
#include "Bit.h"
#include <utility>
namespace carb
{
//! Namespace for C++ standard library types after C++14 implemented and usable by C++14 compilers.
namespace cpp
{
//! \cond DEV
namespace detail
{
// Sentinel phase number held by a default-constructed barrier::arrival_token. A live barrier never reports this
// phase value: barrier::_completePhase() skips it when the phase counter wraps around.
constexpr uint32_t kInvalidPhase = 0;

// Default CompletionFunction for carb::cpp::barrier: invoking it is a no-op.
struct NullFunction
{
    constexpr NullFunction() noexcept = default;
    constexpr void operator()() noexcept
    {
    }
};
} // namespace detail
//! \endcond
// Handle case where Windows.h may have defined 'max'
#pragma push_macro("max")
#undef max
/**
* Implements a C++20 barrier in C++14 semantics.
*
* A barrier is a thread coordination mechanism whose lifetime consists of a sequence of barrier phases, where
* each phase allows at most an expected number of threads to block until the expected number of threads
* arrive at the barrier. A barrier is useful for managing repeated tasks that are handled by multiple
* threads.
*
* @see https://en.cppreference.com/w/cpp/thread/barrier
* @tparam CompletionFunction A function object type that must meet the requirements of MoveConstructible and
* Destructible. `std::is_nothrow_invocable_v<CompletionFunction&>` must be `true`. The default template argument is an
* unspecified function object type that additionally meets the requirements of DefaultConstructible. Calling an lvalue
* of it with no arguments has no effects. Every barrier behaves as if it holds an exposition-only non-static data
* member `completion_` of type `CompletionFunction` and calls it by `completion_()` on every phase completion step.
*/
template <class CompletionFunction = detail::NullFunction>
class barrier
{
    CARB_PREVENT_COPY_AND_MOVE(barrier);

public:
    /**
     * Returns the maximum value of expected count supported by the implementation.
     * @returns the maximum value of expected count supported by the implementation.
     */
    static constexpr ptrdiff_t max() noexcept
    {
        return ptrdiff_t(INT_MAX);
    }

    /**
     * Constructor
     *
     * Sets both the initial expected count for each phase and the current expected count for the first phase to
     * @p expected, initializes the completion function object with `std::move(f)`, and then starts the first phase. The
     * behavior is undefined if @p expected is negative or greater than @ref max().
     *
     * @param expected Initial value of the expected count.
     * @param f Completion function object to be called on phase completion step.
     * @throws std::exception Any exception thrown by CompletionFunction's move constructor.
     */
    constexpr explicit barrier(ptrdiff_t expected, CompletionFunction f = CompletionFunction{})
        // Packed state starts at phase 1 (phase 0 is detail::kInvalidPhase) with the counter in the low 32 bits.
        : m_emo(InitBoth{}, std::move(f), (uint64_t(1) << kPhaseBitShift) + uint32_t(::carb_min(expected, max()))),
          m_expected(uint32_t(::carb_min(expected, max())))
    {
        CARB_ASSERT(expected >= 0 && expected <= max());
    }

    /**
     * Destructor
     *
     * The behavior is undefined if any thread is concurrently calling a member function of the barrier.
     *
     * @note This implementation waits until all waiting threads have woken, but this is a stronger guarantee than the
     * standard.
     */
    ~barrier()
    {
        // Wait for destruction until all waiters are clear
        while (m_waiters.load(std::memory_order_acquire) != 0)
            std::this_thread::yield();
    }

    /**
     * An object type meeting requirements of MoveConstructible, MoveAssignable and Destructible.
     * @see arrive() wait()
     */
    class arrival_token
    {
        CARB_PREVENT_COPY(arrival_token);
        friend class barrier;
        // Phase number this token is associated with; kInvalidPhase when default-constructed or moved-from.
        uint32_t m_token{ detail::kInvalidPhase };
        arrival_token(uint32_t token) : m_token(token)
        {
        }

    public:
        //! Constructor
        arrival_token() = default;

        //! Move constructor
        //! @param rhs Other @c arrival_token to move from. @c rhs is left in a valid but empty state.
        arrival_token(arrival_token&& rhs) : m_token(std::exchange(rhs.m_token, detail::kInvalidPhase))
        {
        }

        //! Move-assign operator
        //! @param rhs Other @c arrival_token to move from. @c rhs is left in a valid but empty state.
        //! @returns @c *this
        arrival_token& operator=(arrival_token&& rhs)
        {
            m_token = std::exchange(rhs.m_token, detail::kInvalidPhase);
            return *this;
        }
    };

    /**
     * Arrives at barrier and decrements the expected count
     *
     * Constructs an @ref arrival_token object associated with the phase synchronization point for the current phase.
     * Then, decrements the expected count by @p update.
     *
     * This function executes atomically. The call to this function strongly happens-before the start of the phase
     * completion step for the current phase.
     *
     * The behavior is undefined if @p update is less than or equal zero or greater than the expected count for the
     * current barrier phase.
     *
     * @param update The value by which the expected count is decreased.
     * @returns The constructed @ref arrival_token object.
     * @throws std::system_error According to the standard, but this implementation does not throw. Instead an assertion
     * occurs.
     * @see wait()
     */
    CARB_NODISCARD arrival_token arrive(ptrdiff_t update = 1)
    {
        // The token records the phase that this arrival contributed to.
        return arrival_token(uint32_t(_arrive(update).first >> kPhaseBitShift));
    }

    /**
     * Blocks at the phase synchronization point until its phase completion step is run.
     *
     * If @p arrival is associated with the phase synchronization point for the current phase of @c *this, blocks at the
     * synchronization point associated with @p arrival until the phase completion step of the synchronization point's
     * phase is run.
     *
     * Otherwise if @p arrival is associated with the phase synchronization point for the immediately preceding phase of
     * @c *this, returns immediately.
     *
     * Otherwise, i.e. if @p arrival is associated with the phase synchronization point for an earlier phase of @c *this
     * or any phase of a barrier object other than @c *this, the behavior is undefined.
     *
     * @param arrival An @ref arrival_token obtained by a previous call to @ref arrive() on the same barrier.
     * @throws std::system_error According to the standard, but this implementation does not throw. Instead an assertion
     * occurs.
     */
    void wait(arrival_token&& arrival) const
    {
        // Precondition: arrival is associated with the phase synchronization point for the current phase or the
        // immediately preceding phase.
        CARB_CHECK(arrival.m_token != 0); // No invalid tokens
        uint64_t data = m_emo.second.load(std::memory_order_acquire);
        uint32_t phase = uint32_t(data >> kPhaseBitShift);
        // Unsigned subtraction: a difference of 0 (current) or 1 (immediately preceding) is valid; anything else
        // wraps to a large value and fails the check.
        CARB_CHECK((phase - arrival.m_token) <= 1, "arrival %u is not the previous or current phase %u",
                   arrival.m_token, phase);
        // If the phase already advanced past the token's phase, the completion step has run; nothing to wait for.
        if (phase != arrival.m_token)
            return;
        // Register as a waiter
        m_waiters.fetch_add(1, std::memory_order_relaxed);
        do
        {
            // Wait for the phase to change
            m_emo.second.wait(data, std::memory_order_relaxed);
            // Reload after waiting
            data = m_emo.second.load(std::memory_order_acquire);
            phase = uint32_t(data >> kPhaseBitShift);
        } while (phase == arrival.m_token);
        // Unregister as a waiter
        m_waiters.fetch_sub(1, std::memory_order_release);
    }

    /**
     * Arrives at barrier and decrements the expected count by one, then blocks until the current phase completes.
     *
     * Atomically decrements the expected count by one, then blocks at the synchronization point for the current phase
     * until the phase completion step of the current phase is run. Equivalent to `wait(arrive())`.
     *
     * The behavior is undefined if the expected count for the current phase is zero.
     *
     * @note If the current expected count is decremented to zero in the call to this function, the phase completion
     * step is run and this function does not block. If the current expected count is zero before calling this function,
     * the initial expected count for all subsequent phases is also zero, which means the barrier cannot be reused.
     *
     * @throws std::system_error According to the standard, but this implementation does not throw. Instead an assertion
     * occurs.
     */
    void arrive_and_wait()
    {
        // Two main differences over just doing arrive(wait()):
        // - We return immediately if _arrive() did the phase shift
        // - We don't CARB_CHECK that the phase is the current or preceding one since it is guaranteed
        auto result = _arrive(1);
        if (result.second)
            return;
        // Register as a waiter
        m_waiters.fetch_add(1, std::memory_order_relaxed);
        uint64_t data = result.first;
        uint32_t origPhase = uint32_t(data >> kPhaseBitShift), phase;
        do
        {
            // Wait for the phase to change
            m_emo.second.wait(data, std::memory_order_relaxed);
            // Reload after waiting
            data = m_emo.second.load(std::memory_order_acquire);
            phase = uint32_t(data >> kPhaseBitShift);
        } while (phase == origPhase);
        // Unregister as a waiter
        m_waiters.fetch_sub(1, std::memory_order_release);
    }

    /**
     * Decrements both the initial expected count for subsequent phases and the expected count for current phase by one.
     *
     * This function is executed atomically. The call to this function strongly happens-before the start of the phase
     * completion step for the current phase.
     *
     * The behavior is undefined if the expected count for the current phase is zero.
     *
     * @note This function can cause the completion step for the current phase to start. If the current expected count
     * is zero before calling this function, the initial expected count for all subsequent phases is also zero, which
     * means the barrier cannot be reused.
     *
     * @throws std::system_error According to the standard, but this implementation does not throw. Instead an assertion
     * occurs.
     */
    void arrive_and_drop()
    {
        // Permanently lower the expected count used when future phases are (re)started in _completePhase()...
        uint32_t prev = m_expected.fetch_sub(1, std::memory_order_relaxed);
        CARB_CHECK(prev != 0); // Precondition failure: expected count for the current barrier phase must be greater
                               // than zero.
        // ...and also count this call as an arrival for the current phase.
        _arrive(1);
    }

private:
    // Layout of the packed 64-bit state in m_emo.second: high 32 bits = phase number, low 32 bits = remaining count.
    constexpr static int kPhaseBitShift = 32;
    constexpr static uint64_t kCounterMask = 0xffffffffull;

    // Atomically subtracts `update` from the counter portion of the packed state. Returns the post-subtraction packed
    // value and whether this call drove the counter to zero (and therefore ran the phase completion step).
    CARB_ALWAYS_INLINE std::pair<uint64_t, bool> _arrive(ptrdiff_t update)
    {
        CARB_CHECK(update > 0 && update <= max());
        uint64_t pre = m_emo.second.fetch_sub(uint32_t(update), std::memory_order_acq_rel);
        CARB_CHECK(ptrdiff_t(int32_t(uint32_t(pre & kCounterMask))) >= update); // Precondition check
        bool completed = false;
        if (uint32_t(pre & kCounterMask) == uint32_t(update))
        {
            // Phase is now complete
            std::atomic_thread_fence(std::memory_order_acquire);
            _completePhase(pre - uint32_t(update));
            completed = true;
        }
        return std::make_pair(pre - uint32_t(update), completed);
    }

    // Runs the phase completion step: invokes the completion function, advances the phase number, reloads the counter
    // from m_expected and wakes all waiting threads. `data` is the packed state with a zero counter.
    void _completePhase(uint64_t data)
    {
        uint32_t expected = m_expected.load(std::memory_order_relaxed);
        // Run the completion routine before releasing threads
        m_emo.first()();
        // Increment the phase and don't allow the invalid phase
        uint32_t phase = uint32_t(data >> kPhaseBitShift);
        if (++phase == detail::kInvalidPhase)
            ++phase;
#if CARB_ASSERT_ENABLED
        // Should not have changed during completion function.
        uint64_t old = m_emo.second.exchange((uint64_t(phase) << kPhaseBitShift) + expected, std::memory_order_release);
        CARB_ASSERT(old == data);
#else
        m_emo.second.store((uint64_t(phase) << kPhaseBitShift) + expected, std::memory_order_release);
#endif
        // Release all waiting threads
        m_emo.second.notify_all();
    }

    // The MSB 32 bits of the atomic_uint64_t are the Phase; the other bits are the Counter
    EmptyMemberPair<CompletionFunction, atomic_uint64_t> m_emo;
    // Expected count used to (re)start each new phase; only ever reduced, by arrive_and_drop().
    std::atomic_uint32_t m_expected;
    // Number of threads currently blocked in wait()/arrive_and_wait(); the destructor spins until this reaches zero.
    mutable std::atomic_uint32_t m_waiters{ 0 };
};
#pragma pop_macro("max")
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Optional.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// Implements std::optional from C++17 using C++14 paradigms.
// Heavily borrowed from MS STL: https://github.com/microsoft/STL/blob/master/stl/inc/optional
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<optional>` library.
#pragma once
#include "../Defines.h"
#include "Utility.h"
#define CARB_IMPLOPTIONAL
#include "detail/ImplOptional.h"
#undef CARB_IMPLOPTIONAL
namespace carb
{
namespace cpp
{
//! Tag type used to indicate an \ref optional with uninitialized state; see \c std::nullopt_t.
struct nullopt_t
{
    //! Disambiguation tag so that \c nullopt_t is neither default-constructible nor usable in aggregate
    //! initialization; only the \ref nullopt constant below should construct it.
    struct Tag
    {
    };
    //! Constructor
    explicit constexpr nullopt_t(Tag)
    {
    }
};

//! Constant of type \ref nullopt_t used to indicate an \ref optional with uninitialized state; see \c std::nullopt.
static constexpr nullopt_t nullopt{ nullopt_t::Tag{} };
//! Exception type thrown by \ref optional when a value-accessing member function (such as \c value()) is invoked on
//! an optional that does not contain a value. Mirrors \c std::bad_optional_access.
class bad_optional_access final : public std::exception
{
public:
    //! Default constructor.
    bad_optional_access() noexcept = default;

    //! Copy constructor.
    bad_optional_access(const bad_optional_access&) noexcept = default;

    //! Copy-assignment operator.
    //! @returns @c *this
    bad_optional_access& operator=(const bad_optional_access&) noexcept = default;

    //! Retrieves an explanatory message.
    //! @returns The string "bad optional access".
    const char* what() const noexcept override
    {
        return "bad optional access";
    }
};
template <class T>
class CARB_VIZ optional : private detail::SelectHierarchy<detail::OptionalConstructor<T>, T>
{
using BaseClass = detail::SelectHierarchy<detail::OptionalConstructor<T>, T>;
static_assert(!std::is_same<std::remove_cv_t<T>, nullopt_t>::value &&
!std::is_same<std::remove_cv_t<T>, in_place_t>::value,
"T may not be nullopt_t or inplace_t");
static_assert(std::is_object<T>::value && std::is_destructible<T>::value && !std::is_array<T>::value,
"T does not meet Cpp17Destructible requirements");
// Essentially: !is_same(U, optional) && !is_same(U, in_place_t) && is_constructible(T from U)
template <class U>
using AllowDirectConversion = bool_constant<
conjunction<negation<std::is_same<typename std::remove_reference_t<typename std::remove_cv_t<U>>, optional>>,
negation<std::is_same<typename std::remove_reference_t<typename std::remove_cv_t<U>>, in_place_t>>,
std::is_constructible<T, U>>::value>;
// Essentially: !(is_same(T, U) || is_constructible(T from optional<U>) || is_convertible(optional<U> to T))
template <class U>
struct AllowUnwrapping : bool_constant<!disjunction<std::is_same<T, U>,
std::is_constructible<T, optional<U>&>,
std::is_constructible<T, const optional<U>&>,
std::is_constructible<T, const optional<U>>,
std::is_constructible<T, optional<U>>,
std::is_convertible<optional<U>&, T>,
std::is_convertible<const optional<U>&, T>,
std::is_convertible<const optional<U>, T>,
std::is_convertible<optional<U>, T>>::value>
{
};
// Essentially: !(is_same(T, U) || is_assignable(T& from optional<U>))
template <class U>
struct AllowUnwrappingAssignment : bool_constant<!disjunction<std::is_same<T, U>,
std::is_assignable<T&, optional<U>&>,
std::is_assignable<T&, const optional<U>&>,
std::is_assignable<T&, const optional<U>>,
std::is_assignable<T&, optional<U>>>::value>
{
};
[[noreturn]] static void onBadAccess()
{
#if CARB_EXCEPTIONS_ENABLED
throw bad_optional_access();
#else
CARB_FATAL_UNLESS(0, "bad optional access");
#endif
}
public:
using value_type = T;
constexpr optional() noexcept
{
}
constexpr optional(nullopt_t) noexcept
{
}
optional(const optional& other) : BaseClass(static_cast<const BaseClass&>(other))
{
}
optional(optional&& other) noexcept(std::is_nothrow_move_constructible<T>::value)
: BaseClass(static_cast<BaseClass&&>(std::move(other)))
{
}
optional& operator=(const optional& other)
{
if (other)
this->assign(*other);
else
reset();
return *this;
}
optional& operator=(optional&& other) noexcept(
std::is_nothrow_move_assignable<T>::value&& std::is_nothrow_move_constructible<T>::value)
{
if (other)
this->assign(std::move(*other));
else
reset();
return *this;
}
// The spec states that this is conditionally-explicit, which is a C++20 feature, so we have to work around it by
// having two functions with SFINAE
template <class U,
typename std::enable_if_t<
conjunction<AllowUnwrapping<U>, std::is_constructible<T, const U&>, std::is_convertible<const U&, T>>::value,
int> = 0>
optional(const optional<U>& other)
{
if (other)
this->construct(*other);
}
template <
class U,
typename std::enable_if_t<
conjunction<AllowUnwrapping<U>, std::is_constructible<T, const U&>, negation<std::is_convertible<const U&, T>>>::value,
int> = 0>
explicit optional(const optional<U>& other)
{
if (other)
this->construct(*other);
}
// The spec states that this is conditionally-explicit, which is a C++20 feature, so we have to work around it by
// having two functions with SFINAE
template <
class U,
typename std::enable_if_t<conjunction<AllowUnwrapping<U>, std::is_constructible<T, U>, std::is_convertible<U, T>>::value,
int> = 0>
optional(optional<U>&& other)
{
if (other)
this->construct(std::move(*other));
}
// Converting move constructor (explicit overload): selected when T is
// constructible from U but U is not implicitly convertible to T. If `other` is
// engaged, move-constructs the contained value from *other; `other` remains
// engaged holding a moved-from value.
template <class U,
          typename std::enable_if_t<
              conjunction<AllowUnwrapping<U>, std::is_constructible<T, U>, negation<std::is_convertible<U, T>>>::value,
              int> = 0>
explicit optional(optional<U>&& other)
{
    if (other)
        this->construct(std::move(*other));
}

// In-place constructor: constructs the contained value directly from `args`.
template <class... Args, typename std::enable_if_t<std::is_constructible<T, Args...>::value, int> = 0>
optional(in_place_t, Args&&... args) : BaseClass(in_place, std::forward<Args>(args)...)
{
}

// In-place constructor taking an initializer list plus trailing arguments.
template <class U,
          class... Args,
          typename std::enable_if_t<std::is_constructible<T, std::initializer_list<U>&, Args...>::value, int> = 0>
optional(in_place_t, std::initializer_list<U> ilist, Args&&... args)
    : BaseClass(in_place, ilist, std::forward<Args>(args)...)
{
}

// Direct-from-value constructors. The spec says this constructor is
// conditionally explicit, which is a C++20 feature, so it is emulated with two
// SFINAE-selected overloads: implicit when U is convertible to T, explicit
// otherwise.
template <class U = value_type,
          typename std::enable_if_t<conjunction<AllowDirectConversion<U>, std::is_convertible<U, T>>::value, int> = 0>
constexpr optional(U&& value) : BaseClass(in_place, std::forward<U>(value))
{
}
template <class U = value_type,
          typename std::enable_if_t<conjunction<AllowDirectConversion<U>, negation<std::is_convertible<U, T>>>::value, int> = 0>
constexpr explicit optional(U&& value) : BaseClass(in_place, std::forward<U>(value))
{
}

// Destruction of any contained value is handled by the base class.
~optional() = default;

// Disengages the optional, destroying the contained value (if any).
optional& operator=(nullopt_t) noexcept
{
    reset();
    return *this;
}

// Perfect-forwarding value assignment. Excluded when U is this optional type
// (the copy/move assignments handle that) and for the scalar same-type case,
// matching the standard's overload constraints.
template <class U = T,
          typename std::enable_if_t<
              conjunction<negation<std::is_same<optional, typename std::remove_cv_t<typename std::remove_reference_t<U>>>>,
                          negation<conjunction<std::is_scalar<T>, std::is_same<T, typename std::decay_t<U>>>>,
                          std::is_constructible<T, U>,
                          std::is_assignable<T&, U>>::value,
              int> = 0>
optional& operator=(U&& value)
{
    this->assign(std::forward<U>(value));
    return *this;
}

// Converting copy assignment: copies the value from `other` when engaged,
// otherwise disengages *this.
template <
    class U,
    typename std::enable_if_t<
        conjunction<AllowUnwrappingAssignment<U>, std::is_constructible<T, const U&>, std::is_assignable<T&, const U&>>::value,
        int> = 0>
optional& operator=(const optional<U>& other)
{
    if (other)
        this->assign(*other);
    else
        reset();
    return *this;
}

// Converting move assignment: moves the value from `other` when engaged,
// otherwise disengages *this. `other` stays engaged (moved-from).
template <class U,
          typename std::enable_if_t<
              conjunction<AllowUnwrappingAssignment<U>, std::is_constructible<T, U>, std::is_assignable<T&, U>>::value,
              int> = 0>
optional& operator=(optional<U>&& other)
{
    if (other)
        this->assign(std::move(*other));
    else
        reset();
    return *this;
}
// Unchecked access to the contained value. As with std::optional, behavior is
// undefined when the optional is disengaged.
constexpr const T* operator->() const
{
    return std::addressof(this->val());
}
constexpr T* operator->()
{
    return std::addressof(this->val());
}
constexpr const T& operator*() const&
{
    return this->val();
}
constexpr T& operator*() &
{
    return this->val();
}
// Rvalue overloads move the stored value out.
constexpr const T&& operator*() const&&
{
    return std::move(this->val());
}
constexpr T&& operator*() &&
{
    return std::move(this->val());
}

// True when a value is contained.
constexpr explicit operator bool() const noexcept
{
    return this->hasValue;
}
constexpr bool has_value() const noexcept
{
    return this->hasValue;
}

// Checked access: when disengaged, reports the error via onBadAccess()
// (defined elsewhere in this file; presumably throws bad_optional_access or
// terminates depending on exception support -- confirm at its definition).
constexpr const T& value() const&
{
    if (!this->hasValue)
        onBadAccess();
    return this->val();
}
constexpr T& value() &
{
    if (!this->hasValue)
        onBadAccess();
    return this->val();
}
constexpr const T&& value() const&&
{
    if (!this->hasValue)
        onBadAccess();
    return std::move(this->val());
}
constexpr T&& value() &&
{
    if (!this->hasValue)
        onBadAccess();
    return std::move(this->val());
}
// Returns the contained value, or `default_value` converted to T when
// disengaged (const lvalue overload: the stored value is copied).
template <class U>
constexpr typename std::remove_cv_t<T> value_or(U&& default_value) const&
{
    static_assert(
        std::is_convertible<const T&, typename std::remove_cv_t<T>>::value,
        "The const overload of optional<T>::value_or() requires const T& to be convertible to std::remove_cv_t<T>");
    static_assert(std::is_convertible<U, T>::value, "optional<T>::value_or() requires U to be convertible to T");
    if (this->hasValue)
        return this->val();
    return static_cast<typename std::remove_cv_t<T>>(std::forward<U>(default_value));
}
// Rvalue overload.
// NOTE(review): std::optional moves the stored value out here; this returns
// `this->val()` without std::move, which appears to copy -- confirm whether
// val() is ref-qualified before relying on move semantics.
template <class U>
constexpr typename std::remove_cv_t<T> value_or(U&& default_value) &&
{
    static_assert(
        std::is_convertible<T, typename std::remove_cv_t<T>>::value,
        "The rvalue overload of optional<T>::value_or() requires T to be convertible to std::remove_cv_t<T>");
    static_assert(std::is_convertible<U, T>::value, "optional<T>::value_or() requires U to be convertible to T");
    if (this->hasValue)
        return this->val();
    return static_cast<typename std::remove_cv_t<T>>(std::forward<U>(default_value));
}
// Swaps two optionals. Both engaged: ADL swap of the values. Exactly one
// engaged: move-construct into the empty side, then disengage the source.
// Both empty: no-op.
void swap(optional& other) noexcept(std::is_nothrow_move_constructible<T>::value&& is_nothrow_swappable<T>::value)
{
    static_assert(std::is_move_constructible<T>::value, "T must be move constructible");
    static_assert(!std::is_move_constructible<T>::value || is_swappable<T>::value, "T must be swappable");
    const bool engaged = this->hasValue;
    if (engaged == other.hasValue)
    {
        if (engaged)
        {
            using std::swap; // Enable ADL
            swap(**this, *other);
        }
    }
    else
    {
        optional& source = engaged ? *this : other;
        optional& target = engaged ? other : *this;
        target.construct(std::move(*source));
        source.reset();
    }
}
// Expose the base-class reset(): destroys the contained value, if any.
using BaseClass::reset;
// Destroys any contained value, then constructs a new one in place from
// `args`; returns a reference to the newly constructed value.
template <class... Args>
T& emplace(Args&&... args)
{
    reset();
    return this->construct(std::forward<Args>(args)...);
}
template <class U,
          class... Args,
          typename std::enable_if_t<std::is_constructible<T, std::initializer_list<U>&, Args...>::value, int> = 0>
T& emplace(std::initializer_list<U> ilist, Args&&... args)
{
    reset();
    return this->construct(ilist, std::forward<Args>(args)...);
}
};
//! Two optionals are equal when both are empty, or both are engaged and their
//! contained values compare equal.
template <class T, class U>
constexpr bool operator==(const optional<T>& lhs, const optional<U>& rhs)
{
    if (lhs.has_value() != rhs.has_value())
        return false;
    return !lhs.has_value() || *lhs == *rhs;
}
//! Inequality: differing engagement, or both engaged with unequal values.
template <class T, class U>
constexpr bool operator!=(const optional<T>& lhs, const optional<U>& rhs)
{
    if (lhs.has_value() != rhs.has_value())
        return true;
    return lhs.has_value() && *lhs != *rhs;
}
//! An empty optional orders before any engaged one.
template <class T, class U>
constexpr bool operator<(const optional<T>& lhs, const optional<U>& rhs)
{
    if (!rhs.has_value())
        return false;
    return !lhs.has_value() || *lhs < *rhs;
}
template <class T, class U>
constexpr bool operator<=(const optional<T>& lhs, const optional<U>& rhs)
{
    if (!lhs.has_value())
        return true;
    return rhs.has_value() && *lhs <= *rhs;
}
template <class T, class U>
constexpr bool operator>(const optional<T>& lhs, const optional<U>& rhs)
{
    if (!lhs.has_value())
        return false;
    return !rhs.has_value() || *lhs > *rhs;
}
template <class T, class U>
constexpr bool operator>=(const optional<T>& lhs, const optional<U>& rhs)
{
    if (!rhs.has_value())
        return true;
    return lhs.has_value() && *lhs >= *rhs;
}
//! Comparisons against nullopt_t: nullopt compares equal to an empty optional
//! and orders before any engaged one (same total order as the
//! optional-vs-optional operators with an empty operand).
template <class T>
constexpr bool operator==(const optional<T>& opt, nullopt_t) noexcept
{
    return !opt;
}
template <class T>
constexpr bool operator==(nullopt_t, const optional<T>& opt) noexcept
{
    return !opt;
}
template <class T>
constexpr bool operator!=(const optional<T>& opt, nullopt_t) noexcept
{
    return static_cast<bool>(opt);
}
template <class T>
constexpr bool operator!=(nullopt_t, const optional<T>& opt) noexcept
{
    return static_cast<bool>(opt);
}
//! Nothing orders below nullopt.
template <class T>
constexpr bool operator<(const optional<T>& opt, nullopt_t) noexcept
{
    CARB_UNUSED(opt);
    return false;
}
//! nullopt orders below any engaged optional.
template <class T>
constexpr bool operator<(nullopt_t, const optional<T>& opt) noexcept
{
    return static_cast<bool>(opt);
}
template <class T>
constexpr bool operator<=(const optional<T>& opt, nullopt_t) noexcept
{
    return !opt;
}
template <class T>
constexpr bool operator<=(nullopt_t, const optional<T>& opt) noexcept
{
    CARB_UNUSED(opt);
    return true;
}
template <class T>
constexpr bool operator>(const optional<T>& opt, nullopt_t) noexcept
{
    return static_cast<bool>(opt);
}
template <class T>
constexpr bool operator>(nullopt_t, const optional<T>& opt) noexcept
{
    CARB_UNUSED(opt);
    return false;
}
template <class T>
constexpr bool operator>=(const optional<T>& opt, nullopt_t) noexcept
{
    CARB_UNUSED(opt);
    return true;
}
template <class T>
constexpr bool operator>=(nullopt_t, const optional<T>& opt) noexcept
{
    return !opt;
}
//! Comparisons between an optional and a plain value: an empty optional is
//! never equal to a value and always orders before it. Each operator only
//! touches the contained value when the optional is engaged (short-circuit).
template <class T, class U, detail::EnableIfComparableWithEqual<T, U> = 0>
constexpr bool operator==(const optional<T>& opt, const U& value)
{
    return opt.has_value() && *opt == value;
}
template <class T, class U, detail::EnableIfComparableWithEqual<T, U> = 0>
constexpr bool operator==(const T& value, const optional<U>& opt)
{
    return opt.has_value() && *opt == value;
}
template <class T, class U, detail::EnableIfComparableWithNotEqual<T, U> = 0>
constexpr bool operator!=(const optional<T>& opt, const U& value)
{
    return !opt.has_value() || *opt != value;
}
template <class T, class U, detail::EnableIfComparableWithNotEqual<T, U> = 0>
constexpr bool operator!=(const T& value, const optional<U>& opt)
{
    return !opt.has_value() || *opt != value;
}
template <class T, class U, detail::EnableIfComparableWithLess<T, U> = 0>
constexpr bool operator<(const optional<T>& opt, const U& value)
{
    return !opt.has_value() || *opt < value;
}
template <class T, class U, detail::EnableIfComparableWithLess<T, U> = 0>
constexpr bool operator<(const T& value, const optional<U>& opt)
{
    return opt.has_value() && value < *opt;
}
template <class T, class U, detail::EnableIfComparableWithLessEqual<T, U> = 0>
constexpr bool operator<=(const optional<T>& opt, const U& value)
{
    return !opt.has_value() || *opt <= value;
}
template <class T, class U, detail::EnableIfComparableWithLessEqual<T, U> = 0>
constexpr bool operator<=(const T& value, const optional<U>& opt)
{
    return opt.has_value() && value <= *opt;
}
template <class T, class U, detail::EnableIfComparableWithGreater<T, U> = 0>
constexpr bool operator>(const optional<T>& opt, const U& value)
{
    return opt.has_value() && *opt > value;
}
template <class T, class U, detail::EnableIfComparableWithGreater<T, U> = 0>
constexpr bool operator>(const T& value, const optional<U>& opt)
{
    return !opt.has_value() || value > *opt;
}
template <class T, class U, detail::EnableIfComparableWithGreaterEqual<T, U> = 0>
constexpr bool operator>=(const optional<T>& opt, const U& value)
{
    return opt.has_value() && *opt >= value;
}
template <class T, class U, detail::EnableIfComparableWithGreaterEqual<T, U> = 0>
constexpr bool operator>=(const T& value, const optional<U>& opt)
{
    return !opt.has_value() || value >= *opt;
}
//! ADL-found swap for optional; delegates to the member swap. Participates
//! only when T is move-constructible and swappable.
template <class T, typename std::enable_if_t<std::is_move_constructible<T>::value && is_swappable<T>::value, int> = 0>
void swap(optional<T>& lhs, optional<T>& rhs) noexcept(noexcept(lhs.swap(rhs)))
{
    lhs.swap(rhs);
}
//! Creates an engaged optional holding the decayed copy/move of \p value.
template <class T>
constexpr optional<typename std::decay_t<T>> make_optional(T&& value)
{
    using U = typename std::decay_t<T>;
    return optional<U>(std::forward<T>(value));
}
//! Creates an optional constructing T in place from \p args.
template <class T, class... Args>
constexpr optional<T> make_optional(Args&&... args)
{
    return optional<T>(in_place, std::forward<Args>(args)...);
}
//! In-place construction from an initializer list plus trailing arguments.
template <class T, class U, class... Args>
constexpr optional<T> make_optional(std::initializer_list<U> il, Args&&... args)
{
    return optional<T>(in_place, il, std::forward<Args>(args)...);
}
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Variant.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<variant>` library.
#pragma once
#include "../Defines.h"
#include "TypeTraits.h"
#include "Utility.h"
#include <cstdint>
#include <exception>
#include <new>
#include <tuple>
#include <utility>
// This class is a not quite standards conformant implementation of std::variant. Where it doesn't comply it is
// in the sense that it doesn't support everything. Such as all constexpr usages. Part of this is because it
// isn't possible on a C++14 compiler, the other part is full coverage of this class is difficult. Feel free
// to expand this class and make it more conforming to all use cases the standard version can given it will
// still compile on C++14. The long term intention is we will move to a C++17 compiler, and import the std
// version of this class, removing this code from our codebase. Therefore it is very important that this class
// doesn't do anything that the std can't, though the opposite is permissible.
namespace carb
{
namespace cpp
{
//! Index value returned by variant::index() when the variant is valueless;
//! mirrors std::variant_npos. Uses static_cast rather than a C-style cast to
//! make the intentional wrap-around explicit.
static constexpr std::size_t variant_npos = static_cast<std::size_t>(-1);

// Forward declarations.
template <typename... Types>
class variant;

template <std::size_t I, typename T>
struct variant_alternative;
//! Exception type reporting invalid access to a variant alternative; mirrors
//! std::bad_variant_access. Default/copy operations never throw.
class bad_variant_access final : public std::exception
{
public:
    bad_variant_access() noexcept = default;
    bad_variant_access(const bad_variant_access&) noexcept = default;
    bad_variant_access& operator=(const bad_variant_access&) noexcept = default;

    //! \returns a static explanatory string; never fails.
    virtual const char* what() const noexcept override
    {
        static constexpr char desc[] = "bad variant access";
        return desc;
    }
};
namespace detail
{
// Common pathway for bad_variant_access.
// Common pathway for reporting invalid variant access: throws
// bad_variant_access when exceptions are enabled, otherwise terminates.
[[noreturn]] inline void on_bad_variant_access()
{
#if CARB_EXCEPTIONS_ENABLED
    throw bad_variant_access();
#else
    std::terminate();
#endif
}
// Recursive union storage for the variant alternatives: each level holds
// either the current alternative (m_value) or the tail of the type list
// (m_next). Which member is active is tracked externally (VariantBaseImpl).
// Two specializations: trivially-destructible packs need no destructor; the
// other declares an empty destructor so the union admits non-trivial members
// (actual destruction is performed by visitation elsewhere).
// (Template parameter name "Desctructable" is a pre-existing typo.)
template <bool IsTriviallyDesctructable, typename... Types>
class VariantHold;
// Terminator for the empty type list.
template <bool IsTriviallyDesctructable>
class VariantHold<IsTriviallyDesctructable>
{
    static constexpr size_t size = 0;
};
// Trivially-destructible storage level.
template <typename T, typename... Types>
class VariantHold<true, T, Types...>
{
public:
    static constexpr size_t size = 1 + sizeof...(Types);
    using Next = VariantHold<true, Types...>;
    union
    {
        CARB_VIZ std::remove_const_t<T> m_value;
        CARB_VIZ Next m_next;
    };
    // Leaves the union uninitialized; a value is constructed later.
    constexpr VariantHold() noexcept
    {
    }
    template <class... Args>
    constexpr VariantHold(in_place_index_t<0>, Args&&... args) noexcept : m_value(std::forward<Args>(args)...)
    {
    }
    // Peels one index per recursion level to construct alternative I.
    template <size_t I, class... Args>
    constexpr VariantHold(in_place_index_t<I>, Args&&... args) noexcept
        : m_next(in_place_index<I - 1>, std::forward<Args>(args)...)
    {
    }
    constexpr T& get() & noexcept
    {
        return m_value;
    }
    constexpr const T& get() const& noexcept
    {
        return m_value;
    }
    constexpr T&& get() && noexcept
    {
        return std::move(m_value);
    }
    constexpr const T&& get() const&& noexcept
    {
        return std::move(m_value);
    }
};
// Non-trivially-destructible storage level: identical except for the empty
// user-provided destructor required for a union with non-trivial members.
template <typename T, typename... Types>
class VariantHold<false, T, Types...>
{
public:
    static constexpr size_t size = 1 + sizeof...(Types);
    using Next = VariantHold<false, Types...>;
    union
    {
        CARB_VIZ std::remove_const_t<T> m_value;
        CARB_VIZ Next m_next;
    };
    constexpr VariantHold() noexcept
    {
    }
    template <class... Args>
    constexpr VariantHold(in_place_index_t<0>, Args&&... args) noexcept : m_value(std::forward<Args>(args)...)
    {
    }
    template <size_t I, class... Args>
    constexpr VariantHold(in_place_index_t<I>, Args&&... args) noexcept
        : m_next(in_place_index<I - 1>, std::forward<Args>(args)...)
    {
    }
    // Intentionally destroys nothing; the active alternative's destructor is
    // invoked by visitation (VariantBaseImpl<false>::destroy()) beforehand.
    ~VariantHold()
    {
    }
    constexpr T& get() & noexcept
    {
        return m_value;
    }
    constexpr const T& get() const& noexcept
    {
        return m_value;
    }
    constexpr T&& get() && noexcept
    {
        return std::move(m_value);
    }
    constexpr const T&& get() const&& noexcept
    {
        return std::move(m_value);
    }
};
// Walks I levels of m_next links to reach alternative I's storage.
template <size_t I>
struct VariantGetFromHold
{
    template <class Hold>
    static decltype(auto) get(Hold&& hold)
    {
        return VariantGetFromHold<I - 1>::get(hold.m_next);
    }
};
template <>
struct VariantGetFromHold<0>
{
    template <class Hold>
    static decltype(auto) get(Hold&& hold)
    {
        return hold.get();
    }
};
// Bounds-clamped accessor: clamps I to the last alternative so that
// out-of-range instantiations generated by the switch-expansion macros below
// still compile; such branches are never taken at run time. Note the unusual
// spelling: this parses as VariantGetFromHold<(I < size ? I : size - 1)>.
template <size_t I, typename Hold>
constexpr decltype(auto) variant_get_from_hold(Hold&& hold)
{
    constexpr size_t size = std::remove_reference_t<Hold>::size;
    return VariantGetFromHold < I < size ? I : size - 1 > ::get(hold);
}
// Visitor with index feedback: the functor is invoked as functor(I, value).
template <size_t I, typename Functor, typename Hold>
static decltype(auto) visitWithIndex(Functor&& functor, Hold&& hold)
{
    return std::forward<Functor>(functor)(I, VariantGetFromHold<I>::get(static_cast<Hold&&>(hold)));
}
// Strategy selector: specializations 0..4 dispatch via a switch of 1/2/4/8/16
// cases; -1 is the function-pointer-table fallback for larger counts.
template <int I>
struct VisitWithIndexHelper;
// Function-pointer table mapping a runtime alternative index to the matching
// visitWithIndex instantiation.
template <typename Functor, typename Hold, typename Ids = std::make_index_sequence<std::remove_reference<Hold>::type::size>>
struct VisitWithIndexTable;
template <typename Functor, typename Hold, size_t... Ids>
struct VisitWithIndexTable<Functor, Hold, std::index_sequence<Ids...>>
{
    // NOTE(review): return_type is deduced from functor(value), but the table
    // entries invoke functor(I, value) -- confirm both call forms agree for
    // functors used with more than 16 alternatives.
    using return_type = decltype(std::declval<Functor>()(VariantGetFromHold<0>::get(std::declval<Hold>())));
    using f_table_type = return_type (*)(Functor&&, Hold&&);
    static f_table_type& table(size_t id)
    {
        static f_table_type tbl[] = { &visitWithIndex<Ids, Functor, Hold>... };
        return tbl[id];
    }
};
// Fallback dispatcher: table lookup by runtime index.
template <>
struct VisitWithIndexHelper<-1>
{
    template <typename Functor, typename Hold>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold&& hold)
    {
        auto& entry = VisitWithIndexTable<Functor, Hold>::table(index);
        return entry(std::forward<Functor>(functor), std::forward<Hold>(hold));
    }
};
// Case-generator macros for the switch dispatchers below. Case labels beyond
// the real alternative count are clamped by `n < size ? n : size - 1` and are
// unreachable at run time.
#define VISIT_WITH_INDEX_1(n)                                                                                          \
    case (n):                                                                                                          \
        return std::forward<Functor>(functor)(                                                                         \
            n, VariantGetFromHold < n < size ? n : size - 1 > ::get(static_cast<Hold&&>(hold)));
#define VISIT_WITH_INDEX_2(n)                                                                                          \
    VISIT_WITH_INDEX_1(n)                                                                                              \
    VISIT_WITH_INDEX_1(n + 1)
#define VISIT_WITH_INDEX_4(n)                                                                                          \
    VISIT_WITH_INDEX_2(n)                                                                                              \
    VISIT_WITH_INDEX_2(n + 2)
#define VISIT_WITH_INDEX_8(n)                                                                                          \
    VISIT_WITH_INDEX_4(n)                                                                                              \
    VISIT_WITH_INDEX_4(n + 4)
#define VISIT_WITH_INDEX_16(n)                                                                                         \
    VISIT_WITH_INDEX_8(n)                                                                                              \
    VISIT_WITH_INDEX_8(n + 8)
// Switch dispatchers; `default:` falls through into `case (0)`, so an invalid
// index degenerates to alternative 0 rather than undefined behavior.
template <>
struct VisitWithIndexHelper<0>
{
    template <typename Functor, typename Hold>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold&& hold)
    {
        constexpr size_t size = std::remove_reference_t<Hold>::size;
        switch (index)
        {
            default:
                VISIT_WITH_INDEX_1(0);
        }
    }
};
template <>
struct VisitWithIndexHelper<1>
{
    template <typename Functor, typename Hold>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold&& hold)
    {
        constexpr size_t size = std::remove_reference_t<Hold>::size;
        switch (index)
        {
            default:
                VISIT_WITH_INDEX_2(0);
        }
    }
};
template <>
struct VisitWithIndexHelper<2>
{
    template <typename Functor, typename Hold>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold&& hold)
    {
        constexpr size_t size = std::remove_reference_t<Hold>::size;
        switch (index)
        {
            default:
                VISIT_WITH_INDEX_4(0);
        }
    }
};
template <>
struct VisitWithIndexHelper<3>
{
    template <typename Functor, typename Hold>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold&& hold)
    {
        constexpr size_t size = std::remove_reference_t<Hold>::size;
        switch (index)
        {
            default:
                VISIT_WITH_INDEX_8(0);
        }
    }
};
template <>
struct VisitWithIndexHelper<4>
{
    template <typename Functor, typename Hold>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold&& hold)
    {
        constexpr size_t size = std::remove_reference_t<Hold>::size;
        switch (index)
        {
            default:
                VISIT_WITH_INDEX_16(0);
        }
    }
};
#undef VISIT_WITH_INDEX_1
#undef VISIT_WITH_INDEX_2
#undef VISIT_WITH_INDEX_4
#undef VISIT_WITH_INDEX_8
#undef VISIT_WITH_INDEX_16
// Entry point (lets the template parameters auto-deduce): dispatches
// functor(index, value) over the hold's active alternative, choosing a switch
// sized to the alternative count or the table fallback (-1) above 16.
template <typename Functor, typename Hold>
decltype(auto) visitorWithIndex(Functor&& functor, size_t typeIndex, Hold&& hold)
{
    constexpr int size = std::remove_reference<Hold>::type::size;
    constexpr int version = size <= 1 ? 0 : size <= 2 ? 1 : size <= 4 ? 2 : size <= 8 ? 3 : size <= 16 ? 4 : -1;
    return VisitWithIndexHelper<version>::issue(std::forward<Functor>(functor), typeIndex, std::forward<Hold>(hold));
}
// Visitor without index feedback. Dispatcher<N> visits N variants
// simultaneously; only N == 1 and N == 2 are implemented here.
template <size_t I>
struct Dispatcher;
template <>
struct Dispatcher<1>
{
    template <typename Functor, typename Impl>
    using return_type = decltype(std::declval<Functor>()(VariantGetFromHold<0>::get(std::declval<Impl>())));
    template <size_t I, typename Functor, typename Impl>
    static decltype(auto) issue(Functor&& functor, Impl&& impl)
    {
        return std::forward<Functor>(functor)(variant_get_from_hold<I>(std::forward<Impl>(impl)));
    }
};
template <>
struct Dispatcher<2>
{
    template <typename Functor, typename Impl1, typename Impl2>
    using return_type = decltype(std::declval<Functor>()(
        VariantGetFromHold<0>::get(std::declval<Impl1>()), VariantGetFromHold<0>::get(std::declval<Impl2>())));
    // I is a flat index over both variants' states; it is decomposed
    // mixed-radix into (I1, I2) using the first variant's size as the radix.
    template <size_t I, typename Functor, typename Impl1, typename Impl2>
    static decltype(auto) issue(Functor&& functor, Impl1&& impl1, Impl2&& impl2)
    {
        constexpr size_t size1 = std::remove_reference<Impl1>::type::size;
        constexpr size_t I1 = I % size1;
        constexpr size_t I2 = I / size1;
        return functor(variant_get_from_hold<I1>(std::forward<Impl1>(impl1)),
                       variant_get_from_hold<I2>(std::forward<Impl2>(impl2)));
    }
};
// Product of the state counts of a pack of holds/variants (total number of
// flat dispatch states).
template <typename... Impl>
struct TotalStates;
template <typename Impl>
struct TotalStates<Impl>
{
    static constexpr size_t value = std::remove_reference<Impl>::type::size;
};
template <typename Impl, typename... Rem>
struct TotalStates<Impl, Rem...>
{
    static constexpr size_t value = std::remove_reference<Impl>::type::size * TotalStates<Rem...>::value;
};
// Type-list carrier; not referenced elsewhere in this file as far as visible.
template <typename... Ts>
struct Package
{
};
// Function-pointer table for the fallback (> 16 total states) visitation
// path: maps a flat state index to the matching Dispatcher::issue
// instantiation. Instance is parameterized on an index_sequence covering all
// flat states.
template <typename Functor, typename... Impl>
struct VisitHelperTable
{
    template <typename Ids>
    struct Instance;
};
template <typename Functor, typename... Impl>
template <size_t... Ids>
struct VisitHelperTable<Functor, Impl...>::Instance<std::index_sequence<Ids...>>
{
    using dispatcher = Dispatcher<sizeof...(Impl)>;
    using return_type = typename dispatcher::template return_type<Functor, Impl...>;
    using f_table_type = return_type (*)(Functor&&, Impl&&...);
    static f_table_type& table(size_t id)
    {
        static f_table_type tbl[] = { &Dispatcher<sizeof...(Impl)>::template issue<Ids, Functor>... };
        return tbl[id];
    }
};
// Recursion terminator: an empty pack contributes index 0. Marked constexpr
// (previously only `inline`) for consistency with the variadic overload below;
// a constexpr overload set cannot be constant-evaluated if its terminator is
// not constexpr. constexpr implies inline, so linkage is unchanged.
constexpr size_t computeIndex()
{
    return 0;
}
// Folds the indices of all passed holds/variants into one flat index,
// mixed-radix: result = i0 + s0 * (i1 + s1 * (...)). Reports via
// on_bad_variant_access() (throws or terminates) if any operand is valueless.
template <class Impl, class... Impls>
constexpr size_t computeIndex(Impl&& impl, Impls&&... impls)
{
    if (impl.index() != variant_npos)
    {
        constexpr size_t size = std::remove_reference<Impl>::type::size;
        return impl.index() + size * computeIndex(impls...);
    }
    on_bad_variant_access();
}
// Strategy selector for index-less visitation: 0..4 use a switch of 1/2/4/8/16
// cases over the flat state index; -1 is the table fallback.
template <int I>
struct VisitHelper;
template <>
struct VisitHelper<-1>
{
    template <typename Functor, typename... Impl>
    static constexpr decltype(auto) issue(Functor&& functor, Impl&&... impl)
    {
        constexpr size_t size = TotalStates<Impl...>::value;
        size_t index = computeIndex(impl...);
        auto& entry = VisitHelperTable<Functor, Impl...>::template Instance<std::make_index_sequence<size>>::table(index);
        return entry(std::forward<Functor>(functor), std::forward<Impl>(impl)...);
    }
};
// Case-generator macros for the switch dispatchers below.
#define VISIT_1(n)                                                                                                     \
    case (n):                                                                                                          \
        return dispatcher::template issue<n, Functor>(std::forward<Functor>(functor), std::forward<Impl>(impl)...);
#define VISIT_2(n)                                                                                                     \
    VISIT_1(n)                                                                                                         \
    VISIT_1(n + 1)
#define VISIT_4(n)                                                                                                     \
    VISIT_2(n)                                                                                                         \
    VISIT_2(n + 2)
#define VISIT_8(n)                                                                                                     \
    VISIT_4(n)                                                                                                         \
    VISIT_4(n + 4)
#define VISIT_16(n)                                                                                                    \
    VISIT_8(n)                                                                                                         \
    VISIT_8(n + 8)
// Switch dispatchers; `default:` falls through into `case (0)`.
template <>
struct VisitHelper<0>
{
    template <typename Functor, typename... Impl>
    static constexpr decltype(auto) issue(Functor&& functor, Impl&&... impl)
    {
        size_t index = computeIndex(impl...);
        using dispatcher = Dispatcher<sizeof...(Impl)>;
        switch (index)
        {
            default:
                VISIT_1(0);
        }
    }
};
template <>
struct VisitHelper<1>
{
    template <typename Functor, typename... Impl>
    static constexpr decltype(auto) issue(Functor&& functor, Impl&&... impl)
    {
        size_t index = computeIndex(impl...);
        using dispatcher = Dispatcher<sizeof...(Impl)>;
        switch (index)
        {
            default:
                VISIT_2(0);
        }
    }
};
template <>
struct VisitHelper<2>
{
    template <typename Functor, typename... Impl>
    static constexpr decltype(auto) issue(Functor&& functor, Impl&&... impl)
    {
        size_t index = computeIndex(impl...);
        using dispatcher = Dispatcher<sizeof...(Impl)>;
        switch (index)
        {
            default:
                VISIT_4(0);
        }
    }
};
template <>
struct VisitHelper<3>
{
    template <typename Functor, typename... Impl>
    static constexpr decltype(auto) issue(Functor&& functor, Impl&&... impl)
    {
        size_t index = computeIndex(impl...);
        using dispatcher = Dispatcher<sizeof...(Impl)>;
        switch (index)
        {
            default:
                VISIT_8(0);
        }
    }
};
template <>
struct VisitHelper<4>
{
    template <typename Functor, typename... Impl>
    static constexpr decltype(auto) issue(Functor&& functor, Impl&&... impl)
    {
        size_t index = computeIndex(impl...);
        using dispatcher = Dispatcher<sizeof...(Impl)>;
        switch (index)
        {
            default:
                VISIT_16(0);
        }
    }
};
#undef VISIT_1
#undef VISIT_2
#undef VISIT_4
#undef VISIT_8
#undef VISIT_16
// Entry point (lets the template parameters auto-deduce): visits the active
// alternative(s) of one or more variants, choosing a switch sized to the total
// combined state count or the table fallback (-1) above 16.
template <typename Functor, typename... Impl>
decltype(auto) visitor(Functor&& functor, Impl&&... impl)
{
    constexpr size_t size = TotalStates<Impl...>::value;
    constexpr int version = size <= 1 ? 0 : size <= 2 ? 1 : size <= 4 ? 2 : size <= 8 ? 3 : size <= 16 ? 4 : -1;
    return VisitHelper<version>::issue(std::forward<Functor>(functor), std::forward<Impl>(impl)...);
}
// Internal helper that generates smaller code when both holds are known to
// carry the SAME alternative index (copy/move construction and assignment of
// same-typed variants): a single index selects the slot in both.
template <size_t I, typename Functor, typename Hold1, typename Hold2>
decltype(auto) visitSameOnce(Functor&& functor, Hold1&& hold1, Hold2&& hold2)
{
    return std::forward<Functor>(functor)(
        variant_get_from_hold<I>(std::forward<Hold1>(hold1)), variant_get_from_hold<I>(std::forward<Hold2>(hold2)));
}
// Function-pointer table for the same-index fallback path (> 16 alternatives).
template <typename Functor,
          typename Hold1,
          typename Hold2,
          typename Ids = std::make_index_sequence<std::remove_reference<Hold1>::type::size>>
struct VisitSameHelperTable;
template <typename Functor, typename Hold1, typename Hold2, size_t... Ids>
struct VisitSameHelperTable<Functor, Hold1, Hold2, std::index_sequence<Ids...>>
{
    using return_type = decltype(std::declval<Functor>()(
        VariantGetFromHold<0>::get(std::declval<Hold1>()), VariantGetFromHold<0>::get(std::declval<Hold2>())));
    using f_table_type = return_type (*)(Functor&&, Hold1&&, Hold2&&);
    static f_table_type& table(size_t id)
    {
        static f_table_type tbl[] = { &visitSameOnce<Ids, Functor, Hold1, Hold2>... };
        return tbl[id];
    }
};
// Strategy selector for same-index visitation; specializations 0..4 use a
// switch, -1 is the function-pointer-table fallback.
template <int I>
struct VisitSameHelper;
template <>
struct VisitSameHelper<-1>
{
    template <typename Functor, typename Hold1, typename Hold2>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold1&& hold1, Hold2&& hold2)
    {
        // Fixed: `entry` was declared `constexpr auto&`, but table(index) is a
        // runtime lookup into a function-local static array and is not a
        // constant expression, so this specialization failed to compile when
        // instantiated (> 16 alternatives). The sibling fallbacks
        // (VisitWithIndexHelper<-1>, VisitHelper<-1>) correctly use `auto&`.
        auto& entry = VisitSameHelperTable<Functor, Hold1, Hold2>::table(index);
        return entry(std::forward<Functor>(functor), std::forward<Hold1>(hold1), std::forward<Hold2>(hold2));
    }
};
// Case-generator macros for the same-index switch dispatchers below.
#define VISIT_SAME_1(n)                                                                                                \
    case (n):                                                                                                          \
        return functor(variant_get_from_hold<n>(std::forward<Hold1>(hold1)),                                           \
                       variant_get_from_hold<n>(std::forward<Hold2>(hold2)));
#define VISIT_SAME_2(n)                                                                                                \
    VISIT_SAME_1(n)                                                                                                    \
    VISIT_SAME_1(n + 1)
#define VISIT_SAME_4(n)                                                                                                \
    VISIT_SAME_2(n)                                                                                                    \
    VISIT_SAME_2(n + 2)
#define VISIT_SAME_8(n)                                                                                                \
    VISIT_SAME_4(n)                                                                                                    \
    VISIT_SAME_4(n + 4)
#define VISIT_SAME_16(n)                                                                                               \
    VISIT_SAME_8(n)                                                                                                    \
    VISIT_SAME_8(n + 8)
// Switch dispatchers; `default:` falls through into `case (0)`.
template <>
struct VisitSameHelper<0>
{
    template <typename Functor, typename Hold1, typename Hold2>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold1&& hold1, Hold2&& hold2)
    {
        switch (index)
        {
            default:
                VISIT_SAME_1(0);
        }
    }
};
template <>
struct VisitSameHelper<1>
{
    template <typename Functor, typename Hold1, typename Hold2>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold1&& hold1, Hold2&& hold2)
    {
        switch (index)
        {
            default:
                VISIT_SAME_2(0);
        }
    }
};
template <>
struct VisitSameHelper<2>
{
    template <typename Functor, typename Hold1, typename Hold2>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold1&& hold1, Hold2&& hold2)
    {
        switch (index)
        {
            default:
                VISIT_SAME_4(0);
        }
    }
};
template <>
struct VisitSameHelper<3>
{
    template <typename Functor, typename Hold1, typename Hold2>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold1&& hold1, Hold2&& hold2)
    {
        switch (index)
        {
            default:
                VISIT_SAME_8(0);
        }
    }
};
template <>
struct VisitSameHelper<4>
{
    template <typename Functor, typename Hold1, typename Hold2>
    static constexpr decltype(auto) issue(Functor&& functor, size_t index, Hold1&& hold1, Hold2&& hold2)
    {
        switch (index)
        {
            default:
                VISIT_SAME_16(0);
        }
    }
};
#undef VISIT_SAME_1
#undef VISIT_SAME_2
#undef VISIT_SAME_4
#undef VISIT_SAME_8
#undef VISIT_SAME_16
// Entry point (lets the template parameters auto-deduce): visits the
// same-index alternative of two holds, selecting a switch sized to the
// alternative count or the table fallback (-1) above 16.
template <typename Functor, typename Hold1, typename Hold2>
decltype(auto) visitor_same(Functor&& functor, size_t typeIndex, Hold1&& hold1, Hold2&& hold2)
{
    constexpr int size = std::remove_reference<Hold1>::type::size;
    constexpr int version = size <= 1 ? 0 : size <= 2 ? 1 : size <= 4 ? 2 : size <= 8 ? 3 : size <= 16 ? 4 : -1;
    return VisitSameHelper<version>::issue(
        std::forward<Functor>(functor), typeIndex, std::forward<Hold1>(hold1), std::forward<Hold2>(hold2));
}
// Linear search for T in a candidate pack. Note: the single-candidate
// terminator returns I unconditionally, so a type that is NOT in the pack
// silently yields the index of the LAST candidate rather than an error.
// (Template parameter name "Candiates" is a pre-existing typo.)
template <std::size_t I, typename T, typename... Candiates>
struct GetIndexOfHelper;
template <std::size_t I, typename T, typename Candiate>
struct GetIndexOfHelper<I, T, Candiate>
{
    static constexpr size_t value = I;
};
template <std::size_t I, typename T, typename FirstCandiate, typename... Candiates>
struct GetIndexOfHelper<I, T, FirstCandiate, Candiates...>
{
    static constexpr size_t value =
        std::is_same<T, FirstCandiate>::value ? I : GetIndexOfHelper<I + 1, T, Candiates...>::value;
};
// Index of T within Candiates (exact type match).
template <typename T, typename... Candiates>
static constexpr size_t index_of_v = GetIndexOfHelper<0, T, Candiates...>::value;
// Index of T within Candiates after stripping cv/ref from T and const from
// each candidate; used by the converting constructors and assignment.
template <typename T, typename... Candiates>
static constexpr size_t index_of_init_v =
    GetIndexOfHelper<0, std::remove_const_t<std::remove_reference_t<T>>, std::remove_const_t<Candiates>...>::value;
// Storage base for variant: pairs the recursive union (VariantHold) with the
// active-alternative index, split on whether every alternative is trivially
// destructible.
template <bool IsTriviallyDesctructable, typename... Types>
class VariantBaseImpl;
// Trivial case: no destructor calls are ever needed; destroy() just marks the
// variant valueless.
template <typename... Types>
class VariantBaseImpl<true, Types...> : public VariantHold<true, Types...>
{
public:
    using hold = VariantHold<true, Types...>;
    // NOTE(review): size is read from the <false> hold here; both
    // specializations define the same value (1 + sizeof...(Types)), so this
    // is harmless, if surprising.
    static constexpr size_t size = VariantHold<false, Types...>::size;
    CARB_VIZ size_t m_index; // index of the active alternative, or variant_npos
    constexpr VariantBaseImpl() noexcept : hold{}, m_index(variant_npos)
    {
    }
    template <size_t I, class... Args>
    constexpr VariantBaseImpl(in_place_index_t<I>, Args&&... args) noexcept
        : hold{ in_place_index<I>, std::forward<Args>(args)... }, m_index(I)
    {
    }
    constexpr size_t index() const
    {
        return m_index;
    }
    void destroy()
    {
        m_index = variant_npos;
    }
};
// Non-trivial case: destroy() visits the active alternative to invoke its
// destructor, and a real destructor tears things down on scope exit.
template <typename... Types>
class VariantBaseImpl<false, Types...> : public VariantHold<false, Types...>
{
public:
    static constexpr size_t size = VariantHold<false, Types...>::size;
    using hold = VariantHold<false, Types...>;
    CARB_VIZ size_t m_index; // index of the active alternative, or variant_npos
    constexpr VariantBaseImpl() noexcept : hold{}, m_index(variant_npos)
    {
    }
    template <size_t I, class... Args>
    constexpr VariantBaseImpl(in_place_index_t<I>, Args&&... args) noexcept
        : hold{ in_place_index<I>, std::forward<Args>(args)... }, m_index(I)
    {
    }
    constexpr size_t index() const
    {
        return m_index;
    }
    void destroy()
    {
        if (m_index != variant_npos)
        {
            // Visit the active alternative and run its destructor in place.
            detail::visitor(
                [](auto& obj) {
                    using type = typename std::remove_reference<decltype(obj)>::type;
                    obj.~type();
                },
                *this);
        }
        m_index = variant_npos;
    }
    ~VariantBaseImpl()
    {
        destroy();
    }
};
// Selects the trivial or non-trivial base for a given alternative pack.
template <typename... Types>
using VariantBase = VariantBaseImpl<conjunction<std::is_trivially_destructible<Types>...>::value, Types...>;
} // namespace detail
//! Unit type usable as the first alternative of a variant to make it
//! default-constructible; mirrors std::monostate. All instances are
//! indistinguishable, so the comparison operators below return constants.
struct monostate
{
    constexpr monostate()
    {
    }
};
//! Every monostate equals every other monostate.
constexpr bool operator==(monostate, monostate) noexcept
{
    return true;
}
constexpr bool operator<=(monostate, monostate) noexcept
{
    return true;
}
constexpr bool operator>=(monostate, monostate) noexcept
{
    return true;
}
//! No monostate is ever unequal to, less than, or greater than another.
constexpr bool operator!=(monostate, monostate) noexcept
{
    return false;
}
constexpr bool operator<(monostate, monostate) noexcept
{
    return false;
}
constexpr bool operator>(monostate, monostate) noexcept
{
    return false;
}
// Maps index I to the I-th alternative type of a variant; mirrors
// std::variant_alternative. Recursion peels one type per step; the two
// index-0 specializations disambiguate the single-type and multi-type cases.
template <typename Type>
struct variant_alternative<0, variant<Type>>
{
    using type = Type;
};
template <typename Type, typename... OtherTypes>
struct variant_alternative<0, variant<Type, OtherTypes...>>
{
    using type = Type;
};
template <size_t I, typename Type, typename... OtherTypes>
struct variant_alternative<I, variant<Type, OtherTypes...>>
{
    using type = typename variant_alternative<I - 1, variant<OtherTypes...>>::type;
};
// Convenience alias, as in the standard library.
template <std::size_t I, typename T>
using variant_alternative_t = typename variant_alternative<I, T>::type;
/**
 * C++14-compatible implementation of a subset of std::variant.
 *
 * Storage and index bookkeeping live in detail::VariantBase; this class layers
 * the public constructors, assignment, and observers on top. A
 * default-constructed variant holds the first alternative; an index of
 * variant_npos means the variant is valueless (e.g. after being moved from).
 */
template <typename... Types>
class CARB_VIZ variant : public detail::VariantBase<Types...>
{
private:
    using base = detail::VariantBase<Types...>;
    using hold = typename base::hold;
    using self_type = variant<Types...>;

public:
    //! Default constructor: value-initializes the first alternative.
    constexpr variant() noexcept : base(in_place_index_t<0>())
    {
    }
    //! Copy constructor: copy-constructs the alternative held by \p other, or
    //! yields a valueless variant when \p other is valueless.
    constexpr variant(const variant& other) noexcept
    {
        base::m_index = other.index();
        if (base::m_index != variant_npos)
        {
            detail::visitor_same(
                [](auto& dest, auto& src) {
                    using type = typename std::remove_reference<decltype(src)>::type;
                    new (&dest) type(src);
                },
                other.index(), *this, other);
            base::m_index = other.index();
        }
    }
    //! Move constructor: move-constructs the alternative held by \p other and
    //! leaves \p other valueless.
    constexpr variant(variant&& other) noexcept
    {
        base::m_index = other.index();
        if (base::m_index != variant_npos)
        {
            CARB_ASSERT(base::m_index < sizeof...(Types));
            detail::visitor_same(
                [](auto& dest, auto&& src) {
                    using type = typename std::remove_reference<decltype(src)>::type;
                    new (&dest) type(std::move(src));
                },
                other.index(), *this, other);
            other.base::m_index = variant_npos;
        }
    }
    //! Converting constructor from a const lvalue: copy-constructs the
    //! alternative whose (cv/ref-stripped) type matches T.
    template <class T>
    constexpr variant(const T& value) noexcept : base{ in_place_index_t<detail::index_of_init_v<T, Types...>>(), value }
    {
    }
    //! Converting constructor from a forwarding reference; excluded when T is
    //! this variant type so the copy/move constructors apply instead.
    //! Fixed: forwards with std::forward<T> (previously std::move), so lvalue
    //! arguments are copied instead of silently moved-from; the trait nesting
    //! is also corrected to strip the reference before const (remove_const
    //! through a reference is a no-op), properly excluding const variant
    //! rvalues.
    template <class T,
              typename std::enable_if<!std::is_same<variant, std::remove_const_t<std::remove_reference_t<T>>>::value,
                                      bool>::type = true>
    constexpr variant(T&& value) noexcept
        : base{ in_place_index_t<detail::index_of_init_v<T, Types...>>(), std::forward<T>(value) }
    {
    }
    //! In-place constructor selecting the alternative by type.
    template <class T, class... Args>
    constexpr explicit variant(in_place_type_t<T>, Args&&... args)
        : base{ in_place_index_t<detail::index_of_init_v<T, Types...>>(), std::forward<Args>(args)... }
    {
    }
    //! In-place constructor selecting the alternative by index.
    template <std::size_t I, class... Args>
    constexpr explicit variant(in_place_index_t<I>, Args&&... args)
        : base{ in_place_index_t<I>(), std::forward<Args>(args)... }
    {
    }
    //! Copy assignment: destroys the current alternative, then copy-constructs
    //! the one held by \p rhs (destroy-then-copy, not copy-and-swap).
    constexpr variant& operator=(const variant& rhs)
    {
        if (this != &rhs)
        {
            if (base::m_index != variant_npos)
            {
                base::destroy();
            }
            if (rhs.base::m_index != variant_npos)
            {
                detail::visitor_same(
                    [](auto& dest, auto& src) {
                        using type = typename std::remove_reference<decltype(src)>::type;
                        new (&dest) type(src);
                    },
                    rhs.index(), *this, rhs);
                base::m_index = rhs.base::m_index;
            }
        }
        return *this;
    }
    //! Move assignment: destroys the current alternative, move-constructs the
    //! one held by \p rhs and leaves \p rhs valueless.
    constexpr variant& operator=(variant&& rhs)
    {
        if (this == &rhs)
            return *this;
        if (base::m_index != variant_npos)
        {
            base::destroy();
        }
        base::m_index = rhs.base::m_index;
        if (base::m_index != variant_npos)
        {
            CARB_ASSERT(base::m_index < sizeof...(Types));
            detail::visitor_same(
                [](auto& dest, auto&& src) {
                    using type = typename std::remove_reference<decltype(src)>::type;
                    new (&dest) type(std::move(src));
                },
                rhs.index(), *this, rhs);
            rhs.base::m_index = variant_npos;
        }
        return *this;
    }
    //! Converting assignment from a value: assigns in place when the matching
    //! alternative is already active, otherwise destroys the current one and
    //! constructs the new alternative.
    //! Fixed: forwards with std::forward<T> (previously std::move), so lvalue
    //! arguments are copied instead of moved-from; trait nesting corrected as
    //! in the converting constructor above.
    template <class T,
              typename std::enable_if<!std::is_same<variant, std::remove_const_t<std::remove_reference_t<T>>>::value,
                                      bool>::type = true>
    variant& operator=(T&& t) noexcept
    {
        constexpr size_t I = detail::index_of_init_v<T, Types...>;
        using type = variant_alternative_t<I, self_type>;
        type& v = detail::variant_get_from_hold<I>(*static_cast<hold*>(this));
        if (index() == I)
        {
            v = std::forward<T>(t);
        }
        else
        {
            base::destroy();
            new (&v) type(std::forward<T>(t));
            base::m_index = I;
        }
        return *this;
    }
    //! \returns true if the variant holds no alternative (index() == variant_npos).
    constexpr bool valueless_by_exception() const noexcept
    {
        return base::m_index == variant_npos;
    }
    //! Destroys the current alternative and constructs alternative T in place
    //! from \p args; \returns a reference to the new value.
    //! Fixed: the alternative index is computed once from the cv/ref-stripped
    //! type (previously the placement address used the unstripped T, which
    //! could disagree with the recorded index), and m_index is updated only
    //! after construction succeeds, so a throwing constructor leaves the
    //! variant valueless instead of recording an unconstructed alternative.
    template <class T, class... Args>
    auto emplace(Args&&... args) -> std::remove_const_t<std::remove_reference_t<T>>&
    {
        using place_type = std::remove_const_t<std::remove_reference_t<T>>;
        constexpr size_t I = detail::index_of_v<place_type, Types...>;
        base::destroy(); // leaves m_index == variant_npos
        place_type* p =
            new (&detail::variant_get_from_hold<I>(static_cast<hold&>(*this))) place_type(std::forward<Args>(args)...);
        base::m_index = I;
        return *p;
    }
    //! \returns the zero-based index of the active alternative, or
    //! variant_npos when valueless.
    constexpr std::size_t index() const noexcept
    {
        return base::m_index;
    }
};
//! Checks whether \p v currently holds the alternative corresponding to type T.
template <class T, class... Types>
constexpr bool holds_alternative(const variant<Types...>& v) noexcept
{
    // Map T to its alternative index once, then compare against the active index.
    constexpr std::size_t expected = detail::index_of_v<T, Types...>;
    return expected == v.index();
}
//! Returns a pointer to the I-th alternative if \p pv currently holds it, otherwise nullptr.
template <std::size_t I, class... Types>
constexpr std::add_pointer_t<variant_alternative_t<I, variant<Types...>>> get_if(variant<Types...>* pv) noexcept
{
    // Null when pv itself is null or a different alternative is active.
    if (pv == nullptr || pv->index() != I)
    {
        return nullptr;
    }
    return &detail::variant_get_from_hold<I>(*pv);
}
template <std::size_t I, class... Types>
constexpr std::add_pointer_t<const variant_alternative_t<I, variant<Types...>>> get_if(const variant<Types...>* pv) noexcept
{
return (pv && I == pv->index()) ? &detail::variant_get_from_hold<I>(*pv) : nullptr;
}
//! Returns a pointer to the alternative of type T if \p pv currently holds it, else nullptr.
template <class T, class... Types>
constexpr std::add_pointer_t<T> get_if(variant<Types...>* pv) noexcept
{
    // Translate the type into its alternative index, then reuse the index overload.
    constexpr std::size_t alt = detail::index_of_v<T, Types...>;
    return get_if<alt>(pv);
}
template <class T, class... Types>
constexpr std::add_pointer_t<const T> get_if(const variant<Types...>* pv) noexcept
{
return get_if<detail::index_of_v<T, Types...>>(pv);
}
// Don't support moves yet...
//! Returns a reference to the I-th alternative; invokes detail::on_bad_variant_access()
//! if a different alternative (or none) is held.
template <std::size_t I, class... Types>
constexpr variant_alternative_t<I, variant<Types...>>& get(variant<Types...>& v)
{
    auto ptr = get_if<I>(&v);
    if (!ptr)
    {
        detail::on_bad_variant_access();
    }
    return *ptr;
}
//! Const overload: returns a reference to the I-th alternative; invokes
//! detail::on_bad_variant_access() if a different alternative (or none) is held.
template <std::size_t I, class... Types>
constexpr const variant_alternative_t<I, variant<Types...>>& get(const variant<Types...>& v)
{
    auto ptr = get_if<I>(&v);
    if (!ptr)
    {
        detail::on_bad_variant_access();
    }
    return *ptr;
}
//! Returns a reference to the alternative of type T; invokes
//! detail::on_bad_variant_access() if a different alternative (or none) is held.
template <class T, class... Types>
constexpr T& get(variant<Types...>& v)
{
    auto ptr = get_if<T>(&v);
    if (!ptr)
    {
        detail::on_bad_variant_access();
    }
    return *ptr;
}
//! Const overload: returns a reference to the alternative of type T; invokes
//! detail::on_bad_variant_access() if a different alternative (or none) is held.
template <class T, class... Types>
constexpr const T& get(const variant<Types...>& v)
{
    auto ptr = get_if<T>(&v);
    if (!ptr)
    {
        detail::on_bad_variant_access();
    }
    return *ptr;
}
// Comparison
//! Equality: true when both variants hold the same alternative and the held values
//! compare equal.
template <class... Types>
constexpr bool operator==(const variant<Types...>& v, const variant<Types...>& w)
{
    // NOTE(review): if both variants are valueless their indices compare equal and
    // visitor_same is invoked with variant_npos -- presumably it yields true there
    // (the standard requires two valueless variants to compare equal); confirm against
    // detail::visitor_same.
    return v.index() == w.index() &&
           detail::visitor_same([](const auto& a, const auto& b) { return a == b; }, v.index(), v, w);
}
//! Inequality: true when the variants hold different alternatives, or the same
//! alternative with values that compare unequal.
template <class... Types>
constexpr bool operator!=(const variant<Types...>& v, const variant<Types...>& w)
{
    // NOTE(review): when both variants are valueless this calls visitor_same with
    // variant_npos -- presumably it yields false there so two valueless variants
    // compare equal; confirm against detail::visitor_same.
    return v.index() != w.index() ||
           detail::visitor_same([](const auto& a, const auto& b) { return a != b; }, v.index(), v, w);
}
//! Less-than: a valueless variant orders before any variant with a value; otherwise
//! ordering is by alternative index first, then by the held values.
template <class... Types>
constexpr bool operator<(const variant<Types...>& v, const variant<Types...>& w)
{
    // If w.valueless_by_exception(), false; otherwise if v.valueless_by_exception(), true
    if (w.valueless_by_exception())
        return false;
    if (v.valueless_by_exception())
        return true;
    // Different alternatives order by index; equal alternatives compare values.
    if (v.index() != w.index())
        return v.index() < w.index();
    return detail::visitor_same([](const auto& a, const auto& b) { return a < b; }, v.index(), v, w);
}
//! Greater-than: a valueless variant orders before any variant with a value; otherwise
//! ordering is by alternative index first, then by the held values.
template <class... Types>
constexpr bool operator>(const variant<Types...>& v, const variant<Types...>& w)
{
    // If v.valueless_by_exception(), false; otherwise if w.valueless_by_exception(), true
    if (v.valueless_by_exception())
        return false;
    if (w.valueless_by_exception())
        return true;
    // Different alternatives order by index; equal alternatives compare values.
    if (v.index() != w.index())
        return v.index() > w.index();
    return detail::visitor_same([](const auto& a, const auto& b) { return a > b; }, v.index(), v, w);
}
//! Less-or-equal: a valueless variant orders before any variant with a value; otherwise
//! ordering is by alternative index first, then by the held values.
template <class... Types>
constexpr bool operator<=(const variant<Types...>& v, const variant<Types...>& w)
{
    // If v.valueless_by_exception(), true; otherwise if w.valueless_by_exception(), false
    if (v.valueless_by_exception())
        return true;
    if (w.valueless_by_exception())
        return false;
    // Different alternatives order by index; equal alternatives compare values.
    if (v.index() != w.index())
        return v.index() < w.index();
    return detail::visitor_same([](const auto& a, const auto& b) { return a <= b; }, v.index(), v, w);
}
//! Greater-or-equal: a valueless variant orders before any variant with a value;
//! otherwise ordering is by alternative index first, then by the held values.
template <class... Types>
constexpr bool operator>=(const variant<Types...>& v, const variant<Types...>& w)
{
    // If w.valueless_by_exception(), true; otherwise if v.valueless_by_exception(), false
    if (w.valueless_by_exception())
        return true;
    if (v.valueless_by_exception())
        return false;
    // Different alternatives order by index; equal alternatives compare values.
    if (v.index() != w.index())
        return v.index() > w.index();
    return detail::visitor_same([](const auto& a, const auto& b) { return a >= b; }, v.index(), v, w);
}
// Currently we only support one Variant and not a list.
//! Applies \p vis to the value(s) currently held by \p variants and returns the result,
//! forwarding everything to detail::visitor. Mirrors std::visit; see the note above
//! regarding multi-variant support.
template <class Visitor, class... Variants>
constexpr decltype(auto) visit(Visitor&& vis, Variants&&... variants)
{
    return detail::visitor(std::forward<Visitor>(vis), std::forward<Variants>(variants)...);
}
} // namespace cpp
} // namespace carb
namespace std
{
template <>
struct hash<carb::cpp::monostate>
{
    //! monostate carries no state, so every instance hashes to the same arbitrary
    //! (randomly chosen) constant.
    CARB_NODISCARD size_t operator()(const carb::cpp::monostate&) const
    {
        constexpr size_t kArbitraryHash = 0x5f631327531c2962ull;
        return kArbitraryHash;
    }
};
//! Hash support for carb::cpp::variant: combines the hash of the active alternative's
//! index with the hash of its value, so equal values held in different alternatives
//! hash differently. A valueless variant hashes to 0.
template <typename... Types>
struct hash<carb::cpp::variant<Types...>>
{
    CARB_NODISCARD size_t operator()(const carb::cpp::variant<Types...>& vis) const
    {
        // Valueless
        if (vis.index() == carb::cpp::variant_npos)
        {
            return 0;
        }
        return carb::hashCombine(std::hash<size_t>{}(vis.index()), carb::cpp::visit(
                                                                       // Invoking dohash directly is a compile
                                                                       // error.
                                                                       [](const auto& s) { return dohash(s); }, vis));
    }

private:
    // This is a simple way to remove the C-Ref qualifiers.
    template <class T>
    static size_t dohash(const T& t)
    {
        return std::hash<T>{}(t);
    }
};
} // namespace std
|
omniverse-code/kit/include/carb/cpp/Latch.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<latch>` library.
#pragma once
#include "Atomic.h"
#include <algorithm>
#include <thread>
namespace carb
{
namespace cpp
{
// Handle case where Windows.h may have defined 'max'
#pragma push_macro("max")
#undef max
/**
* Implements a C++20 latch in C++14 semantics.
*
* The latch class is a downward counter of type @c std::ptrdiff_t which can be used to synchronize threads. The value
* of the counter is initialized on creation. Threads may block on the latch until the counter is decremented to zero.
* There is no possibility to increase or reset the counter, which makes the latch a single-use @ref barrier.
*
* @thread_safety Concurrent invocations of the member functions of @c latch, except for the destructor, do not
* introduce data races.
*
* Unlike @ref barrier, @c latch can be decremented by a participating thread more than once.
*/
class latch
{
    CARB_PREVENT_COPY_AND_MOVE(latch);

public:
    /**
     * The maximum value of counter supported by the implementation
     * @returns The maximum value of counter supported by the implementation.
     */
    static constexpr ptrdiff_t max() noexcept
    {
        // The counter is stored in a 32-bit atomic (see m_counter), hence the UINT_MAX cap.
        return ptrdiff_t(UINT_MAX);
    }

    /**
     * Constructor
     *
     * Constructs a latch and initializes its internal counter. The behavior is undefined if @p expected is negative or
     * greater than @ref max().
     * @param expected The initial value of the internal counter.
     */
    constexpr explicit latch(ptrdiff_t expected) noexcept : m_counter(uint32_t(::carb_min(max(), expected)))
    {
        CARB_ASSERT(expected >= 0 && expected <= max());
    }

    /**
     * Destructor
     *
     * The behavior is undefined if any thread is concurrently calling a member function of the latch.
     *
     * @note This implementation waits until all waiting threads have woken, but this is a stronger guarantee than the
     * standard.
     */
    ~latch() noexcept
    {
        // Wait until we have no waiters. This keeps m_counter alive until every thread
        // still inside _wait() has finished touching it.
        while (m_waiters.load(std::memory_order_acquire) != 0)
            std::this_thread::yield();
    }

    /**
     * Decrements the counter in a non-blocking manner
     *
     * Atomically decrements the internal counter by @p update without blocking the caller. If the count reaches zero,
     * all blocked threads are unblocked.
     *
     * If @p update is greater than the value of the internal counter or is negative, the behavior is undefined.
     *
     * This operation strongly happens-before all calls that are unblocked on this latch.
     *
     * @param update The value by which the internal counter is decreased.
     * @throws std::system_error According to the standard, but this implementation does not throw. Instead an assertion
     * occurs.
     */
    void count_down(ptrdiff_t update = 1) noexcept
    {
        CARB_ASSERT(update >= 0);
        // `fetch_sub` returns value before operation
        uint32_t count = m_counter.fetch_sub(uint32_t(update), std::memory_order_release);
        CARB_CHECK((count - uint32_t(update)) <= count); // Malformed if we go below zero or overflow
        if ((count - uint32_t(update)) == 0)
        {
            // Wake all waiters
            m_counter.notify_all();
        }
    }

    // Returns whether the latch has completed. Allowed to return spuriously false with very low probability.
    /**
     * Tests if the internal counter equals zero
     *
     * Returns @c true only if the internal counter has reached zero. This function may spuriously return @c false with
     * very low probability even if the internal counter has reached zero.
     *
     * @note The reason why a spurious result is permitted is to allow implementations to use a memory order more
     * relaxed than @c std::memory_order_seq_cst.
     *
     * @return With very low probability @c false, otherwise `cnt == 0` where `cnt` is the value of the internal
     * counter.
     */
    bool try_wait() const noexcept
    {
        return m_counter.load(std::memory_order_acquire) == 0;
    }

    /**
     * Blocks until the counter reaches zero
     *
     * Blocks the calling thread until the internal counter reaches zero. If it is zero already, returns immediately.
     * @throws std::system_error According to the standard, but this implementation does not throw.
     */
    void wait() const noexcept
    {
        uint32_t count = m_counter.load(std::memory_order_acquire);
        if (count != 0)
        {
            // Register as a waiter. The destructor spins until this registration is undone
            // by _wait(), guaranteeing it cannot finish while we still use m_counter.
            m_waiters.fetch_add(1, std::memory_order_relaxed);
            _wait(count);
        }
    }

    /**
     * Decrements the counter and blocks until it reaches zero
     *
     * Atomically decrements the internal counter by @p update and (if necessary) blocks the calling thread until the
     * counter reaches zero. Equivalent to `count_down(update); wait();`.
     *
     * If @p update is greater than the value of the internal counter or is negative, the behavior is undefined.
     *
     * @param update The value by which the internal counter is decreased.
     * @throws std::system_error According to the standard, but this implementation does not throw. Instead an assertion
     * occurs.
     */
    void arrive_and_wait(ptrdiff_t update = 1) noexcept
    {
        uint32_t original = m_counter.load(std::memory_order_acquire);
        if (original == uint32_t(update))
        {
            // We're the last and won't be waiting.
#if CARB_ASSERT_ENABLED
            // The exchange lets us verify (debug builds only) that no other thread raced
            // us between the load above and this point; callers violating the
            // update <= counter precondition would trip this assert.
            uint32_t updated = m_counter.exchange(0, std::memory_order_release);
            CARB_ASSERT(updated == original);
#else
            m_counter.store(0, std::memory_order_release);
#endif
            // Wake all waiters
            m_counter.notify_all();
            return;
        }
        // Speculatively register as a waiter (before decrementing, so the destructor
        // cannot complete while we may still need to wait on m_counter).
        m_waiters.fetch_add(1, std::memory_order_relaxed);
        original = m_counter.fetch_sub(uint32_t(update), std::memory_order_release);
        if (CARB_UNLIKELY(original == uint32_t(update)))
        {
            // Our decrement was actually the last one: wake all waiters and undo our
            // speculative waiter registration.
            m_counter.notify_all();
            m_waiters.fetch_sub(1, std::memory_order_release);
        }
        else
        {
            CARB_CHECK(original >= uint32_t(update)); // Malformed if we underflow
            _wait(original - uint32_t(update));
        }
    }

private:
    // Current countdown value; also the word that waiting threads block on via
    // atomic wait/notify.
    mutable atomic_uint32_t m_counter;
    // Number of threads currently blocked in _wait(); the destructor spins until zero.
    mutable atomic_uint32_t m_waiters{ 0 };

    // Blocks until m_counter reaches zero, then unregisters this thread as a waiter.
    // @param count The last observed (non-zero) value of m_counter.
    CARB_ALWAYS_INLINE void _wait(uint32_t count) const noexcept
    {
        CARB_ASSERT(count != 0);
        do
        {
            // wait() returns when the stored value differs from `count`; reload with
            // acquire to decide whether we are actually done or saw an intermediate value.
            m_counter.wait(count, std::memory_order_relaxed);
            count = m_counter.load(std::memory_order_acquire);
        } while (count != 0);
        // Done waiting
        m_waiters.fetch_sub(1, std::memory_order_release);
    }
};
#pragma pop_macro("max")
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/StdDef.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<cstddef>` library.
#pragma once
#include "TypeTraits.h"
namespace carb
{
namespace cpp
{
//! A distinct byte type mirroring C++17 `std::byte`: like `char` and `unsigned char` it
//! may be used to access the raw memory occupied by other objects, but it is neither a
//! character type nor an arithmetic type. A byte is only a collection of bits, and only
//! the bitwise operators below are defined for it.
enum class byte : unsigned char
{
};

#ifndef DOXYGEN_BUILD
//! Converts a byte to the requested integer type.
template <class IntegerType, std::enable_if_t<std::is_integral<IntegerType>::value, bool> = false>
constexpr IntegerType to_integer(byte value) noexcept
{
    return static_cast<IntegerType>(value);
}

//! Bitwise OR of two bytes.
constexpr byte operator|(byte lhs, byte rhs) noexcept
{
    return static_cast<byte>(static_cast<unsigned char>(lhs) | static_cast<unsigned char>(rhs));
}

//! Bitwise AND of two bytes.
constexpr byte operator&(byte lhs, byte rhs) noexcept
{
    return static_cast<byte>(static_cast<unsigned char>(lhs) & static_cast<unsigned char>(rhs));
}

//! Bitwise XOR of two bytes.
constexpr byte operator^(byte lhs, byte rhs) noexcept
{
    return static_cast<byte>(static_cast<unsigned char>(lhs) ^ static_cast<unsigned char>(rhs));
}

//! Bitwise complement of a byte.
constexpr byte operator~(byte value) noexcept
{
    return static_cast<byte>(~static_cast<unsigned char>(value));
}

//! Left-shifts a byte by an integral amount.
template <class IntegerType, std::enable_if_t<std::is_integral<IntegerType>::value, bool> = false>
constexpr byte operator<<(byte value, IntegerType shift) noexcept
{
    return static_cast<byte>(static_cast<unsigned char>(value) << shift);
}

//! Right-shifts a byte by an integral amount.
template <class IntegerType, std::enable_if_t<std::is_integral<IntegerType>::value, bool> = false>
constexpr byte operator>>(byte value, IntegerType shift) noexcept
{
    return static_cast<byte>(static_cast<unsigned char>(value) >> shift);
}

//! In-place left shift.
template <class IntegerType, std::enable_if_t<std::is_integral<IntegerType>::value, bool> = false>
constexpr byte& operator<<=(byte& value, IntegerType shift) noexcept
{
    return value = value << shift;
}

//! In-place right shift.
template <class IntegerType, std::enable_if_t<std::is_integral<IntegerType>::value, bool> = false>
constexpr byte& operator>>=(byte& value, IntegerType shift) noexcept
{
    return value = value >> shift;
}

//! In-place bitwise OR.
constexpr byte& operator|=(byte& lhs, byte rhs) noexcept
{
    return lhs = lhs | rhs;
}

//! In-place bitwise AND.
constexpr byte& operator&=(byte& lhs, byte rhs) noexcept
{
    return lhs = lhs & rhs;
}

//! In-place bitwise XOR.
constexpr byte& operator^=(byte& lhs, byte rhs) noexcept
{
    return lhs = lhs ^ rhs;
}
#endif
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Exception.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<exception>` library.
#pragma once
#include "../Defines.h"
#include <exception>
#if CARB_PLATFORM_LINUX
# include <cxxabi.h>
# include <cstring>
#elif CARB_PLATFORM_WINDOWS
# include <cstring>
extern "C"
{
void* __cdecl _getptd(void);
}
#endif
// __cpp_lib_uncaught_exceptions -> https://en.cppreference.com/w/User:D41D8CD98F/feature_testing_macros
// Visual Studio 14.0+ supports N4152 std::uncaught_exceptions() but doesn't define __cpp_lib_uncaught_exceptions
#if (defined(__cpp_lib_uncaught_exceptions) && __cpp_lib_uncaught_exceptions >= 201411) || \
(defined(_MSC_VER) && _MSC_VER >= 1900)
# define CARB_HAS_UNCAUGHT_EXCEPTIONS 1
#else
# define CARB_HAS_UNCAUGHT_EXCEPTIONS 0
#endif
namespace carb
{
namespace cpp
{
//
// various ways to backport this, mainly come from accessing the systems thread local data and knowing
// the exact offset the exception count is stored in.
//
// * https://beta.boost.org/doc/libs/develop/boost/core/uncaught_exceptions.hpp
// * https://github.com/facebook/folly/blob/master/folly/lang/UncaughtExceptions.h
// * https://github.com/bitwizeshift/BackportCpp/blob/master/include/bpstd/exception.hpp
//
//! Returns the number of uncaught exceptions in the current thread, equivalent to C++17
//! `std::uncaught_exceptions()`.
//!
//! Where the standard facility is unavailable, the count is read directly out of the
//! C++ runtime's thread-local data (see the links above for the techniques used); the
//! byte offsets are implementation details of the respective runtimes and must be kept
//! in sync with them.
inline int uncaught_exceptions() noexcept
{
#if CARB_HAS_UNCAUGHT_EXCEPTIONS
    // Prefer the standard library implementation when it exists (C++17, or VS2015+).
    return static_cast<int>(std::uncaught_exceptions());
#elif CARB_PLATFORM_LINUX
    using byte = unsigned char;
    int count{};
    // __cxa_eh_globals::uncaughtExceptions, x32 offset - 0x4, x64 - 0x8
    // (sizeof(void*) conveniently equals the offset on both architectures)
    const auto* ptr = reinterpret_cast<const byte*>(::abi::__cxa_get_globals()) + sizeof(void*);
    std::memcpy(&count, ptr, sizeof(count));
    return count;
#elif CARB_PLATFORM_WINDOWS
    using byte = unsigned char;
    int count{};
    const auto offset = (sizeof(void*) == 8u ? 0x100 : 0x90);
    const auto* ptr = static_cast<const byte*>(::_getptd()) + offset;
    // _tiddata::_ProcessingThrow, x32 offset - 0x90, x64 - 0x100
    std::memcpy(&count, ptr, sizeof(count));
    return count;
#else
    // return C++11/14 uncaught_exception() is basically broken for any nested exceptions
    // as only going from 0 exceptions to 1 exception will be detected
    return static_cast<int>(std::uncaught_exception());
#endif
}
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Span.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<span>` library.
#pragma once
#include "../Defines.h"
#include "TypeTraits.h"
#include "detail/ImplData.h"
#include "StdDef.h"
#include "../../omni/detail/PointerIterator.h"
#include <array>
namespace carb
{
namespace cpp
{
//! A constant of type size_t that is used to differentiate carb::cpp::span of static and dynamic extent
//! @see span
constexpr size_t dynamic_extent = size_t(-1);
template <class T, size_t Extent>
class span;
//! \cond DEV
namespace detail
{
//! Obtains the raw address represented by a pointer: a raw pointer already is one.
//! Function pointers are rejected (ill-formed), matching C++20 std::to_address.
template <class T>
constexpr T* to_address(T* ptr) noexcept
{
    static_assert(!std::is_function<T>::value, "Ill-formed if T is function type");
    return ptr;
}

//! Obtains the raw address represented by a fancy pointer by recursing on whatever its
//! operator->() yields. Participates only when operator->() returns a raw pointer.
template <class Ptr, std::enable_if_t<std::is_pointer<decltype(std::declval<Ptr>().operator->())>::value, bool> = false>
constexpr auto to_address(const Ptr& fancy) noexcept
{
    return to_address(fancy.operator->());
}
//! Metafunction computing the static extent produced by taking a subspan with template
//! parameters <Offset, Count> from a span of extent Extent.
//! Primary template: an explicit Count is the resulting extent.
template <size_t Extent, size_t Offset, size_t Count>
struct DetermineSubspanExtent
{
    constexpr static size_t value = Count;
};

//! Count == dynamic_extent with a static source extent: the remainder after Offset.
template <size_t Extent, size_t Offset>
struct DetermineSubspanExtent<Extent, Offset, dynamic_extent>
{
    constexpr static size_t value = Extent - Offset;
};

//! Both the source extent and Count are dynamic: the result is dynamic as well.
template <size_t Offset>
struct DetermineSubspanExtent<dynamic_extent, Offset, dynamic_extent>
{
    constexpr static size_t value = dynamic_extent;
};
//! Trait: true_type exactly when T is a std::array specialization. Used to exclude
//! std::array from constructors that would otherwise also match it.
template <class T>
struct IsStdArray : public std::false_type
{
};

template <class T, size_t N>
struct IsStdArray<std::array<T, N>> : public std::true_type
{
};
//! Trait: true_type exactly when T is a carb::cpp::span specialization. Used to exclude
//! span itself from constructors that would otherwise also match it.
template <class T>
struct IsSpan : public std::false_type
{
};

template <class T, size_t N>
struct IsSpan<span<T, N>> : public std::true_type
{
};
// GCC instantiates some functions always so they cannot use static_assert, but throwing an exception is allowed from a
// constexpr function (which will act as a compile failure if constexpr) so fall back to that.
#if CARB_EXCEPTIONS_ENABLED
# define CARB_ALWAYS_FAIL(msg) throw std::out_of_range(msg)
# define CARB_THROW_OR_CHECK(check, msg) \
if (!CARB_LIKELY(check)) \
throw std::out_of_range(msg)
#else
# define CARB_THROW_OR_CHECK(check, msg) CARB_CHECK((check), msg)
# if CARB_COMPILER_MSC
# define CARB_ALWAYS_FAIL(msg) static_assert(false, msg)
# else
# define CARB_ALWAYS_FAIL(msg) CARB_FATAL_UNLESS(false, msg)
# endif
#endif
} // namespace detail
//! \endcond
/**
* An object that refers to a contiguous sequence of objects.
*
* The class template span describes an object that can refer to a contiguous sequence of objects with the first element
* of the sequence at position zero. A span can either have a \a static extent, in which case the number of elements in
* the sequence is known at compile-time and encoded in the type, or a \a dynamic extent.
*
* If a span has \a dynamic extent, this implementation holds two members: a pointer to T and a size. A span with
* \a static extent has only one member: a pointer to T.
*
* Every specialization of \c span is a \a TriviallyCopyable type.
*
* This implementation of \c span is a C++14-compatible implementation of the
* [C++20 std::span](https://en.cppreference.com/w/cpp/container/span), as such certain constructors that involve
* `ranges` or `concepts` are either not implemented or are implemented using C++14 paradigms.
*
* This implementation of \c span is a guaranteed @rstref{ABI- and interop-safe <abi-compatibility>} type.
*
* @note For function definitions below the following definitions are used: An ill-formed program will generate a
* compiler error via `static_assert`. Undefined behavior is typically signaled by throwing a `std::out_of_range`
* exception as this is allowed for `constexpr` functions to cause a compiler error, however, if exceptions are
* disabled a \ref CARB_CHECK will occur (this also disables `constexpr` as \ref CARB_CHECK is not `constexpr`).
* @tparam T Element type; must be a complete object type that is not an abstract class type
* @tparam Extent The number of elements in the sequence, or \ref dynamic_extent if dynamic
*/
template <class T, size_t Extent = dynamic_extent>
class span
{
// NOTE: This implementation is used for Extent != 0 && Extent != dynamic_extent, both of which have specializations
public:
using element_type = T; //!< The element type `T`
using value_type = std::remove_cv_t<T>; //!< The value type; const/volatile is removed from `T`
using size_type = std::size_t; //!< The size type `size_t`
using difference_type = std::ptrdiff_t; //!< The difference type `ptrdiff_t`
using pointer = T*; //!< The pointer type `T*`
using const_pointer = const T*; //!< The const pointer type `const T*`
using reference = T&; //!< The reference type `T&`
using const_reference = const T&; //!< The const reference type `const T&`
//! The iterator type \ref omni::detail::PointerIterator
using iterator = omni::detail::PointerIterator<pointer, span>;
//! The reverse iterator type `std::reverse_iterator<iterator>`
using reverse_iterator = std::reverse_iterator<iterator>;
//! The number of elements in the sequence, or \ref dynamic_extent if dynamic
constexpr static std::size_t extent = Extent;
#ifdef DOXYGEN_BUILD
//! Default constructor
//!
//! Constructs an empty span whose \ref data() is `nullptr` and \ref size() is `0`. This constructor participates in
//! overload resolution only if `extent == 0 || extent == dynamic_extent`.
constexpr span() noexcept
{
}
#endif
//! Constructs a \ref span that is a view over the range `[first, first + count)`.
//!
//! Behavior is undefined if `count != extent` (for static extent) or \p first does not model `contiguous_iterator`.
//! However, since `contiguous_iterator` is not available until C++20, instead this function does not participate in
//! overload resolution unless `std::iterator_traits<It>::iterator_category == std::random_access_iterator_tag`.
//! @param first An iterator or pointer type that models C++20 `contiguous_iterator`
//! @param count The number of elements from \p first to include in the span. If `extent != dynamic_extent` then
//! this must match \ref extent.
template <class It CARB_NO_DOC(
,
std::enable_if_t<std::is_same<typename std::iterator_traits<It>::iterator_category, std::random_access_iterator_tag>::value,
bool> = false)>
constexpr explicit span(It first, size_type count)
{
CARB_THROW_OR_CHECK(extent == count, "Behavior is undefined if count != extent");
m_p = detail::to_address(first);
}
//! Constructs a \ref span that is a view over the range `[first, last)`.
//!
//! Behavior is undefined if `(last - first) != extent` (for static extent) or if `[first, last)` does not represent
//! a contiguous range. This function differs significantly from the C++20 definition since the concepts of
//! `contiguous_iterator` and `sized_sentinel_for` are not available. Since these concepts are not available until
//! C++20, instead this function does not participate in overload resolution unless
//! `std::iterator_traits<It>::iterator_category == std::random_access_iterator_tag`. Also \p first and \p last must
//! be a matching iterator type.
//! @param first An iterator or pointer type that represents the first element in the span.
//! @param last An iterator or pointer type that represents the past-the-end element in the span.
template <class It CARB_NO_DOC(
,
std::enable_if_t<std::is_same<typename std::iterator_traits<It>::iterator_category, std::random_access_iterator_tag>::value,
bool> = false)>
constexpr explicit span(It first, It last)
{
CARB_THROW_OR_CHECK((last - first) == extent, "Behavior is undefined if (last - first) != extent");
m_p = detail::to_address(first);
}
//! Constructs a span that is a view over an array.
//!
//! Behavior is undefined if `extent != dynamic_extent && N != extent`.
//! @param arr The array to view
template <std::size_t N>
constexpr span(type_identity_t<element_type> (&arr)[N]) noexcept
{
static_assert(N == extent, "Undefined if N != extent");
m_p = cpp::data(arr);
}
//! Constructs a span that is a view over an array.
//!
//! Behavior is undefined if `extent != dynamic_extent && N != extent`.
//! @param arr The array to view
template <class U, std::size_t N CARB_NO_DOC(, std::enable_if_t<std::is_convertible<U, element_type>::value, bool> = false)>
constexpr span(std::array<U, N>& arr) noexcept
{
static_assert(N == extent, "Undefined if N != extent");
m_p = cpp::data(arr);
}
//! Constructs a span that is a view over an array.
//!
//! Behavior is undefined if `extent != dynamic_extent && N != extent`.
//! @param arr The array to view
template <class U, std::size_t N CARB_NO_DOC(, std::enable_if_t<std::is_convertible<U, element_type>::value, bool> = false)>
constexpr span(const std::array<U, N>& arr) noexcept
{
static_assert(N == extent, "Undefined if N != extent");
m_p = cpp::data(arr);
}
// template< class R >
// explicit(extent != dynamic_extent)
// constexpr span( R&& range );
// (Constructor not available without ranges, but approximate constructor follows)
//! Constructs a span that is a view over a range.
//!
//! The behavior is undefined if any of the following are true:
//! * `R` does not actually model `contiguous_range` and `sized_range`
//! * if `R` does not model `borrowed_range` while `element_type` is non-const
//! * `extent` is not `dynamic_extent` and the size of the range != `extent`
//!
//! @tparam R A range type. Since this implementation is for pre-C++20 and ranges are not available, this is an
//! approximation of a `range`: This type must have `data()` and `size()` member functions that must be
//! convertible to the \ref pointer and \ref size_type types respectively.
//! @param range The range to view. Behavior is undefined if `[range.data(), range.data() + range.size())` is not a
//! contiguous range or cannot be borrowed (i.e. it is a temporary that will expire leaving a danging pointer).
template <class R CARB_NO_DOC(
,
std::enable_if_t<detail::IsConvertibleRange<cpp::remove_cvref_t<R>, pointer>::value &&
// These types have a separate constructor
!detail::IsStdArray<cpp::remove_cvref_t<R>>::value &&
!detail::IsSpan<cpp::remove_cvref_t<R>>::value && !std::is_array<cpp::remove_cvref_t<R>>::value,
bool> = false)>
constexpr explicit span(R&& range)
{
CARB_THROW_OR_CHECK(range.size() == extent, "Behavior is undefined if R.size() != extent");
m_p = range.data();
}
#ifndef DOXYGEN_BUILD
template <class U,
std::size_t N,
std::enable_if_t<N == dynamic_extent && std::is_convertible<U, element_type>::value, bool> = false>
constexpr explicit span(const span<U, N>& source) noexcept : m_p(source.data())
{
CARB_CHECK(source.size() == extent); // specified as `noexcept` so we cannot throw
}
template <class U,
std::size_t N,
std::enable_if_t<N != dynamic_extent && std::is_convertible<U, element_type>::value, bool> = false>
#else
//! Converting constructor from another span.
//!
//! Behavior is undefined if `extent != dynamic_extent && source.size() != extent`.
//! @tparam U `element_type` of \p source; must be convertible to `element_type`
//! @tparam N `extent` of \p source
//! @param source The span to convert from
template <class U, std::size_t N>
#endif
constexpr span(const span<U, N>& source) noexcept : m_p(source.data())
{
static_assert(N == extent, "Undefined if N != extent");
}
    //! Copy constructor
    //! @param other A span to copy from
    constexpr span(const span& other) noexcept = default;
    //! Assignment operator
    //!
    //! @param other A span to copy from
    //! @returns *this
    constexpr span& operator=(const span& other) noexcept = default;
    //! Returns an iterator to the first element of the span.
    //!
    //! If the span is empty, the returned iterator will be equal to \ref end().
    //! @returns an \ref iterator to the first element of the span
    constexpr iterator begin() const noexcept
    {
        return iterator(m_p);
    }
    //! Returns an iterator to the element following the last element of the span.
    //!
    //! This element acts as a placeholder; attempting to access it results in undefined behavior.
    //! @returns an \ref iterator to the element following the last element of the span
    constexpr iterator end() const noexcept
    {
        return iterator(m_p + extent);
    }
    //! Returns a reverse iterator to the first element of the reversed span.
    //!
    //! It corresponds to the last element of the non-reversed span. If the span is empty, the returned iterator is
    //! equal to \ref rend().
    //! @returns a \ref reverse_iterator to the first element of the reversed span
    constexpr reverse_iterator rbegin() const noexcept
    {
        return reverse_iterator(end());
    }
    //! Returns a reverse iterator to the element following the last element of the reversed span.
    //!
    //! It corresponds to the element preceding the first element of the non-reversed span. This element acts as a
    //! placeholder, attempting to access it results in undefined behavior.
    //! @returns a \ref reverse_iterator to the element following the last element of the reversed span
    constexpr reverse_iterator rend() const noexcept
    {
        return reverse_iterator(begin());
    }
    //! Returns a reference to the first element in the span.
    //!
    //! Calling this on an empty span results in undefined behavior.
    //! @returns a reference to the first element
    constexpr reference front() const
    {
        // This primary template is only used when extent >= 1 (extent == 0 and dynamic_extent
        // have dedicated specializations below), so m_p always refers to at least one element.
        return *m_p;
    }
    //! Returns a reference to the last element in a span.
    //!
    //! Calling this on an empty span results in undefined behavior.
    //! @returns a reference to the last element
    constexpr reference back() const
    {
        return m_p[extent - 1];
    }
    //! Returns a reference to the element in the span at the given index.
    //!
    //! The behavior is undefined if \p index is out of range (i.e. if it is greater than or equal to \ref size() or
    //! the span is \ref empty()).
    //! @param index The index of the element to access
    //! @returns a reference to the element at position \p index
    constexpr reference operator[](size_type index) const
    {
        CARB_THROW_OR_CHECK(index < extent, "Behavior is undefined when index exceeds size()");
        return m_p[index];
    }
    //! Returns a pointer to the beginning of the sequence.
    //! @returns a pointer to the beginning of the sequence
    constexpr pointer data() const noexcept
    {
        return m_p;
    }
    //! Returns the number of elements in the span.
    //! @returns the number of elements in the span
    constexpr size_type size() const noexcept
    {
        return extent;
    }
    //! Returns the size of the sequence in bytes.
    //! @returns the size of the sequence in bytes
    constexpr size_type size_bytes() const noexcept
    {
        return sizeof(element_type) * extent;
    }
    //! Checks if the span is empty.
    //! @returns \c true if the span is empty (i.e. \ref size() == 0); \c false otherwise
    CARB_NODISCARD constexpr bool empty() const noexcept
    {
        // Never empty: this primary template requires a non-zero static extent
        // (the extent == 0 case is handled by a separate specialization).
        return false;
    }
    //! Obtains a subspan consisting of the first N elements of the sequence.
    //!
    //! The program is ill-formed if `Count > extent`. The behavior is undefined if `Count > size()`.
    //! @tparam Count the number of elements of the subspan
    //! @returns a span that is a view over the first `Count` elements of `*this`
    template <std::size_t Count>
    constexpr span<element_type, Count> first() const
    {
        static_assert(Count <= extent, "Program ill-formed if Count > extent");
        return span<element_type, Count>{ m_p, Count };
    }
    //! Obtains a subspan consisting of the first N elements of the sequence.
    //!
    //! The program is ill-formed if `Count > extent`. The behavior is undefined if `Count > size()`.
    //! @param Count the number of elements of the subspan
    //! @returns a span of dynamic extent that is a view over the first `Count` elements of `*this`
    constexpr span<element_type, dynamic_extent> first(size_type Count) const
    {
        CARB_THROW_OR_CHECK(Count <= extent, "Program ill-formed if Count > extent");
        return span<element_type, dynamic_extent>{ m_p, Count };
    }
    //! Obtains a subspan consisting of the last N elements of the sequence.
    //!
    //! The program is ill-formed if `Count > extent`. The behavior is undefined if `Count > size()`.
    //! @tparam Count the number of elements of the subspan
    //! @returns a span that is a view over the last `Count` elements of `*this`
    template <std::size_t Count>
    constexpr span<element_type, Count> last() const
    {
        static_assert(Count <= extent, "Program ill-formed if Count > extent");
        return span<element_type, Count>{ m_p + (extent - Count), Count };
    }
    //! Obtains a subspan consisting of the last N elements of the sequence.
    //!
    //! The program is ill-formed if `Count > extent`. The behavior is undefined if `Count > size()`.
    //! @param Count the number of elements of the subspan
    //! @returns a span of dynamic extent that is a view over the last `Count` elements of `*this`
    constexpr span<element_type, dynamic_extent> last(size_type Count) const
    {
        CARB_THROW_OR_CHECK(Count <= extent, "Program ill-formed if Count > extent");
        return span<element_type, dynamic_extent>{ m_p + (extent - Count), Count };
    }
    //! Obtains a subspan that is a view over Count elements of this span starting at Offset.
    //!
    //! If `Count` is \ref dynamic_extent, the number of elements in the subspan is \ref size() - `Offset` (i.e. it ends
    //! at the end of `*this`).
    //! Ill-formed if `Offset` is greater than \ref extent, or `Count` is not \ref dynamic_extent and `Count` is greater
    //! than \ref extent - `Offset`.
    //! Behavior is undefined if either `Offset` or `Count` is out of range. This happens if `Offset` is greater than
    //! \ref size(), or `Count` is not \ref dynamic_extent and `Count` is greater than \ref size() - `Offset`.
    //! The \ref extent of the returned span is determined as follows: if `Count` is not \ref dynamic_extent, `Count`;
    //! otherwise, if `Extent` is not \ref dynamic_extent, `Extent - Offset`; otherwise \ref dynamic_extent.
    //! @tparam Offset The offset within `*this` to start the subspan
    //! @tparam Count The length of the subspan, or \ref dynamic_extent to indicate the rest of the span.
    //! @returns A subspan of the given range
    template <std::size_t Offset, std::size_t Count = dynamic_extent>
    constexpr span<element_type, detail::DetermineSubspanExtent<extent, Offset, Count>::value> subspan() const
    {
        static_assert(Offset <= extent, "Ill-formed");
        static_assert(Count == dynamic_extent || (Offset + Count) <= extent, "Ill-formed");
        // carb_min() clamps a Count of dynamic_extent to the remaining length of the span.
        return span<element_type, detail::DetermineSubspanExtent<extent, Offset, Count>::value>{
            data() + Offset, carb_min(size() - Offset, Count)
        };
    }
    //! Obtains a subspan that is a view over Count elements of this span starting at Offset.
    //!
    //! If \p Count is \ref dynamic_extent, the number of elements in the subspan is \ref size() - `Offset` (i.e. it
    //! ends at the end of `*this`).
    //! Behavior is undefined if either \p Offset or \p Count is out of range. This happens if \p Offset is greater than
    //! \ref size(), or \p Count is not \ref dynamic_extent and \p Count is greater than \ref size() - \p Offset.
    //! The extent of the returned span is always \ref dynamic_extent.
    //! @param Offset The offset within `*this` to start the subspan
    //! @param Count The length of the subspan, or \ref dynamic_extent to indicate the rest of the span.
    //! @returns A subspan of the given range
    constexpr span<element_type, dynamic_extent> subspan(size_type Offset, size_type Count = dynamic_extent) const
    {
        CARB_THROW_OR_CHECK(Offset <= extent, "Program ill-formed if Offset > extent");
        CARB_THROW_OR_CHECK(Count == dynamic_extent || (Offset + Count) <= extent,
                            "Program ill-formed if Count is not dynamic_extent and is greater than Extent - Offset");
        return { data() + Offset, carb_min(size() - Offset, Count) };
    }
private:
    // Only the pointer is stored; the element count is the compile-time `extent`.
    pointer m_p;
};
// Compile-time check of the fixed-extent primary template (see the macro's definition for the
// exact guarantees it asserts).
CARB_ASSERT_INTEROP_SAFE(span<int, 1>);
// Doxygen can ignore the specializations
#ifndef DOXYGEN_BUILD
// Specialization for dynamic_extent
template <class T>
class span<T, dynamic_extent>
{
    // NOTE: This specialization is for Extent == dynamic_extent
public:
    using element_type = T;
    using value_type = std::remove_cv_t<T>;
    using size_type = std::size_t;
    using difference_type = std::ptrdiff_t;
    using pointer = T*;
    using const_pointer = const T*;
    using reference = T&;
    using const_reference = const T&;
    using iterator = omni::detail::PointerIterator<pointer, span>;
    using reverse_iterator = std::reverse_iterator<iterator>;
    constexpr static std::size_t extent = dynamic_extent;
    // Default constructor: an empty span over no storage.
    constexpr span() noexcept : m_p(nullptr), m_size(0)
    {
    }
    // Constructs a span over [first, first + count); only participates for random-access iterators.
    template <class It,
              std::enable_if_t<std::is_same<typename std::iterator_traits<It>::iterator_category, std::random_access_iterator_tag>::value,
                               bool> = false>
    constexpr span(It first, size_type count)
    {
        m_p = detail::to_address(first);
        m_size = count;
    }
    // Constructs a span over [first, last); only participates for random-access iterators.
    template <class It,
              std::enable_if_t<std::is_same<typename std::iterator_traits<It>::iterator_category, std::random_access_iterator_tag>::value,
                               bool> = false>
    constexpr span(It first, It last)
    {
        m_p = detail::to_address(first);
        m_size = last - first;
    }
    // Constructs a span over a C array; the size is taken from the array bound.
    template <std::size_t N>
    constexpr span(type_identity_t<element_type> (&arr)[N]) noexcept
    {
        m_p = cpp::data(arr);
        m_size = N;
    }
    // Constructs a span over a std::array with a convertible element type.
    template <class U, std::size_t N, std::enable_if_t<std::is_convertible<U, element_type>::value, bool> = false>
    constexpr span(std::array<U, N>& arr) noexcept
    {
        m_p = cpp::data(arr);
        m_size = N;
    }
    // Constructs a span over a const std::array with a convertible element type.
    template <class U, std::size_t N, std::enable_if_t<std::is_convertible<U, element_type>::value, bool> = false>
    constexpr span(const std::array<U, N>& arr) noexcept
    {
        m_p = cpp::data(arr);
        m_size = N;
    }
    // template< class R >
    // explicit(extent != dynamic_extent)
    // constexpr span( R&& range );
    // (Constructor not available without ranges, but approximate constructor follows)
    template <class R,
              std::enable_if_t<detail::IsConvertibleRange<cpp::remove_cvref_t<R>, pointer>::value &&
                                   !detail::IsStdArray<cpp::remove_cvref_t<R>>::value && // has separate constructor
                                   !detail::IsSpan<cpp::remove_cvref_t<R>>::value && // has separate constructor
                                   !std::is_array<cpp::remove_cvref_t<R>>::value, // has separate constructor
                               bool> = false>
    constexpr span(R&& range)
    {
        m_p = range.data();
        m_size = range.size();
    }
    // Converting constructor from any other span with a convertible element type; the source's
    // runtime size is adopted, so no extent check is necessary for a dynamic-extent target.
    template <class U, std::size_t N, std::enable_if_t<std::is_convertible<U, element_type>::value, bool> = false>
    constexpr span(const span<U, N>& source) noexcept
    {
        m_p = source.data();
        m_size = source.size();
    }
    constexpr span(const span& other) noexcept = default;
    constexpr span& operator=(const span& other) noexcept = default;
    // Iterator to the first element; equal to end() when the span is empty.
    constexpr iterator begin() const noexcept
    {
        return iterator(m_p);
    }
    // Past-the-end iterator; dereferencing it is undefined behavior.
    constexpr iterator end() const noexcept
    {
        return iterator(m_p + m_size);
    }
    constexpr reverse_iterator rbegin() const noexcept
    {
        return reverse_iterator(end());
    }
    constexpr reverse_iterator rend() const noexcept
    {
        return reverse_iterator(begin());
    }
    // First element; undefined behavior (checked in debug via the macro) on an empty span.
    constexpr reference front() const
    {
        CARB_THROW_OR_CHECK(!empty(), "Behavior is undefined when calling front() on an empty span");
        return *m_p;
    }
    // Last element; undefined behavior (checked in debug via the macro) on an empty span.
    constexpr reference back() const
    {
        CARB_THROW_OR_CHECK(!empty(), "Behavior is undefined when calling back() on an empty span");
        return m_p[m_size - 1];
    }
    constexpr reference operator[](size_type index) const
    {
        CARB_THROW_OR_CHECK(index < m_size, "Behavior is undefined when index exceeds size()");
        return m_p[index];
    }
    constexpr pointer data() const noexcept
    {
        return m_p;
    }
    constexpr size_type size() const noexcept
    {
        return m_size;
    }
    constexpr size_type size_bytes() const noexcept
    {
        return sizeof(element_type) * m_size;
    }
    CARB_NODISCARD constexpr bool empty() const noexcept
    {
        return m_size == 0;
    }
    // View over the first Count elements; Count must not exceed the runtime size.
    template <std::size_t Count>
    constexpr span<element_type, Count> first() const
    {
        CARB_THROW_OR_CHECK(Count <= m_size, "Behavior is undefined when Count > size()");
        return span<element_type, Count>{ m_p, Count };
    }
    constexpr span<element_type, dynamic_extent> first(size_type Count) const
    {
        CARB_THROW_OR_CHECK(Count <= m_size, "Behavior is undefined when Count > size()");
        return span<element_type, dynamic_extent>{ m_p, Count };
    }
    // View over the last Count elements; Count must not exceed the runtime size.
    template <std::size_t Count>
    constexpr span<element_type, Count> last() const
    {
        CARB_THROW_OR_CHECK(Count <= m_size, "Behavior is undefined when Count > size()");
        return span<element_type, Count>{ m_p + (m_size - Count), Count };
    }
    constexpr span<element_type, dynamic_extent> last(size_type Count) const
    {
        CARB_THROW_OR_CHECK(Count <= m_size, "Behavior is undefined when Count > size()");
        return span<element_type, dynamic_extent>{ m_p + (m_size - Count), Count };
    }
    // View over Count elements starting at Offset; carb_min() clamps a Count of dynamic_extent
    // to the remaining length of the span.
    template <std::size_t Offset, std::size_t Count = dynamic_extent>
    constexpr span<element_type, detail::DetermineSubspanExtent<extent, Offset, Count>::value> subspan() const
    {
        CARB_THROW_OR_CHECK(Offset <= m_size, "Behavior is undefined when Offset > size()");
        CARB_THROW_OR_CHECK(Count == dynamic_extent || (Offset + Count) <= m_size,
                            "Behavior is undefined when Count != dynamic_extent and (Offset + Count) > size()");
        return span<element_type, detail::DetermineSubspanExtent<extent, Offset, Count>::value>{
            data() + Offset, carb_min(size() - Offset, Count)
        };
    }
    constexpr span<element_type, dynamic_extent> subspan(size_type Offset, size_type Count = dynamic_extent) const
    {
        CARB_THROW_OR_CHECK(Offset <= m_size, "Behavior is undefined when Offset > size()");
        CARB_THROW_OR_CHECK(Count == dynamic_extent || (Offset + Count) <= m_size,
                            "Behavior is undefined when Count != dynamic_extent and (Offset + Count) > size()");
        return { data() + Offset, carb_min(size() - Offset, Count) };
    }
private:
    // Pointer and element count are both stored since the extent is only known at runtime.
    pointer m_p;
    size_type m_size;
};
// Specialization for zero
template <class T>
class span<T, 0>
{
    // NOTE: This specialization is for Extent == 0: the span is statically known to be empty, so
    // no pointer or size is stored and nearly every accessor can be computed at compile time.
public:
    using element_type = T;
    using value_type = std::remove_cv_t<T>;
    using size_type = std::size_t;
    using difference_type = std::ptrdiff_t;
    using pointer = T*;
    using const_pointer = const T*;
    using reference = T&;
    using const_reference = const T&;
    using iterator = omni::detail::PointerIterator<pointer, span>;
    using reverse_iterator = std::reverse_iterator<iterator>;
    constexpr static std::size_t extent = 0;
    constexpr span() noexcept = default;
    // Constructs from an iterator and a count; behavior is undefined unless count == 0.
    template <class It,
              std::enable_if_t<std::is_same<typename std::iterator_traits<It>::iterator_category, std::random_access_iterator_tag>::value,
                               bool> = false>
    constexpr explicit span(It first, size_type count)
    {
        CARB_UNUSED(first, count);
        CARB_THROW_OR_CHECK(count == 0, "Behavior is undefined if count != extent");
    }
    // Constructs from an iterator pair; behavior is undefined unless the range is empty.
    template <class It,
              std::enable_if_t<std::is_same<typename std::iterator_traits<It>::iterator_category, std::random_access_iterator_tag>::value,
                               bool> = false>
    constexpr explicit span(It first, It last)
    {
        CARB_UNUSED(first, last);
        CARB_THROW_OR_CHECK(first == last, "Behavior is undefined if (last - first) != extent");
    }
    // A C array always has N >= 1, so this can only be satisfied by the static_assert failing;
    // kept for parity with the primary template's diagnostics.
    template <std::size_t N>
    constexpr span(type_identity_t<element_type> (&arr)[N]) noexcept
    {
        CARB_UNUSED(arr);
        static_assert(N == extent, "Undefined if N != extent");
    }
    template <class U, std::size_t N, std::enable_if_t<std::is_convertible<U, element_type>::value, bool> = false>
    constexpr span(std::array<U, N>& arr) noexcept
    {
        CARB_UNUSED(arr);
        static_assert(N == extent, "Undefined if N != extent");
    }
    template <class U, std::size_t N, std::enable_if_t<std::is_convertible<U, element_type>::value, bool> = false>
    constexpr span(const std::array<U, N>& arr) noexcept
    {
        CARB_UNUSED(arr);
        static_assert(N == extent, "Undefined if N != extent");
    }
    // template< class R >
    // explicit(extent != dynamic_extent)
    // constexpr span( R&& range );
    // (Constructor not available without ranges, but approximate constructor follows)
    template <class R,
              std::enable_if_t<detail::IsConvertibleRange<cpp::remove_cvref_t<R>, pointer>::value &&
                                   !detail::IsStdArray<cpp::remove_cvref_t<R>>::value && // has separate constructor
                                   !detail::IsSpan<cpp::remove_cvref_t<R>>::value && // has separate constructor
                                   !std::is_array<cpp::remove_cvref_t<R>>::value, // has separate constructor
                               bool> = false>
    constexpr explicit span(R&& range)
    {
        CARB_THROW_OR_CHECK(range.size() == extent, "Behavior is undefined if R.size() != extent");
    }
    // Converting constructor from a dynamic-extent span; behavior is undefined unless the source
    // is empty.
    template <class U,
              std::size_t N,
              std::enable_if_t<N == dynamic_extent && std::is_convertible<U, element_type>::value, bool> = false>
    constexpr explicit span(const span<U, N>& source) noexcept
    {
        CARB_UNUSED(source);
        // N is dynamic_extent here, so the source length is only known at runtime: validate the
        // actual size. (Comparing N against extent would unconditionally fail, even for an empty
        // source, since dynamic_extent != 0.)
        CARB_THROW_OR_CHECK(source.size() == extent, "Behavior is undefined if source.size() != extent");
    }
    // Converting constructor from a fixed-extent span; N is a compile-time constant, so enforce
    // the extent match statically, consistent with the primary template.
    template <class U,
              std::size_t N,
              std::enable_if_t<N != dynamic_extent && std::is_convertible<U, element_type>::value, bool> = false>
    constexpr explicit span(const span<U, N>& source) noexcept
    {
        CARB_UNUSED(source);
        static_assert(N == extent, "Undefined if N != extent");
    }
    constexpr span(const span& other) noexcept = default;
    constexpr span& operator=(const span& other) noexcept = default;
    // The span is always empty, so begin() == end() by definition.
    constexpr iterator begin() const noexcept
    {
        return end();
    }
    constexpr iterator end() const noexcept
    {
        return iterator(nullptr);
    }
    constexpr reverse_iterator rbegin() const noexcept
    {
        return rend();
    }
    constexpr reverse_iterator rend() const noexcept
    {
        return reverse_iterator(begin());
    }
    // front()/back()/operator[] can never be valid on a statically-empty span; always report.
    constexpr reference front() const
    {
        CARB_ALWAYS_FAIL("Behavior is undefined if front() is called on an empty span");
    }
    constexpr reference back() const
    {
        CARB_ALWAYS_FAIL("Behavior is undefined if back() is called on an empty span");
    }
    constexpr reference operator[](size_type index) const
    {
        CARB_UNUSED(index);
        CARB_ALWAYS_FAIL("Behavior is undefined if index >= size()");
    }
    constexpr pointer data() const noexcept
    {
        return nullptr;
    }
    constexpr size_type size() const noexcept
    {
        return 0;
    }
    constexpr size_type size_bytes() const noexcept
    {
        return 0;
    }
    CARB_NODISCARD constexpr bool empty() const noexcept
    {
        return true;
    }
    // Only Count == 0 is valid for any subspan of a zero-extent span.
    template <std::size_t Count>
    constexpr span<element_type, Count> first() const
    {
        static_assert(Count <= extent, "Program ill-formed if Count > extent");
        return span<element_type, Count>{};
    }
    constexpr span<element_type, dynamic_extent> first(size_type Count) const
    {
        CARB_UNUSED(Count);
        CARB_THROW_OR_CHECK(Count == 0, "Behavior is undefined if Count > extent");
        return span<element_type, dynamic_extent>{};
    }
    template <std::size_t Count>
    constexpr span<element_type, Count> last() const
    {
        // Count is a compile-time constant; enforce statically, consistent with first() above and
        // with the primary template.
        static_assert(Count <= extent, "Program ill-formed if Count > extent");
        return span<element_type, Count>{};
    }
    constexpr span<element_type, dynamic_extent> last(size_type Count) const
    {
        CARB_UNUSED(Count);
        CARB_THROW_OR_CHECK(Count == 0, "Behavior is undefined if Count > extent");
        return span<element_type, dynamic_extent>{};
    }
    template <std::size_t Offset, std::size_t Count = dynamic_extent>
    constexpr span<element_type, detail::DetermineSubspanExtent<extent, Offset, Count>::value> subspan() const
    {
        static_assert(Offset <= extent, "Ill-formed");
        static_assert(Count == dynamic_extent || (Offset + Count) <= extent, "Ill-formed");
        return {};
    }
    constexpr span<element_type, dynamic_extent> subspan(size_type Offset, size_type Count = dynamic_extent) const
    {
        CARB_UNUSED(Offset, Count);
        CARB_THROW_OR_CHECK(Offset <= extent, "Behavior is undefined if Offset > extent");
        CARB_THROW_OR_CHECK(Count == dynamic_extent || (Offset + Count) <= extent,
                            "Behavior is undefined if Count != dynamic_extent and (Offset + Count) > extent");
        return {};
    }
};
// Compile-time checks of the two specializations above (see the macro's definition for the exact
// guarantees it asserts).
CARB_ASSERT_INTEROP_SAFE(span<int, 0>);
CARB_ASSERT_INTEROP_SAFE(span<int, dynamic_extent>);
#endif
#ifndef DOXYGEN_BUILD
// Overload for dynamic-extent spans: the byte span's extent is also dynamic.
template <class T, std::size_t N, std::enable_if_t<N == dynamic_extent, bool> = false>
span<const cpp::byte, dynamic_extent> as_bytes(span<T, N> s) noexcept
{
    return { reinterpret_cast<const cpp::byte*>(s.data()), s.size_bytes() };
}
// Overload for fixed-extent spans: the byte span's extent is sizeof(T) * N.
template <class T, std::size_t N, std::enable_if_t<N != dynamic_extent, bool> = false>
span<const cpp::byte, sizeof(T) * N> as_bytes(span<T, N> s) noexcept
#else
//! Obtains a view of the object representation of the elements of the given span
//! @tparam T the `element_type` of the span
//! @tparam N the `extent` of the span
//! @param s The span
//! @returns A \ref span of type `span<const cpp::byte, E>` where `E` is \ref dynamic_extent if `N` is also
//! \ref dynamic_extent, otherwise `E` is `sizeof(T) * N`.
template <class T, std::size_t N>
auto as_bytes(span<T, N> s) noexcept
#endif
{
    return span<const cpp::byte, sizeof(T) * N>{ reinterpret_cast<const cpp::byte*>(s.data()), s.size_bytes() };
}
#ifndef DOXYGEN_BUILD
// Overload for dynamic-extent spans: the byte span's extent is also dynamic.
template <class T, std::size_t N, std::enable_if_t<N == dynamic_extent, bool> = false>
span<cpp::byte, dynamic_extent> as_writable_bytes(span<T, N> s) noexcept
{
    return { reinterpret_cast<cpp::byte*>(s.data()), s.size_bytes() };
}
// Overload for fixed-extent spans: the byte span's extent is sizeof(T) * N.
template <class T, std::size_t N, std::enable_if_t<N != dynamic_extent, bool> = false>
span<cpp::byte, sizeof(T) * N> as_writable_bytes(span<T, N> s) noexcept
#else
//! Obtains a writable view to the object representation of the elements of the given span
//! @tparam T the `element_type` of the span
//! @tparam N the `extent` of the span
//! @param s The span
//! @returns A \ref span of type `span<cpp::byte, E>` where `E` is \ref dynamic_extent if `N` is also
//! \ref dynamic_extent, otherwise `E` is `sizeof(T) * N`.
template <class T, std::size_t N>
auto as_writable_bytes(span<T, N> s) noexcept
#endif
{
    return span<cpp::byte, sizeof(T) * N>{ reinterpret_cast<cpp::byte*>(s.data()), s.size_bytes() };
}
#undef CARB_ALWAYS_FAIL
#undef CARB_THROW_OR_CHECK
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Bit.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<bit>` library.
#pragma once
#include "TypeTraits.h"
#include "detail/ImplDummy.h"
#include "../extras/CpuInfo.h"
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// CARB_POPCNT is 1 if the compiler is targeting a CPU with AVX instructions or GCC reports popcnt is available. It is
// undefined at the bottom of the file.
# if defined(__AVX__) /* MSC/GCC */ || defined(__POPCNT__) /* GCC */
# define CARB_POPCNT 1
# else
# define CARB_POPCNT 0
# endif
// CARB_LZCNT is 1 if the compiler is targeting a CPU with AVX2 instructions or GCC reports lzcnt is available. It is
// undefined at the bottom of the file.
# if defined(__AVX2__) /* MSC/GCC */ || defined(__LZCNT__) /* GCC */
# define CARB_LZCNT 1
# else
# define CARB_LZCNT 0
# endif
#endif
#if CARB_COMPILER_MSC
// Forward-declare the MSVC bit-manipulation intrinsics directly (rather than including <intrin.h>)
// and mark them as intrinsic so the compiler emits the instructions inline.
extern "C"
{
    unsigned int __popcnt(unsigned int value);
    unsigned __int64 __popcnt64(unsigned __int64 value);
    unsigned char _BitScanReverse(unsigned long* _Index, unsigned long _Mask);
    unsigned char _BitScanReverse64(unsigned long* _Index, unsigned __int64 _Mask);
    unsigned char _BitScanForward(unsigned long* _Index, unsigned long _Mask);
    unsigned char _BitScanForward64(unsigned long* _Index, unsigned __int64 _Mask);
#    if CARB_LZCNT
    unsigned int __lzcnt(unsigned int);
    unsigned short __lzcnt16(unsigned short);
    unsigned __int64 __lzcnt64(unsigned __int64);
#    endif
}
#    pragma intrinsic(__popcnt)
#    pragma intrinsic(__popcnt64)
#    pragma intrinsic(_BitScanReverse)
#    pragma intrinsic(_BitScanReverse64)
#    pragma intrinsic(_BitScanForward)
#    pragma intrinsic(_BitScanForward64)
#    if CARB_LZCNT
#        pragma intrinsic(__lzcnt)
#        pragma intrinsic(__lzcnt16)
#        pragma intrinsic(__lzcnt64)
#    endif
#elif CARB_COMPILER_GNUC
#else
CARB_UNSUPPORTED_PLATFORM();
#endif
namespace carb
{
namespace cpp
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace detail
{
// Naive implementation of popcnt for CPUs without built in instructions.
template <class T, std::enable_if_t<std::is_unsigned<T>::value, bool> = true>
constexpr int popCountImpl(T val) noexcept
{
    // Kernighan's method: `val & (val - 1)` clears the lowest set bit, so the loop runs
    // exactly once per set bit.
    int bits = 0;
    for (; val != 0; val &= (val - 1))
    {
        ++bits;
    }
    return bits;
}
// The Helper class is specialized by type and size since many intrinsics have different names for different sizes.
// Each size specialization below inherits from the next-smaller one and overrides what differs.
template <class T, size_t Size = sizeof(T)>
class Helper;
// Specialization for functions where sizeof(T) >= 1
template <class T>
class Helper<T, 1>
{
public:
    static_assert(std::numeric_limits<T>::is_specialized, "Requires numeric type");
    using Signed = typename std::make_signed_t<T>;
    using Unsigned = typename std::make_unsigned_t<T>;
    // popCount implementation for 1-4 bytes integers
    static int popCount(const T& val)
    {
#    if CARB_COMPILER_MSC
#        if !CARB_POPCNT // Skip the check if we know we have the instruction
        // Only use the intrinsic if it's supported on the CPU
        static extras::CpuInfo cpuInfo;
        if (!cpuInfo.popcntSupported())
        {
            return popCountImpl((Unsigned)val);
        }
        else
#        endif
        {
            return (int)__popcnt((unsigned long)(Unsigned)val);
        }
#    else
        return __builtin_popcount((unsigned long)(Unsigned)val);
#    endif
    }
    // ORs each bit into all lower positions so that every bit at or below the highest set bit
    // becomes 1 (covers the low 8 bits; wider types extend this in derived specializations).
    static constexpr void propagateHighBit(T& n)
    {
        n |= (n >> 1);
        n |= (n >> 2);
        n |= (n >> 4);
    }
    // Counts leading zero bits; the arithmetic adjustments account for the intrinsics operating
    // on widths larger than T's digit count.
    static int countl_zero(T val)
    {
#    if CARB_LZCNT
#        if CARB_COMPILER_MSC
        return int(__lzcnt16((unsigned short)(Unsigned)val)) - (16 - std::numeric_limits<T>::digits);
#        else
        return int(__builtin_ia32_lzcnt_u16((unsigned short)(Unsigned)val)) - (16 - std::numeric_limits<T>::digits);
#        endif
#    else
#        if CARB_COMPILER_MSC
        unsigned long index;
        constexpr static int diff = std::numeric_limits<unsigned long>::digits - std::numeric_limits<T>::digits;
        return _BitScanReverse(&index, (unsigned long)(Unsigned)val) ?
                   (std::numeric_limits<unsigned long>::digits - 1 - index - diff) :
                   std::numeric_limits<T>::digits;
#        else
        // According to docs, undefined if val == 0
        return val ? __builtin_clz((unsigned int)(Unsigned)val) - (32 - std::numeric_limits<T>::digits) :
                     std::numeric_limits<T>::digits;
#        endif
#    endif
    }
    // Counts trailing zero bits; val == 0 is handled explicitly since the underlying
    // intrinsics are undefined for zero input.
    static int countr_zero(T val)
    {
        if (val == 0)
        {
            return std::numeric_limits<T>::digits;
        }
        else
        {
#    if CARB_COMPILER_MSC
            unsigned long result;
            _BitScanForward(&result, (unsigned long)(Unsigned)val);
            return (int)result;
#    else
            return __builtin_ctz((unsigned int)(Unsigned)val);
#    endif
        }
    }
};
// Specialization for functions where sizeof(T) >= 2
template <class T>
class Helper<T, 2> : public Helper<T, 1>
{
public:
    using Base = Helper<T, 1>;
    using typename Base::Signed;
    using typename Base::Unsigned;
    // Extends bit propagation to cover 16-bit values (base class handles the low 8 bits).
    static constexpr void propagateHighBit(T& n)
    {
        Base::propagateHighBit(n);
        n |= (n >> 8);
    }
};
// Specialization for functions where sizeof(T) >= 4
template <class T>
class Helper<T, 4> : public Helper<T, 2>
{
public:
    using Base = Helper<T, 2>;
    using typename Base::Signed;
    using typename Base::Unsigned;
    // Extends bit propagation to cover 32-bit values (base classes handle the low 16 bits).
    static constexpr void propagateHighBit(T& n)
    {
        Base::propagateHighBit(n);
        n |= (n >> 16);
    }
#    if CARB_LZCNT
    // With LZCNT available, the 32-bit intrinsic can be used directly with no width adjustment.
#        if CARB_COMPILER_MSC
    static int countl_zero(T val)
    {
        static_assert(std::numeric_limits<T>::digits == 32, "Invalid assumption");
        return int(__lzcnt((unsigned int)(Unsigned)val));
    }
#        else
    static int countl_zero(T val)
    {
        static_assert(std::numeric_limits<T>::digits == 32, "Invalid assumption");
        return int(__builtin_ia32_lzcnt_u32((unsigned int)(Unsigned)val));
    }
#        endif
#    endif
};
// Specialization for functions where sizeof(T) >= 8
template <class T>
class Helper<T, 8> : public Helper<T, 4>
{
public:
    using Base = Helper<T, 4>;
    using typename Base::Signed;
    using typename Base::Unsigned;
    // popCount implementation for 8 byte integers
    static int popCount(const T& val)
    {
        static_assert(sizeof(T) == sizeof(uint64_t), "Unexpected size");
#    if CARB_COMPILER_MSC
#        if !CARB_POPCNT // Skip the check if we know we have the instruction
        // Only use the intrinsic if it's supported on the CPU
        static extras::CpuInfo cpuInfo;
        if (!cpuInfo.popcntSupported())
        {
            return popCountImpl((Unsigned)val);
        }
        else
#        endif
        {
            return (int)__popcnt64((Unsigned)val);
        }
#    else
        return __builtin_popcountll((Unsigned)val);
#    endif
    }
    // Extends bit propagation to cover 64-bit values (base classes handle the low 32 bits).
    static constexpr void propagateHighBit(T& n)
    {
        Base::propagateHighBit(n);
        n |= (n >> 32);
    }
    static int countl_zero(T val)
    {
#    if CARB_LZCNT
#        if CARB_COMPILER_MSC
        return int(__lzcnt64((Unsigned)val));
#        else
        return int(__builtin_ia32_lzcnt_u64((Unsigned)val));
#        endif
#    else
#        if CARB_COMPILER_MSC
        unsigned long index;
        static_assert(sizeof(val) == sizeof(unsigned __int64), "Invalid assumption");
        return _BitScanReverse64(&index, val) ? std::numeric_limits<T>::digits - 1 - index :
                                                std::numeric_limits<T>::digits;
#        else
        // According to docs, undefined if val == 0
        return val ? __builtin_clzll((Unsigned)val) : std::numeric_limits<T>::digits;
#        endif
#    endif
    }
    // Counts trailing zero bits; val == 0 is handled explicitly since the underlying
    // intrinsics are undefined for zero input.
    static int countr_zero(T val)
    {
        if (val == 0)
        {
            return std::numeric_limits<T>::digits;
        }
        else
        {
#    if CARB_COMPILER_MSC
            unsigned long result;
            _BitScanForward64(&result, (Unsigned)val);
            return (int)result;
#    else
            return __builtin_ctzll((Unsigned)val);
#    endif
        }
    }
};
// True exactly when U and V have identical sizes; used below to constrain bit_cast.
template <class U, class V>
struct SizeMatches : cpp::bool_constant<sizeof(U) == sizeof(V)>
{
};
} // namespace detail
#endif
/**
 * Indicates the endianness of all scalar types for the current system.
 *
 * Endianness refers to byte ordering of scalar types larger than one byte. Take for example a 32-bit scalar with the
 * value "1".
 * On a little-endian system, the least-significant ("littlest") bytes are ordered first in memory. "1" would be
 * represented as:
 * @code{.txt}
 * 01 00 00 00
 * @endcode
 *
 * On a big-endian system, the most-significant ("biggest") bytes are ordered first in memory. "1" would be represented
 * as:
 * @code{.txt}
 * 00 00 00 01
 * @endcode
 */
enum class endian
{
#ifdef DOXYGEN_BUILD
    little = 0, //!< An implementation-defined value representing little-endian scalar types.
    big = 1, //!< An implementation-defined value representing big-endian scalar types.
    native = -1 //!< Will be either @ref endian::little or @ref endian::big depending on the target platform.
#elif CARB_COMPILER_GNUC
    // GCC/Clang expose the byte order directly via predefined macros.
    little = __ORDER_LITTLE_ENDIAN__,
    big = __ORDER_BIG_ENDIAN__,
    native = __BYTE_ORDER__
#else
    little = 0,
    big = 1,
    // Other compilers: the supported x86-64 and AArch64 targets are little-endian.
#    if CARB_X86_64 || CARB_AARCH64
    native = little
#    else
    CARB_UNSUPPORTED_PLATFORM();
#    endif
#endif
};
/**
 * Re-interprets the bits @p src as type `To`.
 *
 * @note The `To` and `From` types must exactly match size and both be trivially copyable.
 *
 * See: https://en.cppreference.com/w/cpp/numeric/bit_cast
 * @tparam To The object type to convert to.
 * @tparam From The (typically inferred) object type to convert from.
 * @param src The source object to reinterpret.
 * @returns The reinterpreted object.
 */
template <class To,
          class From,
          std::enable_if_t<detail::SizeMatches<To, From>::value && std::is_trivially_copyable<From>::value &&
                               std::is_trivially_copyable<To>::value,
                           bool> = false>
/* constexpr */ // Cannot be constexpr without compiler support
To bit_cast(const From& src) noexcept
{
    // This union allows us to bypass `dest`'s constructor and just trivially copy into it.
    // memcpy is the well-defined way to perform type punning (unlike reinterpret_cast, which
    // would violate strict aliasing).
    union
    {
        detail::NontrivialDummyType dummy{};
        To dest;
    } u;
    std::memcpy(&u.dest, &src, sizeof(From));
    return u.dest;
}
/**
 * Checks if a given value is an integral power of 2
 * @see https://en.cppreference.com/w/cpp/numeric/has_single_bit
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns \c true if \p val is not zero and has a single bit set (integral power of two); \c false otherwise.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
constexpr bool has_single_bit(T val) noexcept
{
    // Zero has no bits set, so it is not a power of two.
    if (val == T(0))
        return false;
    // Clearing the lowest set bit must leave nothing behind for a power of two.
    return (val & (val - 1)) == T(0);
}
/**
 * Finds the smallest integral power of two not less than the given value.
 * @see https://en.cppreference.com/w/cpp/numeric/bit_ceil
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The smallest integral power of two that is not smaller than \p val. Undefined if the resulting value is not
 * representable in \c T.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
constexpr T bit_ceil(T val) noexcept
{
    // 0 and 1 both round up to 1 (2^0).
    if (val <= 1)
        return T(1);

    // Yes, this could be implemented with a `nlz` instruction but cannot be constexpr without compiler support.
    // Decrement first so that exact powers of two map to themselves, then fill every bit below
    // the highest set bit and increment to reach the next power of two.
    --val;
    detail::Helper<T>::propagateHighBit(val);
    ++val;

    return val;
}
/**
 * Finds the largest integral power of two not greater than the given value.
 * @see https://en.cppreference.com/w/cpp/numeric/bit_floor
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The largest integral power of two not greater than \p val. Returns 0 when \p val is 0.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
constexpr T bit_floor(T val) noexcept
{
    // Yes, this could be implemented with a `nlz` instruction but cannot be constexpr without compiler support.
    // After propagation every bit at or below the highest set bit is 1; subtracting the
    // value shifted right by one leaves only the highest set bit.
    detail::Helper<T>::propagateHighBit(val);
    return val - (val >> 1);
}
/**
 * Returns the number of 1 bits in the value of x.
 *
 * @note Unlike std::popcount, this function is not constexpr. This is because the compiler intrinsics used to
 * implement this function are not constexpr until C++20, so it was decided to drop constexpr in favor of being able to
 * use compiler intrinsics. If a constexpr implementation is required, use \ref popcount_constexpr().
 *
 * @note (Intel/AMD CPUs) This function will check to see if the CPU supports the `popcnt` instruction (Intel Nehalem
 * micro-architecture, 2008; AMD Jaguar micro-architecture, 2013), and if it is not supported, will use a fallback
 * function that is ~85% slower than the `popcnt` instruction. If the compiler indicates that the target CPU has that
 * instruction, the CPU support check can be skipped, saving about 20%. This is accomplished with command-line switches:
 * `/arch:AVX` (or higher) for Visual Studio or `-march=sandybridge` (or several other `-march` options) for GCC.
 *
 * @param[in] val The unsigned integer value to test.
 * @returns The number of 1 bits in the value of \p val.
 */
template <class T, std::enable_if_t<std::is_unsigned<T>::value, bool> = true>
int popcount(T val) noexcept
{
    // Forward to the width-specific helper, which selects the intrinsic/fallback described above.
    return detail::Helper<T>::popCount(val);
}
/**
 * Returns the number of 1 bits in the value of x.
 *
 * @note Unlike \ref popcount(), this function is `constexpr` as it does not make use of intrinsics. Therefore, at
 * runtime it is recommended to use \ref popcount() instead of this function.
 *
 * @param[in] val The unsigned integer value to test.
 * @returns The number of 1 bits in the value of \p val.
 */
template <class T, std::enable_if_t<std::is_unsigned<T>::value, bool> = true>
constexpr int popcount_constexpr(T val) noexcept
{
    // Pure-C++ bit-counting implementation; no intrinsics, so it can be evaluated at compile time.
    return detail::popCountImpl(val);
}
/**
 * Returns the number of consecutive 0 bits in the value of val, starting from the most significant bit ("left").
 * @see https://en.cppreference.com/w/cpp/numeric/countl_zero
 *
 * @note Unlike std::countl_zero, this function is not constexpr. This is because the compiler intrinsics used to
 * implement this function are not constexpr until C++20, so it was decided to drop constexpr in favor of being able to
 * use compiler intrinsics. If a constexpr implementation is required, use \ref countl_zero_constexpr().
 *
 * @note (Intel/AMD CPUs) To support backwards compatibility with older CPUs, by default this is implemented with a
 * `bsr` instruction (i386+), that is slightly less performant (~3%) than the more modern `lzcnt` instruction. This
 * function implementation will switch to using `lzcnt` if the compiler indicates that instruction is supported. On
 * Visual Studio this is provided by passing `/arch:AVX2` command-line switch, or on GCC with `-march=haswell` (or
 * several other
 * `-march` options). The `lzcnt` instruction was
 * <a href="https://en.wikipedia.org/wiki/X86_Bit_manipulation_instruction_set">introduced</a> on Intel's Haswell micro-
 * architecture and AMD's Jaguar and Piledriver micro-architectures.
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 0 bits in the provided value, starting from the most significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
int countl_zero(T val) noexcept
{
    // Forward to the width-specific helper, which selects the instruction described above.
    return detail::Helper<T>::countl_zero(val);
}
/**
 * Returns the number of consecutive 0 bits in the value of val, starting from the most significant bit ("left").
 * @see https://en.cppreference.com/w/cpp/numeric/countl_zero
 *
 * @note Unlike \ref countl_zero(), this function is `constexpr` as it does not make use of intrinsics. Therefore, at
 * runtime it is recommended to use \ref countl_zero() instead of this function.
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 0 bits in the provided value, starting from the most significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
constexpr int countl_zero_constexpr(T val) noexcept
{
    // A value of zero has every bit clear.
    if (val == 0)
        return std::numeric_limits<T>::digits;
    // Binary search: at each step test whether the upper half of the current window holds any set
    // bit. If so, narrow into it; if not, all of those bits are leading zeros.
    int zeroBits = 0;
    for (int width = std::numeric_limits<T>::digits / 2; width != 0; width /= 2)
    {
        const T upperHalf = static_cast<T>(val >> width);
        if (upperHalf != 0)
            val = upperHalf; // highest set bit lives in the upper half
        else
            zeroBits += width; // upper half is empty; count it as leading zeros
    }
    return zeroBits;
}
/**
 * Returns the number of consecutive 0 bits in the value of val, starting from the least significant bit ("right").
 * @see https://en.cppreference.com/w/cpp/numeric/countr_zero
 *
 * @note Unlike std::countr_zero, this function is not constexpr. This is because the compiler intrinsics used to
 * implement this function are not constexpr until C++20, so it was decided to drop constexpr in favor of being able to
 * use compiler intrinsics. If a constexpr implementation is required, use \ref countr_zero_constexpr().
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 0 bits in the provided value, starting from the least significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
int countr_zero(T val) noexcept
{
    // Forward to the width-specific helper, which selects the best available intrinsic.
    return detail::Helper<T>::countr_zero(val);
}
#pragma push_macro("max")
#undef max
/**
 * Returns the number of consecutive 0 bits in the value of val, starting from the least significant bit ("right").
 * @see https://en.cppreference.com/w/cpp/numeric/countr_zero
 *
 * @note Unlike \ref countr_zero(), this function is `constexpr` as it does not make use of intrinsics. Therefore, at
 * runtime it is recommended to use \ref countr_zero() instead of this function.
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 0 bits in the provided value, starting from the least significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
constexpr int countr_zero_constexpr(T val) noexcept
{
    // A value of zero has every bit clear.
    if (val == 0)
        return std::numeric_limits<T>::digits;
    // Fast path: an odd value has no trailing zeros.
    if (val & 1)
        return 0;
    // Binary search from the low end: when the low `width` bits are all clear, they are trailing
    // zeros; shift them away and continue with a half-sized window.
    int zeroBits = 0;
    int width = std::numeric_limits<T>::digits / 2;
    T lowMask = static_cast<T>(std::numeric_limits<T>::max() >> width); // low `width` bits set
    while (width != 0)
    {
        if ((val & lowMask) == 0)
        {
            val = static_cast<T>(val >> width);
            zeroBits += width;
        }
        width /= 2;
        lowMask = static_cast<T>(lowMask >> width); // shrink the mask to the new window size
    }
    return zeroBits;
}
#pragma pop_macro("max")
/**
 * Returns the number of consecutive 1 bits in the value of val, starting from the most significant bit ("left").
 * @see https://en.cppreference.com/w/cpp/numeric/countl_one
 *
 * @note Unlike std::countl_one, this function is not constexpr. This is because the compiler intrinsics used to
 * implement this function are not constexpr until C++20, so it was decided to drop constexpr in favor of being able to
 * use compiler intrinsics. If a constexpr implementation is required, use \ref countl_one_constexpr().
 *
 * @note (Intel/AMD CPUs) To support backwards compatibility with older CPUs, by default this is implemented with a
 * `bsr` instruction (i386+), that is slightly less performant (~3%) than the more modern `lzcnt` instruction. This
 * function implementation will switch to using `lzcnt` if the compiler indicates that instruction is supported. On
 * Visual Studio this is provided by passing `/arch:AVX2` command-line switch, or on GCC with `-march=haswell` (or
 * several other
 * `-march` options). The `lzcnt` instruction was
 * <a href="https://en.wikipedia.org/wiki/X86_Bit_manipulation_instruction_set">introduced</a> on Intel's Haswell micro-
 * architecture and AMD's Jaguar and Piledriver micro-architectures.
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 1 bits in the provided value, starting from the most significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
int countl_one(T val) noexcept
{
    // The leading 1 bits of `val` are exactly the leading 0 bits of `~val`.
    return detail::Helper<T>::countl_zero(T(~val));
}
/**
 * Returns the number of consecutive 1 bits in the value of val, starting from the most significant bit ("left").
 * @see https://en.cppreference.com/w/cpp/numeric/countl_one
 *
 * @note Unlike \ref countl_one(), this function is `constexpr` as it does not make use of intrinsics. Therefore, at
 * runtime it is recommended to use \ref countl_one() instead of this function.
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 1 bits in the provided value, starting from the most significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
constexpr int countl_one_constexpr(T val) noexcept
{
    // The leading 1 bits of `val` are exactly the leading 0 bits of `~val`.
    return countl_zero_constexpr(T(~val));
}
/**
 * Returns the number of consecutive 1 bits in the value of val, starting from the least significant bit ("right").
 * @see https://en.cppreference.com/w/cpp/numeric/countr_one
 *
 * @note Unlike std::countr_one, this function is not constexpr. This is because the compiler intrinsics used to
 * implement this function are not constexpr until C++20, so it was decided to drop constexpr in favor of being able to
 * use compiler intrinsics. If a constexpr implementation is required, use \ref countr_one_constexpr().
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 1 bits in the provided value, starting from the least significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
int countr_one(T val) noexcept
{
    // The trailing 1 bits of `val` are exactly the trailing 0 bits of `~val`.
    return detail::Helper<T>::countr_zero(T(~val));
}
/**
 * Returns the number of consecutive 1 bits in the value of val, starting from the least significant bit ("right").
 * @see https://en.cppreference.com/w/cpp/numeric/countr_one
 *
 * @note Unlike \ref countr_one(), this function is `constexpr` as it does not make use of intrinsics. Therefore, at
 * runtime it is recommended to use \ref countr_one() instead of this function.
 *
 * @tparam T An unsigned integral type
 * @param val An unsigned integral value
 * @returns The number of consecutive 1 bits in the provided value, starting from the least significant bit.
 */
template <class T, std::enable_if_t<std::is_integral<T>::value && std::is_unsigned<T>::value, bool> = false>
constexpr int countr_one_constexpr(T val) noexcept
{
    // The trailing 1 bits of `val` are exactly the trailing 0 bits of `~val`.
    return countr_zero_constexpr(T(~val));
}
} // namespace cpp
} // namespace carb
#undef CARB_LZCNT
#undef CARB_POPCNT
|
omniverse-code/kit/include/carb/cpp/Numeric.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! \brief C++14-compatible implementation of select functionality from C++ `<numeric>` library.
#pragma once
#include "../Defines.h"
#include "Bit.h"
namespace carb
{
namespace cpp
{
//! \cond DEV
namespace detail
{
//! Computes |val| for a signed integer, returning the unsigned counterpart type.
//! Negation is performed in the unsigned domain (0 - u), which is well defined even for the most
//! negative value, where signed negation would overflow.
template <class Signed, std::enable_if_t<std::is_signed<Signed>::value, bool> = false>
constexpr inline std::make_unsigned_t<Signed> abs(const Signed val) noexcept
{
    using Unsigned = std::make_unsigned_t<Signed>;
    const Unsigned magnitude = static_cast<Unsigned>(val);
    return val < 0 ? static_cast<Unsigned>(Unsigned(0) - magnitude) : magnitude;
}
//! Computes |val| for an unsigned integer: the value itself.
template <class Unsigned, std::enable_if_t<std::is_unsigned<Unsigned>::value, bool> = false>
constexpr inline Unsigned abs(const Unsigned val) noexcept
{
    return val;
}
//! Returns the bit index of the lowest set bit of @p mask, or 0 when @p mask is 0
//! (callers are expected to handle the zero case themselves).
//! A shift loop is used because carb::cpp::countr_zero isn't constexpr.
template <class Unsigned>
constexpr inline unsigned long bitscan_forward(Unsigned mask) noexcept
{
    static_assert(std::is_unsigned<Unsigned>::value, "Must be an unsigned value");
    unsigned long index = 0;
    if (mask != Unsigned(0))
    {
        // Walk toward the low end until the lowest set bit reaches position 0.
        for (Unsigned bits = mask; (bits & Unsigned(1)) == Unsigned(0); bits >>= 1)
            ++index;
    }
    return index;
}
//! True for integral types other than (possibly cv-qualified) `bool`; used by \ref gcd to reject
//! `bool` arguments at compile time.
template <class T>
using NotBoolIntegral =
    ::carb::cpp::bool_constant<std::is_integral<T>::value && !std::is_same<std::remove_cv_t<T>, bool>::value>;
} // namespace detail
//! \endcond
/**
 * Computes the greatest common divisor of two integers.
 *
 * If either `M` or `N` is not an integer type, or if either is (possibly cv-qualified) `bool`, the program is ill-
 * formed. If either `|m|` or `|n|` is not representable as a value of type `std::common_type_t<M, N>`, the behavior is
 * undefined.
 *
 * Implemented as the binary (Stein) GCD: factors of two are stripped with shifts and the remaining
 * odd values are reduced by subtraction, avoiding division entirely.
 * @param m Integer value
 * @param n Integer value
 * @returns If both @p m and @p n are 0, returns 0; otherwise returns the greatest common divisor of `|m|` and `|n|`.
 */
template <class M, class N>
constexpr inline std::common_type_t<M, N> gcd(M m, N n) noexcept /*strengthened*/
{
    static_assert(::carb::cpp::detail::NotBoolIntegral<M>::value && ::carb::cpp::detail::NotBoolIntegral<N>::value,
                  "Requires non-bool integral");
    using Common = std::common_type_t<M, N>;
    using Unsigned = std::make_unsigned_t<Common>;
    // Work on magnitudes; detail::abs is safe even for the most negative signed value.
    Unsigned am = ::carb::cpp::detail::abs(m);
    Unsigned an = ::carb::cpp::detail::abs(n);
    // gcd(0, x) == x by convention (also yields 0 when both are 0).
    if (am == 0)
        return Common(an);
    if (an == 0)
        return Common(am);
    // Strip the factors of two shared by both values; they are re-applied at the end.
    const auto trailingZerosM = ::carb::cpp::detail::bitscan_forward(am);
    const auto common2s = carb_min(trailingZerosM, ::carb::cpp::detail::bitscan_forward(an));
    an >>= common2s;
    am >>= trailingZerosM; // am is now odd
    do
    {
        // Make `an` odd, keep `am <= an`, then reduce: gcd(am, an) == gcd(am, an - am).
        an >>= ::carb::cpp::detail::bitscan_forward(an);
        if (am > an)
        {
            Unsigned temp = am;
            am = an;
            an = temp;
        }
        an -= am; // even result; the next iteration's shift removes the factors of two
    } while (an != 0u);
    // Restore the common powers of two removed up front.
    return Common(am << common2s);
}
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/Atomic.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// Implements the wait/notify functions from C++20-standard atomics for types that are 1, 2, 4 or 8 bytes. If this
// feature is not desired, or the type you're trying to atomic-wrap isn't supported, use std::atomic instead.
// See the following:
// https://en.cppreference.com/w/cpp/atomic/atomic/wait
// https://en.cppreference.com/w/cpp/atomic/atomic/notify_one
// https://en.cppreference.com/w/cpp/atomic/atomic/notify_all
//! \file
//! \brief C++14-compatible implementation of select functionality from C++ `<atomic>` library.
#pragma once
#include "../thread/Futex.h"
#include "../thread/Util.h"
#include <type_traits>
#include <algorithm>
namespace carb
{
namespace cpp
{
template <class T>
class atomic;
namespace detail
{
// C++20 adds fetch_add/fetch_sub and operator support for floating point types
//! Extends `std::atomic<T>` for floating-point `T` with `fetch_add`/`fetch_sub` and `+=`/`-=`,
//! which `std::atomic` only provides natively starting with C++20. The arithmetic is emulated
//! with compare-exchange retry loops.
template <class T>
class atomic_float_facade : public std::atomic<T>
{
    using Base = std::atomic<T>;
    static_assert(std::is_floating_point<T>::value, "");

public:
    atomic_float_facade() noexcept = default;
    constexpr atomic_float_facade(T desired) noexcept : Base(desired)
    {
    }
    atomic_float_facade(const atomic_float_facade&) = delete;
    using Base::operator=;

    //! Atomically replaces the value with (value + arg); returns the previous value.
    //! The relaxed initial load and relaxed failure order are safe: on failure the loop retries
    //! with the freshly observed value, and the successful exchange uses `order`.
    T fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
    {
        T temp = this->load(std::memory_order_relaxed);
        while (!this->compare_exchange_strong(temp, temp + arg, order, std::memory_order_relaxed))
        {
        }
        return temp;
    }
    //! \copydoc fetch_add
    T fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
    {
        T temp = this->load(std::memory_order_relaxed);
        while (!this->compare_exchange_strong(temp, temp + arg, order, std::memory_order_relaxed))
        {
        }
        return temp;
    }
    //! Atomically replaces the value with (value - arg); returns the previous value.
    T fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
    {
        T temp = this->load(std::memory_order_relaxed);
        while (!this->compare_exchange_strong(temp, temp - arg, order, std::memory_order_relaxed))
        {
        }
        return temp;
    }
    //! \copydoc fetch_sub
    T fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
    {
        T temp = this->load(std::memory_order_relaxed);
        while (!this->compare_exchange_strong(temp, temp - arg, order, std::memory_order_relaxed))
        {
        }
        return temp;
    }
    //! Atomically adds @p arg and returns the *new* value (matches std::atomic operator semantics).
    T operator+=(T arg) noexcept
    {
        return this->fetch_add(arg) + arg;
    }
    T operator+=(T arg) volatile noexcept
    {
        return this->fetch_add(arg) + arg;
    }
    //! Atomically subtracts @p arg and returns the *new* value.
    T operator-=(T arg) noexcept
    {
        return this->fetch_sub(arg) - arg;
    }
    T operator-=(T arg) volatile noexcept
    {
        return this->fetch_sub(arg) - arg;
    }
};
//! Base for carb::cpp::atomic_ref: applies atomic operations to a referenced, externally owned
//! object of type T by reinterpreting it as a carb::cpp::atomic<T> and forwarding every operation
//! to it. All member functions are const because the *reference* is immutable, not the referent.
//! NOTE(review): correctness of the reinterpret_cast presumes the referent is suitably aligned and
//! not concurrently accessed non-atomically — confirm against the atomic_ref usage contract.
template <class T>
class atomic_ref_base
{
protected:
    using AtomicType = ::carb::cpp::atomic<T>;
    AtomicType& m_ref; // the wrapped object, viewed through the atomic facade

public:
    using value_type = T;
    static constexpr bool is_always_lock_free = AtomicType::is_always_lock_free;
    //! Alignment the referenced object must have for atomic access.
    static constexpr std::size_t required_alignment = alignof(T);

    //! Binds this atomic_ref to @p obj; no copy of the value is made.
    explicit atomic_ref_base(T& obj) : m_ref(reinterpret_cast<AtomicType&>(obj))
    {
    }
    //! Copying an atomic_ref rebinds to the same referent.
    atomic_ref_base(const atomic_ref_base& ref) noexcept : m_ref(ref.m_ref)
    {
    }
    //! Atomically stores @p desired into the referent (seq_cst); returns @p desired.
    T operator=(T desired) const noexcept
    {
        m_ref.store(desired);
        return desired;
    }
    atomic_ref_base& operator=(const atomic_ref_base&) = delete;
    bool is_lock_free() const noexcept
    {
        return m_ref.is_lock_free();
    }
    void store(T desired, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        m_ref.store(desired, order);
    }
    T load(std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return m_ref.load(order);
    }
    //! Implicit conversion performs a seq_cst load.
    operator T() const noexcept
    {
        return load();
    }
    T exchange(T desired, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return m_ref.exchange(desired, order);
    }
    bool compare_exchange_weak(T& expected, T desired, std::memory_order success, std::memory_order failure) const noexcept
    {
        return m_ref.compare_exchange_weak(expected, desired, success, failure);
    }
    bool compare_exchange_weak(T& expected, T desired, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return m_ref.compare_exchange_weak(expected, desired, order);
    }
    bool compare_exchange_strong(T& expected, T desired, std::memory_order success, std::memory_order failure) const noexcept
    {
        return m_ref.compare_exchange_strong(expected, desired, success, failure);
    }
    bool compare_exchange_strong(T& expected, T desired, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return m_ref.compare_exchange_strong(expected, desired, order);
    }
    //! Blocks until the referent's value differs from @p old. See atomic<T>::wait.
    void wait(T old, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        m_ref.wait(old, order);
    }
    void wait(T old, std::memory_order order = std::memory_order_seq_cst) const volatile noexcept
    {
        m_ref.wait(old, order);
    }
    //! Non-standard extension: as wait(), but gives up after @p duration; returns false on timeout.
    template <class Rep, class Period>
    bool wait_for(T old,
                  std::chrono::duration<Rep, Period> duration,
                  std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return m_ref.wait_for(old, duration, order);
    }
    template <class Rep, class Period>
    bool wait_for(T old,
                  std::chrono::duration<Rep, Period> duration,
                  std::memory_order order = std::memory_order_seq_cst) const volatile noexcept
    {
        return m_ref.wait_for(old, duration, order);
    }
    //! Non-standard extension: as wait(), but gives up at @p time_point; returns false on timeout.
    template <class Clock, class Duration>
    bool wait_until(T old,
                    std::chrono::time_point<Clock, Duration> time_point,
                    std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return m_ref.wait_until(old, time_point, order);
    }
    template <class Clock, class Duration>
    bool wait_until(T old,
                    std::chrono::time_point<Clock, Duration> time_point,
                    std::memory_order order = std::memory_order_seq_cst) const volatile noexcept
    {
        return m_ref.wait_until(old, time_point, order);
    }
    //! Wakes at least one thread blocked in wait() on the referent.
    void notify_one() const noexcept
    {
        m_ref.notify_one();
    }
    void notify_one() const volatile noexcept
    {
        m_ref.notify_one();
    }
    //! Wakes all threads blocked in wait() on the referent.
    void notify_all() const noexcept
    {
        m_ref.notify_all();
    }
    void notify_all() const volatile noexcept
    {
        m_ref.notify_all();
    }
};
//! atomic_ref facade for pointer types: adds pointer arithmetic (fetch_add/fetch_sub, ++/--,
//! +=/-=) on top of atomic_ref_base. Offsets are in elements, matching std::atomic<T*>.
template <class T>
class atomic_ref_pointer_facade : public atomic_ref_base<T>
{
    using Base = atomic_ref_base<T>;
    static_assert(std::is_pointer<T>::value, "");

public:
    using difference_type = std::ptrdiff_t;

    explicit atomic_ref_pointer_facade(T& ref) : Base(ref)
    {
    }
    atomic_ref_pointer_facade(const atomic_ref_pointer_facade& other) noexcept : Base(other)
    {
    }
    using Base::operator=;

    //! Atomically advances the pointer by @p arg elements; returns the previous value.
    T fetch_add(std::ptrdiff_t arg, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return this->m_ref.fetch_add(arg, order);
    }
    //! Atomically retreats the pointer by @p arg elements; returns the previous value.
    T fetch_sub(std::ptrdiff_t arg, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return this->m_ref.fetch_sub(arg, order);
    }
    //! Pre-increment: returns the *new* pointer value.
    T operator++() const noexcept
    {
        return this->m_ref.fetch_add(1) + 1;
    }
    //! Post-increment: returns the *previous* pointer value.
    T operator++(int) const noexcept
    {
        return this->m_ref.fetch_add(1);
    }
    //! Pre-decrement: returns the *new* pointer value.
    T operator--() const noexcept
    {
        return this->m_ref.fetch_sub(1) - 1;
    }
    //! Post-decrement: returns the *previous* pointer value.
    T operator--(int) const noexcept
    {
        return this->m_ref.fetch_sub(1);
    }
    T operator+=(std::ptrdiff_t arg) const noexcept
    {
        return this->m_ref.fetch_add(arg) + arg;
    }
    T operator-=(std::ptrdiff_t arg) const noexcept
    {
        return this->m_ref.fetch_sub(arg) - arg;
    }
};
//! atomic_ref facade for numeric (integral or floating-point) types: adds fetch_add/fetch_sub
//! and +=/-= on top of atomic_ref_base. Used directly for floating-point; integral types layer
//! atomic_ref_integer_facade on top.
template <class T>
class atomic_ref_numeric_facade : public atomic_ref_base<T>
{
    using Base = atomic_ref_base<T>;
    static_assert(std::is_integral<T>::value || std::is_floating_point<T>::value, "");

public:
    using difference_type = T;

    explicit atomic_ref_numeric_facade(T& ref) : Base(ref)
    {
    }
    atomic_ref_numeric_facade(const atomic_ref_numeric_facade& other) noexcept : Base(other)
    {
    }
    using Base::operator=;

    //! Atomically replaces the value with (value + arg); returns the previous value.
    T fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return this->m_ref.fetch_add(arg, order);
    }
    //! Atomically replaces the value with (value - arg); returns the previous value.
    T fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return this->m_ref.fetch_sub(arg, order);
    }
    //! Atomically adds @p arg; returns the *new* value.
    T operator+=(T arg) const noexcept
    {
        return this->m_ref.fetch_add(arg) + arg;
    }
    //! Atomically subtracts @p arg; returns the *new* value.
    T operator-=(T arg) const noexcept
    {
        return this->m_ref.fetch_sub(arg) - arg;
    }
};
//! atomic_ref facade for integral types: adds bitwise RMW operations (fetch_and/or/xor,
//! &=/|=/^=) and increment/decrement to the numeric facade.
template <class T>
class atomic_ref_integer_facade : public atomic_ref_numeric_facade<T>
{
    using Base = atomic_ref_numeric_facade<T>;
    static_assert(std::is_integral<T>::value, "");

public:
    explicit atomic_ref_integer_facade(T& ref) : Base(ref)
    {
    }
    atomic_ref_integer_facade(const atomic_ref_integer_facade& other) noexcept : Base(other)
    {
    }
    using Base::operator=;

    //! Atomically replaces the value with (value & arg); returns the previous value.
    T fetch_and(T arg, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return this->m_ref.fetch_and(arg, order);
    }
    //! Atomically replaces the value with (value | arg); returns the previous value.
    T fetch_or(T arg, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return this->m_ref.fetch_or(arg, order);
    }
    //! Atomically replaces the value with (value ^ arg); returns the previous value.
    T fetch_xor(T arg, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        return this->m_ref.fetch_xor(arg, order);
    }
    //! Pre-increment: returns the *new* value.
    T operator++() const noexcept
    {
        return this->m_ref.fetch_add(T(1)) + T(1);
    }
    //! Post-increment: returns the *previous* value.
    T operator++(int) const noexcept
    {
        return this->m_ref.fetch_add(T(1));
    }
    //! Pre-decrement: returns the *new* value.
    T operator--() const noexcept
    {
        return this->m_ref.fetch_sub(T(1)) - T(1);
    }
    //! Post-decrement: returns the *previous* value.
    T operator--(int) const noexcept
    {
        return this->m_ref.fetch_sub(T(1));
    }
    T operator&=(T arg) const noexcept
    {
        return this->m_ref.fetch_and(arg) & arg;
    }
    T operator|=(T arg) const noexcept
    {
        return this->m_ref.fetch_or(arg) | arg;
    }
    T operator^=(T arg) const noexcept
    {
        return this->m_ref.fetch_xor(arg) ^ arg;
    }
};
//! Chooses the atomic_ref facade matching T's category: pointer, integer, floating-point, or the
//! generic base for everything else.
template <class T>
using SelectAtomicRefBase = std::conditional_t<
    std::is_pointer<T>::value,
    atomic_ref_pointer_facade<T>,
    std::conditional_t<std::is_integral<T>::value,
                       atomic_ref_integer_facade<T>,
                       std::conditional_t<std::is_floating_point<T>::value, atomic_ref_numeric_facade<T>, atomic_ref_base<T>>>>;
//! std::atomic already supports integral and pointer arithmetic; only floating-point types need
//! the pre-C++20 facade.
template <class T>
using SelectAtomicBase = std::conditional_t<std::is_floating_point<T>::value, atomic_float_facade<T>, std::atomic<T>>;
} // namespace detail
//! Drop-in extension of std::atomic<T> that adds the C++20 wait/notify API (plus non-standard
//! wait_for/wait_until) for types of size 1, 2, 4 or 8 bytes, implemented on carb's futex.
template <class T>
class atomic : public detail::SelectAtomicBase<T>
{
    using Base = detail::SelectAtomicBase<T>;

public:
    using value_type = T;

    atomic() noexcept = default;
    constexpr atomic(T desired) noexcept : Base(desired)
    {
    }

    //! True when the size permits lock-free (futex-backed) waiting; wait/notify require this.
    static constexpr bool is_always_lock_free = sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8;

    using Base::operator=;
    using Base::operator T;

    CARB_PREVENT_COPY_AND_MOVE(atomic);

    // See https://en.cppreference.com/w/cpp/atomic/atomic/wait
    //! Blocks until the stored value (compared bitwise) differs from @p old.
    void wait(T old, std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        static_assert(is_always_lock_free, "Only supported for always-lock-free types");
        // Compare object representations (as integers), not via operator==, per the standard.
        using I = thread::detail::to_integral_t<T>;
        for (;;)
        {
            // Spin briefly first; only fall back to the kernel futex if the value hasn't changed.
            if (this_thread::spinTryWait([&] {
                    return thread::detail::reinterpret_as<I>(this->load(order)) != thread::detail::reinterpret_as<I>(old);
                }))
            {
                break;
            }
            // May wake spuriously; the enclosing loop re-checks the value.
            thread::futex::wait(*this, old);
        }
    }
    //! \copydoc wait
    void wait(T old, std::memory_order order = std::memory_order_seq_cst) const volatile noexcept
    {
        static_assert(is_always_lock_free, "Only supported for always-lock-free types");
        using I = thread::detail::to_integral_t<T>;
        for (;;)
        {
            if (this_thread::spinTryWait([&] {
                    return thread::detail::reinterpret_as<I>(this->load(order)) != thread::detail::reinterpret_as<I>(old);
                }))
            {
                break;
            }
            // const_cast strips cv-qualification for the futex API; the futex itself is safe here.
            thread::futex::wait(const_cast<atomic<T>&>(*this), old);
        }
    }
    // wait_for and wait_until are non-standard
    //! As wait(), but gives up after @p duration; returns false on timeout.
    template <class Rep, class Period>
    bool wait_for(T old,
                  std::chrono::duration<Rep, Period> duration,
                  std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        // Since futex can spuriously wake up, calculate the end time so that we can handle the spurious wakeups without
        // shortening our wait time potentially significantly.
        return wait_until(old, std::chrono::steady_clock::now() + thread::detail::clampDuration(duration), order);
    }
    //! \copydoc wait_for
    template <class Rep, class Period>
    bool wait_for(T old,
                  std::chrono::duration<Rep, Period> duration,
                  std::memory_order order = std::memory_order_seq_cst) const volatile noexcept
    {
        // Since futex can spuriously wake up, calculate the end time so that we can handle the spurious wakeups without
        // shortening our wait time potentially significantly.
        return wait_until(old, std::chrono::steady_clock::now() + thread::detail::clampDuration(duration), order);
    }
    //! As wait(), but gives up at @p time_point; returns false on timeout, true when the value changed.
    template <class Clock, class Duration>
    bool wait_until(T old,
                    std::chrono::time_point<Clock, Duration> time_point,
                    std::memory_order order = std::memory_order_seq_cst) const noexcept
    {
        static_assert(is_always_lock_free, "Only supported for always-lock-free types");
        using I = thread::detail::to_integral_t<T>;
        for (;;)
        {
            if (this_thread::spinTryWait([&] {
                    return thread::detail::reinterpret_as<I>(this->load(order)) != thread::detail::reinterpret_as<I>(old);
                }))
            {
                return true;
            }
            // futex wait_until returning false indicates the deadline passed.
            if (!thread::futex::wait_until(*this, old, time_point))
            {
                return false;
            }
        }
    }
    //! \copydoc wait_until
    template <class Clock, class Duration>
    bool wait_until(T old,
                    std::chrono::time_point<Clock, Duration> time_point,
                    std::memory_order order = std::memory_order_seq_cst) const volatile noexcept
    {
        static_assert(is_always_lock_free, "Only supported for always-lock-free types");
        using I = thread::detail::to_integral_t<T>;
        for (;;)
        {
            if (this_thread::spinTryWait([&] {
                    return thread::detail::reinterpret_as<I>(this->load(order)) != thread::detail::reinterpret_as<I>(old);
                }))
            {
                return true;
            }
            if (!thread::futex::wait_until(const_cast<atomic<T>&>(*this), old, time_point))
            {
                return false;
            }
        }
    }
    // See https://en.cppreference.com/w/cpp/atomic/atomic/notify_one
    //! Wakes at least one thread blocked in wait() on this object.
    void notify_one() noexcept
    {
        thread::futex::notify_one(*this);
    }
    void notify_one() volatile noexcept
    {
        thread::futex::notify_one(const_cast<atomic<T>&>(*this));
    }
    // See https://en.cppreference.com/w/cpp/atomic/atomic/notify_all
    //! Wakes all threads blocked in wait() on this object.
    void notify_all() noexcept
    {
        thread::futex::notify_all(*this);
    }
    void notify_all() volatile noexcept
    {
        thread::futex::notify_all(const_cast<atomic<T>&>(*this));
    }
};
//! C++14-compatible analogue of C++20 std::atomic_ref: performs atomic operations on an
//! externally owned object. The facade base (pointer/integer/numeric/generic) is selected from T.
template <class T>
class atomic_ref : public detail::SelectAtomicRefBase<T>
{
    using Base = detail::SelectAtomicRefBase<T>;

public:
    //! Binds to @p ref; the referent must outlive this atomic_ref.
    explicit atomic_ref(T& ref) : Base(ref)
    {
    }
    //! Copy rebinds to the same referent.
    atomic_ref(const atomic_ref& other) noexcept : Base(other)
    {
    }
    using Base::operator=;
};
// Helper functions
// See https://en.cppreference.com/w/cpp/atomic/atomic_wait
//! Free-function form of atomic<T>::wait with seq_cst ordering.
template <class T>
inline void atomic_wait(const atomic<T>* object, typename atomic<T>::value_type old) noexcept
{
    object->wait(old);
}
//! Free-function form of atomic<T>::wait with caller-specified memory order.
template <class T>
inline void atomic_wait_explicit(const atomic<T>* object, typename atomic<T>::value_type old, std::memory_order order) noexcept
{
    object->wait(old, order);
}
// See https://en.cppreference.com/w/cpp/atomic/atomic_notify_one
//! Free-function form of atomic<T>::notify_one.
template <class T>
inline void atomic_notify_one(atomic<T>* object)
{
    object->notify_one();
}
// See https://en.cppreference.com/w/cpp/atomic/atomic_notify_all
//! Free-function form of atomic<T>::notify_all.
template <class T>
inline void atomic_notify_all(atomic<T>* object)
{
    object->notify_all();
}
// Convenience aliases mirroring the std::atomic_* typedefs (see <atomic>), but for carb::cpp::atomic.
using atomic_bool = atomic<bool>;
using atomic_char = atomic<char>;
using atomic_schar = atomic<signed char>;
using atomic_uchar = atomic<unsigned char>;
using atomic_short = atomic<short>;
using atomic_ushort = atomic<unsigned short>;
using atomic_int = atomic<int>;
using atomic_uint = atomic<unsigned int>;
using atomic_long = atomic<long>;
using atomic_ulong = atomic<unsigned long>;
using atomic_llong = atomic<long long>;
using atomic_ullong = atomic<unsigned long long>;
using atomic_char16_t = atomic<char16_t>;
using atomic_char32_t = atomic<char32_t>;
using atomic_wchar_t = atomic<wchar_t>;
using atomic_int8_t = atomic<int8_t>;
using atomic_uint8_t = atomic<uint8_t>;
using atomic_int16_t = atomic<int16_t>;
using atomic_uint16_t = atomic<uint16_t>;
using atomic_int32_t = atomic<int32_t>;
using atomic_uint32_t = atomic<uint32_t>;
using atomic_int64_t = atomic<int64_t>;
using atomic_uint64_t = atomic<uint64_t>;
using atomic_int_least8_t = atomic<int_least8_t>;
using atomic_uint_least8_t = atomic<uint_least8_t>;
using atomic_int_least16_t = atomic<int_least16_t>;
using atomic_uint_least16_t = atomic<uint_least16_t>;
using atomic_int_least32_t = atomic<int_least32_t>;
using atomic_uint_least32_t = atomic<uint_least32_t>;
using atomic_int_least64_t = atomic<int_least64_t>;
using atomic_uint_least64_t = atomic<uint_least64_t>;
using atomic_int_fast8_t = atomic<int_fast8_t>;
using atomic_uint_fast8_t = atomic<uint_fast8_t>;
using atomic_int_fast16_t = atomic<int_fast16_t>;
using atomic_uint_fast16_t = atomic<uint_fast16_t>;
using atomic_int_fast32_t = atomic<int_fast32_t>;
using atomic_uint_fast32_t = atomic<uint_fast32_t>;
using atomic_int_fast64_t = atomic<int_fast64_t>;
using atomic_uint_fast64_t = atomic<uint_fast64_t>;
using atomic_intptr_t = atomic<intptr_t>;
using atomic_uintptr_t = atomic<uintptr_t>;
using atomic_size_t = atomic<size_t>;
using atomic_ptrdiff_t = atomic<ptrdiff_t>;
using atomic_intmax_t = atomic<intmax_t>;
using atomic_uintmax_t = atomic<uintmax_t>;
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/detail/ImplData.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief C++14-compatible implementation of the C++ standard library `std::data` function.
#pragma once
#include <cstddef>
#include <initializer_list>
namespace carb
{
namespace cpp
{
//! Returns a pointer to the block of memory containing the elements of the range.
//! @tparam T The type of the array elements
//! @tparam N The size of the array
//! @param array An array
//! @returns A pointer to the array's first element (equivalent to array-to-pointer decay).
template <class T, std::size_t N>
constexpr T* data(T (&array)[N]) noexcept
{
    return &array[0];
}
//! Returns a pointer to the block of memory containing the elements of the range.
//! @tparam C The container type
//! @param c A container
//! @returns `c.data()`
template <class C>
constexpr auto data(C& c) -> decltype(c.data())
{
return c.data();
}
//! C++14-compatible equivalent of `std::data` for `const` containers exposing a `data()` member.
//! @tparam C The container type
//! @param cont The container to examine
//! @returns The result of `cont.data()`
template <class C>
constexpr auto data(const C& cont) -> decltype(cont.data())
{
    return cont.data();
}
//! C++14-compatible equivalent of `std::data` for `std::initializer_list`.
//! @tparam E Element type of the list
//! @param list The initializer list to examine
//! @returns A pointer to the first element, i.e. `list.begin()`
template <class E>
constexpr const E* data(std::initializer_list<E> list) noexcept
{
    return list.begin();
}
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/detail/ImplDummy.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief Implementation details
#pragma once
#include <type_traits>
namespace carb
{
namespace cpp
{
//! \cond DEV
namespace detail
{
// An empty type whose default constructor is user-provided, which by definition makes the type
// NOT trivially default constructible. It is used as the inactive "empty" alternative of the
// anonymous union in ImplOptional.h's OptionalDestructor, so that default-constructing the
// union does not require initializing the value member.
struct NontrivialDummyType
{
    // Intentionally user-provided (non-trivial) yet constexpr and noexcept.
    constexpr NontrivialDummyType() noexcept
    {
    }
};
static_assert(!std::is_trivially_default_constructible<NontrivialDummyType>::value, "Invalid assumption");
} // namespace detail
//! \endcond
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/detail/ImplInvoke.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Implementation details for `carb::cpp::invoke` and related functions.
#pragma once
#include "../../Defines.h"
#include "../../detail/NoexceptType.h"
#include <functional>
#include <type_traits>
#include <utility>
//! \file
//! Contains common utilities used by \c invoke (which lives in the \c functional header) and the \c invoke_result type
//! queries (which live in the \c type_traits header).
namespace carb
{
namespace cpp
{
//! \cond DEV
namespace detail
{
CARB_DETAIL_PUSH_IGNORE_NOEXCEPT_TYPE()
// Trait: true iff T is a specialization of std::reference_wrapper. Used by invoke_impl's
// _access() overloads below to unwrap the wrapped reference before applying a member pointer.
template <typename T>
struct is_reference_wrapper : std::false_type
{
};
template <typename T>
struct is_reference_wrapper<std::reference_wrapper<T>> : std::true_type
{
};
// The interface of invoke_impl are the `eval` and `uneval` functions, which have the correct return type and noexcept
// specifiers for an expression `INVOKE(f, args...)` (this comes from the C++ concept of "Callable"). The two functions
// are identical, save for the `eval` function having a body while the `uneval` function does not. This matters for
// functions with declared-but-undefined return types. It is legal to ask type transformation questions about a function
// `R (*)(Args...)` when `R` is undefined, but not legal to evaluate it in any way.
//
// Base template: T is directly invocable -- a function pointer or object with operator()
template <typename T>
struct invoke_impl
{
    // Evaluated form: perfectly forwards `f` and `args` into the call expression `f(args...)`.
    // The trailing return type and noexcept specifier mirror that expression exactly, so an
    // invalid combination is a SFINAE-safe substitution failure rather than a hard error.
    template <typename F, typename... TArgs>
    static constexpr auto eval(F&& f,
                               TArgs&&... args) noexcept(noexcept(std::forward<F>(f)(std::forward<TArgs>(args)...)))
        -> decltype(std::forward<F>(f)(std::forward<TArgs>(args)...))
    {
        return std::forward<F>(f)(std::forward<TArgs>(args)...);
    }
    // Unevaluated form: identical signature but intentionally has no definition (see comment
    // above) -- only usable in unevaluated contexts such as decltype/noexcept.
    template <typename F, typename... TArgs>
    static constexpr auto uneval(F&& f,
                                 TArgs&&... args) noexcept(noexcept(std::forward<F>(f)(std::forward<TArgs>(args)...)))
        -> decltype(std::forward<F>(f)(std::forward<TArgs>(args)...));
};
// Match the case where we want to invoke a member function.
template <typename TObject, typename TReturn>
struct invoke_impl<TReturn TObject::*>
{
    using Self = invoke_impl;
    // C++14-compatible stand-in for C++17's std::bool_constant.
    template <bool B>
    using bool_constant = std::integral_constant<bool, B>;
#if CARB_COMPILER_GNUC == 1 && __cplusplus <= 201703L
    // WORKAROUND for pre-C++20: Calling a `const&` member function on an `&&` object through invoke is a C++20
    // extension. MSVC supports this, but GNUC-compatible compilers do not until C++20. To work around this, we change
    // the object's ref qualifier from `&&` to `const&` if we are attempting to call a `const&` member function.
    //
    // Note `move(x).f()` has always been allowed if `f` is `const&` qualified, the issue is `std::move(x).*(&T::foo)()`
    // is not allowed (this is a C++ specification bug, corrected in C++20). Further note that we can not do this for
    // member data selectors, because the ref qualifier carries through since C++17. When this workaround is no longer
    // needed (when C++20 is minimum), the `_is_cref_mem_fn` tests on the `_access` functions can be removed.
    // Detector: yields std::true_type iff the decayed member pointer names a `const&`-qualified member function.
    template <typename UReturn, typename UObject, typename... UArgs>
    static std::true_type _test_is_cref_mem_fn(UReturn (UObject::*mem_fn)(UArgs...) const&);
    static std::false_type _test_is_cref_mem_fn(...);
    template <typename TRMem>
    using _is_cref_mem_fn = decltype(_test_is_cref_mem_fn(std::declval<std::decay_t<TRMem>>()));
    // Object (or class derived from TObject) passed directly, member not `const&`-qualified: forward as-is.
    template <typename T, typename = std::enable_if_t<std::is_base_of<TObject, std::decay_t<T>>::value>>
    static constexpr auto _access(T&& x, std::false_type) noexcept -> std::add_rvalue_reference_t<T>
    {
        return std::forward<T>(x);
    }
    // Object passed directly and the member IS `const&`-qualified: bind as `const&` (see WORKAROUND above).
    template <typename T, typename = std::enable_if_t<std::is_base_of<TObject, std::decay_t<T>>::value>>
    static constexpr auto _access(T const& x, std::true_type) noexcept -> T const&
    {
        return x;
    }
#else
    template <typename>
    using _is_cref_mem_fn = std::false_type;
    // Accessing the type should be done directly.
    template <typename T, bool M, typename = std::enable_if_t<std::is_base_of<TObject, std::decay_t<T>>::value>>
    static constexpr auto _access(T&& x, bool_constant<M>) noexcept -> std::add_rvalue_reference_t<T>
    {
        return std::forward<T>(x);
    }
#endif
    // T is a reference wrapper -- access goes through the `get` function.
    template <typename T, bool M, typename = std::enable_if_t<is_reference_wrapper<std::decay_t<T>>::value>>
    static constexpr auto _access(T&& x, bool_constant<M>) noexcept(noexcept(x.get())) -> decltype(x.get())
    {
        return x.get();
    }
    // Matches cases where a pointer or fancy pointer is passed in.
    template <typename TOriginal,
              bool M,
              typename T = std::decay_t<TOriginal>,
              typename = std::enable_if_t<!std::is_base_of<TObject, T>::value && !is_reference_wrapper<T>::value>>
    static constexpr auto _access(TOriginal&& x, bool_constant<M>) noexcept(noexcept(*std::forward<TOriginal>(x)))
        -> decltype(*std::forward<TOriginal>(x))
    {
        return *std::forward<TOriginal>(x);
    }
    // INVOKE for a pointer-to-member-function: unwraps `x` via _access() and calls the member.
    template <typename T, typename... TArgs, typename TRMem, typename = std::enable_if_t<std::is_function<TRMem>::value>>
    static constexpr auto eval(TRMem TObject::*pmem, T&& x, TArgs&&... args) noexcept(noexcept(
        (Self::_access(std::forward<T>(x), _is_cref_mem_fn<decltype(pmem)>{}).*pmem)(std::forward<TArgs>(args)...)))
        -> decltype((Self::_access(std::forward<T>(x), _is_cref_mem_fn<decltype(pmem)>{}).*
                     pmem)(std::forward<TArgs>(args)...))
    {
        return (Self::_access(std::forward<T>(x), _is_cref_mem_fn<decltype(pmem)>{}).*pmem)(std::forward<TArgs>(args)...);
    }
    // Unevaluated twin of the member-function overload above (declaration only).
    template <typename T, typename... TArgs, typename TRMem, typename = std::enable_if_t<std::is_function<TRMem>::value>>
    static constexpr auto uneval(TRMem TObject::*pmem, T&& x, TArgs&&... args) noexcept(noexcept(
        (Self::_access(std::forward<T>(x), _is_cref_mem_fn<decltype(pmem)>{}).*pmem)(std::forward<TArgs>(args)...)))
        -> decltype((Self::_access(std::forward<T>(x), _is_cref_mem_fn<decltype(pmem)>{}).*
                     pmem)(std::forward<TArgs>(args)...));
    // INVOKE for a pointer-to-data-member: unwraps `x` and applies the member selector. Always
    // passes std::false_type because the cref workaround applies only to member functions.
    template <typename T>
    static constexpr auto eval(TReturn TObject::*select,
                               T&& x) noexcept(noexcept(Self::_access(std::forward<T>(x), std::false_type{}).*select))
        -> decltype(Self::_access(std::forward<T>(x), std::false_type{}).*select)
    {
        return Self::_access(std::forward<T>(x), std::false_type{}).*select;
    }
    // Unevaluated twin of the data-member overload (declaration only).
    template <typename T>
    static constexpr auto uneval(TReturn TObject::*select,
                                 T&& x) noexcept(noexcept(Self::_access(std::forward<T>(x), std::false_type{}).*select))
        -> decltype(Self::_access(std::forward<T>(x), std::false_type{}).*select);
};
// Test invocation of `f(args...)` in an unevaluated context to get its return type. This is a SFINAE-safe error if the
// expression `f(args...)` is invalid.
// Declaration only -- never defined; dispatches to invoke_impl<>::uneval for the decayed callable type.
template <typename F, typename... TArgs>
auto invoke_uneval(F&& f, TArgs&&... args) noexcept(
    noexcept(invoke_impl<std::decay_t<F>>::uneval(std::forward<F>(f), std::forward<TArgs>(args)...)))
    -> decltype(invoke_impl<std::decay_t<F>>::uneval(std::forward<F>(f), std::forward<TArgs>(args)...));
CARB_DETAIL_POP_IGNORE_NOEXCEPT_TYPE()
} // namespace detail
//! \endcond
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/cpp/detail/ImplOptional.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! \file
//! \brief Implementation details for `carb::cpp::optional<>`.
#pragma once
#ifndef CARB_IMPLOPTIONAL
# error This file should only be included from Optional.h
#endif
#include <utility>
#include "../TypeTraits.h"
#include "ImplDummy.h"
namespace carb
{
namespace cpp
{
namespace detail
{
// Default facade for trivial destruction of T. No destructor is declared, which keeps
// OptionalDestructor itself trivially destructible when T is.
template <class T, bool = std::is_trivially_destructible<T>::value>
struct OptionalDestructor
{
    // Anonymous union: exactly one of `empty` / `value` is active at a time.
    union
    {
        NontrivialDummyType empty;
        CARB_VIZ typename std::remove_const_t<T> value;
    };
    // True when `value` is the active union member.
    CARB_VIZ bool hasValue;
    // Constructs in the disengaged state (the `empty` member is active).
    constexpr OptionalDestructor() noexcept : empty{}, hasValue{ false }
    {
    }
    // Constructs in the engaged state, forwarding `args` to T's constructor.
    template <class... Args>
    constexpr explicit OptionalDestructor(in_place_t, Args&&... args)
        : value(std::forward<Args>(args)...), hasValue(true)
    {
    }
    // Cannot access anonymous union member `value` until C++17, so expose access here
    constexpr const T& val() const&
    {
        CARB_ASSERT(hasValue);
        return value;
    }
    constexpr T& val() &
    {
        CARB_ASSERT(hasValue);
        return value;
    }
    constexpr const T&& val() const&&
    {
        CARB_ASSERT(hasValue);
        return std::move(value);
    }
    constexpr T&& val() &&
    {
        CARB_ASSERT(hasValue);
        return std::move(value);
    }
    // Disengages; no destructor call is needed because T is trivially destructible.
    void reset() noexcept
    {
        // No need to destruct since trivially destructible
        hasValue = false;
    }
};
// Specialization for non-trivial destruction of T
template <class T>
struct OptionalDestructor<T, false>
{
    // Anonymous union: exactly one of `empty` / `value` is active at a time.
    union
    {
        NontrivialDummyType empty;
        CARB_VIZ typename std::remove_const_t<T> value;
    };
    // True when `value` is the active union member.
    CARB_VIZ bool hasValue;
    // Runs T's destructor only when engaged.
    ~OptionalDestructor() noexcept
    {
        if (hasValue)
        {
            value.~T();
        }
    }
    // Constructs in the disengaged state (the `empty` member is active).
    constexpr OptionalDestructor() noexcept : empty{}, hasValue{ false }
    {
    }
    // Constructs in the engaged state, forwarding `args` to T's constructor.
    template <class... Args>
    constexpr explicit OptionalDestructor(in_place_t, Args&&... args)
        : value(std::forward<Args>(args)...), hasValue(true)
    {
    }
    // Declaring a destructor suppresses the implicit copy/move operations; restore the defaults.
    // The real (non-trivial) copy/move semantics are layered on by the Select* hierarchy below.
    OptionalDestructor(const OptionalDestructor&) = default;
    OptionalDestructor(OptionalDestructor&&) = default;
    OptionalDestructor& operator=(const OptionalDestructor&) = default;
    OptionalDestructor& operator=(OptionalDestructor&&) = default;
    // Cannot access anonymous union member `value` until C++17, so expose access here
    const T& val() const&
    {
        CARB_ASSERT(hasValue);
        return value;
    }
    T& val() &
    {
        CARB_ASSERT(hasValue);
        return value;
    }
    constexpr const T&& val() const&&
    {
        CARB_ASSERT(hasValue);
        return std::move(value);
    }
    constexpr T&& val() &&
    {
        CARB_ASSERT(hasValue);
        return std::move(value);
    }
    // Destroys the contained value (if any) and disengages.
    void reset() noexcept
    {
        if (hasValue)
        {
            value.~T();
            hasValue = false;
        }
    }
};
// Adds construct/assign primitives on top of the storage facade; the Select* hierarchy below
// builds the public special members out of these.
template <class T>
struct OptionalConstructor : OptionalDestructor<T>
{
    using value_type = T;
    using OptionalDestructor<T>::OptionalDestructor;
    // Placement-constructs the contained value in the union storage; the object must currently
    // be disengaged (asserted below).
    template <class... Args>
    T& construct(Args&&... args)
    {
        CARB_ASSERT(!this->hasValue);
        new (std::addressof(this->value)) decltype(this->value)(std::forward<Args>(args)...);
        this->hasValue = true;
        return this->value;
    }
    // Assigns into the existing value when engaged, otherwise constructs a new one.
    template <class U>
    void assign(U&& rhs)
    {
        if (this->hasValue)
        {
            this->value = std::forward<U>(rhs);
        }
        else
        {
            construct(std::forward<U>(rhs));
        }
    }
    // Copy/move-constructs from another optional-storage object when it is engaged. `U` is
    // expected to be a (const) OptionalConstructor lvalue/rvalue so `rhs.value` forwards with
    // the proper value category; `this` must be disengaged (construct() asserts).
    template <class U>
    void constructFrom(U&& rhs) noexcept(std::is_nothrow_constructible<T, decltype((std::forward<U>(rhs).value))>::value)
    {
        if (rhs.hasValue)
        {
            construct(std::forward<U>(rhs).value);
        }
    }
    // Copy/move-assigns from another optional-storage object, resetting when `rhs` is empty.
    template <class U>
    void assignFrom(U&& rhs) noexcept(std::is_nothrow_constructible<T, decltype((std::forward<U>(rhs).value))>::value&&
                                          std::is_nothrow_assignable<T, decltype((std::forward<U>(rhs).value))>::value)
    {
        if (rhs.hasValue)
        {
            assign(std::forward<U>(rhs).value);
        }
        else
        {
            this->reset();
        }
    }
};
// Hierarchy layer adding a non-trivial copy constructor that delegates to Base::constructFrom().
// Selected by SelectCopy (below) when T is copy-constructible but not trivially so.
template <class Base>
struct NonTrivialCopy : Base
{
    using Base::Base;
    NonTrivialCopy() = default;
#if CARB_COMPILER_MSC // MSVC can evaluate the noexcept operator, but GCC errors when compiling it
    NonTrivialCopy(const NonTrivialCopy& from) noexcept(noexcept(Base::constructFrom(static_cast<const Base&>(from))))
#else // for GCC, use the same clause as Base::constructFrom
    // `decltype((from.value))` -- with the inner parentheses -- names the expression type
    // `const T&`, which is what constructFrom() actually receives here. (Unparenthesized,
    // `decltype(from.value)` names the member's *declared* type `T`, which would wrongly test
    // nothrow construction from an rvalue, i.e. the move path.)
    NonTrivialCopy(const NonTrivialCopy& from) noexcept(
        std::is_nothrow_constructible<typename Base::value_type, decltype((from.value))>::value)
#endif
    {
        Base::constructFrom(static_cast<const Base&>(from));
    }
};
// If T is copy-constructible and not trivially copy-constructible, select NonTrivialCopy,
// otherwise use the base OptionalConstructor (whose implicitly-defaulted copy suffices).
template <class Base, class... Types>
using SelectCopy =
    typename std::conditional_t<conjunction<std::is_copy_constructible<Types>...,
                                            negation<conjunction<std::is_trivially_copy_constructible<Types>...>>>::value,
                                NonTrivialCopy<Base>,
                                Base>;
// Hierarchy layer adding a non-trivial move constructor that delegates to constructFrom().
// Selected by SelectMove (below) when T is move-constructible but not trivially so.
template <class Base, class... Types>
struct NonTrivialMove : SelectCopy<Base, Types...>
{
    using BaseClass = SelectCopy<Base, Types...>;
    using BaseClass::BaseClass;
    NonTrivialMove() = default;
    NonTrivialMove(const NonTrivialMove&) = default;
#if CARB_COMPILER_MSC // MSVC can evaluate the noexcept operator, but GCC errors when compiling it
    NonTrivialMove(NonTrivialMove&& from) noexcept(noexcept(BaseClass::constructFrom(static_cast<Base&&>(from))))
#else // for GCC, use the same clause as Base::constructFrom
    // decltype of the unparenthesized member access names the declared type `T`;
    // is_nothrow_constructible<T, T> tests construction from an rvalue, matching the
    // move performed by the body below.
    NonTrivialMove(NonTrivialMove&& from) noexcept(
        std::is_nothrow_constructible<typename Base::value_type, decltype(static_cast<Base&&>(from).value)>::value)
#endif
    {
        BaseClass::constructFrom(static_cast<Base&&>(from));
    }
    NonTrivialMove& operator=(const NonTrivialMove&) = default;
    NonTrivialMove& operator=(NonTrivialMove&&) = default;
};
// If T is move-constructible and not trivially move-constructible, select NonTrivialMove,
// otherwise use the selected Copy struct. (Layered on SelectCopy so the behaviors compose.)
template <class Base, class... Types>
using SelectMove =
    typename std::conditional_t<conjunction<std::is_move_constructible<Types>...,
                                            negation<conjunction<std::is_trivially_move_constructible<Types>...>>>::value,
                                NonTrivialMove<Base, Types...>,
                                SelectCopy<Base, Types...>>;
// Hierarchy layer adding a non-trivial copy assignment that delegates to BaseClass::assignFrom().
// Selected by SelectCopyAssign (below) when T is copyable but copy assignment is not trivial.
template <class Base, class... Types>
struct NonTrivialCopyAssign : SelectMove<Base, Types...>
{
    using BaseClass = SelectMove<Base, Types...>;
    using BaseClass::BaseClass;
    NonTrivialCopyAssign() = default;
    NonTrivialCopyAssign(const NonTrivialCopyAssign&) = default;
    NonTrivialCopyAssign(NonTrivialCopyAssign&&) = default;
    // noexcept mirrors the assignFrom() call performed in the body.
    NonTrivialCopyAssign& operator=(const NonTrivialCopyAssign& from) noexcept(
        noexcept(BaseClass::assignFrom(static_cast<const Base&>(from))))
    {
        BaseClass::assignFrom(static_cast<const Base&>(from));
        return *this;
    }
    NonTrivialCopyAssign& operator=(NonTrivialCopyAssign&&) = default;
};
// Hierarchy layer that deletes copy assignment entirely; constructors and move assignment are
// inherited/defaulted. Selected by SelectCopyAssign when T is not copy-constructible-and-assignable.
template <class Base, class... Types>
struct DeletedCopyAssign : SelectMove<Base, Types...>
{
    using BaseClass = SelectMove<Base, Types...>;
    using BaseClass::BaseClass;
    DeletedCopyAssign() = default;
    DeletedCopyAssign(const DeletedCopyAssign&) = default;
    DeletedCopyAssign(DeletedCopyAssign&&) = default;
    DeletedCopyAssign& operator=(const DeletedCopyAssign&) = delete;
    DeletedCopyAssign& operator=(DeletedCopyAssign&&) = default;
};
// For selecting the proper copy-assign class, things get a bit more complicated:
// - If T is trivially destructible and trivially copy-constructible and trivially copy-assignable:
//   * We use the Move struct selected above (the implicitly-defaulted copy assignment suffices)
// - Otherwise, if T is copy-constructible and copy-assignable:
//   * We select the NonTrivialCopyAssign struct
// - If all else fails, the class is not copy-assignable, so select DeletedCopyAssign
template <class Base, class... Types>
using SelectCopyAssign = typename std::conditional_t<
    conjunction<std::is_trivially_destructible<Types>...,
                std::is_trivially_copy_constructible<Types>...,
                std::is_trivially_copy_assignable<Types>...>::value,
    SelectMove<Base, Types...>,
    typename std::conditional_t<conjunction<std::is_copy_constructible<Types>..., std::is_copy_assignable<Types>...>::value,
                                NonTrivialCopyAssign<Base, Types...>,
                                DeletedCopyAssign<Base, Types...>>>;
// Hierarchy layer adding a non-trivial move assignment that delegates to BaseClass::assignFrom().
// Selected by SelectMoveAssign (below) when T is movable but move assignment is not trivial.
template <class Base, class... Types>
struct NonTrivialMoveAssign : SelectCopyAssign<Base, Types...>
{
    using BaseClass = SelectCopyAssign<Base, Types...>;
    using BaseClass::BaseClass;
    NonTrivialMoveAssign() = default;
    NonTrivialMoveAssign(const NonTrivialMoveAssign&) = default;
    NonTrivialMoveAssign(NonTrivialMoveAssign&&) = default;
    NonTrivialMoveAssign& operator=(const NonTrivialMoveAssign&) = default;
    // NOTE: the cast inside the noexcept specification must match the one in the body
    // (`Base&&`, not `const Base&&`); a const rvalue would make assignFrom() deduce `const`
    // storage and compute the exception specification of the const/copy path instead of the
    // move path that the body actually executes.
    NonTrivialMoveAssign& operator=(NonTrivialMoveAssign&& from) noexcept(
        noexcept(BaseClass::assignFrom(static_cast<Base&&>(from))))
    {
        BaseClass::assignFrom(static_cast<Base&&>(from));
        return *this;
    }
};
// Hierarchy layer that deletes move assignment entirely; constructors and copy assignment are
// inherited/defaulted. Selected by SelectMoveAssign when T is not move-constructible-and-assignable.
template <class Base, class... Types>
struct DeletedMoveAssign : SelectCopyAssign<Base, Types...>
{
    using BaseClass = SelectCopyAssign<Base, Types...>;
    using BaseClass::BaseClass;
    DeletedMoveAssign() = default;
    DeletedMoveAssign(const DeletedMoveAssign&) = default;
    DeletedMoveAssign(DeletedMoveAssign&&) = default;
    DeletedMoveAssign& operator=(const DeletedMoveAssign&) = default;
    DeletedMoveAssign& operator=(DeletedMoveAssign&&) = delete;
};
// Selecting the proper move-assign struct is equally complicated:
// - If T is trivially destructible, trivially move-constructible and trivially move-assignable:
//   * We use the CopyAssign struct selected above (the implicitly-defaulted move assignment suffices)
// - If T is move-constructible and move-assignable:
//   * We select the NonTrivialMoveAssign struct
// - If all else fails, T is not move-assignable, so select DeletedMoveAssign
template <class Base, class... Types>
using SelectMoveAssign = typename std::conditional_t<
    conjunction<std::is_trivially_destructible<Types>...,
                std::is_trivially_move_constructible<Types>...,
                std::is_trivially_move_assignable<Types>...>::value,
    SelectCopyAssign<Base, Types...>,
    typename std::conditional_t<conjunction<std::is_move_constructible<Types>..., std::is_move_assignable<Types>...>::value,
                                NonTrivialMoveAssign<Base, Types...>,
                                DeletedMoveAssign<Base, Types...>>>;
// An alias for constructing our struct hierarchy to wrap T. Top of the selection chain:
// SelectMoveAssign -> SelectCopyAssign -> SelectMove -> SelectCopy -> Base, where each layer is
// present only when T requires that special member to be implemented non-trivially.
template <class Base, class... Types>
using SelectHierarchy = SelectMoveAssign<Base, Types...>;
// Helpers for determining which operators can be enabled
// Well-formed (as `int`) only when T is convertible to bool.
template <class T>
using EnableIfBoolConvertible = typename std::enable_if_t<std::is_convertible<T, bool>::value, int>;
// Each alias below is well-formed only when `const L& <op> const R&` is a valid expression whose
// result converts to bool; presumably used to constrain optional's comparison operators, which
// are declared elsewhere.
template <class L, class R>
using EnableIfComparableWithEqual =
    EnableIfBoolConvertible<decltype(std::declval<const L&>() == std::declval<const R&>())>;
template <class L, class R>
using EnableIfComparableWithNotEqual =
    EnableIfBoolConvertible<decltype(std::declval<const L&>() != std::declval<const R&>())>;
template <class L, class R>
using EnableIfComparableWithLess =
    EnableIfBoolConvertible<decltype(std::declval<const L&>() < std::declval<const R&>())>;
template <class L, class R>
using EnableIfComparableWithGreater =
    EnableIfBoolConvertible<decltype(std::declval<const L&>() > std::declval<const R&>())>;
template <class L, class R>
using EnableIfComparableWithLessEqual =
    EnableIfBoolConvertible<decltype(std::declval<const L&>() <= std::declval<const R&>())>;
template <class L, class R>
using EnableIfComparableWithGreaterEqual =
    EnableIfBoolConvertible<decltype(std::declval<const L&>() >= std::declval<const R&>())>;
} // namespace detail
} // namespace cpp
} // namespace carb
|
omniverse-code/kit/include/carb/tokens/TokensBindingsPython.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "../Framework.h"
#include "TokensUtils.h"
#include <memory>
#include <string>
#include <vector>
namespace carb
{
namespace tokens
{
namespace
{
//! Registers the carb.tokens Python bindings on module \p m.
//!
//! Exposes the resolve-flag constants and the ITokens interface methods (set_value,
//! set_initial_value, remove_token, exists, resolve). All bound calls release the GIL while
//! the native code runs.
inline void definePythonModule(py::module& m)
{
    using namespace carb::tokens;
    m.attr("RESOLVE_FLAG_NONE") = py::int_(kResolveFlagNone);
    m.attr("RESOLVE_FLAG_LEAVE_TOKEN_IF_NOT_FOUND") = py::int_(kResolveFlagLeaveTokenIfNotFound);
    defineInterfaceClass<ITokens>(m, "ITokens", "acquire_tokens_interface")
        .def("set_value", wrapInterfaceFunction(&ITokens::setValue), py::call_guard<py::gil_scoped_release>())
        .def("set_initial_value", &ITokens::setInitialValue, py::call_guard<py::gil_scoped_release>())
        .def("remove_token", &ITokens::removeToken, py::call_guard<py::gil_scoped_release>())
        .def("exists", wrapInterfaceFunction(&ITokens::exists), py::call_guard<py::gil_scoped_release>())
        .def("resolve",
             // Returns the resolved string, or Python None when resolution fails.
             // NOTE: the lambda must return py::object, not py::str -- py::str's converting
             // constructor applies str() to the handle, so returning py::none() through a
             // py::str return type would hand Python the literal string "None" on failure.
             [](ITokens* self, const std::string& str, ResolveFlags flags) -> py::object {
                 carb::tokens::ResolveResult result;
                 std::string resolvedString;
                 {
                     // Release the GIL for the duration of the native resolve call.
                     py::gil_scoped_release nogil;
                     resolvedString = carb::tokens::resolveString(self, str.c_str(), flags, &result);
                 }
                 if (result == ResolveResult::eSuccess)
                     return py::str(resolvedString);
                 else
                     return py::none();
             },
             py::arg("str"), py::arg("flags") = kResolveFlagNone)
        ;
}
} // namespace
} // namespace tokens
} // namespace carb
|
omniverse-code/kit/include/carb/tokens/TokensUtils.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief Implementation of utilities for \ref carb::tokens::ITokens.
#pragma once
#include "../InterfaceUtils.h"
#include "../logging/Log.h"
#include "ITokens.h"
#include <string>
#include <algorithm>
namespace carb
{
namespace tokens
{
/**
 * Helper for resolving a token string. The resolve result (resolve code) is placed in the optional parameter.
 *
 * @param tokens tokens interface (passing a null pointer will result in an error)
 * @param str string for token resolution (passing a null pointer will result in an error)
 * @param resolveFlags flags that modify token resolution process
 * @param resolveResult optional parameter for receiving resulting resolve code
 *
 * @return the resolved string on success, or an empty string on failure; check \p resolveResult
 *         to distinguish a failure from a successful resolution to an empty string
 */
inline std::string resolveString(const ITokens* tokens,
                                 const char* str,
                                 ResolveFlags resolveFlags = kResolveFlagNone,
                                 ResolveResult* resolveResult = nullptr)
{
    // Defaulting to an error result thus it's possible to just log an error message and return an empty string if
    // anything goes wrong
    if (resolveResult)
    {
        *resolveResult = ResolveResult::eFailure;
    }
    if (!tokens)
    {
        CARB_LOG_ERROR("Couldn't acquire ITokens interface.");
        return std::string();
    }
    if (!str)
    {
        CARB_LOG_ERROR("Can't resolve a null token string.");
        return std::string();
    }
    const size_t strLen = std::strlen(str);
    // First pass: ask the plugin for the resolved size (no NUL terminator -- std::string
    // manages its own).
    ResolveResult resResult;
    size_t resolvedStringSize = tokens->calculateDestinationBufferSize(
        str, strLen, StringEndingMode::eNoNullTerminator, resolveFlags, &resResult);
    if (resResult == ResolveResult::eFailure)
    {
        CARB_LOG_ERROR("Couldn't calculate required buffer size for token resolution of string: %s", str);
        return std::string();
    }
    // Successful resolution to an empty string
    if (resolvedStringSize == 0)
    {
        if (resolveResult)
        {
            *resolveResult = ResolveResult::eSuccess;
        }
        return std::string();
    }
    // C++11 guarantees that strings are continuous in memory
    std::string resolvedString;
    resolvedString.resize(resolvedStringSize);
    // Second pass: resolve directly into the string's storage, sized exactly by the first pass.
    const ResolveResult resolveResultLocal =
        tokens->resolveString(str, strLen, &resolvedString.front(), resolvedString.size(),
                              StringEndingMode::eNoNullTerminator, resolveFlags, nullptr);
    if (resolveResultLocal != ResolveResult::eSuccess)
    {
        CARB_LOG_ERROR("Couldn't successfully resolve provided string: %s", str);
        return std::string();
    }
    if (resolveResult)
    {
        *resolveResult = ResolveResult::eSuccess;
    }
    return resolvedString;
}
/**
 * Escapes a string so that no part of it participates in token resolution.
 *
 * Every '$' character is doubled ("$" -> "$$"), which the token parser treats as a literal '$'.
 *
 * @param str the string to escape (e.g. user-provided data that must not be interpreted as tokens)
 * @return a copy of \p str with every '$' doubled; unchanged content when it contains no '$'
 */
inline std::string escapeString(const std::string& str)
{
    constexpr char kSpecialChar = '$';
    const size_t specialCount = std::count(str.begin(), str.end(), kSpecialChar);
    if (specialCount == 0)
    {
        // Nothing to escape; hand back a plain copy.
        return str;
    }
    std::string escaped;
    escaped.reserve(str.length() + specialCount);
    size_t searchPos = 0;
    for (;;)
    {
        const size_t specialPos = str.find(kSpecialChar, searchPos);
        if (specialPos == std::string::npos)
        {
            // Copy the (possibly empty) trailing run and finish.
            escaped.append(str, searchPos, std::string::npos);
            break;
        }
        // Copy everything up to and including the special character, then duplicate it.
        escaped.append(str, searchPos, specialPos - searchPos + 1);
        escaped.push_back(kSpecialChar);
        searchPos = specialPos + 1;
    }
    return escaped;
}
} // namespace tokens
} // namespace carb
|
omniverse-code/kit/include/carb/tokens/ITokens.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief Implementation of `ITokens` interface
#pragma once
#include "../Interface.h"
namespace carb
{
//! Namespace for `ITokens`.
namespace tokens
{
/**
 * Possible result of resolving tokens.
 */
enum class ResolveResult
{
    eSuccess, //!< Result indicating success.
    eTruncated, //!< Result that indicates success, but the output was truncated (see ITokens::resolveString()).
    eFailure, //!< Result that indicates failure.
};
/**
 * Possible options for ending of the resolved string
 */
enum class StringEndingMode
{
    eNullTerminator, //!< Indicates that the resolved string is NUL-terminated.
    eNoNullTerminator //!< Indicates that the resolved string is not NUL-terminated.
};
/**
 * Flags for token resolution algorithm
 *
 * \see ITokens::resolveString()
 */
using ResolveFlags = uint32_t;
const ResolveFlags kResolveFlagNone = 0; //!< Default token resolution process
const ResolveFlags kResolveFlagLeaveTokenIfNotFound = 1; //!< If cannot resolve token in a string then leave it as is.
/**
* Interface for storing tokens and resolving strings containing them. Tokens are string pairs {name, value} that can be
* referenced in a string as `"some text ${token_name} some other text"`, where the token name starts with a sequence
* `"${"` and end with a first closing `"}"`.
*
* If a token with the name \<token_name\> has a defined value, then it will be substituted with its value.
* If the token does not have a defined value, an empty string will be used for the replacement. This interface will use
* the ISetting interface, if available, as storage and in such case tokens will be stored under the '/app/tokens' node.
*
* Note: The "$" symbol is considered to be special by the tokenizer and should be escaped by doubling it ("$" -> "$$")
* in order to be processed as just a symbol "$"
* Ex: "some text with $ sign" -> "some text with $$ sign"
*
* Single unescaped "$" signs are considered to be a bad practice to be used for token resolution but they are
* acceptable and will be resolved into single "$" signs and no warning will be given about it.
*
* Ex:
* "$" -> "$",
* "$$" -> "$",
* "$$$" -> "$$"
*
* It's better to use the helper function "escapeString" from the "TokensUtils.h" to produce a string
* that doesn't have any parts that could participate in tokenization. As a token name start with "${" and ends with the
* first encountered "}" it can contain "$" (same rules about escaping it apply) and "{" characters, however such cases
* will result in a warning being output to the log.
* Ex: for the string "${bar$${}" the token resolution process will consider the token name to be "bar${"
* (note that "$$" is reduced into a single "$") and a warning will be outputted into the log.
*
* Environment variables are automatically available as tokens, if defined. These are specified with the text
* `${env:<var name>}` where `<var name>` is the name of the environment variable. The `env:` prefix is a reserved name,
* so any call to \ref ITokens::setValue() or \ref ITokens::setInitialValue() with a name that starts with `env:` will
* be rejected. The environment variable is read when needed and not cached in any way. An undefined environment
* variable behaves as an undefined token.
*
* @thread_safety the interface's functions are not thread safe. It is responsibility of the user to use all necessary
* synchronization mechanisms if needed. All data passed into a plugin's function must be valid for the duration of the
* function call.
*/
struct ITokens
{
    CARB_PLUGIN_INTERFACE("carb::tokens::ITokens", 1, 0)
    /**
     * Sets a new value for the specified token; if the token didn't exist it will be created.
     *
     * Note: if the value is null then the token will be removed (see also the removeToken() helper). In that case
     * true is returned if the token was successfully deleted or didn't exist in the first place.
     *
     * @param name token name, not enclosed in "${" and "}". Passing a null pointer results in an error.
     * @param value new value for the token. Passing a null pointer deletes the token.
     *
     * @return true if the operation was successful; false if the token name was null or an error occurred during the
     * operation.
     */
    bool(CARB_ABI* setValue)(const char* name, const char* value);
    /**
     * Creates a token with the given name and value if it was non-existent; otherwise does nothing.
     *
     * @param name Name of a token. Passing a null pointer results in an error.
     * @param value Value of a token. Passing a null pointer does nothing.
     */
    void setInitialValue(const char* name, const char* value) const;
    /**
     * Deletes a token.
     *
     * Implemented in terms of setValue() with a null value.
     *
     * @param name token name, not enclosed in "${" and "}". Passing a null pointer results in an error.
     *
     * @return true if the operation was successful or a token with such a name didn't exist; false if the name is
     * null or an error occurred.
     */
    bool removeToken(const char* name) const;
    /**
     * Tries to resolve all tokens in the source string buffer and places the result into the destination buffer.
     * If destBufLen isn't large enough to contain the result then the result will be truncated.
     *
     * @param sourceBuf the source string buffer. Passing a null pointer results in an error.
     * @param sourceBufLen the length of the source string buffer.
     * @param destBuf the destination buffer. Passing a null pointer results in an error.
     * @param destBufLen the size of the destination buffer.
     * @param endingMode sets whether the result will have a null-terminator (in which case passing a zero
     * destBufLen results in an error) or not.
     * @param resolveFlags flags that modify the token resolution process.
     * @param[out] resolvedSize optional parameter. If the provided buffer was large enough and the operation
     * succeeded, then resolvedSize <= destBufLen and equals the number of bytes written to the buffer. If the
     * operation was successful but the output was truncated, then resolvedSize > destBufLen and equals the minimum
     * buffer size that can hold the fully resolved string. If the operation failed, the value of resolvedSize
     * is undetermined.
     *
     * @retval ResolveResult::eTruncated if the destination buffer was too small to contain the result (note that if
     * StringEndingMode::eNullTerminator was used the truncated result string will end with a null-terminator)
     * @retval ResolveResult::eFailure if an error occurred.
     * @retval ResolveResult::eSuccess will be returned if the function successfully wrote the whole resolve result
     * into the \p destBuf.
     */
    ResolveResult(CARB_ABI* resolveString)(const char* sourceBuf,
                                           size_t sourceBufLen,
                                           char* destBuf,
                                           size_t destBufLen,
                                           StringEndingMode endingMode,
                                           ResolveFlags resolveFlags,
                                           size_t* resolvedSize);
    /**
     * Calculates the minimum buffer size required to hold the result of resolving the input string buffer.
     *
     * @param sourceBuf the source string buffer. Passing a null pointer results in an error.
     * @param sourceBufLen the length of the source string buffer.
     * @param endingMode sets whether the result will have a null-terminator or not.
     * @param resolveFlags flags that modify the token resolution process.
     * @param[out] resolveResult optional parameter that will contain the result of the resolution attempted in order
     * to calculate the necessary size.
     *
     * @returns The calculated minimum size. In case of any error the function will return 0.
     */
    size_t(CARB_ABI* calculateDestinationBufferSize)(const char* sourceBuf,
                                                     size_t sourceBufLen,
                                                     StringEndingMode endingMode,
                                                     ResolveFlags resolveFlags,
                                                     ResolveResult* resolveResult);
    /**
     * Checks for the existence of a token.
     *
     * @param tokenName the name of the token to check for existence. Passing a null pointer results in an error.
     *
     * @return true if a token with the specified name exists; false is returned if an error occurs or there is no
     * token with such a name.
     */
    bool(CARB_ABI* exists)(const char* tokenName);
};
inline void ITokens::setInitialValue(const char* name, const char* value) const
{
    // Only create the token when no token with this name is present yet; a null value is ignored here,
    // since passing it to setValue() would be interpreted as a request to delete the token.
    const bool alreadyPresent = exists(name);
    if (!alreadyPresent && value != nullptr)
    {
        setValue(name, value);
    }
}
inline bool ITokens::removeToken(const char* name) const
{
    // Deletion is expressed through setValue(): a null value removes the token.
    const bool result = setValue(name, nullptr);
    return result;
}
} // namespace tokens
} // namespace carb
|
omniverse-code/kit/include/carb/tasking/ThreadPoolUtils.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief ThreadPoolWrapper definition file.
#pragma once
#include "../cpp/Tuple.h"
#include "../logging/Log.h"
#include "IThreadPool.h"
#include <future>
namespace carb
{
namespace tasking
{
#ifndef DOXYGEN_BUILD
namespace detail
{
//! Invokes a callable with a tuple of arguments and feeds its (non-void) result into a std::promise.
//! The result is forwarded as ReturnType&& so that movable return types are moved into the promise.
template <class ReturnType>
struct ApplyWithPromise
{
    //! @param promise receives the value produced by invoking @p f.
    //! @param f the callable to invoke (perfectly forwarded).
    //! @param t tuple of arguments unpacked via cpp::apply (perfectly forwarded).
    template <class Callable, class Tuple>
    void operator()(std::promise<ReturnType>& promise, Callable&& f, Tuple&& t)
    {
        promise.set_value(std::forward<ReturnType>(cpp::apply(std::forward<Callable>(f), std::forward<Tuple>(t))));
    }
};
//! Specialization for void-returning callables: invokes the callable, discards the (void) result and
//! signals the promise afterwards.
template <>
struct ApplyWithPromise<void>
{
    //! @param promise signaled (valueless) after @p f has run.
    //! @param f the callable to invoke.
    //! @param t tuple of arguments unpacked via cpp::apply (perfectly forwarded).
    //!
    //! The callable is taken as a forwarding reference for consistency with the primary template. The previous
    //! `Callable&` signature deduced `Callable` as a non-reference type, so `std::forward<Callable>(f)` cast the
    //! lvalue argument to an rvalue and invoked the callable as an rvalue — unlike the non-void path.
    template <class Callable, class Tuple>
    void operator()(std::promise<void>& promise, Callable&& f, Tuple&& t)
    {
        cpp::apply(std::forward<Callable>(f), std::forward<Tuple>(t));
        promise.set_value();
    }
};
} // namespace detail
#endif
/**
* Helper class for using IThreadPool API
*/
class ThreadPoolWrapper
{
public:
/**
* Constructor
*
* @param poolInterface The acquired IThreadPool interface.
* @param workerCount (optional) The number of worker threads to create. If 0 (default) is specified, the value
* returned from IThreadPool::getDefaultWorkerCount() is used.
*/
ThreadPoolWrapper(IThreadPool* poolInterface, size_t workerCount = 0) : m_interface(poolInterface)
{
if (m_interface == nullptr)
{
CARB_LOG_ERROR("IThreadPool interface used to create a thread pool wrapper is null.");
return;
}
if (workerCount == 0)
{
workerCount = m_interface->getDefaultWorkerCount();
}
m_pool = m_interface->createEx(workerCount);
if (m_pool == nullptr)
{
CARB_LOG_ERROR("Couldn't create a new thread pool.");
}
}
/**
* Returns the number of worker threads in the thread pool.
* @returns The number of worker threads.
*/
size_t getWorkerCount() const
{
if (!isValid())
{
CARB_LOG_ERROR("Attempt to call the 'getWorkerCount' method of an invalid thread pool wrapper.");
return 0;
}
return m_interface->getWorkerCount(m_pool);
}
/**
* Enqueues a <a href="https://en.cppreference.com/w/cpp/named_req/Callable">Callable</a> to run on a worker thread.
*
* @param task The callable object. May be a lambda, [member] function, functor, etc.
* @param args Optional <a href="https://en.cppreference.com/w/cpp/utility/functional/bind">std::bind</a>-style
* arguments to pass to the callable object.
* @returns A <a href="https://en.cppreference.com/w/cpp/thread/future">std::future</a> based on the return-type of
* the callable object. If enqueuing failed, `valid()` on the returned future will be false.
*/
template <class Callable, class... Args>
auto enqueueJob(Callable&& task, Args&&... args)
{
using ReturnType = typename cpp::invoke_result_t<Callable, Args...>;
using Future = std::future<ReturnType>;
using Tuple = std::tuple<std::decay_t<Args>...>;
struct Data
{
std::promise<ReturnType> promise{};
Callable f;
Tuple args;
Data(Callable&& f_, Args&&... args_) : f(std::forward<Callable>(f_)), args(std::forward<Args>(args_)...)
{
}
void callAndDelete()
{
detail::ApplyWithPromise<ReturnType>{}(promise, f, args);
delete this;
}
};
if (!isValid())
{
CARB_LOG_ERROR("Attempt to call the 'enqueueJob' method of an invalid thread pool wrapper.");
return Future{};
}
Data* pData = new (std::nothrow) Data{ std::forward<Callable>(task), std::forward<Args>(args)... };
if (!pData)
{
CARB_LOG_ERROR("ThreadPoolWrapper: No memory for job");
return Future{};
}
Future result = pData->promise.get_future();
if (CARB_LIKELY(m_interface->enqueueJob(
m_pool, [](void* userData) { static_cast<Data*>(userData)->callAndDelete(); }, pData)))
{
return result;
}
CARB_LOG_ERROR("ThreadPoolWrapper: failed to enqueue job");
delete pData;
return Future{};
}
/**
* Returns the number of jobs currently enqueued or executing in the ThreadPool.
*
* enqueueJob() increments this value and the value is decremented as jobs finish.
*
* @note This value changes by other threads and cannot be read atomically.
*
* @returns The number of jobs currently executing in the ThreadPool.
*/
size_t getCurrentlyRunningJobCount() const
{
if (!isValid())
{
CARB_LOG_ERROR("Attempt to call the 'getCurrentlyRunningJobCount' method of an invalid thread pool wrapper.");
return 0;
}
return m_interface->getCurrentlyRunningJobCount(m_pool);
}
/**
* Blocks the calling thread until all enqueued tasks have completed.
*/
void waitUntilFinished() const
{
if (!isValid())
{
CARB_LOG_ERROR("Attempt to call the 'waitUntilFinished' method of an invalid thread pool wrapper.");
return;
}
m_interface->waitUntilFinished(m_pool);
}
/**
* Returns true if the underlying ThreadPool is valid.
*
* @returns `true` if the underlying ThreadPool is valid; `false` otherwise.
*/
bool isValid() const
{
return m_pool != nullptr;
}
/**
* Destructor
*/
~ThreadPoolWrapper()
{
if (isValid())
{
m_interface->destroy(m_pool);
}
}
CARB_PREVENT_COPY_AND_MOVE(ThreadPoolWrapper);
private:
// ThreadPoolWrapper private members and functions
IThreadPool* m_interface = nullptr;
ThreadPool* m_pool = nullptr;
};
} // namespace tasking
} // namespace carb
|
omniverse-code/kit/include/carb/tasking/IThreadPool.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief IThreadPool definition file.
#pragma once
#include "../Interface.h"
namespace carb
{
namespace tasking
{
/**
* Opaque handle for a thread pool.
*/
class ThreadPool DOXYGEN_EMPTY_CLASS;
/**
* Defines the function for performing a user-provided job.
*
* @param jobData User provided data for the job, the memory must not be released until it no longer needed by the
* task.
*/
typedef void (*JobFn)(void* jobData);
/**
* Optional plugin providing helpful facilities for utilizing a pool of threads to perform basic small tasks.
*
* @warning It is not recommended to use IThreadPool in conjunction with ITasking; the latter is a much richer feature
* set and generally preferred over IThreadPool. IThreadPool is a simple thread pool with the ability to run individual
* tasks.
*
* @warning If multiple ThreadPool objects are used, caution must be taken to not overburden the system with too many
* threads.
*
* @note Prefer using ThreadPoolWrapper.
*/
struct IThreadPool
{
    CARB_PLUGIN_INTERFACE("carb::tasking::IThreadPool", 1, 0)
    /**
     * Creates a new thread pool with the number of worker threads specified by the caller.
     *
     * @param workerCount Required number of worker threads.
     *
     * @return The newly created thread pool.
     */
    ThreadPool*(CARB_ABI* createEx)(size_t workerCount);
    /**
     * Creates a new thread pool where the number of worker threads equals the value
     * returned by getDefaultWorkerCount().
     *
     * @return The newly created thread pool.
     */
    ThreadPool* create() const;
    /**
     * Destroys a previously created thread pool.
     *
     * @param threadPool Previously created thread pool.
     */
    void(CARB_ABI* destroy)(ThreadPool* threadPool);
    /**
     * Returns the default number of workers used for the creation of a new thread pool.
     *
     * @return The default number of workers.
     */
    size_t(CARB_ABI* getDefaultWorkerCount)();
    /**
     * Returns the number of worker threads in the thread pool.
     *
     * @param threadPool ThreadPool previously created with create() or createEx().
     * @returns The number of worker threads.
     */
    size_t(CARB_ABI* getWorkerCount)(ThreadPool* threadPool);
    /**
     * Adds a new task to be executed by the thread pool.
     *
     * @param threadPool Thread pool for execution of the job.
     * @param jobFunction User-provided function to be executed by a worker.
     * @param jobData User-provided data for the job; the memory must not be released until it is no longer needed by
     * the task.
     *
     * @return true if the task was successfully added to the thread pool.
     */
    bool(CARB_ABI* enqueueJob)(ThreadPool* threadPool, JobFn jobFunction, void* jobData);
    /**
     * Returns the number of tasks currently enqueued or executing in the thread pool.
     *
     * @param threadPool Thread pool to be inspected.
     *
     * @return The number of tasks currently enqueued or executing in the thread pool.
     */
    size_t(CARB_ABI* getCurrentlyRunningJobCount)(ThreadPool* threadPool);
    /**
     * Blocks execution of the current thread until the thread pool finishes all queued jobs.
     *
     * @param threadPool Thread pool to wait on.
     */
    void(CARB_ABI* waitUntilFinished)(ThreadPool* threadPool);
};
inline ThreadPool* IThreadPool::create() const
{
    // Equivalent to createEx() with the plugin-provided default worker count.
    const size_t defaultWorkers = getDefaultWorkerCount();
    return createEx(defaultWorkers);
}
} // namespace tasking
} // namespace carb
|
omniverse-code/kit/include/carb/tasking/ITasking.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.tasking interface definition file.
#pragma once
#include "../Interface.h"
#include "../InterfaceUtils.h"
#include "TaskingHelpers.h"
namespace carb
{
//! Namespace for *carb.tasking.plugin* and related utilities.
namespace tasking
{
/**
* Default TaskingDesc plugin starts with.
*/
inline TaskingDesc getDefaultTaskingDesc()
{
    // A value-initialized descriptor carries the plugin's default startup parameters.
    TaskingDesc desc{};
    return desc;
}
/**
* Defines a tasking plugin interface, acquired with carb::Framework::acquireInterface() when *carb.tasking.plugin* is
* loaded.
*
* ITasking is started automatically on plugin startup. It uses default TaskingDesc, see getDefaultTaskingDesc().
*
* Several @rstref{ISettings keys <tasking_settings>} exist to provide debug behavior and to override default startup
* behavior (but do not override a TaskingDesc provided to ITasking::changeParameters()).
*
* @thread_safety Unless otherwise specified, all functions in this interface can be called from multiple threads
* simultaneously.
*/
struct ITasking
{
// 0.1 - Initial version
// 0.2 - Thread pinning, sleep, suspending/waking (not ABI compatible with 0.1)
// 0.3 - Semaphore support, SharedMutex support
// 0.4 - ConditionVariable support
// 1.0 - Wait timeouts (git hash e13289c5a5)
// 1.1 - changeTaskPriority() / executeMainTasks()
// 1.2 - restart() -> changeParameters(); don't lose tasks when changing parameters
// 1.3 - Respect task priority when resuming tasks that have slept, waited, or unsuspended (not an API change)
// 1.4 - Stuck checking (not an API change)
// 1.5 - internalGroupCounters()
// 1.6 - createRecursiveMutex()
// 2.0 - ITasking 2.0 (git hash f68ae95da7)
// 2.1 - allocTaskStorage() / freeTaskStorage() / setTaskStorage() / getTaskStorage()
// 2.2 - beginTracking() / endTracking()
// 2.3 - internalNameTask()
// 2.4 - reloadFiberEvents()
CARB_PLUGIN_INTERFACE("carb::tasking::ITasking", 2, 4)
/**
* Changes the parameters under which the ITasking interface functions. This may stop and start threads, but will
* not lose any tasks in progress or queued.
*
* @note This function reloads all registered @ref IFiberEvents interfaces so they will start receiving
* notifications. However, if this is the only change desired it is recommended to use @ref reloadFiberEvents()
* instead.
*
* @thread_safety It is unsafe to add any additional tasks while calling this function. The caller must ensure that
* no new tasks are added until this function returns.
*
* @warning Calling this function from within a task context causes undefined behavior.
*
* @param desc The tasking plugin descriptor.
*/
void(CARB_ABI* changeParameters)(TaskingDesc desc);
/**
* Get TaskingDesc the plugin currently running with.
*
* @return The tasking plugin descriptor.
*/
const TaskingDesc&(CARB_ABI* getDesc)();
/**
* Creates a Counter with target value of zero.
*
* @warning Prefer using CounterWrapper instead.
*
* @return The counter created.
*/
Counter*(CARB_ABI* createCounter)();
/**
* Creates a counter with a specific target value.
*
* @warning Prefer using CounterWrapper instead.
*
* @param target The target value of the counter. Yielding on this counter will wait for this target.
* @return The counter created.
*/
Counter*(CARB_ABI* createCounterWithTarget)(uint32_t target);
/**
* Destroys the counter.
*
* @param counter A counter.
*/
void(CARB_ABI* destroyCounter)(Counter* counter);
/**
* Adds a task to the internal queue. Do not call this function directly; instead, use one of the helper functions
* such as addTask(), addSubTask() or addThrottledTask().
*
* @param task The task to queue.
* @param counter A counter to associate with this task. It will be incremented by 1.
* When the task completes, it will be decremented.
* @return A TaskContext that can be used to refer to this task
*/
//! @private
TaskContext(CARB_ABI* internalAddTask)(TaskDesc task, Counter* counter);
/**
* Adds a group of tasks to the internal queue
*
* @param tasks The tasks to queue.
* @param taskCount The number of tasks.
* @param counter A counter to associate with the task group as a whole.
* Initially it incremented by taskCount. When each task completes, it will be decremented by 1.
*/
void(CARB_ABI* addTasks)(TaskDesc* tasks, size_t taskCount, Counter* counter);
//! @private
TaskContext(CARB_ABI* internalAddDelayedTask)(uint64_t delayNs, TaskDesc desc, Counter* counter);
//! @private
void(CARB_ABI* internalApplyRange)(size_t range, ApplyFn fn, void* context);
/**
* Yields execution to another task until counter reaches its target value.
*
* Tasks invoking this call can resume on different thread. If the task must resume on the same thread, use
* PinGuard.
*
* @note deprecated Use wait() instead.
*
* @param counter The counter to check.
*/
CARB_DEPRECATED("Use wait() instead") void yieldUntilCounter(RequiredObject counter);
/**
* Yields execution to another task until counter reaches its target value or the timeout period elapses.
*
* Tasks invoking this call can resume on different thread. If the task must resume on the same thread, use
* PinGuard.
*
* @note Deprecated: Use wait_for() or wait_until() instead.
*
* @param counter The counter to check.
* @param timeoutNs The number of nanoseconds to wait. Pass kInfinite to wait forever or 0 to try immediately
* without waiting.
* @return true if the counter period has completed; false if the timeout period elapses.
*/
CARB_DEPRECATED("Use wait_for() or wait_until() instead.")
bool timedYieldUntilCounter(RequiredObject counter, uint64_t timeoutNs);
//! @private
bool(CARB_ABI* internalCheckCounter)(Counter* counter);
//! @private
uint32_t(CARB_ABI* internalGetCounterValue)(Counter* counter);
//! @private
uint32_t(CARB_ABI* internalGetCounterTarget)(Counter* counter);
//! @private
uint32_t(CARB_ABI* internalFetchAddCounter)(Counter* counter, uint32_t value);
//! @private
uint32_t(CARB_ABI* internalFetchSubCounter)(Counter* counter, uint32_t value);
//! @private
void(CARB_ABI* internalStoreCounter)(Counter* counter, uint32_t value);
/**
* Checks if counter is at the counter's target value
*
* @note Deprecated: The Counter interface is deprecated.
*
* @param c The counter to check.
* @return `true` if the counter is at the target value; `false` otherwise.
*/
CARB_DEPRECATED("The Counter interface is deprecated.") bool checkCounter(Counter* c)
{
return internalCheckCounter(c);
}
/**
* Retrieves the current value of the target. Note! Because of the threaded nature of counters, this
* value may have changed by another thread before the function returns.
*
* @note Deprecated: The Counter interface is deprecated.
*
* @param counter The counter.
* @return The current value of the counter.
*/
CARB_DEPRECATED("The Counter interface is deprecated.") uint32_t getCounterValue(Counter* counter)
{
return internalGetCounterValue(counter);
}
/**
* Gets the target value for the Counter
*
* @note Deprecated: The Counter interface is deprecated.
*
* @param counter The counter to check.
* @return The target value of the counter.
*/
CARB_DEPRECATED("The Counter interface is deprecated.") uint32_t getCounterTarget(Counter* counter)
{
return internalGetCounterTarget(counter);
}
/**
* Atomically adds a value to the counter and returns the value held previously.
*
* The fetchAdd operation on the counter will be atomic, but this function as a whole is not atomic.
*
* @note Deprecated: The Counter interface is deprecated.
*
* @param counter The counter.
* @param value The value to add to the counter.
* @return The value of the counter before the addition.
*/
CARB_DEPRECATED("The Counter interface is deprecated.") uint32_t fetchAddCounter(Counter* counter, uint32_t value)
{
return internalFetchAddCounter(counter, value);
}
/**
* Atomically subtracts a value from the counter and returns the value held previously.
*
* The fetchSub operation on the counter will be atomic, but this function as a whole is not atomic.
*
* @note Deprecated: The Counter interface is deprecated.
*
* @param counter The counter.
* @param value The value to subtract from the counter.
* @return The value of the counter before the addition.
*/
CARB_DEPRECATED("The Counter interface is deprecated.") uint32_t fetchSubCounter(Counter* counter, uint32_t value)
{
return internalFetchSubCounter(counter, value);
}
/**
* Atomically replaces the current value with desired on a counter.
*
* The store operation on the counter will be atomic, but this function as a whole is not atomic.
*
* @note Deprecated: The Counter interface is deprecated.
*
* @param counter The counter.
* @param value The value to load into to the counter.
*/
CARB_DEPRECATED("The Counter interface is deprecated.") void storeCounter(Counter* counter, uint32_t value)
{
return internalStoreCounter(counter, value);
}
/**
* Yields execution. Task invoking this call will be put in the very end of task queue, priority is ignored.
*/
void(CARB_ABI* yield)();
/**
* Causes the currently executing TaskContext to be "pinned" to the thread it is currently running on.
*
* @warning Do not call this function directly; instead use PinGuard.
*
* This function causes the current thread to be the only task thread that can run the current task. This is
* necessary in some cases where thread specificity is required (those these situations are NOT recommended for
* tasks): holding a mutex, or using thread-specific data, etc. Thread pinning is not efficient (the pinned thread
* could be running a different task causing delays for the current task to be resumed, and wakeTask() must wait to
* return until the pinned thread has been notified) and should therefore be avoided.
*
* Call unpinFromCurrentThread() to remove the pin, allowing the task to run on any thread.
*
* @note %All calls to pin a thread will issue a warning log message.
*
* @note It is assumed that the task is allowed to move to another thread during the pinning process, though this
* may not always be the case. Only after pinToCurrentThread() returns will a task be pinned. Therefore, make sure
* to call pinToCurrentThread() *before* any operation that requires pinning.
*
* @return true if the task was already pinned; false if the task was not pinned or if not called from Task Context
* (i.e. getTaskContext() would return kInvalidTaskContext)
*/
bool(CARB_ABI* pinToCurrentThread)();
/**
* Un-pins the currently executing TaskContext from the thread it is currently running on.
*
* @warning Do not call this function directly; instead use PinGuard.
*
* @return true if the task was successfully un-pinned; false if the task was not pinned or if not called from Task
* Context (i.e. getTaskContext() would return kInvalidTaskContext)
*/
bool(CARB_ABI* unpinFromCurrentThread)();
/**
* Creates a non-recursive mutex.
*
* @warning Prefer using MutexWrapper instead.
*
* @note Both createMutex() and createRecursiveMutex() return a Mutex object; it is up to the creator to ensure that
* the Mutex object is used properly. A Mutex created with createMutex() will call `std::terminate()` if recursively
* locked.
*
* @return The created non-recursive mutex.
*/
Mutex*(CARB_ABI* createMutex)();
/**
* Destroys a mutex.
*
     * @param mutex The mutex to destroy.
*/
void(CARB_ABI* destroyMutex)(Mutex* mutex);
/**
* Locks a mutex
*
* @param mutex The mutex to lock.
*/
void lockMutex(Mutex* mutex);
/**
* Locks a mutex or waits for the timeout period to expire.
*
* @note Attempting to recursively lock a mutex created with createMutex() will abort. Use a mutex created with
* createRecursiveMutex() to support recursive locking.
*
* @param mutex The mutex to lock.
* @param timeoutNs The relative timeout in nanoseconds. Specify kInfinite to wait forever or 0 to try locking
* without waiting.
* @returns true if the calling thread/fiber now has ownership of the mutex; false if the timeout period expired.
*/
bool(CARB_ABI* timedLockMutex)(Mutex* mutex, uint64_t timeoutNs);
/**
* Unlock a mutex
*
     * @param mutex The mutex to unlock.
*/
void(CARB_ABI* unlockMutex)(Mutex* mutex);
/**
* Sleeps for the given number of nanoseconds. Prefer using sleep_for() or sleep_until()
*
* @note This function is fiber-aware. If currently executing in a fiber, the fiber will be yielded until the
* requested amount of time has passed. If a thread is currently executing, then the thread will sleep.
*
* @param nanoseconds The amount of time to yield/sleep, in nanoseconds.
*/
void(CARB_ABI* sleepNs)(uint64_t nanoseconds);
/**
* If the calling thread is running in "task context", that is, a fiber executing a task previously queued with
* addTask(), this function returns a handle that can be used with suspendTask() and wakeTask().
*
* @return kInvalidTaskContext if the calling thread is not running within "task context"; otherwise, a TaskContext
* handle is returned that can be used with suspendTask() and wakeTask(), as well as anywhere a RequiredObject is
* used.
*/
TaskContext(CARB_ABI* getTaskContext)();
/**
* Suspends the current task. Does not return until wakeTask() is called with the task's TaskContext (see
* getTaskContext()).
*
* @note to avoid race-conditions between wakeTask() and suspendTask(), a wakeTask() that occurs before
* suspendTask() has been called will cause suspendTask() to return true immediately without waiting.
*
* @return true when wakeTask() is called. If the current thread is not running in "task context" (i.e.
* getTaskContext() would return kInvalidTaskContext), then this function returns false immediately.
*/
bool(CARB_ABI* suspendTask)();
/**
* Wakes a task previously suspended with suspendTask().
*
* @note to avoid race-conditions between wakeTask() and suspendTask(), a wakeTask() that occurs before
* suspendTask() has been called will cause suspendTask() to return true immediately without waiting. The wakeTask()
* function returns immediately and does not wait for the suspended task to resume.
*
* wakeTask() cannot be called on the current task context (false will be returned). Additional situations that will
* log (as a warning) and return false:
* - The task context given already has a pending wake
* - The task has finished
* - The task context given is sleeping or otherwise waiting on an event (cannot be woken)
* - The given TaskContext is not valid
*
* @param task The TaskContext (returned by getTaskContext()) for the task suspended with suspendTask().
* @return true if the task was woken properly. false if a situation listed above occurs.
*/
bool(CARB_ABI* wakeTask)(TaskContext task);
/**
* Blocks the current thread/task until the given Task has completed.
*
* Similar to yieldUntilCounter() but does not require a Counter object.
*
* @note Deprecated: Use wait() instead.
*
* @param task The TaskContext to wait on
* @return true if the wait was successful; false if the TaskContext has already expired or was invalid.
*/
CARB_DEPRECATED("Use wait() instead") bool waitForTask(TaskContext task);
//! @private
bool(CARB_ABI* internalTimedWait)(Object obj, uint64_t timeoutNs);
/**
* Checks the object specified in @p req to see if it is signaled.
*
* @param req The RequiredObject object to check.
* @returns `true` if the object is signaled; `false` if the object is invalid or not signaled.
*/
bool try_wait(RequiredObject req);
/**
* Blocks the calling thread or task until @p req is signaled.
*
* @param req The RequiredObject object to check.
*/
void wait(RequiredObject req);
/**
* Blocks the calling thread or task until @p req is signaled or @p dur has elapsed.
*
* @param dur The duration to wait for.
* @param req The RequiredObject object to check.
* @returns `true` if the object is signaled; `false` if the object is invalid or not signaled, or @p dur elapses.
*/
template <class Rep, class Period>
bool wait_for(std::chrono::duration<Rep, Period> dur, RequiredObject req);
/**
* Blocks the calling thread or task until @p req is signaled or the clock reaches @p when.
*
* @param when The time_point to wait until.
* @param req The RequiredObject object to check.
* @returns `true` if the object is signaled; `false` if the object is invalid or not signaled, or @p when is
* reached.
*/
template <class Clock, class Duration>
bool wait_until(std::chrono::time_point<Clock, Duration> when, RequiredObject req);
/**
* Creates a fiber-aware semaphore primitive.
*
* A semaphore is a gate that lets a certain number of tasks/threads through. This can also be used to throttle
* tasks (see addThrottledTask()). When the count of a semaphore goes negative tasks/threads will wait on the
* semaphore.
*
* @param value The starting value of the semaphore. Limited to INT_MAX. 0 means that any attempt to wait on the
* semaphore will block until the semaphore is released.
* @return A Semaphore object. When finished, dispose of the semaphore with destroySemaphore().
*
* @warning Prefer using SemaphoreWrapper instead.
*
* @note Semaphore can be used for @rstref{Throttling <tasking-throttling-label>} tasks.
*/
Semaphore*(CARB_ABI* createSemaphore)(unsigned value);
/**
* Destroys a semaphore object created by createSemaphore()
*
* @param sema The semaphore to destroy.
*/
void(CARB_ABI* destroySemaphore)(Semaphore* sema);
/**
* Releases (or posts, or signals) a semaphore.
*
* If a task/thread is waiting on the semaphore when it is released,
* the task/thread is un-blocked and will be resumed. If no tasks/threads are waiting on the semaphore, the next
* task/thread that attempts to wait will resume immediately.
*
* @param sema The semaphore to release.
* @param count The number of tasks/threads to release.
*/
void(CARB_ABI* releaseSemaphore)(Semaphore* sema, unsigned count);
/**
* Waits on a semaphore until it has been signaled.
*
* If the semaphore has already been signaled, this function returns immediately.
*
* @param sema The semaphore to wait on.
*/
void waitSemaphore(Semaphore* sema);
/**
* Waits on a semaphore until it has been signaled or the timeout period expires.
*
* If the semaphore has already been signaled, this function returns immediately.
*
* @param sema The semaphore to wait on.
* @param timeoutNs The relative timeout period in nanoseconds. Specify kInfinite to wait forever, or 0 to test
* immediately without waiting.
* @returns true if the semaphore count was decremented; false if the timeout period expired.
*/
bool(CARB_ABI* timedWaitSemaphore)(Semaphore* sema, uint64_t timeoutNs);
/**
* Creates a fiber-aware SharedMutex primitive.
*
* @warning Prefer using SharedMutexWrapper instead.
*
* A SharedMutex (also known as a read/write mutex) allows either multiple threads/tasks to share the primitive, or
* a single thread/task to own the primitive exclusively. Threads/tasks that request ownership of the primitive,
* whether shared or exclusive, will be blocked until they can be granted the access level requested. SharedMutex
* gives priority to exclusive access, but will not block additional shared access requests when exclusive access
* is requested.
*
* @return A SharedMutex object. When finished, dispose of the SharedMutex with destroySharedMutex().
*/
SharedMutex*(CARB_ABI* createSharedMutex)();
/**
* Requests shared access on a SharedMutex object.
*
* Use unlockSharedMutex() to release the shared lock. SharedMutex is not recursive.
*
* @param mutex The SharedMutex object.
*/
void lockSharedMutex(SharedMutex* mutex);
/**
* Requests shared access on a SharedMutex object with a timeout period.
*
* Use unlockSharedMutex() to release the shared lock. SharedMutex is not recursive.
*
* @param mutex The SharedMutex object.
* @param timeoutNs The relative timeout period in nanoseconds. Specify kInfinite to wait forever or 0 to test
* immediately without waiting.
* @returns true if the shared lock succeeded; false if timed out.
*/
bool(CARB_ABI* timedLockSharedMutex)(SharedMutex* mutex, uint64_t timeoutNs);
/**
* Requests exclusive access on a SharedMutex object.
*
* Use unlockSharedMutex() to release the exclusive lock. SharedMutex is not recursive.
*
* @param mutex The SharedMutex object.
*/
void lockSharedMutexExclusive(SharedMutex* mutex);
/**
* Requests exclusive access on a SharedMutex object with a timeout period.
*
* Use unlockSharedMutex() to release the exclusive lock. SharedMutex is not recursive.
*
* @param mutex The SharedMutex object.
* @param timeoutNs The relative timeout period in nanoseconds. Specify kInfinite to wait forever or 0 to test
* immediately without waiting.
* @returns true if the exclusive lock succeeded; false if timed out.
*/
bool(CARB_ABI* timedLockSharedMutexExclusive)(SharedMutex* mutex, uint64_t timeoutNs);
/**
* Releases a shared or an exclusive lock on a SharedMutex object.
*
* @param mutex The SharedMutex object.
*/
void(CARB_ABI* unlockSharedMutex)(SharedMutex* mutex);
/**
* Destroys a SharedMutex previously created with createSharedMutex().
*
* @param mutex The SharedMutex object to destroy.
*/
void(CARB_ABI* destroySharedMutex)(SharedMutex* mutex);
/**
* Creates a fiber-aware ConditionVariable primitive.
*
* @warning Prefer using ConditionVariableWrapper instead.
*
* ConditionVariable is a synchronization primitive that, together with a Mutex, blocks one or more threads or tasks
* until a condition becomes true.
*
* @return The ConditionVariable object. Destroy with destroyConditionVariable() when finished.
*/
ConditionVariable*(CARB_ABI* createConditionVariable)();
/**
* Destroys a previously-created ConditionVariable object.
*
* @param cv The ConditionVariable to destroy
*/
void(CARB_ABI* destroyConditionVariable)(ConditionVariable* cv);
/**
* Waits on a ConditionVariable object until it is notified. Prefer using the helper function,
* waitConditionVariablePred().
*
* The given Mutex must match the Mutex passed in by all other threads/tasks waiting on the ConditionVariable, and
* must be locked by the current thread/task. While waiting, the Mutex is unlocked. When the thread/task is notified
* the Mutex is re-locked before returning to the caller. ConditionVariables are allowed to spuriously wake up, so
* best practice is to check the variable in a loop and sleep if the variable still does not match desired.
*
* @param cv The ConditionVariable to wait on.
* @param m The Mutex that is locked by the current thread/task.
*/
void waitConditionVariable(ConditionVariable* cv, Mutex* m);
/**
* Waits on a ConditionVariable object until it is notified or the timeout period expires. Prefer using the helper
* function, timedWaitConditionVariablePred().
*
* The given Mutex must match the Mutex passed in by all other threads/tasks waiting on the ConditionVariable, and
* must be locked by the current thread/task. While waiting, the Mutex is unlocked. When the thread/task is notified
* the Mutex is re-locked before returning to the caller. ConditionVariables are allowed to spuriously wake up, so
* best practice is to check the variable in a loop and sleep if the variable still does not match desired.
*
* @param cv The ConditionVariable to wait on.
* @param m The Mutex that is locked by the current thread/task.
* @param timeoutNs The relative timeout period in nanoseconds. Specify kInfinite to wait forever or 0 to test
* immediately without waiting.
* @returns true if the condition variable was notified; false if the timeout period expired.
*/
bool(CARB_ABI* timedWaitConditionVariable)(ConditionVariable* cv, Mutex* m, uint64_t timeoutNs);
/**
* Wakes one thread/task currently waiting on the ConditionVariable.
*
* @note Having the Mutex provided to waitConditionVariable() locked while calling this function is recommended
* but not required.
*
* @param cv The condition variable to notify
*/
void(CARB_ABI* notifyConditionVariableOne)(ConditionVariable* cv);
/**
* Wakes all threads/tasks currently waiting on the ConditionVariable.
*
* @note Having the Mutex provided to waitConditionVariable() locked while calling this function is recommended
* but not required.
*
* @param cv The condition variable to notify
*/
void(CARB_ABI* notifyConditionVariableAll)(ConditionVariable* cv);
/**
     * Changes a task's priority.
*
* @note This can be used to change a task to execute on the main thread when it next resumes when using
* Priority::eMain. If called from within the context of the running task, the task immediately suspends itself
* until resumed on the main thread with the next call to executeMainTasks(), at which point this function will
* return.
*
* @param ctx The \ref TaskContext returned by \ref getTaskContext() or \ref Future::task_if().
* @param newPrio The Priority to change the task to.
* @returns `true` if the priority change took effect; `false` if the TaskContext is invalid.
*/
bool(CARB_ABI* changeTaskPriority)(TaskContext ctx, Priority newPrio);
/**
* Executes all tasks that have been queued with Priority::eMain until they finish or yield.
*
* @note Scheduled tasks (addTaskIn() / addTaskAt()) with Priority::eMain will only be executed during the next
* executeMainTasks() call after the requisite time has elapsed.
*/
void(CARB_ABI* executeMainTasks)();
// Intended for internal use only; only for the RequiredObject object.
// NOTE: The Counter returned from this function is a one-shot counter that is only intended to be passed as a
// RequiredObject. It is immediately released.
//! @private
enum GroupType
{
eAny,
eAll,
};
//! @private
Counter*(CARB_ABI* internalGroupObjects)(GroupType type, Object const* counters, size_t count);
/**
* Creates a recursive mutex.
*
* @warning Prefer using RecursiveMutexWrapper instead.
*
* @note Both createMutex() and createRecursiveMutex() return a Mutex object; it is up to the creator to ensure that
* the Mutex object is used properly. A Mutex created with createMutex() will call `std::terminate()` if recursively
* locked.
*
* @return The created recursive mutex.
*/
Mutex*(CARB_ABI* createRecursiveMutex)();
/**
* Attempts to cancel an outstanding task.
*
* If the task has already been started, has already been canceled or has completed, `false` is returned.
*
* If `true` is returned, then the task is guaranteed to never start, but every other side effect is as if the task
* completed. That is, any Counter objects that were passed to addTask() will be decremented; any blocking calls to
* waitForTask() will return `true`. The Future object for this task will no longer wait, but any attempt to read a
* non-`void` value from it will call `std::terminate()`. If the addTask() call provided a TaskDesc::cancel member,
* it will be called in the context of the calling thread and will finish before tryCancelTask() returns true.
*
* @param task The \ref TaskContext returned by \ref getTaskContext() or \ref Future::task_if().
     * @returns `true` if the task was successfully canceled and state reset as described above. `false` if the task
     * cannot be canceled because it has already started, has already been canceled, or has already finished.
*/
bool(CARB_ABI* tryCancelTask)(TaskContext task);
//! @private
bool(CARB_ABI* internalFutexWait)(const void* addr, const void* compare, size_t size, uint64_t timeoutNs);
//! @private
unsigned(CARB_ABI* internalFutexWakeup)(const void* addr, unsigned count);
/**
* Attempts to allocate task storage, which is similar to thread-local storage but specific to a task.
*
* Allocates a "key" for Task Storage. A value can be stored at this key location ("slot") that is specific to each
* task. When the task finishes, @p fn is executed for any non-`nullptr` value stored in that slot.
*
* Values can be stored in the Task Storage slot with setTaskStorage() and getTaskStorage().
*
* When Task Storage is no longer needed, use freeTaskStorage() to return the slot to the system.
*
* @warning The number of slots are very limited. If no slots are available, kInvalidTaskStorageKey is returned.
*
* @param fn (Optional) A destructor function called when a task finishes with a non-`nullptr` value in the
* allocated slot. The value stored with setTaskStorage() is passed to the destructor. If a destructor is not
* desired, `nullptr` can be passed.
* @returns An opaque TaskStorageKey representing the slot for the requested Task Storage data. If no slots are
* available, kInvalidTaskStorageKey is returned.
*/
TaskStorageKey(CARB_ABI* allocTaskStorage)(TaskStorageDestructorFn fn);
/**
* Frees a Task Storage slot.
*
* @note Any associated destructor function registered with allocTaskStorage() will not be called for any data
* present in currently running tasks. Once freeTaskStorage() returns, the destructor function registered with
* allocTaskStorage() will not be called for any data on any tasks.
*
* @param key The Task Storage key previously allocated with allocTaskStorage().
*/
void(CARB_ABI* freeTaskStorage)(TaskStorageKey key);
/**
* Stores a value at a slot in Task Storage for the current task.
*
* The destructor function passed to allocTaskStorage() will be called with any non-`nullptr` values remaining in
* Task Storage at the associated @p key when the task finishes.
*
* @warning This function can only be called from task context, otherwise `false` is returned.
* @param key The Task Storage key previously allocated with allocTaskStorage().
* @param value A value to store at the Task Storage slot described by @p key for the current task only.
* @return `true` if the value was stored; `false` otherwise.
*/
bool(CARB_ABI* setTaskStorage)(TaskStorageKey key, void* value);
/**
* Retrieves a value at a slot in Task Storage for the current task.
*
* The destructor function passed to allocTaskStorage() will be called with any non-`nullptr` values remaining in
* Task Storage at the associated @p key when the task finishes.
*
* @warning This function can only be called from task context, otherwise `nullptr` is returned.
* @param key The Task Storage key previously allocated with allocTaskStorage().
* @returns The value previously passed to setTaskStorage(), or `nullptr` if not running in task context or a value
* was not previously passed to setTaskStorage() for the current task.
*/
void*(CARB_ABI* getTaskStorage)(TaskStorageKey key);
// Do not call directly; use ScopedTracking instead.
// Returns a special tracking object that MUST be passed to endTracking().
//! @private
Object(CARB_ABI* beginTracking)(Object const* trackers, size_t numTrackers);
// Do not call directly; use ScopedTracking instead.
//! @private
void(CARB_ABI* endTracking)(Object tracker);
/**
* Retrieves debug information about a specific task.
*
* @note This information is intended for debug only and should not affect application state or decisions in the
* application.
*
* @warning Since carb.tasking is an inherently multi-threaded API, the values presented as task debug information
* may have changed in a worker thread in the short amount of time between when they were generated and when they
* were read by the application. As such, the debug information was true at a previous point in time and should not
* be considered necessarily up-to-date.
*
* @param task The TaskContext to retrieve information about.
* @param[out] out A structure to fill with debug information about @p task. The TaskDebugInfo::sizeOf field must be
* pre-filled by the caller. May be `nullptr` to determine if @p task is valid.
* @returns `true` if the TaskContext was valid and @p out (if non-`nullptr`) was filled with known information
* about @p task. `false` if @p out specified an unknown size or @p task does not refer to a valid task.
*/
bool(CARB_ABI* getTaskDebugInfo)(TaskContext task, TaskDebugInfo* out);
/**
* Walks all current tasks and calls a callback function with debug info for each.
*
* @note This information is intended for debug only and should not affect application state or decisions in the
* application.
*
* @warning Since carb.tasking is an inherently multi-threaded API, the values presented as task debug information
* may have changed in a worker thread in the short amount of time between when they were generated and when they
* were read by the application. As such, the debug information was true at a previous point in time and should not
* be considered necessarily up-to-date.
*
* @param info A structure to fill with debug information about tasks encountered during the walk. The
* TaskDebugInfo::sizeOf field must be pre-filled by the caller.
* @param fn A function to call for each task encountered. The function is called repeatedly with a different task
* each time, until all tasks have been visited or the callback function returns `false`.
* @param context Application-specific context information that is passed directly to each invocation of @p fn.
*/
bool(CARB_ABI* walkTaskDebugInfo)(TaskDebugInfo& info, TaskDebugInfoFn fn, void* context);
//! @private
void(CARB_ABI* internalApplyRangeBatch)(size_t range, size_t batchHint, ApplyBatchFn fn, void* context);
//! @private
void(CARB_ABI* internalBindTrackers)(Object required, Object const* ptrackes, size_t numTrackers);
//! @private
void(CARB_ABI* internalNameTask)(TaskContext task, const char* name, bool dynamic);
/**
* Instructs ITasking to reload all IFiberEvents interfaces.
*
* The @ref IFiberEvents interface is used by @ref ITasking to notify listeners that are interested in fiber-switch
* events. All @ref IFiberEvents interfaces are queried from the @ref carb::Framework by @ref ITasking only at
* startup, or when @ref changeParameters() is called, or when `reloadFiberEvents()` is called.
*
* Unlike @ref changeParameters(), this function is safe to call from within a task, or from multiple threads
* simultaneously, and tasks can be added while this function is executing.
*
* @note This function is a task system synchronization point, requiring all task threads to synchronize and pause
* before reloading @ref IFiberEvents interfaces. Generally this happens rapidly, but if a task thread is busy, the
* entire tasking system will wait for the task to finish or enter a wait state before reloading @ref IFiberEvents
* interfaces.
*/
void(CARB_ABI* reloadFiberEvents)();
///////////////////////////////////////////////////////////////////////////
// Helper functions
/**
* Yields execution to another task until `counter == value`.
*
     * A task invoking this call will resume on the same thread due to thread pinning. Thread pinning is not efficient.
* See pinToCurrentThread() for details.
*
* @param counter The counter to check.
*/
void yieldUntilCounterPinThread(RequiredObject counter);
/**
* Checks @p pred in a loop until it returns true, and waits on a ConditionVariable if @p pred returns false.
*
* @param cv The ConditionVariable to wait on
* @param m The Mutex associated with the ConditionVariable. Must be locked by the calling thread/task.
* @param pred A function-like predicate object in the form `bool(void)`. waitConditionVariablePred() returns when
* @p pred returns true.
*/
template <class Pred>
void waitConditionVariablePred(ConditionVariable* cv, Mutex* m, Pred&& pred)
{
while (!pred())
{
this->waitConditionVariable(cv, m);
}
}
/**
* Checks @p pred in a loop until it returns true or the timeout period expires, and waits on a ConditionVariable
* if @p pred returns false.
*
* @param cv The ConditionVariable to wait on
* @param m The Mutex associated with the ConditionVariable. Must be locked by the calling thread/task.
* @param timeoutNs The relative timeout period in nanoseconds. Specify @ref kInfinite to wait forever or 0 to test
* immediately without waiting.
     * @param pred A function-like predicate object in the form `bool(void)`. timedWaitConditionVariablePred() returns
     * when @p pred returns true.
* @returns `true` if the predicate returned `true`; `false` if the timeout period expired
*/
template <class Pred>
bool timedWaitConditionVariablePred(ConditionVariable* cv, Mutex* m, uint64_t timeoutNs, Pred&& pred)
{
while (!pred())
if (!this->timedWaitConditionVariable(cv, m, timeoutNs))
return false;
return true;
}
/**
* Executes a task synchronously.
*
* @note To ensure that the task executes in task context, the function is called directly if already in task
* context. If called from non-task context, @p f is executed by a call to addTask() but this function does not
* return until the subtask is complete.
*
* @param priority The priority of the task to execute. Only used if not called in task context.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value.
* @param args Arguments to pass to @p f.
* @return The return value of @p f.
*/
template <class Callable, class... Args>
auto awaitSyncTask(Priority priority, Callable&& f, Args&&... args);
/**
* Runs the given function-like object as a task.
*
* @param priority The priority of the task to execute.
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class... Args>
auto addTask(Priority priority, Trackers&& trackers, Callable&& f, Args&&... args);
/**
* Adds a task to the internal queue.
*
* @note Deprecated: The other addTask() (and variant) functions accept lambdas and function-like objects, and are
* designed to simplify adding tasks and add tasks succinctly. Prefer using those functions.
*
* @param desc The TaskDesc describing the task.
* @param counter A counter to associate with this task. It will be incremented by 1.
* When the task completes, it will be decremented.
* @return A TaskContext that can be used to refer to this task
*/
CARB_DEPRECATED("Use a C++ addTask() function") TaskContext addTask(TaskDesc desc, Counter* counter)
{
return this->internalAddTask(desc, counter);
}
/**
* Runs the given function-like object as a task when a Semaphore is signaled.
*
* @param throttler (optional) A Semaphore used to throttle the number of tasks that can run concurrently. The task
* waits until the semaphore is signaled (released) before starting, and then signals the semaphore after the task
* has executed.
* @param priority The priority of the task to execute.
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class... Args>
auto addThrottledTask(Semaphore* throttler, Priority priority, Trackers&& trackers, Callable&& f, Args&&... args);
/**
* Runs the given function-like object as a task once a Counter reaches its target.
*
* @param requiredObject (optional) An object convertible to RequiredObject (such as a task or Future).
* that will, upon completing, trigger the execution of this task.
* @param priority The priority of the task to execute.
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class... Args>
auto addSubTask(RequiredObject requiredObject, Priority priority, Trackers&& trackers, Callable&& f, Args&&... args);
/**
* Runs the given function-like object as a task once a Counter reaches its target and when a Semaphore is signaled.
*
* @param requiredObject (optional) An object convertible to RequiredObject (such as a task or Future).
* that will, upon completing, trigger the execution of this task.
* @param throttler (optional) A semaphore used to throttle the number of tasks that can run concurrently. Once
* requiredObject becomes signaled, the task waits until the semaphore is signaled (released) before starting, and
* then signals the semaphore after the task has executed.
* @param priority The priority of the task to execute.
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class... Args>
auto addThrottledSubTask(RequiredObject requiredObject,
Semaphore* throttler,
Priority priority,
Trackers&& trackers,
Callable&& f,
Args&&... args);
/**
* Adds a task to occur after a specific duration has passed.
*
* @param dur The duration to wait for. The task is not started until this duration elapses.
* @param priority The priority of the task to execute
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class Rep, class Period, class... Args>
auto addTaskIn(const std::chrono::duration<Rep, Period>& dur,
Priority priority,
Trackers&& trackers,
Callable&& f,
Args&&... args);
/**
* Adds a task to occur at a specific point in time
*
* @param when The point in time at which to begin the task
* @param priority The priority of the task to execute
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class Clock, class Duration, class... Args>
auto addTaskAt(const std::chrono::time_point<Clock, Duration>& when,
Priority priority,
Trackers&& trackers,
Callable&& f,
Args&&... args);
/**
* Processes a range from `[0..range)` calling a functor for each index, potentially from different threads.
*
* @note This function does not return until @p f has been called (and returned) on every index from [0..
* @p range)
* @warning Since @p f can be called from multiple threads simultaneously, all operations it performs must
* be thread-safe. Additional consideration must be taken since mutable captures of any lambdas or passed in
* @p args will be accessed simultaneously by multiple threads so care must be taken to ensure thread safety.
* @note Calling this function recursively will automatically scale down the parallelism in order to not overburden
* the system.
* @note As there is overhead to calling \p f repeatedly, it is more efficient to use \ref applyRangeBatch() with
* `batchHint = 0` and a `f` that handles multiple indexes on one invocation.
*
* See the @rstref{additional documentation <tasking-parallel-for>} for `applyRange`.
*
* @param range The number of times to call @p f.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that is repeatedly called until
* all indexes in `[0..range)` have been processed, potentially from different threads. It is invoked with
* parameters `f(args..., index)` where `index` is within the range `[0..range)`.
* @param args Arguments to pass to @p f
*/
template <class Callable, class... Args>
void applyRange(size_t range, Callable f, Args&&... args);
/**
* Processes a range from `[0..range)` calling a functor for batches of indexes, potentially from different threads.
*
* @note This function does not return until @p f has been called (and returned) for every index from
* `[0..range)`
* @warning Since @p f can be called from multiple threads simultaneously, all operations it performs must
* be thread-safe. Additional consideration must be taken since mutable captures of any lambdas or passed in
* @p args will be accessed simultaneously by multiple threads so care must be taken to ensure thread safety.
* @note Calling this function recursively will automatically scale down the parallelism in order to not overburden
* the system.
*
* See the @rstref{additional documentation <tasking-parallel-for>} for `applyRange`.
*
* @param range The number of times to call @p f.
* @param batchHint A recommendation of batch size to determine the range of indexes to pass to @p f for processing.
* A value of 0 uses an internal heuristic to divide work, which is recommended in most cases. This value is a hint
* to the internal heuristic and therefore \p f may be invoked with a different range size.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that is repeatedly called until
* all indexes in `[0..range)` have been processed, potentially from different threads. It is invoked with
* parameters `f(args..., startIndex, endIndex)` where `[startIndex..endIndex)` is the range of indexes that must be
* processed by that invocation of `f`. Note that `endIndex` is a past-the-end index and must not actually be
* processed by that invocation of `f`.
* @param args Arguments to pass to @p f
*/
template <class Callable, class... Args>
void applyRangeBatch(size_t range, size_t batchHint, Callable f, Args&&... args);
/**
* Processes a range from [begin..end) calling a functor for each index, potentially from different threads.
*
* @note This function does not return until @p f has been called (and returned) on every index from [begin..
* @p end)
* @warning Since @p f can be called from multiple threads simultaneously, all operations it performs must
* be thread-safe. Additional consideration must be taken since mutable captures of any lambdas or passed in
* @p args will be accessed simultaneously by multiple threads so care must be taken to ensure thread safety.
* @note Calling this function recursively will automatically scale down the parallelism in order to not overburden
* the system.
*
* @param begin The starting value passed to @p f
* @param end The ending value. Every T(1) step in [begin, end) is passed to @p f
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value.
* The index value from [begin..end) is passed as the last parameter (after any passed @p args).
* @param args Arguments to pass to @p f
*/
template <class T, class Callable, class... Args>
void parallelFor(T begin, T end, Callable f, Args&&... args);
/**
* Processes a stepped range from [begin..end) calling a functor for each step, potentially from different threads.
*
* @note This function does not return until @p f has been called (and returned) on every index from [begin..
* @p end)
* @warning Since @p f can be called from multiple threads simultaneously, all operations it performs must
* be thread-safe. Additional consideration must be taken since mutable captures of any lambdas or passed in
* @p args will be accessed simultaneously by multiple threads so care must be taken to ensure thread safety.
* @note Calling this function recursively will automatically scale down the parallelism in order to not overburden
* the system.
*
* @param begin The starting value passed to @p f
* @param end The ending value. Every @p step in [begin, end) is passed to @p f
* @param step The step size to determine every value passed to @p f
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value.
* The stepped value from [begin..end) is passed as the last parameter (after any passed @p args).
* @param args Arguments to pass to @p f
*/
template <class T, class Callable, class... Args>
void parallelFor(T begin, T end, T step, Callable f, Args&&... args);
/**
* Causes the current thread or task to sleep for the specified time.
*
* @note This function is fiber-aware. If currently executing in a fiber, the fiber will be yielded until the
* requested amount of time has passed. If a thread is currently executing, then the thread will sleep.
*
* @param dur The duration to sleep for
*/
template <class Rep, class Period>
void sleep_for(const std::chrono::duration<Rep, Period>& dur)
{
sleepNs(detail::convertDuration(dur));
}
/**
* Causes the current thread or task to sleep until the specified time.
*
* @note This function is fiber-aware. If currently executing in a fiber, the fiber will be yielded until the
* requested amount of time has passed. If a thread is currently executing, then the thread will sleep.
*
* @param tp The absolute time point to sleep until
*/
template <class Clock, class Duration>
void sleep_until(const std::chrono::time_point<Clock, Duration>& tp)
{
sleepNs(detail::convertAbsTime(tp));
}
/**
* A fiber-safe futex implementation: if @p val equals @p compare, the thread or task sleeps until woken.
*
* @warning Futexes are complicated and error-prone. Prefer using higher-level synchronization primitives.
*
* @param val The atomic value to check.
* @param compare The value to compare against. If @p val matches this, then the calling thread or task sleeps until
* futexWakeup() is called.
*/
template <class T>
void futexWait(const std::atomic<T>& val, T compare)
{
bool b = internalFutexWait(&val, &compare, sizeof(T), kInfinite);
CARB_ASSERT(b);
CARB_UNUSED(b);
}
/**
* A fiber-safe futex implementation: if @p val equals @p compare, the thread or task sleeps until woken or the
* timeout period expires.
*
* @warning Futexes are complicated and error-prone. Prefer using higher-level synchronization primitives.
*
* @param val The atomic value to check.
* @param compare The value to compare against. If @p val matches this, then the calling thread or task sleeps until
* futexWakeup() is called.
* @param dur The maximum duration to wait.
* @returns `true` if @p val doesn't match @p compare or if futexWakeup() was called; `false` if the timeout period
* expires.
*/
template <class T, class Rep, class Period>
bool futexWaitFor(const std::atomic<T>& val, T compare, std::chrono::duration<Rep, Period> dur)
{
return internalFutexWait(&val, &compare, sizeof(T), detail::convertDuration(dur));
}
/**
 * A fiber-safe futex implementation: if @p val equals @p compare, the calling thread or task sleeps until it is
 * woken or the given clock time is reached.
 *
 * @warning Futexes are complicated and error-prone. Prefer using higher-level synchronization primitives.
 *
 * @param val The atomic value to check.
 * @param compare The value compared against @p val; on a match the caller sleeps until futexWakeup() is called.
 * @param when The clock time to wait until.
 * @returns `true` if @p val doesn't match @p compare or if futexWakeup() was called; `false` if the clock time is
 *     reached.
 */
template <class T, class Clock, class Duration>
bool futexWaitUntil(const std::atomic<T>& val, T compare, std::chrono::time_point<Clock, Duration> when)
{
    // Normalize the absolute deadline once, then defer to the shared wait implementation.
    const auto deadline = detail::convertAbsTime(when);
    return internalFutexWait(&val, &compare, sizeof(T), deadline);
}
/**
 * Wakes threads or tasks waiting in futexWait(), futexWaitFor() or futexWaitUntil().
 *
 * @warning Futexes are complicated and error-prone. Prefer using higher-level synchronization primitives.
 *
 * @param val The same `val` that was passed to futexWait(), futexWaitFor() or futexWaitUntil().
 * @param count The maximum number of threads or tasks to wake up; pass `UINT_MAX` to wake all waiters.
 * @returns The number of threads or tasks that were waiting and have now been woken.
 */
template <class T>
unsigned futexWakeup(const std::atomic<T>& val, unsigned count)
{
    const unsigned numWoken = internalFutexWakeup(&val, count);
    return numWoken;
}
/**
* Binds any number of \ref Tracker objects to the given \ref RequiredObject. Effectively allows adding trackers to
* a given object.
*
* Previously this was only achievable through a temporary task:
* ```cpp
* // Old way: a task that would bind `taskGroup` to `requiredObject`
* tasking->addSubTask(requiredObject, Priority::eDefault, { taskGroup }, []{});
* // New way: direct binding:
* tasking->bindTrackers(requiredObject, { taskGroup });
* ```
* The previous method wasted time in that one of the task threads would eventually have to pop the task from the
* queue and run an empty function. Calling `bindTrackers()` does not waste this time.
*
* However, there are some "disadvantages." The `addSubTask()` method would allocate a \ref TaskContext, return a
* \ref Future, and could be canceled. These features were seldom needed, hence this function.
*
* @param requiredObject An object convertible to RequiredObject (such as a task or Future). The given \p trackers
* will be bound to this required object.
* @param trackers A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
*/
void bindTrackers(RequiredObject requiredObject, Trackers&& trackers);
/**
 * Sets a name for a task for debugging purposes, similar to how threads can be named.
 *
 * This function is optimized for literal strings: passing a string literal as @p name stores just the pointer (a
 * literal is guaranteed to live for the duration of the program), whereas a dynamic string is copied.
 *
 * The task name is visible in the debugger as @rstref{debug information <showing-all-tasks>} for a task, and can
 * be read back through @ref getTaskDebugInfo().
 *
 * @note It is often easier to name the task as it's created by passing a task name as a @ref Tracker object to
 * @ref addTask() or the other task creation functions.
 *
 * @thread_safety It is safe to set a task name from multiple threads, though inadvisable. Reading the task name via
 *     @ref getTaskDebugInfo() while it is being changed in a different thread is not strongly ordered and may
 *     result in an empty string being read, or random bytes as the name string, but will not result in a crash.
 * @tparam T A type that is convertible to `const char*`. See @p name below.
 * @param task The @ref TaskContext to name. If this is not a valid task, or the task has already completed or has
 *     been cancelled, nothing happens.
 * @param name Either a `const char*` (dynamic string) or a `const char (&)[N]` (literal string) as the string name.
 *     May be `nullptr` to un-set a task name. Dynamic strings will be copied before the call returns. Literal
 *     strings will be retained by pointer value.
 */
template <class T, std::enable_if_t<std::is_convertible<T, const char*>::value, bool> = false>
void nameTask(TaskContext task, T&& name)
{
    // Literal strings (array references) are detected at compile time; only dynamic strings need copying.
    constexpr bool isLiteral = detail::is_literal_string<T>::value;
    internalNameTask(task, name, !isLiteral);
}
};
/**
* Causes the currently executing TaskContext to be "pinned" to the thread it is currently running on until PinGuard is
* destroyed.
*
* Appropriately handles recursive pinning. This class causes the current thread to be the only task thread that can run
* the current task. This is necessary in some cases where thread specificity is required (though these situations are
* NOT recommended for tasks): holding a mutex, or using thread-specific data, etc. Thread pinning is not efficient (the
* pinned thread could be running a different task causing delays for the current task to be resumed, and wakeTask()
* must wait to return until the pinned thread has been notified) and should therefore be avoided.
*
* @note It is assumed that the task is allowed to move to another thread during the pinning process, though this may
* not always be the case. Only after the PinGuard is constructed will a task be pinned. Therefore, make sure to
* construct PinGuard *before* any operation that requires pinning.
*/
class PinGuard
{
public:
    /**
     * Constructs a PinGuard and enters the "pinned" scope.
     */
    PinGuard() : m_alreadyPinned(carb::getCachedInterface<ITasking>()->pinToCurrentThread())
    {
    }
    /**
     * Constructs a PinGuard and enters the "pinned" scope.
     * @note Deprecated: ITasking no longer needed.
     */
    CARB_DEPRECATED("ITasking no longer needed.")
    PinGuard(ITasking*) : m_alreadyPinned(carb::getCachedInterface<ITasking>()->pinToCurrentThread())
    {
    }
    /**
     * Destructs a PinGuard and leaves the "pinned" scope.
     */
    ~PinGuard()
    {
        // If the task was pinned before this guard was constructed (recursive pinning), the outer
        // scope owns the un-pin; otherwise this guard is responsible for it.
        if (m_alreadyPinned)
            return;
        carb::getCachedInterface<ITasking>()->unpinFromCurrentThread();
    }
private:
    bool m_alreadyPinned; //!< `true` if the task was already pinned when this guard was constructed.
};
} // namespace tasking
} // namespace carb
#include "ITasking.inl"
|
omniverse-code/kit/include/carb/tasking/TaskingTypes.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.tasking type definitions
#pragma once
#include "../Defines.h"
namespace carb
{
namespace tasking
{
/**
* Used to create dependencies between tasks and to wait for a set of tasks to finish.
*
* @note Prefer using CounterWrapper.
*
* @see ITasking::createCounter(), ITasking::createCounterWithTarget(), ITasking::destroyCounter(),
* ITasking::yieldUntilCounter(), ITasking::timedYieldUntilCounter(), ITasking::checkCounter(),
* ITasking::getCounterValue(), ITasking::getCounterTarget(), ITasking::fetchAddCounter(), ITasking::fetchSubCounter(),
* ITasking::storeCounter()
*/
class Counter DOXYGEN_EMPTY_CLASS;
/**
* A fiber-aware mutex: a synchronization primitive for mutual exclusion. Only one thread/fiber can "own" the mutex at
* a time.
*
* @note Prefer using MutexWrapper.
*
* @see ITasking::createMutex(), ITasking::destroyMutex(), ITasking::lockMutex(), ITasking::timedLockMutex(),
* ITasking::unlockMutex(), ITasking::createRecursiveMutex()
*/
class Mutex DOXYGEN_EMPTY_CLASS;
/**
* A fiber-aware semaphore: a synchronization primitive that limits to N threads/fibers.
*
* @note Prefer using SemaphoreWrapper.
*
* @see ITasking::createSemaphore(), ITasking::destroySemaphore(), ITasking::releaseSemaphore(),
* ITasking::waitSemaphore(), ITasking::timedWaitSemaphore()
*/
class Semaphore DOXYGEN_EMPTY_CLASS;
/**
* A fiber-aware shared_mutex: a synchronization primitive that functions as a multiple-reader/single-writer lock.
*
* @note Prefer using SharedMutexWrapper.
*
* @see ITasking::createSharedMutex(), ITasking::lockSharedMutex(), ITasking::timedLockSharedMutex(),
* ITasking::lockSharedMutexExclusive(), ITasking::timedLockSharedMutexExclusive(), ITasking::unlockSharedMutex(),
* ITasking::destroySharedMutex()
*/
class SharedMutex DOXYGEN_EMPTY_CLASS;
/**
* A fiber-aware condition_variable: a synchronization primitive that, together with a Mutex, blocks one or more threads
* or tasks until a condition becomes true.
*
* @note Prefer using ConditionVariableWrapper.
*
* @see ITasking::createConditionVariable(), ITasking::destroyConditionVariable(), ITasking::waitConditionVariable(),
* ITasking::timedWaitConditionVariable(), ITasking::notifyConditionVariableOne(),
* ITasking::notifyConditionVariableAll()
*/
class ConditionVariable DOXYGEN_EMPTY_CLASS;
struct ITasking;
/**
* A constant for ITasking wait functions indicating "infinite" timeout
*/
constexpr uint64_t kInfinite = uint64_t(-1);
/**
* Defines a task priority.
*/
enum class Priority
{
eLow, //!< Low priority. Tasks will be executed after higher priority tasks.
eMedium, //!< Medium priority.
eHigh, //!< High priority. Tasks will be executed before lower priority tasks.
eMain, //!< A special priority for tasks that are only executed during ITasking::executeMainTasks()
eCount, //!< The number of Priority classes
// Aliases
eDefault = eMedium, //!< Alias for eMedium priority.
};
/**
* Object type for Object.
*
* @note These are intended to be used only by helper classes such as RequiredObject.
*/
enum class ObjectType
{
eNone, //!< Null/no object.
eCounter, //!< Object::data refers to a Counter*.
eTaskContext, //!< Object::data refers to a TaskContext.
ePtrTaskContext, //!< Object::data refers to a TaskContext*.
eTaskGroup, //!< Object::data is a pointer to a std::atomic_size_t. @see TaskGroup
eSharedState, //!< Object::data is a pointer to a detail::SharedState. Not used internally by carb.tasking.
eFutex1, //!< Object::data is a pointer to a std::atomic_uint8_t. Signaled on zero.
eFutex2, //!< Object::data is a pointer to a std::atomic_uint16_t. Signaled on zero.
eFutex4, //!< Object::data is a pointer to a std::atomic_uint32_t. Signaled on zero.
eFutex8, //!< Object::data is a pointer to a std::atomic_uint64_t. Signaled on zero.
eTrackerGroup, //!< Object::data is a pointer to an internal tracking object.
eTaskName, //!< Object::data is a `const char*` to be copied and used as a task name.
eTaskNameLiteral, //!< Object::data is a `const char*` that can be retained because it is a literal.
};
/**
* The function to execute as a task.
*
* @param taskArg The argument passed to ITasking::addTask() variants.
*/
using OnTaskFn = void (*)(void* taskArg);
/**
* The function executed by ITasking::applyRange()
*
* @param index The ApplyFn is called once for every integer @p index value from 0 to the range provided to
* ITasking::applyRange().
* @param taskArg The argument passed to ITasking::applyRange().
*/
using ApplyFn = void (*)(size_t index, void* taskArg);
/**
* The function executed by ITasking::applyRangeBatch()
*
* @note This function differs from \ref ApplyFn in that it must handle a contiguous range of indexes determined by
* `[startIndex, endIndex)`.
* @warning The item at index \p endIndex is \b not to be processed by this function. In other words, the range handled
* by this function is:
* ```cpp
* for (size_t i = startIndex; i != endIndex; ++i)
* array[i]->process();
* ```
* @param startIndex The initial index that must be handled by this function call.
* @param endIndex The after-the-end index representing the range of indexes that must be handled by this function call.
* The item at this index is after-the-end of the assigned range and <strong>must not be processed</strong>.
* @param taskArg The argument passed to ITasking::applyRangeBatch().
*/
using ApplyBatchFn = void (*)(size_t startIndex, size_t endIndex, void* taskArg);
/**
* A destructor function for a Task Storage slot.
*
* This function is called when a task completes with a non-`nullptr` value in the respective Task Storage slot.
* @see ITasking::allocTaskStorage()
* @param arg The non-`nullptr` value stored in a task storage slot.
*/
using TaskStorageDestructorFn = void (*)(void* arg);
/**
* An opaque handle representing a Task Storage slot.
*/
using TaskStorageKey = size_t;
/**
* Represents an invalid TaskStorageKey.
*/
constexpr TaskStorageKey kInvalidTaskStorageKey = size_t(-1);
/**
* An opaque handle that is used with getTaskContext(), suspendTask() and wakeTask().
*/
using TaskContext = size_t;
/**
* A specific value for TaskContext that indicates a non-valid TaskContext.
*/
constexpr TaskContext kInvalidTaskContext = 0;
/**
* The absolute maximum number of fibers that ITasking will create.
*/
constexpr uint32_t kMaxFibers = 1048575;
/**
* A generic @rstref{ABI-safe <abi-compatibility>} representation of multiple types.
*/
struct Object
{
ObjectType type; //!< The ObjectType of the represented type.
void* data; //!< Interpreted based on the ObjectType provided.
};
/**
 * Defines a task descriptor.
 */
struct TaskDesc
{
    /// Must be set to sizeof(TaskDesc).
    size_t size{ sizeof(TaskDesc) };
    /// The task function to execute
    OnTaskFn task;
    /// The argument passed to the task function
    void* taskArg;
    /// The priority assigned to the task
    Priority priority;
    /// If not nullptr, then the task will only start when this counter reaches its target value. Specifying the counter
    /// here is more efficient than having the task function yieldUntilCounter().
    Object requiredObject;
    /// If waitSemaphore is not nullptr, then the task will wait on the semaphore before starting. This can be used to
    /// throttle tasks. If requiredObject is also specified, then the semaphore is not waited on until requiredObject
    /// has reached its target value. Specifying the semaphore here is more efficient than having the task function
    /// wait on the semaphore.
    Semaphore* waitSemaphore;
    /// Optional. An OnTaskFn that is executed only when ITasking::tryCancelTask() successfully cancels the task. Called
    /// in the context of ITasking::tryCancelTask(). Typically provided to destroy taskArg.
    OnTaskFn cancel;
    // Internal only
    //! @private
    Object const* trackers{ nullptr };
    //! @private
    size_t numTrackers{ 0 };
    /// Constructor.
    ///
    /// @param task_ The task function to execute; may be `nullptr` for an empty descriptor.
    /// @param taskArg_ The argument passed to @p task_ when the task executes.
    /// @param priority_ The priority assigned to the task.
    ///     NOTE(review): the default here is Priority::eLow, not Priority::eDefault (eMedium) -- confirm intentional.
    /// @param requiredCounter_ Optional Counter; if non-`nullptr`, the task only starts once the counter reaches its
    ///     target value (stored in `requiredObject` as ObjectType::eCounter).
    /// @param waitSemaphore_ Optional Semaphore waited on before the task starts (after @p requiredCounter_, if any).
    /// @param cancel_ Optional function executed only when ITasking::tryCancelTask() successfully cancels the task.
    constexpr TaskDesc(OnTaskFn task_ = nullptr,
                       void* taskArg_ = nullptr,
                       Priority priority_ = Priority::eLow,
                       Counter* requiredCounter_ = nullptr,
                       Semaphore* waitSemaphore_ = nullptr,
                       OnTaskFn cancel_ = nullptr)
        : task(task_),
          taskArg(taskArg_),
          priority(priority_),
          requiredObject{ ObjectType::eCounter, requiredCounter_ },
          waitSemaphore(waitSemaphore_),
          cancel(cancel_)
    {
    }
};
/**
* Defines a tasking plugin descriptor.
*/
struct TaskingDesc
{
/**
* The size of the fiber pool, limited to kMaxFibers.
*
* Every task must be assigned a fiber before it can execute. A fiber is like a thread stack, but carb.tasking can
* choose when the fibers run, as opposed to threads where the OS schedules them.
*
* A value of 0 means to use kMaxFibers.
*/
uint32_t fiberCount;
/**
* The number of worker threads.
*
* A value of 0 means to use \ref carb::thread::hardware_concurrency().
*/
uint32_t threadCount;
/**
* The optional array of affinity values for every thread.
*
* If set to `nullptr`, affinity is not set. Otherwise it must contain `threadCount` number of elements. Each
* affinity value is a CPU index in the range [0 - `carb::thread::hardware_concurrency()`)
*/
uint32_t* threadAffinity;
/**
* The stack size per fiber. 0 indicates to use the system default.
*/
uint64_t stackSize;
};
/**
* Debug state of a task.
*/
enum class TaskDebugState
{
Pending, //!< The task has unmet pre-requisites and cannot be started yet.
New, //!< The task has passed all pre-requisites and is waiting to be assigned to a task thread.
Running, //!< The task is actively running on a task thread.
Waiting, //!< The task has been started but is currently waiting and is not running on a task thread.
Finished, //!< The task has finished or has been canceled.
};
/**
* Defines debug information about a task retrieved by ITasking::getTaskDebugInfo() or ITasking::walkTaskDebugInfo().
*
* @note This information is intended for debug only and should not affect application state or decisions in the
* application.
*
* @warning Since carb.tasking is an inherently multi-threaded API, the values presented as task debug information
* may have changed in a worker thread in the short amount of time between when they were generated and when they were
* read by the application. As such, the debug information was true at a previous point in time and should not be
* considered necessarily up-to-date.
*/
struct TaskDebugInfo
{
//! Size of this struct, used for versioning.
size_t sizeOf{ sizeof(TaskDebugInfo) };
//! The TaskContext handle for the task.
TaskContext context{};
//! The state of the task.
TaskDebugState state{};
//! The task function for this task that was submitted to ITasking::addTask() (or variant function), if known. May
//! be `nullptr` if the task has finished or was canceled.
OnTaskFn task{};
//! The task argument for this task that was submitted to ITasking::addTask() (or variant function), if known. May
//! be `nullptr` if the task has finished or was canceled.
void* taskArg{};
//! Input: the maximum number of frames that can be stored in the memory pointed to by the `creationCallstack`
//! member.
//! Output: the number of frames that were stored in the memory pointed to by the `creationCallstack` member.
size_t numCreationFrames{ 0 };
//! The callstack that called ITasking::addTask() (or variant function). The callstack is only available if
//! carb.tasking is configured to capture callstacks with setting */plugins/carb.tasking.plugin/debugTaskBacktrace*.
//!
//! @note If this value is desired, prior to calling ITasking::getTaskDebugInfo() set this member to a buffer that
//! will be filled by the ITasking::getTaskDebugInfo() function. Set `numCreationFrames` to the number of frames
//! that can be contained in the buffer. After calling ITasking::getTaskDebugInfo(), this member will contain the
//! available creation callstack frames and `numCreationFrames` will be set to the number of frames that could be
//! written.
void** creationCallstack{ nullptr };
//! Input: the maximum number of frames that can be stored in the memory pointed to by the `waitingCallstack`
//! member.
//! Output: the number of frames that were stored in the memory pointed to by the `waitingCallstack` member.
size_t numWaitingFrames{ 0 };
//! The callstack of the task when waiting. This is only captured if carb.tasking is configured to capture
//! callstacks with setting */plugins/carb.tasking.plugin/debugTaskBacktrace* and if `state` is
//! TaskDebugState::Waiting.
//!
//! @warning Capturing this value is somewhat unsafe as debug information is not stored in a way that will impede
//! task execution whatsoever (i.e. with synchronization), therefore information is gathered from a running task
//! without stopping it. As such, reading the waiting callstack may produce bad data and in extremely rare cases
//! cause a crash. If the state changes while gathering info, `state` may report TaskDebugState::Waiting but
//! `numWaitingFrames` may be `0` even though some data was written to the buffer pointed to by `waitingCallstack`.
//!
//! @note If this value is desired, prior to calling ITasking::getTaskDebugInfo() set this member to a buffer that
//! will be filled by the ITasking::getTaskDebugInfo() function. Set `numWaitingFrames` to the number of frames that
//! can be contained in the buffer. After calling ITasking::getTaskDebugInfo(), this member will contain the
//! available waiting callstack frames and `numWaitingFrames` will be set to the number of frames that could be
//! written.
void** waitingCallstack{ nullptr };
//! Input: the maximum number of characters that can be stored in the memory pointed to by the `taskName` member.
//! Output: the number of characters written (including the NUL terminator) to the memory pointed to by the
//! `taskName` member.
size_t taskNameSize{ 0 };
//! A optional buffer that will be filled with the task name if provided.
//!
//! @note If this value is desired, prior to calling ITasking::getTaskDebugInfo() set this member to a buffer that
//! will be filled by the ITasking::getTaskDebugInfo() function. Set `taskNameSize` to the number of characters that
//! can be contained in the buffer. After calling ITasking::getTaskDebugInfo(), this member will contain the
//! NUL-terminated task name and `taskNameSize` will be set to the number of characters that could be written
//! (including the NUL-terminator).
char* taskName{ nullptr };
};
//! Callback function for ITasking::walkTaskDebugInfo().
//! @param info The TaskDebugInfo structure passed to ITasking::walkTaskDebugInfo(), filled with information about a
//! task.
//! @param context The `context` field passed to ITasking::walkTaskDebugInfo().
//! @return `true` if walking tasks should continue; `false` to terminate walking tasks.
using TaskDebugInfoFn = bool (*)(const TaskDebugInfo& info, void* context);
#ifndef DOXYGEN_BUILD
namespace detail
{
template <class T>
struct GenerateFuture;
template <class T>
class SharedState;
} // namespace detail
struct Trackers;
struct RequiredObject;
template <class T>
class Promise;
template <class T>
class SharedFuture;
#endif
/**
* A Future is a counterpart to a Promise. It is the receiving end of a one-way, one-time asynchronous communication
* channel for transmitting the result of an asynchronous operation.
*
* Future is very similar to <a href="https://en.cppreference.com/w/cpp/thread/future">std::future</a>
*
* Communication starts by creating a Promise. The Promise has an associated Future that can be retrieved once via
* Promise::get_future(). The Promise and the Future both reference a "shared state" that is used to communicate the
* result. When the result is available, it is set through Promise::set_value() (or the promise can be broken through
* Promise::setCanceled()), at which point the shared state becomes Ready and the Future will be able to retrieve the
* value through Future::get() (or determine cancellation via Future::isCanceled()).
*
* Task functions like ITasking::addTask() return a Future where the Promise side is the return value from the callable
* passed when the task is created.
*
* Future is inherently a "read-once" object. Once Future::get() is called, the Future becomes invalid. However,
* SharedFuture can be used (created via Future::share()) to retain the value. Many threads can wait on a SharedFuture
* and access the result simultaneously through SharedFuture::get().
*
* There are three specializations of Future:
* * Future<T>: The base specialization, used to communicate objects between tasks/threads.
* * Future<T&>: Reference specialization, used to communicate references between tasks/threads.
* * Future<void>: Void specialization, used to communicate stateless events between tasks/threads.
*
* The `void` specialization of Future is slightly different:
* * Future<void> does not have Future::isCanceled(); cancellation state cannot be determined.
*/
template <class T = void>
class Future
{
public:
/**
* Creates a future in an invalid state (valid() would return false).
*/
constexpr Future() noexcept = default;
/**
* Destructor.
*/
~Future();
/**
* Futures are movable.
*/
Future(Future&& rhs) noexcept;
/**
* Futures are movable.
*/
Future& operator=(Future&& rhs) noexcept;
/**
* Tests to see if this Future is valid.
*
* @returns true if get() and wait() are supported; false otherwise
*/
bool valid() const noexcept;
/**
* Checks to see if a value can be read from this Future
*
* @warning Undefined behavior to call this if valid() == `false`.
* @returns true if a value can be read from this Future; false if the value is not yet ready
*/
bool try_wait() const;
/**
* Waits until a value can be read from this Future
* @warning Undefined behavior to call this if valid() == `false`.
*/
void wait() const;
/**
* Waits until a value can be read from this Future, or the timeout period expires.
*
* @warning Undefined behavior to call this if valid() == `false`.
* @param dur The relative timeout period.
* @returns true if a value can be read from this Future; false if the timeout period expires before the value can
* be read
*/
template <class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& dur) const;
/**
* Waits until a value can be read from this Future, or the timeout period expires.
*
* @warning Undefined behavior to call this if valid() == `false`.
* @param when The absolute timeout period.
* @returns true if a value can be read from this Future; false if the timeout period expires before the value can
* be read
*/
template <class Clock, class Duration>
bool wait_until(const std::chrono::time_point<Clock, Duration>& when) const;
/**
* Waits until the future value is ready and returns the value. Resets the Future to an invalid state.
*
* @warning This function will call `std::terminate()` if the underlying task has been canceled with
* ITasking::tryCancelTask() or the Promise was broken. Use isCanceled() to determine if the value is safe to read.
*
* @returns The value passed to Promise::set_value().
*/
T get();
/**
* Returns whether the Promise has been broken (or if this Future represents a task, the task has been canceled).
*
* @warning Undefined behavior to call this if valid() == `false`.
* @note The `void` specialization of Future does not have this function.
* @returns `true` if the task has been canceled; `false` if the task is still pending or has a valid value to read.
*/
bool isCanceled() const;
/**
* Transfers the Future's shared state (if any) to a SharedFuture and leaves `*this` invalid (valid() == `false`).
* @returns A SharedFuture with the same shared state as `*this`.
*/
SharedFuture<T> share();
/**
* Returns a valid TaskContext if this Future represents a task.
*
* @note Futures can be returned from \ref ITasking::addTask() and related functions or from
* \ref Promise::get_future(). Only Future objects returned from \ref ITasking::addTask() will return a valid
* pointer from task_if().
*
* @returns A pointer to a TaskContext if this Future was created from \ref ITasking::addTask() or related
* functions; `nullptr` otherwise. The pointer is valid as long as the Future exists and the result from
* \ref valid() is `true`.
*/
const TaskContext* task_if() const;
/**
* Convertible to RequiredObject.
*/
operator RequiredObject() const;
/**
* Syntactic sugar around ITasking::addSubTask() that automatically passes the value from get() into `Callable` and
* resets the Future to an invalid state.
*
* @warning This resets the Future to an invalid state since the value is being consumed by the sub-task.
*
* @note This can be used to "chain" tasks together.
*
* @warning If the dependent task is canceled then the sub-task will call `std::terminate()`. When canceling the
* dependent task you must first cancel the sub-task.
*
* @warning For non-`void` specializations, it is undefined behavior to call this if valid() == `false`.
*
* @param prio The priority of the task to execute.
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value.
* The Callable object must take the Future's `T` type as its last parameter.
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class... Args>
auto then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args);
private:
template <class U>
friend struct detail::GenerateFuture;
template <class U>
friend class Promise;
template <class U>
friend class SharedFuture;
CARB_PREVENT_COPY(Future);
constexpr Future(detail::SharedState<T>* state) noexcept;
Future(TaskContext task, detail::SharedState<T>* state) noexcept;
detail::SharedState<T>* m_state{ nullptr };
};
#ifndef DOXYGEN_BUILD
template <>
class Future<void>
{
public:
constexpr Future() noexcept = default;
~Future();
Future(Future&& rhs) noexcept;
Future& operator=(Future&& rhs) noexcept;
bool valid() const noexcept;
bool try_wait() const;
void wait() const;
template <class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& dur) const;
template <class Clock, class Duration>
bool wait_until(const std::chrono::time_point<Clock, Duration>& when) const;
void get();
SharedFuture<void> share();
const TaskContext* task_if() const;
operator RequiredObject() const;
template <class Callable, class... Args>
auto then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args);
private:
template <class U>
friend struct detail::GenerateFuture;
template <class U>
friend class Future;
template <class U>
friend class Promise;
template <class U>
friend class SharedFuture;
friend struct Tracker;
TaskContext* ptask();
detail::SharedState<void>* state() const noexcept;
Future(TaskContext task);
Future(detail::SharedState<void>* state);
Object m_obj{ ObjectType::eNone, nullptr };
};
#endif
/**
* SharedFuture is a shareable version of Future. Instead of Future::get() invalidating the Future and returning the
* value one time, multiple SharedFuture objects can reference the same shared state and allow multiple threads to
* wait and access the result value simultaneously.
*
* SharedFuture is similar to <a href="https://en.cppreference.com/w/cpp/thread/shared_future">std::shared_future</a>
*
* The same specializations (and their limitations) exist as with Future.
*/
template <class T = void>
class SharedFuture
{
public:
/**
* Default constructor. Constructs a SharedFuture where valid() == `false`.
*/
SharedFuture() noexcept = default;
/**
* Copy constructor. Holds the same state (if any) as @p other.
* @param other A SharedFuture to copy state from.
*/
SharedFuture(const SharedFuture<T>& other) noexcept;
/**
* Move constructor. Moves the shared state (if any) from @p other.
*
* After this call, @p other will report valid() == `false`.
* @param other A SharedFuture to move state from.
*/
SharedFuture(SharedFuture<T>&& other) noexcept;
/**
* Transfers the shared state (if any) from @p fut.
*
* After construction, @p fut will report valid() == `false`.
* @param fut A Future to move state from.
*/
SharedFuture(Future<T>&& fut) noexcept;
/**
* Destructor.
*/
~SharedFuture();
/**
* Copy-assign operator. Holds the same state (if any) as @p other after releasing any shared state previously held.
* @param other A SharedFuture to copy state from.
* @returns `*this`
*/
SharedFuture<T>& operator=(const SharedFuture<T>& other);
/**
* Move-assign operator. Swaps shared states with @p other.
* @param other A SharedFuture to swap states with.
* @returns `*this`
*/
SharedFuture<T>& operator=(SharedFuture<T>&& other) noexcept;
/**
* Waits until the shared state is Ready and retrieves the value stored.
* @warning Undefined behavior if valid() == `false`.
* @returns A const reference to the stored value.
*/
const T& get() const;
/**
* Checks if the SharedFuture references a shared state.
*
* This is only `true` for default-constructed SharedFuture or when moved from. Unlike Future, SharedFuture does not
* invalidate once the value is read with Future::get().
* @returns `true` if this SharedFuture references a shared state; `false` otherwise.
*/
bool valid() const noexcept;
/**
* Checks to see if the shared state is Ready without waiting.
*
* @warning Undefined behavior to call this if valid() == `false`.
* @returns `true` if the shared state is Ready; `false` otherwise.
*/
bool try_wait() const;
/**
* Blocks the task or thread and waits for the shared state to become Ready. try_wait() == `true` after this call
* and get() will immediately return a value.
*
* @warning Undefined behavior to call this if valid() == `false`.
*/
void wait() const;
/**
* Blocks the task or thread until @p dur has elapsed or the shared state becomes Ready.
*
* If `true` is returned, get() will return a value immediately.
* @warning Undefined behavior to call this if valid() == `false`.
* @param dur The duration to wait for.
* @returns `true` If the shared state is Ready; `false` if the timeout period elapsed.
*/
template <class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& dur) const;
/**
* Blocks the task or thread until @p when is reached or the shared state becomes Ready.
*
* If `true` is returned, get() will return a value immediately.
* @warning Undefined behavior to call this if valid() == `false`.
* @param when The clock time to wait until.
* @returns `true` If the shared state is Ready; `false` if the timeout period elapsed.
*/
template <class Clock, class Duration>
bool wait_until(const std::chrono::time_point<Clock, Duration>& when) const;
/**
* Returns whether the task promising a value to this Future has been canceled.
*
* @warning Undefined behavior to call this if valid() == `false`.
* @note The `void` specialization of SharedFuture does not have this function.
* @returns `true` if the task has been canceled or promise broken; `false` if the task is still pending, promise
* not yet fulfilled, or has a valid value to read.
*/
bool isCanceled() const;
/**
* Convertible to RequiredObject.
*/
operator RequiredObject() const;
/**
* Returns a valid TaskContext if this SharedFuture represents a task.
*
* @note Futures can be returned from addTask() and related functions or from Promise::get_future(). Only Future
* objects returned from addTask() and transferred to SharedFuture will return a valid pointer from task_if().
*
* @returns A pointer to a TaskContext if this SharedFuture was created from addTask() or related functions;
* `nullptr` otherwise. The pointer is valid as long as the SharedFuture exists and the response from valid() would
* be consistent.
*/
const TaskContext* task_if() const;
/**
* Syntactic sugar around ITasking::addSubTask() that automatically passes the value from get() into `Callable`.
* Unlike Future::then(), the SharedFuture is not reset to an invalid state.
*
* @note This can be used to "chain" tasks together.
*
* @warning If the dependent task is canceled then the sub-task will call `std::terminate()`. When canceling the
* dependent task you must first cancel the sub-task.
*
* @param prio The priority of the task to execute.
* @param trackers (optional) A `std::initializer_list` of zero or more Tracker objects. Note that this *must* be a
* temporary object. The Tracker objects can be used to determine task completion or to provide input/output
* parameters to the task system.
* @param f A C++ "Callable" object (i.e. functor, lambda, [member] function ptr) that optionally returns a value.
* The Callable object must take `const T&` as its last parameter.
* @param args Arguments to pass to @p f
* @return A Future based on the return type of @p f
*/
template <class Callable, class... Args>
auto then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args);
private:
detail::SharedState<T>* m_state{ nullptr };
};
#ifndef DOXYGEN_BUILD
// Reference specialization of SharedFuture. Mirrors the primary template's
// documented interface, but get() yields `T&` since the shared state holds a
// reference rather than a value. (Hidden from Doxygen; see primary template.)
template <class T>
class SharedFuture<T&>
{
public:
//! Default constructor; valid() == `false` afterwards.
constexpr SharedFuture() noexcept = default;
//! Copy constructor; shares state with @p other.
SharedFuture(const SharedFuture& other) noexcept;
//! Move constructor; takes the state from @p other.
SharedFuture(SharedFuture&& other) noexcept;
//! Adopts the shared state from a Future<T&>; @p fut becomes invalid.
SharedFuture(Future<T&>&& fut) noexcept;
//! Destructor; releases any shared state.
~SharedFuture();
//! Copy-assignment; releases the current state and shares @p other's.
SharedFuture& operator=(const SharedFuture& other);
//! Move-assignment; swaps states with @p other.
SharedFuture& operator=(SharedFuture&& other) noexcept;
//! Waits for readiness and returns the stored reference.
T& get() const;
//! `true` if this SharedFuture references a shared state.
bool valid() const noexcept;
//! Non-blocking readiness check; undefined if valid() == `false`.
bool try_wait() const;
//! Blocks until the shared state is Ready.
void wait() const;
//! Blocks until Ready or @p dur elapses; `true` if Ready.
template <class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& dur) const;
//! Blocks until Ready or @p when is reached; `true` if Ready.
template <class Clock, class Duration>
bool wait_until(const std::chrono::time_point<Clock, Duration>& when) const;
//! `true` if the producing task was canceled or the promise broken.
bool isCanceled() const;
//! Convertible to RequiredObject for dependency tracking.
operator RequiredObject() const;
//! Returns a TaskContext pointer if this represents a task, else `nullptr`.
const TaskContext* task_if() const;
//! Schedules a continuation sub-task receiving get()'s reference.
template <class Callable, class... Args>
auto then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args);
private:
// Ref-counted shared state; `nullptr` when invalid.
detail::SharedState<T&>* m_state{ nullptr };
};
// Void specialization of SharedFuture: communicates a stateless event. Unlike
// the primary template it has no isCanceled() and may wrap either a bare
// TaskContext or a SharedState<void> inside the m_obj discriminated holder.
template <>
class SharedFuture<void>
{
public:
//! Default constructor; valid() == `false` afterwards.
constexpr SharedFuture() noexcept = default;
//! Copy constructor; shares state with @p other.
SharedFuture(const SharedFuture<void>& other) noexcept;
//! Move constructor; takes the state from @p other.
SharedFuture(SharedFuture<void>&& other) noexcept;
//! Adopts the state from a Future<void>; @p fut becomes invalid.
SharedFuture(Future<void>&& fut) noexcept;
//! Destructor; releases any shared state.
~SharedFuture();
//! Copy-assignment; releases the current state and shares @p other's.
SharedFuture<void>& operator=(const SharedFuture<void>& other);
//! Move-assignment; swaps states with @p other.
SharedFuture<void>& operator=(SharedFuture<void>&& other) noexcept;
//! Waits for readiness; no value is produced.
void get() const;
//! `true` if this SharedFuture references a task or shared state.
bool valid() const noexcept;
//! Non-blocking readiness check; undefined if valid() == `false`.
bool try_wait() const;
//! Blocks until the shared state is Ready.
void wait() const;
//! Blocks until Ready or @p dur elapses; `true` if Ready.
template <class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& dur) const;
//! Blocks until Ready or @p when is reached; `true` if Ready.
template <class Clock, class Duration>
bool wait_until(const std::chrono::time_point<Clock, Duration>& when) const;
//! Convertible to RequiredObject for dependency tracking.
operator RequiredObject() const;
//! Returns a TaskContext pointer if this represents a task, else `nullptr`.
const TaskContext* task_if() const;
//! Schedules a continuation sub-task once this becomes Ready.
template <class Callable, class... Args>
auto then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args);
private:
friend struct Tracker;
// Resets *this to hold a task slot and returns its address (used by Tracker).
TaskContext* ptask();
// Returns the SharedState<void> if m_obj holds one, else `nullptr`.
detail::SharedState<void>* state() const;
// Discriminated holder: eTaskContext, eSharedState, or eNone when invalid.
Object m_obj{ ObjectType::eNone, nullptr };
};
#endif
/**
 * A facility to store a value that is later acquired asynchronously via a Future created via Promise::get_future().
 *
 * The carb.tasking implementation is very similar to the C++11 <a
 * href="https://en.cppreference.com/w/cpp/thread/promise">std::promise</a>.
 *
 * A promise has a "shared state" that is shared with the Future that it creates through Promise::get_future().
 *
 * A promise is a single-use object. The get_future() function may only be called once, and either set_value() or
 * setCanceled() may only be called once.
 *
 * A promise that is destroyed without ever having called set_value() or setCanceled() is consider a broken promise and
 * automatically calls setCanceled().
 *
 * There are three specializations of Promise:
 * * Promise<T>: The base specialization, used to communicate objects between tasks/threads.
 * * Promise<T&>: Reference specialization, used to communicate references between tasks/threads.
 * * Promise<void>: Void specialization, used to communicate stateless events between tasks/threads.
 *
 * The `void` specialization of Promise is slightly different:
 * * Promise<void> does not have Promise::setCanceled(); cancellation state cannot be determined.
 */
template <class T = void>
class Promise
{
    // Promises are move-only: copying would duplicate the single-use contract.
CARB_PREVENT_COPY(Promise);
public:
/**
 * Default constructor.
 *
 * Initializes the shared state.
 */
Promise();
/**
 * Can be move-constructed.
 */
Promise(Promise&& other) noexcept;
/**
 * Destructor.
 *
 * If the shared state has not yet received a value with set_value(), then it is canceled and made Ready similarly
 * to setCanceled().
 */
~Promise();
/**
 * Can be move-assigned.
 */
Promise& operator=(Promise&& other) noexcept;
/**
 * Swaps the shared state with @p other's.
 *
 * @param other A Promise to swap shared states with.
 */
void swap(Promise& other) noexcept;
/**
 * Atomically retrieves and clears the Future from this Promise that shares the same state.
 *
 * A Future::wait() call will wait until the shared state becomes Ready.
 *
 * @warning `std::terminate()` will be called if this function is called more than once.
 *
 * @returns A Future with the same shared state as this Promise.
 */
Future<T> get_future();
/**
 * Atomically stores the value in the shared state and makes the state Ready.
 *
 * @warning Only one call of set_value() or setCanceled() is allowed. Subsequent calls will result in a call to
 * `std::terminate()`.
 *
 * @param value The value to atomically set into the shared state.
 */
void set_value(const T& value);
/**
 * Atomically stores the value in the shared state and makes the state Ready.
 *
 * @warning Only one call of set_value() or setCanceled() is allowed. Subsequent calls will result in a call to
 * `std::terminate()`.
 *
 * @param value The value to atomically set into the shared state.
 */
void set_value(T&& value);
/**
 * Atomically sets the shared state to canceled and makes the state Ready. This is a broken promise.
 *
 * @warning Calling Future::get() will result in a call to `std::terminate()`; Future::isCanceled() will return
 * `true`.
 */
void setCanceled();
private:
// Ref-counted shared state, shared with the Future from get_future().
using State = detail::SharedState<T>;
State* m_state{ nullptr };
};
#ifndef DOXYGEN_BUILD
// Reference specialization of Promise; the shared state stores a reference,
// so there is a single set_value(T&) overload. See the primary template docs.
template <class T>
class Promise<T&>
{
CARB_PREVENT_COPY(Promise);
public:
//! Allocates the shared state.
Promise();
//! Move constructor; takes the state from @p other.
Promise(Promise&& other) noexcept;
//! Destructor; a never-set promise is broken (marked canceled & Ready).
~Promise();
//! Move-assignment; swaps states with @p other.
Promise& operator=(Promise&& other) noexcept;
//! Swaps shared states with @p other.
void swap(Promise& other) noexcept;
//! One-time retrieval of the Future sharing this state.
Future<T&> get_future();
//! Stores the reference and makes the state Ready; single-use.
void set_value(T& value);
//! Marks the state canceled and Ready (broken promise); single-use.
void setCanceled();
private:
// Ref-counted shared state, shared with the Future from get_future().
using State = detail::SharedState<T&>;
State* m_state{ nullptr };
};
// Void specialization of Promise: signals a stateless event. As documented on
// the primary template, it has no setCanceled() member.
template <>
class Promise<void>
{
CARB_PREVENT_COPY(Promise);
public:
//! Allocates the shared state.
Promise();
//! Move constructor; takes the state from @p other.
Promise(Promise&& other) noexcept;
//! Destructor; a never-set promise still makes the state Ready.
~Promise();
//! Move-assignment; swaps states with @p other.
Promise& operator=(Promise&& other) noexcept;
//! Swaps shared states with @p other.
void swap(Promise& other) noexcept;
//! One-time retrieval of the Future sharing this state.
Future<void> get_future();
//! Makes the state Ready; single-use.
void set_value();
private:
// Ref-counted shared state, shared with the Future from get_future().
using State = detail::SharedState<void>;
State* m_state{ nullptr };
};
#endif
} // namespace tasking
} // namespace carb
|
omniverse-code/kit/include/carb/tasking/ITasking.inl | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include "../cpp/Tuple.h"
namespace carb
{
namespace tasking
{
namespace detail
{
// Internal helper that schedules a task through ITasking and binds a
// SharedState<R> to the resulting Future<R>. The callable and its decayed
// arguments are placement-new'd into a heap tuple; element 0 is a unique_ptr
// that owns one reference on the shared state so the state is released whether
// the task runs or is canceled. Three call operators cover immediate,
// delayed-by-duration, and delayed-until-time-point scheduling.
template <class R>
struct GenerateFuture
{
// unique_ptr deleter that drops the tuple's reference on the shared state.
struct Releaser
{
void operator()(detail::SharedState<R>* p)
{
p->release();
}
};
using SharedStatePtr = std::unique_ptr<detail::SharedState<R>, Releaser>;
// Invokes tuple element 1 (the callable) with elements 2..N (the args) and
// stores the result into the shared state (tuple element 0).
template <class Tuple, size_t... I>
static constexpr void callTupleImpl(Tuple&& t, std::index_sequence<I...>)
{
detail::SharedState<R>* state = std::get<0>(std::forward<Tuple>(t)).get();
state->set(carb::cpp::invoke(std::get<1>(std::forward<Tuple>(t)), std::get<I + 2>(std::forward<Tuple>(t))...));
}
// Builds the index sequence for the argument elements (tuple size minus the
// state pointer and the callable).
template <class Tuple>
static constexpr void callTuple(Tuple&& t)
{
callTupleImpl(std::forward<Tuple>(t),
std::make_index_sequence<std::tuple_size<std::remove_reference_t<Tuple>>::value - 2>{});
}
// Immediate scheduling: adds the task and returns a Future for its result.
template <class Callable, class... Args>
Future<R> operator()(ITasking* tasking, Counter* counter, TaskDesc& desc, Callable&& func, Args&&... args)
{
// State starts with refcount covering both the Future and the tuple's
// SharedStatePtr (constructed with `true` — presumably "is a task";
// NOTE(review): confirm against SharedState's constructor.
auto* state = new (carb::allocate(sizeof(detail::SharedState<R>))) detail::SharedState<R>(true);
using Tuple = std::tuple<SharedStatePtr, std::decay_t<Callable>, std::decay_t<Args>...>;
Tuple* t =
new (carb::allocate(sizeof(Tuple))) Tuple(state, std::forward<Callable>(func), std::forward<Args>(args)...);
// Task body: take ownership of the tuple so it is freed after invocation.
detail::generateTaskFunc(desc, [t]() {
std::unique_ptr<Tuple, detail::CarbDeleter<Tuple>> p(t);
callTuple(std::move(*t));
});
CARB_ASSERT(desc.taskArg == t);
CARB_ASSERT(!desc.cancel);
// If the task is canceled before running, destroy the tuple (which also
// releases its reference on the shared state via SharedStatePtr).
desc.cancel = [](void* arg) { detail::carbDelete(static_cast<Tuple*>(arg)); };
return Future<R>(tasking->internalAddTask(desc, counter), state);
}
// Delayed scheduling: same as above but the task starts after @p dur.
template <class Callable, class Rep, class Period, class... Args>
Future<R> operator()(ITasking* tasking,
const std::chrono::duration<Rep, Period>& dur,
Counter* counter,
TaskDesc& desc,
Callable&& func,
Args&&... args)
{
auto* state = new (carb::allocate(sizeof(detail::SharedState<R>))) detail::SharedState<R>(true);
using Tuple = std::tuple<SharedStatePtr, std::decay_t<Callable>, std::decay_t<Args>...>;
Tuple* t =
new (carb::allocate(sizeof(Tuple))) Tuple(state, std::forward<Callable>(func), std::forward<Args>(args)...);
detail::generateTaskFunc(desc, [t]() {
std::unique_ptr<Tuple, detail::CarbDeleter<Tuple>> p(t);
callTuple(std::move(*t));
});
CARB_ASSERT(desc.taskArg == t);
CARB_ASSERT(!desc.cancel);
desc.cancel = [](void* arg) { detail::carbDelete(static_cast<Tuple*>(arg)); };
return Future<R>(tasking->internalAddDelayedTask(detail::convertDuration(dur), desc, counter), state);
}
// Absolute-time scheduling: the task starts no earlier than @p when.
template <class Callable, class Clock, class Duration, class... Args>
Future<R> operator()(ITasking* tasking,
const std::chrono::time_point<Clock, Duration>& when,
Counter* counter,
TaskDesc& desc,
Callable&& func,
Args&&... args)
{
auto* state = new (carb::allocate(sizeof(detail::SharedState<R>))) detail::SharedState<R>(true);
using Tuple = std::tuple<SharedStatePtr, std::decay_t<Callable>, std::decay_t<Args>...>;
Tuple* t =
new (carb::allocate(sizeof(Tuple))) Tuple(state, std::forward<Callable>(func), std::forward<Args>(args)...);
detail::generateTaskFunc(desc, [t]() {
std::unique_ptr<Tuple, detail::CarbDeleter<Tuple>> p(t);
callTuple(std::move(*t));
});
CARB_ASSERT(desc.taskArg == t);
CARB_ASSERT(!desc.cancel);
desc.cancel = [](void* arg) { detail::carbDelete(static_cast<Tuple*>(arg)); };
return Future<R>(tasking->internalAddDelayedTask(detail::convertAbsTime(when), desc, counter), state);
}
};
// Void specialization: no SharedState is allocated since there is no result
// to store — the returned Future<void> wraps only the TaskContext. Overloads
// with no extra args forward the callable directly; overloads with args bind
// them by value into a lambda and apply them when the task runs.
template <>
struct GenerateFuture<void>
{
// Immediate, no arguments: schedule the callable as-is.
template <class Callable>
Future<void> operator()(ITasking* tasking, Counter* counter, TaskDesc& desc, Callable&& func)
{
detail::generateTaskFunc(desc, std::forward<Callable>(func));
return Future<>(tasking->internalAddTask(desc, counter));
}
// Immediate, with arguments: capture decayed args and apply at run time.
template <class Callable, class... Args>
Future<void> operator()(ITasking* tasking, Counter* counter, TaskDesc& desc, Callable&& func, Args&&... args)
{
using Tuple = std::tuple<std::decay_t<Args>...>;
detail::generateTaskFunc(
desc, [func = std::forward<Callable>(func), args = Tuple(std::forward<Args>(args)...)]() mutable {
cpp::apply(std::move(func), std::move(args));
});
return Future<>(tasking->internalAddTask(desc, counter));
}
// Delayed by duration, no arguments.
template <class Callable, class Rep, class Period>
Future<void> operator()(ITasking* tasking,
const std::chrono::duration<Rep, Period>& dur,
Counter* counter,
TaskDesc& desc,
Callable&& func)
{
detail::generateTaskFunc(desc, std::forward<Callable>(func));
return Future<>(tasking->internalAddDelayedTask(detail::convertDuration(dur), desc, counter));
}
// Delayed by duration, with arguments.
template <class Callable, class Rep, class Period, class... Args>
Future<void> operator()(ITasking* tasking,
const std::chrono::duration<Rep, Period>& dur,
Counter* counter,
TaskDesc& desc,
Callable&& func,
Args&&... args)
{
using Tuple = std::tuple<std::decay_t<Args>...>;
detail::generateTaskFunc(
desc, [func = std::forward<Callable>(func), args = Tuple(std::forward<Args>(args)...)]() mutable {
cpp::apply(std::move(func), std::move(args));
});
return Future<>(tasking->internalAddDelayedTask(detail::convertDuration(dur), desc, counter));
}
// Delayed until absolute time, no arguments.
template <class Callable, class Clock, class Duration>
Future<void> operator()(ITasking* tasking,
const std::chrono::time_point<Clock, Duration>& when,
Counter* counter,
TaskDesc& desc,
Callable&& func)
{
detail::generateTaskFunc(desc, std::forward<Callable>(func));
return Future<>(tasking->internalAddDelayedTask(detail::convertAbsTime(when), desc, counter));
}
// Delayed until absolute time, with arguments.
template <class Callable, class Clock, class Duration, class... Args>
Future<void> operator()(ITasking* tasking,
const std::chrono::time_point<Clock, Duration>& when,
Counter* counter,
TaskDesc& desc,
Callable&& func,
Args&&... args)
{
using Tuple = std::tuple<std::decay_t<Args>...>;
detail::generateTaskFunc(
desc, [func = std::forward<Callable>(func), args = Tuple(std::forward<Args>(args)...)]() mutable {
cpp::apply(std::move(func), std::move(args));
});
return Future<>(tasking->internalAddDelayedTask(detail::convertAbsTime(when), desc, counter));
}
};
// Wakes every waiter blocked on this state's futex. The state must already
// have been transitioned to eReady before calling (asserted below).
inline void SharedState<void>::notify()
{
CARB_ASSERT(m_futex.load(std::memory_order_relaxed) == eReady);
carb::getCachedInterface<ITasking>()->futexWakeup(m_futex, UINT_MAX);
}
} // namespace detail
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Future<T>
// Move constructor: steals rhs's state pointer, leaving rhs invalid.
template <class T>
inline Future<T>::Future(Future&& rhs) noexcept : m_state(std::exchange(rhs.m_state, nullptr))
{
}
// Adopts an already-referenced shared state (no additional addRef here).
template <class T>
inline constexpr Future<T>::Future(detail::SharedState<T>* state) noexcept : m_state(state)
{
// State has already been ref-counted.
}
// Adopts an already-referenced shared state and records the producing task's
// TaskContext inside it so task_if()/RequiredObject can expose it.
template <class T>
inline Future<T>::Future(TaskContext task, detail::SharedState<T>* state) noexcept : m_state(state)
{
// State has already been ref-counted.
m_state->m_object = Object{ ObjectType::eTaskContext, reinterpret_cast<void*>(task) };
}
// Destructor: drop our reference on the shared state, if any.
template <class T>
inline Future<T>::~Future()
{
if (m_state)
m_state->release();
}
// Move-assignment via swap; rhs's destructor releases our old state.
template <class T>
inline Future<T>& Future<T>::operator=(Future&& rhs) noexcept
{
std::swap(m_state, rhs.m_state);
return *this;
}
//! Reports whether this Future still references a shared state (i.e. it has
//! not been default-constructed, moved-from, or consumed by get()).
template <class T>
inline bool Future<T>::valid() const noexcept
{
    return nullptr != m_state;
}
// Non-blocking readiness probe. On success, caches readiness in the shared
// state (markReady) so later waits can skip the ITasking call.
template <class T>
inline bool Future<T>::try_wait() const
{
CARB_ASSERT(valid());
if (!m_state->ready())
{
if (!carb::getCachedInterface<ITasking>()->try_wait(*this))
return false;
m_state->markReady();
}
return true;
}
// Blocking wait; afterwards the shared state is marked Ready.
template <class T>
inline void Future<T>::wait() const
{
CARB_ASSERT(valid());
if (!m_state->ready())
{
carb::getCachedInterface<ITasking>()->wait(*this);
m_state->markReady();
}
}
// Bounded wait: returns `false` if @p dur elapses first, `true` once Ready.
template <class T>
template <class Rep, class Period>
inline bool Future<T>::wait_for(const std::chrono::duration<Rep, Period>& dur) const
{
CARB_ASSERT(valid());
if (!m_state->ready())
{
if (!carb::getCachedInterface<ITasking>()->wait_for(dur, *this))
return false;
m_state->markReady();
}
return true;
}
//! Blocks until the shared state becomes Ready or @p when is reached.
//! @param when The clock time to wait until.
//! @returns `true` if the shared state is Ready; `false` if @p when was
//! reached first. Undefined behavior if valid() == `false`.
template <class T>
template <class Clock, class Duration>
inline bool Future<T>::wait_until(const std::chrono::time_point<Clock, Duration>& when) const
{
    CARB_ASSERT(valid());
    if (!m_state->ready())
    {
        if (!carb::getCachedInterface<ITasking>()->wait_until(when, *this))
            return false;
        // Cache readiness so subsequent waits can short-circuit.
        m_state->markReady();
    }
    // Bug fix: the original fell off the end of this bool function (UB) when
    // the state was already Ready or became Ready — mirror wait_for().
    return true;
}
// Retrieves the value, consuming the Future: the state is moved to a local so
// *this reports valid() == `false` afterwards, and the local's destructor
// releases the shared state after the value is moved out.
template <class T>
inline T Future<T>::get()
{
CARB_ASSERT(valid());
Future local(std::move(*this));
local.wait();
return local.m_state->get();
}
// `true` once the state is Ready but no value was ever set (canceled/broken).
template <class T>
inline bool Future<T>::isCanceled() const
{
CARB_ASSERT(valid());
return try_wait() && !m_state->isSet();
}
// Transfers the state into a SharedFuture; *this becomes invalid.
template <class T>
inline SharedFuture<T> Future<T>::share()
{
return SharedFuture<T>(std::move(*this));
}
// Address of the TaskContext stored in the state's Object, or `nullptr` if
// this Future did not come from a task.
template <class T>
inline const TaskContext* Future<T>::task_if() const
{
return m_state && m_state->isTask() ? reinterpret_cast<const TaskContext*>(&m_state->m_object.data) : nullptr;
}
// Exposes the underlying dependency object (task/state) for trackers.
template <class T>
inline Future<T>::operator RequiredObject() const
{
return valid() ? m_state->m_object : RequiredObject{ Object{ ObjectType::eNone, nullptr } };
}
// Continuation: schedules a sub-task dependent on this Future; the Future is
// moved into the sub-task (consuming *this) and its value is appended to the
// callable's arguments via applyExtra.
template <class T>
template <class Callable, class... Args>
inline auto Future<T>::then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args)
{
CARB_ASSERT(valid());
RequiredObject req = *this;
return carb::getCachedInterface<ITasking>()->addSubTask(
req, prio, std::move(trackers),
[t = std::move(*this), f = std::forward<Callable>(f),
args = std::make_tuple(std::forward<Args>(args)...)]() mutable {
return detail::applyExtra(std::move(f), std::move(args), t.get());
});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Future<void>
// Destructor: release the SharedState<void> if m_obj holds one; a bare
// TaskContext needs no cleanup.
inline Future<void>::~Future()
{
if (auto s = state())
{
s->release();
}
}
// Move constructor: steals the holder, leaving rhs invalid (eNone/nullptr).
inline Future<void>::Future(Future&& rhs) noexcept : m_obj(std::exchange(rhs.m_obj, { ObjectType::eNone, nullptr }))
{
}
// Move-assignment via swap; rhs's destructor releases our old state.
inline Future<void>& Future<void>::operator=(Future&& rhs) noexcept
{
std::swap(m_obj, rhs.m_obj);
return *this;
}
// Wraps a bare task handle; no shared state is involved.
inline Future<void>::Future(TaskContext task) : m_obj{ ObjectType::eTaskContext, reinterpret_cast<void*>(task) }
{
}
// Adopts an already-referenced shared state.
inline Future<void>::Future(detail::SharedState<void>* state) : m_obj{ ObjectType::eSharedState, state }
{
// Has already been referenced, so no need to addRef
CARB_ASSERT(state);
}
// Valid when holding a shared state, or a task handle != kInvalidTaskContext.
inline bool Future<void>::valid() const noexcept
{
if (state())
return true;
return reinterpret_cast<TaskContext>(m_obj.data) != kInvalidTaskContext;
}
// Non-blocking readiness probe; caches readiness when a state is present.
inline bool Future<void>::try_wait() const
{
CARB_ASSERT(valid());
auto s = state();
if (!s || !s->ready())
{
if (!carb::getCachedInterface<ITasking>()->try_wait(*this))
return false;
if (s)
s->markReady();
}
return true;
}
// Blocking wait; afterwards a present state is marked Ready.
inline void Future<void>::wait() const
{
CARB_ASSERT(valid());
auto s = state();
if (!s || !s->ready())
{
carb::getCachedInterface<ITasking>()->wait(*this);
if (s)
s->markReady();
}
}
// Bounded wait: `false` if @p dur elapses first.
template <class Rep, class Period>
inline bool Future<void>::wait_for(const std::chrono::duration<Rep, Period>& dur) const
{
CARB_ASSERT(valid());
auto s = state();
if (!s || !s->ready())
{
if (!carb::getCachedInterface<ITasking>()->wait_for(dur, *this))
return false;
if (s)
s->markReady();
}
return true;
}
// Deadline wait: `false` if @p when is reached first.
template <class Clock, class Duration>
inline bool Future<void>::wait_until(const std::chrono::time_point<Clock, Duration>& when) const
{
CARB_ASSERT(valid());
auto s = state();
if (!s || !s->ready())
{
if (!carb::getCachedInterface<ITasking>()->wait_until(when, *this))
return false;
if (s)
s->markReady();
}
return true;
}
// Consuming wait: *this becomes invalid; no value is produced for `void`.
inline void Future<void>::get()
{
CARB_ASSERT(valid());
Future local(std::move(*this));
local.wait();
}
// Transfers the holder into a SharedFuture; *this becomes invalid.
inline SharedFuture<void> Future<void>::share()
{
return SharedFuture<void>(std::move(*this));
}
// Address of the stored TaskContext (from the state's Object if the state is
// a task, else from m_obj when it holds a task handle), or `nullptr`.
inline const TaskContext* Future<void>::task_if() const
{
auto s = state();
if (s && s->isTask())
return reinterpret_cast<TaskContext*>(&s->m_object.data);
return m_obj.type == ObjectType::eTaskContext ? reinterpret_cast<const TaskContext*>(&m_obj.data) : nullptr;
}
// Exposes the dependency object (the state's Object, or m_obj directly).
inline Future<void>::operator RequiredObject() const
{
auto s = state();
return s ? s->m_object : m_obj;
}
// Continuation: schedules a sub-task dependent on this Future. Nothing is
// passed to the callable since there is no value for `void`.
template <class Callable, class... Args>
inline auto Future<void>::then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args)
{
return carb::getCachedInterface<ITasking>()->addSubTask(
*this, prio, std::move(trackers),
[f = std::forward<Callable>(f), args = std::make_tuple(std::forward<Args>(args)...)]() mutable {
return carb::cpp::apply(std::move(f), std::move(args));
});
}
// Tracker helper: resets *this to an empty task slot and returns its address
// so a tracker can fill in the TaskContext.
inline TaskContext* Future<void>::ptask()
{
if (auto s = state())
s->release();
m_obj = Object{ ObjectType::eTaskContext, nullptr };
return reinterpret_cast<TaskContext*>(&m_obj.data);
}
// Returns the SharedState<void> if m_obj holds one, else `nullptr`.
inline detail::SharedState<void>* Future<void>::state() const noexcept
{
return m_obj.type == ObjectType::eSharedState ? static_cast<detail::SharedState<void>*>(m_obj.data) : nullptr;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// SharedFuture<T>
// Copy constructor: shares the state, taking an additional reference.
template <class T>
inline SharedFuture<T>::SharedFuture(const SharedFuture<T>& other) noexcept : m_state(other.m_state)
{
if (m_state)
m_state->addRef();
}
// Move constructor: steals the state pointer, leaving other invalid.
template <class T>
inline SharedFuture<T>::SharedFuture(SharedFuture<T>&& other) noexcept : m_state(std::exchange(other.m_state, nullptr))
{
}
// Adopts a Future's state (reference transfers; fut becomes invalid).
template <class T>
inline SharedFuture<T>::SharedFuture(Future<T>&& fut) noexcept : m_state(std::exchange(fut.m_state, nullptr))
{
}
// Destructor: drop our reference, if any.
template <class T>
inline SharedFuture<T>::~SharedFuture()
{
if (m_state)
m_state->release();
}
// Copy-assignment: addRef other's state before releasing ours so that
// self-assignment cannot destroy the state.
template <class T>
inline SharedFuture<T>& SharedFuture<T>::operator=(const SharedFuture<T>& other)
{
if (other.m_state)
other.m_state->addRef();
if (m_state)
m_state->release();
m_state = other.m_state;
return *this;
}
// Move-assignment via swap; other's destructor releases our old state.
template <class T>
inline SharedFuture<T>& SharedFuture<T>::operator=(SharedFuture<T>&& other) noexcept
{
std::swap(m_state, other.m_state);
return *this;
}
// Waits for readiness and returns a reference to the stored value. Does not
// invalidate *this (unlike Future::get()).
template <class T>
inline const T& SharedFuture<T>::get() const
{
CARB_ASSERT(valid());
wait();
return m_state->get_ref();
}
// `true` if a shared state is referenced.
template <class T>
inline bool SharedFuture<T>::valid() const noexcept
{
return m_state != nullptr;
}
// Non-blocking readiness probe; caches readiness in the shared state.
template <class T>
inline bool SharedFuture<T>::try_wait() const
{
CARB_ASSERT(valid());
if (!m_state->ready())
{
if (!carb::getCachedInterface<ITasking>()->try_wait(*this))
return false;
m_state->markReady();
}
return true;
}
// Blocking wait; afterwards the shared state is marked Ready.
template <class T>
inline void SharedFuture<T>::wait() const
{
CARB_ASSERT(valid());
if (!m_state->ready())
{
carb::getCachedInterface<ITasking>()->wait(*this);
m_state->markReady();
}
}
// Bounded wait: `false` if @p dur elapses first.
template <class T>
template <class Rep, class Period>
inline bool SharedFuture<T>::wait_for(const std::chrono::duration<Rep, Period>& dur) const
{
CARB_ASSERT(valid());
if (!m_state->ready())
{
if (!carb::getCachedInterface<ITasking>()->wait_for(dur, *this))
return false;
m_state->markReady();
}
return true;
}
// Deadline wait: `false` if @p when is reached first.
template <class T>
template <class Clock, class Duration>
inline bool SharedFuture<T>::wait_until(const std::chrono::time_point<Clock, Duration>& when) const
{
CARB_ASSERT(valid());
if (!m_state->ready())
{
if (!carb::getCachedInterface<ITasking>()->wait_until(when, *this))
return false;
m_state->markReady();
}
return true;
}
// `true` once Ready but no value was stored (canceled/broken promise).
template <class T>
inline bool SharedFuture<T>::isCanceled() const
{
CARB_ASSERT(valid());
return try_wait() && !m_state->m_type.has_value();
}
// Exposes the dependency object for trackers.
template <class T>
inline SharedFuture<T>::operator RequiredObject() const
{
return m_state ? m_state->m_object : Object{ ObjectType::eNone, nullptr };
}
// Address of the TaskContext in the state's Object, or `nullptr`.
template <class T>
inline const TaskContext* SharedFuture<T>::task_if() const
{
return m_state && m_state->isTask() ? reinterpret_cast<TaskContext*>(&m_state->m_object.data) : nullptr;
}
// Continuation: copies *this into the sub-task (SharedFuture is copyable, so
// *this stays valid) and appends get()'s value to the callable's arguments.
template <class T>
template <class Callable, class... Args>
inline auto SharedFuture<T>::then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args)
{
CARB_ASSERT(valid());
RequiredObject req = *this;
return carb::getCachedInterface<ITasking>()->addSubTask(
req, prio, std::move(trackers),
[t = *this, f = std::forward<Callable>(f), args = std::make_tuple(std::forward<Args>(args)...)]() mutable {
return detail::applyExtra(std::move(f), std::move(args), t.get());
});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// SharedFuture<T&>
// Reference-specialization definitions. These mirror SharedFuture<T> exactly
// except get() returns T& and the wait paths use CARB_UNLIKELY hints.
// Copy constructor: shares the state, taking an additional reference.
template <class T>
inline SharedFuture<T&>::SharedFuture(const SharedFuture& other) noexcept : m_state(other.m_state)
{
if (m_state)
m_state->addRef();
}
// Move constructor: steals the state pointer, leaving other invalid.
template <class T>
inline SharedFuture<T&>::SharedFuture(SharedFuture&& other) noexcept : m_state(std::exchange(other.m_state, nullptr))
{
}
// Adopts a Future's state (reference transfers; fut becomes invalid).
template <class T>
inline SharedFuture<T&>::SharedFuture(Future<T&>&& fut) noexcept : m_state(std::exchange(fut.m_state, nullptr))
{
}
// Destructor: drop our reference, if any.
template <class T>
inline SharedFuture<T&>::~SharedFuture()
{
if (m_state)
m_state->release();
}
// Copy-assignment: addRef before release to make self-assignment safe.
template <class T>
inline SharedFuture<T&>& SharedFuture<T&>::operator=(const SharedFuture& other)
{
if (other.m_state)
other.m_state->addRef();
if (m_state)
m_state->release();
m_state = other.m_state;
return *this;
}
// Move-assignment via swap.
template <class T>
inline SharedFuture<T&>& SharedFuture<T&>::operator=(SharedFuture&& other) noexcept
{
std::swap(m_state, other.m_state);
return *this;
}
// Waits for readiness and returns the stored reference.
template <class T>
inline T& SharedFuture<T&>::get() const
{
CARB_ASSERT(valid());
wait();
return m_state->get();
}
// `true` if a shared state is referenced.
template <class T>
inline bool SharedFuture<T&>::valid() const noexcept
{
return m_state != nullptr;
}
// Non-blocking readiness probe; caches readiness in the shared state.
template <class T>
inline bool SharedFuture<T&>::try_wait() const
{
CARB_ASSERT(valid());
if (CARB_UNLIKELY(!m_state->ready()))
{
if (!carb::getCachedInterface<ITasking>()->try_wait(*this))
return false;
m_state->markReady();
}
return true;
}
// Blocking wait; afterwards the shared state is marked Ready.
template <class T>
inline void SharedFuture<T&>::wait() const
{
CARB_ASSERT(valid());
if (CARB_UNLIKELY(!m_state->ready()))
{
carb::getCachedInterface<ITasking>()->wait(*this);
m_state->markReady();
}
}
// Bounded wait: `false` if @p dur elapses first.
template <class T>
template <class Rep, class Period>
inline bool SharedFuture<T&>::wait_for(const std::chrono::duration<Rep, Period>& dur) const
{
CARB_ASSERT(valid());
if (CARB_UNLIKELY(!m_state->ready()))
{
if (!carb::getCachedInterface<ITasking>()->wait_for(dur, *this))
return false;
m_state->markReady();
}
return true;
}
// Deadline wait: `false` if @p when is reached first.
template <class T>
template <class Clock, class Duration>
inline bool SharedFuture<T&>::wait_until(const std::chrono::time_point<Clock, Duration>& when) const
{
CARB_ASSERT(valid());
if (CARB_UNLIKELY(!m_state->ready()))
{
if (!carb::getCachedInterface<ITasking>()->wait_until(when, *this))
return false;
m_state->markReady();
}
return true;
}
// `true` once Ready but no reference was stored (canceled/broken promise).
template <class T>
inline bool SharedFuture<T&>::isCanceled() const
{
CARB_ASSERT(valid());
return try_wait() && !m_state->m_value;
}
// Exposes the dependency object for trackers.
template <class T>
inline SharedFuture<T&>::operator RequiredObject() const
{
return m_state ? m_state->m_object : Object{ ObjectType::eNone, nullptr };
}
// Address of the TaskContext in the state's Object, or `nullptr`.
template <class T>
inline const TaskContext* SharedFuture<T&>::task_if() const
{
return m_state && m_state->isTask() ? reinterpret_cast<const TaskContext*>(&m_state->m_object.data) : nullptr;
}
// Continuation: copies *this into the sub-task and appends get()'s reference
// to the callable's arguments.
template <class T>
template <class Callable, class... Args>
inline auto SharedFuture<T&>::then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args)
{
CARB_ASSERT(valid());
RequiredObject req = *this;
return carb::getCachedInterface<ITasking>()->addSubTask(
req, prio, std::move(trackers),
[t = *this, f = std::forward<Callable>(f), args = std::make_tuple(std::forward<Args>(args)...)]() mutable {
return detail::applyExtra(std::move(f), std::move(args), t.get());
});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// SharedFuture<void>
// Copy constructor: shares the holder; addRef only when it holds a state.
inline SharedFuture<void>::SharedFuture(const SharedFuture<void>& other) noexcept : m_obj(other.m_obj)
{
if (auto s = state())
s->addRef();
}
// Move constructor: steals the holder, leaving other invalid.
inline SharedFuture<void>::SharedFuture(SharedFuture<void>&& other) noexcept
: m_obj(std::exchange(other.m_obj, { ObjectType::eNone, nullptr }))
{
}
// Adopts a Future<void>'s holder (reference transfers; fut becomes invalid).
inline SharedFuture<void>::SharedFuture(Future<void>&& fut) noexcept
: m_obj(std::exchange(fut.m_obj, { ObjectType::eNone, nullptr }))
{
}
// Destructor: release the state if the holder has one.
inline SharedFuture<void>::~SharedFuture()
{
if (auto s = state())
s->release();
}
// Copy-assignment: addRef before release to make self-assignment safe.
inline SharedFuture<void>& SharedFuture<void>::operator=(const SharedFuture<void>& other)
{
if (auto s = other.state())
s->addRef();
if (auto s = state())
s->release();
m_obj = other.m_obj;
return *this;
}
// Move-assignment via swap.
inline SharedFuture<void>& SharedFuture<void>::operator=(SharedFuture<void>&& other) noexcept
{
std::swap(m_obj, other.m_obj);
return *this;
}
// Waits for readiness; no value for `void`. *this stays valid.
inline void SharedFuture<void>::get() const
{
CARB_ASSERT(valid());
wait();
}
// Valid when holding a shared state, or a task handle != kInvalidTaskContext.
inline bool SharedFuture<void>::valid() const noexcept
{
if (state())
return true;
return reinterpret_cast<TaskContext>(m_obj.data) != kInvalidTaskContext;
}
// Non-blocking readiness probe.
inline bool SharedFuture<void>::try_wait() const
{
CARB_ASSERT(valid());
auto s = state();
if (s && s->ready())
return true;
return carb::getCachedInterface<ITasking>()->try_wait(*this);
}
// Blocking wait.
inline void SharedFuture<void>::wait() const
{
CARB_ASSERT(valid());
auto s = state();
if (s && s->ready())
return;
carb::getCachedInterface<ITasking>()->wait(*this);
}
// Bounded wait: `false` if @p dur elapses first.
template <class Rep, class Period>
inline bool SharedFuture<void>::wait_for(const std::chrono::duration<Rep, Period>& dur) const
{
CARB_ASSERT(valid());
auto s = state();
if (s && s->ready())
return true;
return carb::getCachedInterface<ITasking>()->wait_for(dur, *this);
}
// Deadline wait: `false` if @p when is reached first.
template <class Clock, class Duration>
inline bool SharedFuture<void>::wait_until(const std::chrono::time_point<Clock, Duration>& when) const
{
CARB_ASSERT(valid());
auto s = state();
if (s && s->ready())
return true;
return carb::getCachedInterface<ITasking>()->wait_until(when, *this);
}
// Exposes the dependency object (the state's Object, or m_obj directly).
inline SharedFuture<void>::operator RequiredObject() const
{
if (auto s = state())
return s->m_object;
return m_obj;
}
//! Returns a pointer to the TaskContext if this SharedFuture represents a
//! task; `nullptr` otherwise. The pointer is valid as long as this object
//! exists and the result of valid() would be consistent.
inline const TaskContext* SharedFuture<void>::task_if() const
{
    auto s = state();
    if (s && s->isTask())
        // Bug fix: return the address of the Object's data slot (where the
        // TaskContext value is stored), not the stored value cast to a
        // pointer. This matches Future<void>::task_if() and
        // SharedFuture<T>::task_if(), and the non-state branch below.
        return reinterpret_cast<const TaskContext*>(&s->m_object.data);
    return m_obj.type == ObjectType::eTaskContext ? reinterpret_cast<const TaskContext*>(&m_obj.data) : nullptr;
}
// Continuation: schedules a sub-task dependent on this SharedFuture. Nothing
// is passed to the callable since there is no value for `void`.
template <class Callable, class... Args>
inline auto SharedFuture<void>::then(Priority prio, Trackers&& trackers, Callable&& f, Args&&... args)
{
return carb::getCachedInterface<ITasking>()->addSubTask(
*this, prio, std::move(trackers),
[f = std::forward<Callable>(f), args = std::make_tuple(std::forward<Args>(args)...)]() mutable {
return detail::applyExtra(std::move(f), std::move(args));
});
}
// Returns the SharedState<void> if m_obj holds one, else `nullptr`.
inline detail::SharedState<void>* SharedFuture<void>::state() const
{
return m_obj.type == ObjectType::eSharedState ? static_cast<detail::SharedState<void>*>(m_obj.data) : nullptr;
}
// Tracker helper: resets *this to an empty task slot and returns its address
// so a tracker can fill in the TaskContext.
inline TaskContext* SharedFuture<void>::ptask()
{
if (auto s = state())
s->release();
m_obj = Object{ ObjectType::eTaskContext, nullptr };
return reinterpret_cast<TaskContext*>(&m_obj.data);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Promise<T>
// Constructs a Promise with a fresh shared state. The state is placement-constructed in memory from
// carb::allocate(); `false` means no Future has been retrieved yet (refcount starts at 1).
template <class T>
inline Promise<T>::Promise()
    : m_state(new (carb::allocate(sizeof(detail::SharedState<T>))) detail::SharedState<T>(false))
{
}
// Move constructor: steals the shared-state pointer, leaving @p other empty.
template <class T>
inline Promise<T>::Promise(Promise&& other) noexcept : m_state(std::exchange(other.m_state, nullptr))
{
}
// Destructor: if the promise was never fulfilled, marks the state ready with no value ("broken
// promise" / canceled) and wakes waiters, then drops this reference.
template <class T>
inline Promise<T>::~Promise()
{
    if (m_state)
    {
        auto old = m_state->m_futex.load(std::memory_order_relaxed);
        CARB_ASSERT(old == State::eReady || old == State::eUnset); // Should only be unset or set.
        if (old != 0) // 0 == State::eReady; i.e. only act if the value was never set.
        {
            // Mark as canceled since the promise is broken.
            m_state->m_futex.store(State::eReady, std::memory_order_release);
            m_state->notify();
        }
        m_state->release();
    }
}
// Move assignment implemented as a swap: our previous state (if any) is released when @p other is destroyed.
template <class T>
inline Promise<T>& Promise<T>::operator=(Promise&& other) noexcept
{
    std::swap(m_state, other.m_state);
    return *this;
}
// Exchanges shared states with @p other.
template <class T>
inline void Promise<T>::swap(Promise& other) noexcept
{
    std::swap(m_state, other.m_state);
}
// Returns the one-and-only Future for this promise; fatal if called more than once.
template <class T>
inline Future<T> Promise<T>::get_future()
{
    CARB_FATAL_UNLESS(!m_state->m_futureRetrieved.exchange(true, std::memory_order_acquire), "Future already retrieved!");
    m_state->addRef(); // The returned Future holds its own reference.
    return Future<T>(m_state);
}
// Copies @p value into the shared state and wakes waiters; fatal if a value was already set.
template <class T>
inline void Promise<T>::set_value(const T& value)
{
    m_state->set(value);
    m_state->notify();
}
// Moves @p value into the shared state and wakes waiters; fatal if a value was already set.
template <class T>
inline void Promise<T>::set_value(T&& value)
{
    m_state->set(std::move(value));
    m_state->notify();
}
// Cancels the promise: transitions the shared state directly from eUnset to eReady without ever
// publishing a value, so waiters observe "ready" with no value (a canceled/broken promise).
// Fatal if a value was already set or the promise was already canceled.
template <class T>
inline void Promise<T>::setCanceled()
{
    // Use acq_rel (not plain acquire) so the eReady store has release semantics for waiters woken by
    // notify(), matching Promise<T&>::setCanceled(). Compare against the named enumerator State::eUnset
    // (== 1) rather than a magic constant.
    CARB_FATAL_UNLESS(m_state->m_futex.exchange(State::eReady, std::memory_order_acq_rel) == State::eUnset,
                      "Value already set");
    m_state->notify();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Promise<T&>
// Constructs a Promise<T&> with a fresh shared state (placement-new in carb::allocate() memory);
// `false` means no Future has been retrieved yet.
template <class T>
inline Promise<T&>::Promise()
    : m_state(new (carb::allocate(sizeof(detail::SharedState<T&>))) detail::SharedState<T&>(false))
{
}
// Move constructor: steals the shared-state pointer, leaving @p other empty.
template <class T>
inline Promise<T&>::Promise(Promise&& other) noexcept : m_state(std::exchange(other.m_state, nullptr))
{
}
// Destructor: if never fulfilled, marks the state ready-with-no-value (broken promise) and wakes
// waiters, then drops this reference.
template <class T>
inline Promise<T&>::~Promise()
{
    if (m_state)
    {
        auto old = m_state->m_futex.load(std::memory_order_relaxed);
        CARB_ASSERT(old == State::eReady || old == State::eUnset);
        if (old != 0) // 0 == State::eReady; only act if the value was never set.
        {
            // Mark as canceled since the promise is broken.
            m_state->m_futex.store(State::eReady, std::memory_order_release);
            m_state->notify();
        }
        m_state->release();
    }
}
// Move assignment implemented as a swap: our previous state is released when @p other is destroyed.
template <class T>
inline Promise<T&>& Promise<T&>::operator=(Promise&& other) noexcept
{
    std::swap(m_state, other.m_state);
    return *this;
}
// Exchanges shared states with @p other.
template <class T>
inline void Promise<T&>::swap(Promise& other) noexcept
{
    std::swap(m_state, other.m_state);
}
// Returns the one-and-only Future for this promise; fatal if called more than once.
template <class T>
inline Future<T&> Promise<T&>::get_future()
{
    CARB_FATAL_UNLESS(!m_state->m_futureRetrieved.exchange(true, std::memory_order_acquire), "Future already retrieved!");
    m_state->addRef(); // The returned Future holds its own reference.
    return Future<T&>(m_state);
}
// Stores the reference in the shared state and wakes waiters; fatal if already set.
template <class T>
inline void Promise<T&>::set_value(T& value)
{
    m_state->set(value);
    m_state->notify();
}
// Cancels the promise: transitions the shared state directly from eUnset to eReady without ever
// publishing a reference, so waiters observe "ready" with no value (a canceled/broken promise).
// Fatal if a value was already set or the promise was already canceled.
template <class T>
inline void Promise<T&>::setCanceled()
{
    // Compare against the named enumerator State::eUnset (== 1) instead of a magic constant;
    // acq_rel gives the eReady store release semantics for waiters woken by notify().
    CARB_FATAL_UNLESS(m_state->m_futex.exchange(State::eReady, std::memory_order_acq_rel) == State::eUnset,
                      "Value already set");
    m_state->notify();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Promise<void>
// Constructs a Promise<void> with a fresh shared state (placement-new in carb::allocate() memory);
// `false` means no Future has been retrieved yet.
inline Promise<void>::Promise()
    : m_state(new (carb::allocate(sizeof(detail::SharedState<void>))) detail::SharedState<void>(false))
{
}
// Move constructor: steals the shared-state pointer, leaving @p other empty.
inline Promise<void>::Promise(Promise&& other) noexcept : m_state(std::exchange(other.m_state, nullptr))
{
}
// Destructor: if never fulfilled, marks the state ready (broken promise) and wakes waiters,
// then drops this reference.
inline Promise<void>::~Promise()
{
    if (m_state)
    {
        auto old = m_state->m_futex.load(std::memory_order_relaxed);
        CARB_ASSERT(old == State::eReady || old == State::eUnset); // Should only be unset or set.
        if (old != 0) // 0 == State::eReady; only act if set_value() was never called.
        {
            // Mark as canceled since the promise is broken.
            m_state->m_futex.store(State::eReady, std::memory_order_release);
            m_state->notify();
        }
        m_state->release();
    }
}
// Move assignment implemented as a swap: our previous state is released when @p other is destroyed.
inline Promise<void>& Promise<void>::operator=(Promise&& other) noexcept
{
    std::swap(m_state, other.m_state);
    return *this;
}
// Exchanges shared states with @p other.
inline void Promise<void>::swap(Promise& other) noexcept
{
    std::swap(m_state, other.m_state);
}
// Returns the one-and-only Future for this promise; fatal if called more than once.
inline Future<void> Promise<void>::get_future()
{
    CARB_FATAL_UNLESS(!m_state->m_futureRetrieved.exchange(true, std::memory_order_acquire), "Future already retrieved!");
    m_state->addRef(); // The returned Future holds its own reference.
    return Future<void>(m_state);
}
// Marks the promise fulfilled and wakes waiters; fatal if already set.
inline void Promise<void>::set_value()
{
    m_state->set();
    m_state->notify();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// ITasking
// Waits for @p obj while pinned to the current thread (PinGuard's scope covers the wait).
inline void ITasking::yieldUntilCounterPinThread(RequiredObject obj)
{
    PinGuard pin;
    wait(std::move(obj));
}
// Legacy name: waits for @p obj to become signaled.
inline void ITasking::yieldUntilCounter(RequiredObject obj)
{
    wait(obj);
}
// Legacy name: waits up to @p timeoutNs for @p obj; returns true if signaled in time.
inline bool ITasking::timedYieldUntilCounter(RequiredObject obj, uint64_t timeoutNs)
{
    return internalTimedWait(obj, timeoutNs);
}
// Blocking mutex lock: an infinite timed lock that is asserted (in debug) to have succeeded.
inline void ITasking::lockMutex(Mutex* mutex)
{
    bool b = timedLockMutex(mutex, kInfinite);
    CARB_ASSERT(b);
    CARB_UNUSED(b);
}
// Waits (forever) for @p task to complete; the handle is packed into a wait Object in-place.
inline bool ITasking::waitForTask(TaskContext task)
{
    return internalTimedWait({ ObjectType::eTaskContext, reinterpret_cast<void*>(task) }, kInfinite);
}
// Non-blocking check: a timed wait with a zero timeout.
inline bool ITasking::try_wait(RequiredObject req)
{
    return internalTimedWait(req, 0);
}
// Blocking wait for @p req; an infinite timed wait asserted (in debug) to have succeeded.
inline void ITasking::wait(RequiredObject req)
{
    bool b = internalTimedWait(req, kInfinite);
    CARB_ASSERT(b);
    CARB_UNUSED(b);
}
// Waits up to @p dur for @p req; the duration is clamped/converted to nanoseconds.
template <class Rep, class Period>
inline bool ITasking::wait_for(std::chrono::duration<Rep, Period> dur, RequiredObject req)
{
    return internalTimedWait(req, detail::convertDuration(dur));
}
// Waits until @p when for @p req; the deadline is converted to a relative nanosecond timeout.
template <class Clock, class Duration>
inline bool ITasking::wait_until(std::chrono::time_point<Clock, Duration> when, RequiredObject req)
{
    return internalTimedWait(req, detail::convertAbsTime(when));
}
// Blocking semaphore wait (infinite timeout, asserted in debug).
inline void ITasking::waitSemaphore(Semaphore* sema)
{
    bool b = timedWaitSemaphore(sema, kInfinite);
    CARB_ASSERT(b);
    CARB_UNUSED(b);
}
// Blocking shared (reader) lock (infinite timeout, asserted in debug).
inline void ITasking::lockSharedMutex(SharedMutex* mutex)
{
    bool b = timedLockSharedMutex(mutex, kInfinite);
    CARB_ASSERT(b);
    CARB_UNUSED(b);
}
// Blocking exclusive (writer) lock (infinite timeout, asserted in debug).
inline void ITasking::lockSharedMutexExclusive(SharedMutex* mutex)
{
    bool b = timedLockSharedMutexExclusive(mutex, kInfinite);
    CARB_ASSERT(b);
    CARB_UNUSED(b);
}
// Blocking condition-variable wait (infinite timeout, asserted in debug).
inline void ITasking::waitConditionVariable(ConditionVariable* cv, Mutex* mutex)
{
    bool b = timedWaitConditionVariable(cv, mutex, kInfinite);
    CARB_ASSERT(b);
    CARB_UNUSED(b);
}
// Flattens the Trackers helper into a raw (pointer, count) pair and forwards to the plugin.
inline void ITasking::bindTrackers(RequiredObject requiredObject, Trackers&& trackers)
{
    const Tracker* ptrackers{};
    size_t numTrackers{};
    trackers.output(ptrackers, numTrackers);
    internalBindTrackers(requiredObject, ptrackers, numTrackers);
}
// Invokes @p f synchronously if already in task context; otherwise runs it as a task and blocks on
// the result so the callable always executes in task context.
template <class Callable, class... Args>
inline auto ITasking::awaitSyncTask(Priority priority, Callable&& f, Args&&... args)
{
    if (getTaskContext() != kInvalidTaskContext)
    {
        // Call directly
        return carb::cpp::invoke(std::forward<Callable>(f), std::forward<Args>(args)...);
    }
    else
    {
        // Call within a task and return the result.
        return addTask(priority, {}, std::forward<Callable>(f), std::forward<Args>(args)...).get();
    }
}
// Schedules @p f as a standalone task; returns a Future whose type is deduced from the callable's result.
template <class Callable, class... Args>
inline auto ITasking::addTask(Priority priority, Trackers&& trackers, Callable&& f, Args&&... args)
{
    TaskDesc desc{};
    desc.priority = priority;
    using RetType = typename cpp::invoke_result_t<Callable, Args...>;
    // trackers.fill() writes the tracker list into desc; GenerateFuture builds the task + matching Future.
    return detail::GenerateFuture<RetType>()(this, trackers.fill(desc.trackers, desc.numTrackers), desc,
                                             std::forward<Callable>(f), std::forward<Args>(args)...);
}
// Like addTask() but the task first acquires @p throttler (limits concurrency).
template <class Callable, class... Args>
inline auto ITasking::addThrottledTask(
    Semaphore* throttler, Priority priority, Trackers&& trackers, Callable&& f, Args&&... args)
{
    TaskDesc desc{};
    desc.priority = priority;
    desc.waitSemaphore = throttler;
    using RetType = typename cpp::invoke_result_t<Callable, Args...>;
    return detail::GenerateFuture<RetType>()(this, trackers.fill(desc.trackers, desc.numTrackers), desc,
                                             std::forward<Callable>(f), std::forward<Args>(args)...);
}
// Like addTask() but the task does not start until @p requiredObj is signaled.
template <class Callable, class... Args>
inline auto ITasking::addSubTask(
    RequiredObject requiredObj, Priority priority, Trackers&& trackers, Callable&& f, Args&&... args)
{
    TaskDesc desc{};
    desc.priority = priority;
    desc.requiredObject = requiredObj;
    using RetType = typename cpp::invoke_result_t<Callable, Args...>;
    return detail::GenerateFuture<RetType>()(this, trackers.fill(desc.trackers, desc.numTrackers), desc,
                                             std::forward<Callable>(f), std::forward<Args>(args)...);
}
// Combination of addSubTask() and addThrottledTask(): waits for @p requiredObj and @p throttler.
template <class Callable, class... Args>
inline auto ITasking::addThrottledSubTask(
    RequiredObject requiredObj, Semaphore* throttler, Priority priority, Trackers&& trackers, Callable&& f, Args&&... args)
{
    TaskDesc desc{};
    desc.priority = priority;
    desc.requiredObject = requiredObj;
    desc.waitSemaphore = throttler;
    using RetType = typename cpp::invoke_result_t<Callable, Args...>;
    return detail::GenerateFuture<RetType>()(this, trackers.fill(desc.trackers, desc.numTrackers), desc,
                                             std::forward<Callable>(f), std::forward<Args>(args)...);
}
// Schedules @p f to start no sooner than @p dur from now (GenerateFuture overload taking a duration).
template <class Callable, class Rep, class Period, class... Args>
inline auto ITasking::addTaskIn(
    const std::chrono::duration<Rep, Period>& dur, Priority priority, Trackers&& trackers, Callable&& f, Args&&... args)
{
    using RetType = typename cpp::invoke_result_t<Callable, Args...>;
    TaskDesc desc{};
    desc.priority = priority;
    return detail::GenerateFuture<RetType>()(this, dur, trackers.fill(desc.trackers, desc.numTrackers), desc,
                                             std::forward<Callable>(f), std::forward<Args>(args)...);
}
// Schedules @p f to start no sooner than the absolute time @p when (GenerateFuture overload taking a time_point).
template <class Callable, class Clock, class Duration, class... Args>
inline auto ITasking::addTaskAt(const std::chrono::time_point<Clock, Duration>& when,
                                Priority priority,
                                Trackers&& trackers,
                                Callable&& f,
                                Args&&... args)
{
    using RetType = typename cpp::invoke_result_t<Callable, Args...>;
    TaskDesc desc{};
    desc.priority = priority;
    return detail::GenerateFuture<RetType>()(this, when, trackers.fill(desc.trackers, desc.numTrackers), desc,
                                             std::forward<Callable>(f), std::forward<Args>(args)...);
}
// Invokes f(args..., index) for every index in [0, range), potentially in parallel.
// The extra args and a pointer to the callable are packed into a stack-local Data object that is
// passed through the C-style context pointer; the wait happens inside internalApplyRange(), so the
// stack storage outlives all invocations.
template <class Callable, class... Args>
inline void ITasking::applyRange(size_t range, Callable f, Args&&... args)
{
    using Tuple = std::tuple<std::decay_t<Args>...>;
    // EmptyMemberPair compresses away an empty args tuple.
    using Data = carb::EmptyMemberPair<Tuple, Callable*>;
    Data data{ carb::InitBoth{}, Tuple{ std::forward<Args>(args)... }, &f };
    // Trampoline: recover Data from the void* context and unpack the tuple ahead of the index.
    auto callFunctor = [](size_t index, void* context) {
        Data* pData = static_cast<Data*>(context);
        detail::applyExtra(*pData->second, pData->first(), index);
    };
    return internalApplyRange(range, callFunctor, &data);
}
// Batched variant: invokes f(args..., startIndex, endIndex) for sub-ranges of [0, range);
// batchHint suggests how many indices each invocation should cover.
template <class Callable, class... Args>
inline void ITasking::applyRangeBatch(size_t range, size_t batchHint, Callable f, Args&&... args)
{
    using Tuple = std::tuple<std::decay_t<Args>...>;
    using Data = carb::EmptyMemberPair<Tuple, Callable*>;
    Data data{ carb::InitBoth{}, Tuple{ std::forward<Args>(args)... }, &f };
    auto callFunctor = [](size_t startIndex, size_t endIndex, void* context) {
        Data* pData = static_cast<Data*>(context);
        detail::applyExtra(*pData->second, pData->first(), startIndex, endIndex);
    };
    return internalApplyRangeBatch(range, batchHint, callFunctor, &data);
}
// Invokes f(args..., v) for every v in [begin, end) with an implicit step of 1, potentially in parallel.
template <class T, class Callable, class... Args>
inline void ITasking::parallelFor(T begin, T end, Callable f, Args&&... args)
{
    using Tuple = std::tuple<std::decay_t<Args>...>;
    using Tuple2 = std::tuple<const T, Callable*>; // (begin, callable) carried alongside the user args
    using Data = carb::EmptyMemberPair<Tuple, Tuple2>;
    Data data{ carb::InitBoth{}, Tuple{ std::forward<Args>(args)... }, Tuple2{ begin, &f } };
    auto callFunctor = [](size_t index, void* context) {
        Data* pData = static_cast<Data*>(context);
        // Map the 0-based index back into the caller's value range: begin + index.
        detail::applyExtra(*std::get<1>(pData->second), pData->first(), std::get<0>(pData->second) + T(index));
    };
    CARB_ASSERT(end >= begin);
    return internalApplyRange(size_t(end - begin), callFunctor, &data);
}
// Strided variant: invokes f(args..., begin + step * i) for i in [0, (end - begin) / step).
template <class T, class Callable, class... Args>
inline void ITasking::parallelFor(T begin, T end, T step, Callable f, Args&&... args)
{
    using Tuple = std::tuple<std::decay_t<Args>...>;
    using Tuple2 = std::tuple<const T, const T, Callable*>; // (begin, step, callable)
    using Data = carb::EmptyMemberPair<Tuple, Tuple2>;
    Data data{ carb::InitBoth{}, Tuple{ std::forward<Args>(args)... }, Tuple2{ begin, step, &f } };
    auto callFunctor = [](size_t index, void* context) {
        Data* pData = static_cast<Data*>(context);
        detail::applyExtra(*std::get<2>(pData->second), pData->first(),
                           std::get<0>(pData->second) + (std::get<1>(pData->second) * T(index)));
    };
    CARB_ASSERT(step != T(0));
    // NOTE(review): a negative step is accepted without checking begin/end ordering — the assert's
    // second arm is only `step < 0`; confirm callers guarantee (begin - end) and step have matching signs.
    CARB_ASSERT((end >= begin && step >= T(0)) || (step < T(0)));
    return internalApplyRange(size_t((end - begin) / step), callFunctor, &data);
}
} // namespace tasking
} // namespace carb
|
omniverse-code/kit/include/carb/tasking/TaskingHelpers.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.tasking helper functions
#pragma once
#include "TaskingTypes.h"
#include "../thread/Futex.h"
#include "../cpp/Functional.h"
#include "../cpp/Optional.h"
#include "../cpp/Variant.h"
#include "../Memory.h"
#include <atomic>
#include <chrono>
#include <iterator>
#include <vector>
namespace carb
{
namespace tasking
{
#ifndef DOXYGEN_BUILD
namespace detail
{
// Deleter functor for objects allocated with carb::allocate(); destroys then deallocates.
template <class T>
struct CarbDeleter
{
    void operator()(T* p) noexcept
    {
        p->~T();
        carb::deallocate(p);
    }
};
// Function form of the above; the const_cast strips cv-qualifiers so deallocate() accepts the pointer.
template <class T, class U = std::remove_cv_t<T>>
inline void carbDelete(T* p) noexcept
{
    if (p)
    {
        p->~T();
        carb::deallocate(const_cast<U*>(p));
    }
}
// Trait: true only for string literals (const char (&)[N]); used by Tracker to pick
// eTaskNameLiteral (no copy needed) over eTaskName.
template <class T>
struct is_literal_string
{
    constexpr static bool value = false;
};
template <size_t N>
struct is_literal_string<const char (&)[N]>
{
    constexpr static bool value = true;
};
// Sentinel Counter* (all bits set) returned by Trackers::fill() to mean "a list of trackers follows".
Counter* const kListOfCounters{ (Counter*)(size_t)-1 };
// Converts a duration to whole nanoseconds, clamping negative values to zero.
template <class Rep, class Period>
uint64_t convertDuration(const std::chrono::duration<Rep, Period>& dur)
{
    auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(thread::detail::clampDuration(dur)).count();
    return uint64_t(::carb_max(std::chrono::nanoseconds::rep(0), ns));
}
// Converts an absolute deadline to a relative nanosecond timeout from "now" on the given clock.
template <class Clock, class Duration>
uint64_t convertAbsTime(const std::chrono::time_point<Clock, Duration>& tp)
{
    return convertDuration(tp - Clock::now());
}
// Invokes f with the tuple's elements first, then the trailing args (a std::apply variant that
// appends extra arguments after the unpacked tuple).
template <class F, class Tuple, size_t... I, class... Args>
decltype(auto) applyExtraImpl(F&& f, Tuple&& t, std::index_sequence<I...>, Args&&... args)
{
    CARB_UNUSED(t); // Can get C4100: unreferenced formal parameter on MSVC when Tuple is empty.
    return cpp::invoke(std::forward<F>(f), std::get<I>(std::forward<Tuple>(t))..., std::forward<Args>(args)...);
}
template <class F, class Tuple, class... Args>
decltype(auto) applyExtra(F&& f, Tuple&& t, Args&&... args)
{
    return applyExtraImpl(std::forward<F>(f), std::forward<Tuple>(t),
                          std::make_index_sequence<std::tuple_size<std::remove_reference_t<Tuple>>::value>{},
                          std::forward<Args>(args)...);
}
// U looks like an iterator convertible to V when dereferenced
// (forward-but-not-random-access; the negation keeps the two overload sets disjoint).
template <class U, class V>
using IsForwardIter = carb::cpp::conjunction<
    carb::cpp::negation<
        typename std::is_convertible<typename std::iterator_traits<U>::iterator_category, std::random_access_iterator_tag>>,
    typename std::is_convertible<typename std::iterator_traits<U>::iterator_category, std::forward_iterator_tag>,
    std::is_convertible<decltype(*std::declval<U&>()), V>>;
template <class U, class V>
using IsRandomAccessIter = carb::cpp::conjunction<
    typename std::is_convertible<typename std::iterator_traits<U>::iterator_category, std::random_access_iterator_tag>,
    std::is_convertible<decltype(*std::declval<U&>()), V>>;
// Must fit within a pointer, be trivially move constructible and trivially destructible.
template <class Functor>
using FitsWithinPointerTrivially =
    carb::cpp::conjunction<carb::cpp::bool_constant<sizeof(typename std::decay_t<Functor>) <= sizeof(void*)>,
                           std::is_trivially_move_constructible<typename std::decay_t<Functor>>,
                           std::is_trivially_destructible<typename std::decay_t<Functor>>>;
// Small-functor path: the functor is stored directly in the pointer-sized taskArg via union
// type-punning, avoiding a heap allocation.
template <class Functor, std::enable_if_t<FitsWithinPointerTrivially<Functor>::value, bool> = false>
inline void generateTaskFunc(TaskDesc& desc, Functor&& func)
{
    // Use SFINAE to have this version of generateTaskFunc() contribute to resolution only if Functor will fit within a
    // void*, so that we can use the taskArg as the instance. On my machine, this is about a tenth of the time for the
    // below specialization, and happens more frequently.
    using Func = typename std::decay_t<Functor>;
    union
    {
        Func f;
        void* v;
    } u{ std::forward<Functor>(func) };
    desc.taskArg = u.v;
    desc.task = [](void* arg) {
        union CARB_ATTRIBUTE(visibility("hidden"))
        {
            void* v;
            Func f;
        } u{ arg };
        u.f();
    };
    // Func is trivially destructible so we don't need a cancel func
}
// Large-functor path: heap-allocates the functor and registers a cancel callback so a canceled
// task still destroys/frees it.
template <class Functor, std::enable_if_t<!FitsWithinPointerTrivially<Functor>::value, bool> = false>
inline void generateTaskFunc(TaskDesc& desc, Functor&& func)
{
    // Use SFINAE to have this version of generateTaskFunc() contribute to resolution only if Functor will NOT fit
    // within a void*, so that the heap can be used only if necessary
    using Func = typename std::decay_t<Functor>;
    // Need to allocate
    desc.taskArg = new (carb::allocate(sizeof(Func))) Func(std::forward<Functor>(func));
    desc.task = [](void* arg) {
        // unique_ptr guarantees destruction/deallocation even if the functor throws.
        std::unique_ptr<Func, detail::CarbDeleter<Func>> p(static_cast<Func*>(arg));
        (*p)();
    };
    desc.cancel = [](void* arg) { detail::carbDelete(static_cast<Func*>(arg)); };
}
template <class T>
class SharedState;
// Shared state backing Future/Promise<void>: an intrusively ref-counted readiness flag plus the
// wait Object handed to ITasking.
template <>
class SharedState<void>
{
    std::atomic_size_t m_refs; // intrusive refcount; starts at 1 (2 if a future was already retrieved)
public:
    SharedState(bool futureRetrieved) noexcept : m_refs(1 + futureRetrieved), m_futureRetrieved(futureRetrieved)
    {
    }
    virtual ~SharedState() = default;
    void addRef() noexcept
    {
        m_refs.fetch_add(1, std::memory_order_relaxed);
    }
    // Drops one reference; destroys the state when the last reference goes away.
    void release()
    {
        if (m_refs.fetch_sub(1, std::memory_order_release) == 1)
        {
            // Acquire fence pairs with the release decrement: all prior writes are visible before delete.
            std::atomic_thread_fence(std::memory_order_acquire);
            detail::carbDelete(this);
        }
    }
    // Marks the (void) value as set; fatal on a second set. Task-backed states go to eTaskPending
    // (markReady() is called later); plain promise states go straight to eReady.
    void set()
    {
        CARB_FATAL_UNLESS(m_futex.exchange(isTask() ? eTaskPending : eReady, std::memory_order_acq_rel) == eUnset,
                          "Value already set");
    }
    // No value to retrieve for the void specialization.
    void get()
    {
    }
    void notify();
    // Final transition to eReady (release-publishes any value stored by a derived class).
    void markReady()
    {
        m_futex.store(eReady, std::memory_order_release);
    }
    bool ready() const
    {
        return m_futex.load(std::memory_order_relaxed) == eReady;
    }
    // A state is task-backed when its wait object is a TaskContext rather than the default futex.
    bool isTask() const
    {
        return m_object.type == ObjectType::eTaskContext;
    }
    enum State : uint8_t
    {
        eReady = 0, // value available (or promise canceled); also the "signaled" futex value
        eUnset, // initial state: nothing set yet
        eInProgress, // a setter is currently storing the value
        eTaskPending, // value stored but the owning task has not yet completed
    };
    std::atomic<State> m_futex{ eUnset };
    std::atomic_bool m_futureRetrieved{ false };
    Object m_object{ ObjectType::eFutex1, &m_futex };
};
template <class T>
class SharedState<T&> final : public SharedState<void>
{
public:
SharedState(bool futureRetrieved) noexcept : SharedState<void>(futureRetrieved)
{
}
bool isSet() const noexcept
{
return m_value != nullptr;
}
T& get() const
{
CARB_FATAL_UNLESS(m_value, "Attempting to retrieve value from broken promise");
return *m_value;
}
void set(T& val)
{
CARB_FATAL_UNLESS(m_futex.exchange(eInProgress, std::memory_order_acquire) == 1, "Value already set");
m_value = std::addressof(val);
m_futex.store(this->isTask() ? eTaskPending : eReady, std::memory_order_release);
}
T* m_value{ nullptr };
};
template <class T>
class SharedState final : public SharedState<void>
{
public:
using Type = typename std::decay<T>::type;
SharedState(bool futureRetrieved) noexcept : SharedState<void>(futureRetrieved)
{
}
bool isSet() const noexcept
{
return m_type.has_value();
}
const T& get_ref() const
{
CARB_FATAL_UNLESS(m_type, "Attempting to retrieve value from broken promise");
return m_type.value();
}
T get()
{
CARB_FATAL_UNLESS(m_type, "Attempting to retrieve value from broken promise");
return std::move(m_type.value());
}
void set(const T& value)
{
CARB_FATAL_UNLESS(m_futex.exchange(eInProgress, std::memory_order_acquire) == 1, "Value already set");
m_type.emplace(value);
m_futex.store(this->isTask() ? eTaskPending : eReady, std::memory_order_release);
}
void set(T&& value)
{
CARB_FATAL_UNLESS(m_futex.exchange(eInProgress, std::memory_order_acquire) == 1, "Value already set");
m_type.emplace(std::move(value));
m_futex.store(this->isTask() ? eTaskPending : eReady, std::memory_order_release);
}
carb::cpp::optional<Type> m_type;
};
} // namespace detail
#endif
class TaskGroup;
/**
* Helper class to ensure correct compliance with the requiredObject parameter of ITasking::add[Throttled]SubTask() and
* wait() functions.
*
* The following may be converted into a RequiredObject: TaskContext, Future, Any, All, Counter*, or CounterWrapper.
*/
struct RequiredObject final : public Object
{
    /**
     * Constructor that accepts a `std::nullptr_t`.
     */
    constexpr RequiredObject(std::nullptr_t) : Object{ ObjectType::eNone, nullptr }
    {
    }
    /**
     * Constructor that accepts an object that can be converted to Counter*.
     *
     * @param c An object convertible to Counter*. This can be Any, All, Counter* or CounterWrapper.
     */
    template <class T, std::enable_if_t<std::is_convertible<T, Counter*>::value, bool> = false>
    constexpr RequiredObject(T&& c) : Object{ ObjectType::eCounter, static_cast<Counter*>(c) }
    {
    }
    /**
     * Constructor that accepts an object that can be converted to TaskContext.
     *
     * @param tc A TaskContext or object convertible to TaskContext, such as a Future.
     */
    template <class T, std::enable_if_t<std::is_convertible<T, TaskContext>::value, bool> = true>
    constexpr RequiredObject(T&& tc)
        : Object{ ObjectType::eTaskContext, reinterpret_cast<void*>(static_cast<TaskContext>(tc)) }
    {
    }
    /**
     * Constructor that accepts a TaskGroup&.
     */
    constexpr RequiredObject(const TaskGroup& tg);
    /**
     * Constructor that accepts a TaskGroup*. `nullptr` may be provided.
     */
    constexpr RequiredObject(const TaskGroup* tg);
private:
    friend struct ITasking;
    template <class U>
    friend class Future;
    template <class U>
    friend class SharedFuture;
    // Private pass-through from a raw Object (used by Future/SharedFuture conversions).
    constexpr RequiredObject(const Object& o) : Object(o)
    {
    }
    void get(TaskDesc& desc);
};
/**
 * Specifies an "all" grouping of RequiredObject(s).
 *
 * @note *ALL* RequiredObject(s) given in the constructor must become signaled before the All object will be considered
 * signaled.
 *
 * All and Any objects can be nested as they are convertible to RequiredObject.
 */
struct All final
{
    /**
     * Constructor that accepts an initializer_list of RequiredObject(s).
     * @param il The `initializer_list` of RequiredObject(s).
     */
    All(std::initializer_list<RequiredObject> il);
    /**
     * Constructor that accepts begin and end iterators that produce RequiredObject objects.
     * @param begin The beginning iterator.
     * @param end An off-the-end iterator just beyond the end of the list.
     */
    template <class InputIt, std::enable_if_t<detail::IsForwardIter<InputIt, RequiredObject>::value, bool> = false>
    All(InputIt begin, InputIt end);
    //! @private
    template <class InputIt, std::enable_if_t<detail::IsRandomAccessIter<InputIt, RequiredObject>::value, bool> = false>
    All(InputIt begin, InputIt end);
    /**
     * Convertible to RequiredObject.
     */
    operator RequiredObject() const
    {
        return RequiredObject(m_counter);
    }
private:
    friend struct RequiredObject;
    // Internal counter that aggregates the grouped objects (creation not visible in this header).
    Counter* m_counter;
    operator Counter*() const
    {
        return m_counter;
    }
};
/**
 * Specifies an "any" grouping of RequiredObject(s).
 *
 * @note *ANY* RequiredObject given in the constructor that is or becomes signaled will cause the Any object to become
 * signaled.
 *
 * All and Any objects can be nested as they are convertible to RequiredObject.
 */
struct Any final
{
    /**
     * Constructor that accepts an initializer_list of RequiredObject objects.
     * @param il The initializer_list of RequiredObject objects.
     */
    Any(std::initializer_list<RequiredObject> il);
    /**
     * Constructor that accepts begin and end iterators that produce RequiredObject objects.
     * @param begin The beginning iterator.
     * @param end An off-the-end iterator just beyond the end of the list.
     */
    template <class InputIt, std::enable_if_t<detail::IsForwardIter<InputIt, RequiredObject>::value, bool> = false>
    Any(InputIt begin, InputIt end);
    //! @private
    template <class InputIt, std::enable_if_t<detail::IsRandomAccessIter<InputIt, RequiredObject>::value, bool> = false>
    Any(InputIt begin, InputIt end);
    /**
     * Convertible to RequiredObject.
     */
    operator RequiredObject() const
    {
        return RequiredObject(m_counter);
    }
private:
    friend struct RequiredObject;
    // Internal counter that aggregates the grouped objects (creation not visible in this header).
    Counter* m_counter;
    operator Counter*() const
    {
        return m_counter;
    }
};
/**
* Helper class to provide correct types to the Trackers class.
*
* The following types are valid trackers:
* - Anything convertible to Counter*, such as CounterWrapper. Counters are deprecated however. The Counter is
* incremented before the task can possibly begin executing and decremented when the task finishes.
* - Future<void>&: This can be used to atomically populate a Future<void> before the task could possibly start
* executing.
* - Future<void>*: Can be `nullptr`, but if not, can be used to atomically populate a Future<void> before the task
* could possibly start executing.
* - TaskContext&: By providing a reference to a TaskContext it will be atomically filled before the task could possibly
* begin executing.
* - TaskContext*: By providing a pointer to a TaskContext (that can be `nullptr`), it will be atomically filled before
* the task could possibly begin executing, if valid.
*/
struct Tracker final : Object
{
    /**
     * Constructor that accepts a `std::nullptr_t`.
     */
    constexpr Tracker(std::nullptr_t) : Object{ ObjectType::eNone, nullptr }
    {
    }
    /**
     * Constructor that accepts a Counter* or an object convertible to Counter*, such as CounterWrapper.
     *
     * @param c The object convertible to Counter*.
     */
    template <class T, std::enable_if_t<std::is_convertible<T, Counter*>::value, bool> = false>
    constexpr Tracker(T&& c) : Object{ ObjectType::eCounter, reinterpret_cast<void*>(static_cast<Counter*>(c)) }
    {
    }
    /**
     * Constructor that accepts a task name.
     *
     * @note This is not a Tracker per se; this is syntactic sugar to name a task as it is created.
     * @tparam T A type that is convertible to `const char*`.
     * @param name Either a `const char*` (dynamic string) or a `const char (&)[N]` (literal string) as the string name
     * for a task.
     * @see ITasking::nameTask()
     */
    template <class T, std::enable_if_t<std::is_convertible<T, const char*>::value, bool> = false>
    constexpr Tracker(T&& name)
        // const_cast is required because Object::data is a non-const void*; literal strings take the
        // eTaskNameLiteral type so the implementation can keep the pointer without copying.
        : Object{ detail::is_literal_string<T>::value ? ObjectType::eTaskNameLiteral : ObjectType::eTaskName,
                  const_cast<void*>(reinterpret_cast<const void*>(name)) }
    {
    }
    /**
     * Constructor that accepts a Future<void>&. The Future will be initialized before the task can begin.
     */
    Tracker(Future<>& fut) : Object{ ObjectType::ePtrTaskContext, fut.ptask() }
    {
    }
    /**
     * Constructor that accepts a Future<void>*. The Future<void> will be initialized before the task can begin.
     * The Future<void> pointer can be `nullptr`.
     */
    Tracker(Future<>* fut) : Object{ ObjectType::ePtrTaskContext, fut ? fut->ptask() : nullptr }
    {
    }
    /**
     * Constructor that accepts a SharedFuture<void>&. The SharedFuture will be initialized before the task can begin.
     */
    Tracker(SharedFuture<>& fut) : Object{ ObjectType::ePtrTaskContext, fut.ptask() }
    {
    }
    /**
     * Constructor that accepts a SharedFuture<void>*. The SharedFuture<void> will be initialized before the task can
     * begin. The SharedFuture<void> pointer can be `nullptr`.
     */
    Tracker(SharedFuture<>* fut) : Object{ ObjectType::ePtrTaskContext, fut ? fut->ptask() : nullptr }
    {
    }
    /**
     * Constructor that accepts a TaskContext&. The value will be atomically written before the task can begin.
     */
    constexpr Tracker(TaskContext& ctx) : Object{ ObjectType::ePtrTaskContext, &ctx }
    {
    }
    /**
     * Constructor that accepts a TaskContext*. The value will be atomically written before the task can begin.
     * The TaskContext* can be `nullptr`.
     */
    constexpr Tracker(TaskContext* ctx) : Object{ ObjectType::ePtrTaskContext, ctx }
    {
    }
    /**
     * Constructor that accepts a TaskGroup&. The TaskGroup will be entered immediately and left when the task finishes.
     * The TaskGroup must exist until the task completes.
     */
    Tracker(TaskGroup& grp);
    /**
     * Constructor that accepts a TaskGroup*. The TaskGroup will be entered immediately and left when the task finishes.
     * The TaskGroup* can be `nullptr` in which case nothing happens. The TaskGroup must exist until the task completes.
     */
    Tracker(TaskGroup* grp);
private:
    friend struct Trackers;
};
/**
* Helper class to ensure correct compliance with trackers parameter of ITasking::addTask() variants
*/
struct Trackers final
{
    /**
     * Default constructor.
     */
    constexpr Trackers() : m_variant{}
    {
    }
    /**
     * Constructor that accepts a single Tracker.
     *
     * @param t The type passed to the Tracker constructor.
     */
    template <class T, std::enable_if_t<std::is_constructible<Tracker, T>::value, bool> = false>
    constexpr Trackers(T&& t) : m_variant(Tracker(t))
    {
    }
    /**
     * Constructor that accepts an initializer_list of Tracker objects.
     *
     * @param il The `std::initializer_list` of Tracker objects.
     */
    constexpr Trackers(std::initializer_list<Tracker> il)
    {
        // Store nothing, a single Tracker, or a vector depending on list size (avoids allocation for 0/1).
        switch (il.size())
        {
            case 0:
                break;
            case 1:
                m_variant.emplace<Tracker>(*il.begin());
                break;
            default:
                m_variant.emplace<std::vector<Tracker>>(std::move(il));
        }
    }
    /**
     * Constructor that accepts an initializer_list of Tracker objects and additional Tracker objects.
     *
     * @param il The `std::initializer_list` of Tracker objects.
     * @param p A pointer to additional Tracker objects; size specified by @p count.
     * @param count The number of additional Tracker objects in the list specified by @p p.
     */
    Trackers(std::initializer_list<Tracker> il, Tracker const* p, size_t count)
        : m_variant(carb::cpp::in_place_index<2>)
    {
        // NOTE(review): the variant starts as an empty vector (index 2); in the `case 0` branch it stays
        // that way, so fill() reports an empty list with the kListOfCounters sentinel instead of the
        // monostate/nullptr path a default-constructed Trackers takes — confirm this is intended.
        switch (il.size() + count)
        {
            case 0:
                break;
            case 1:
                m_variant.emplace<Tracker>(il.size() == 0 ? *p : *il.begin());
                break;
            default:
            {
                auto& vec = m_variant.emplace<std::vector<Tracker>>();
                vec.reserve(il.size() + count);
                vec.insert(vec.end(), il.begin(), il.end());
                vec.insert(vec.end(), p, p + count);
            }
        }
    }
    /**
     * Retrieves a list of Tracker objects managed by this helper object.
     *
     * @param trackers Receives a pointer to a list of Tracker objects.
     * @param count Receives the count of Tracker objects.
     */
    void output(Tracker const*& trackers, size_t& count) const
    {
        // Tracker adds no data to Object, so the Object* produced by fill() can be viewed as Tracker*.
        static_assert(sizeof(Object) == sizeof(Tracker), "");
        fill(reinterpret_cast<Object const*&>(trackers), count);
    }
    CARB_PREVENT_COPY(Trackers);
    /**
     * Trackers is move-constructible.
     */
    Trackers(Trackers&&) = default;
    /**
     * Trackers is move-assignable.
     */
    Trackers& operator=(Trackers&&) = default;
private:
    friend struct ITasking;
    using TrackerVec = std::vector<Tracker>;
    // monostate = no trackers; single Tracker avoids allocation; vector for 2+.
    using Variant = carb::cpp::variant<carb::cpp::monostate, Tracker, TrackerVec>;
    Variant m_variant;
    // Writes the tracker list to (trackers, count). Returns nullptr when empty (monostate) or the
    // kListOfCounters sentinel to tell the implementation a tracker list follows.
    Counter* fill(carb::tasking::Object const*& trackers, size_t& count) const
    {
        if (m_variant.index() == 0)
        {
            trackers = nullptr;
            count = 0;
            return nullptr;
        }
        if (auto* vec = carb::cpp::get_if<TrackerVec>(&m_variant))
        {
            trackers = vec->data();
            count = vec->size();
        }
        else
        {
            const Tracker& t = carb::cpp::get<Tracker>(m_variant);
            trackers = &t;
            count = 1;
        }
        return detail::kListOfCounters;
    }
};
//! A macro that can be used to mark a function as async, that is, it always executes in the context of a task.
//!
//! Generally the body of the function has one of @ref CARB_ASSERT_ASYNC, @ref CARB_CHECK_ASYNC, or
//! @ref CARB_FATAL_UNLESS_ASYNC.
//!
//! @code{.cpp}
//! void CARB_ASYNC Context::loadTask();
//! @endcode
//! @note Annotation only: expands to nothing.
#define CARB_ASYNC
//! A macro that can be used to mark a function as possibly async, that is, it may execute in the context of a task.
//! @code{.cpp}
//! void CARB_MAYBE_ASYNC Context::loadTask();
//! @endcode
//! @note Annotation only: expands to nothing.
#define CARB_MAYBE_ASYNC
//! Helper macro that results in a boolean expression which is `true` if the current thread is running in task context.
#define CARB_IS_ASYNC                                                                                                  \
    (::carb::getCachedInterface<carb::tasking::ITasking>()->getTaskContext() != ::carb::tasking::kInvalidTaskContext)
//! A macro that is used to assert that a scope is running in task context in debug builds only.
#define CARB_ASSERT_ASYNC CARB_ASSERT(CARB_IS_ASYNC)
//! A macro that is used to assert that a scope is running in task context in debug and checked builds.
#define CARB_CHECK_ASYNC CARB_CHECK(CARB_IS_ASYNC)
//! A macro that is used to assert that a scope is running in task context.
#define CARB_FATAL_UNLESS_ASYNC CARB_FATAL_UNLESS(CARB_IS_ASYNC, "Not running in task context!")
} // namespace tasking
} // namespace carb
|
omniverse-code/kit/include/carb/tasking/IFiberEvents.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief IFiberEvents definition file.
#pragma once
#include "../Framework.h"
#include "../Interface.h"
namespace carb
{
namespace tasking
{
/**
* Defines the fiber events interface that receives fiber-related notifications.
*
* This is a \a reverse interface. It is not implemented by *carb.tasking.plugin*. Instead, *carb.tasking.plugin* looks
* for all instances of this interface and will call the functions to inform other plugins of fiber events. This can be
* used, for example, by a profiler that wants to keep track of which fiber is running on a thread.
*
* Once \ref IFiberEvents::notifyFiberStart() has been called, this is a signal to the receiver that a task is executing
* on the current thread, and will be executing on the current thread until \ref IFiberEvents::notifyFiberStop() is
* called on the same thread. Between these two calls, the thread is executing in *Task context*, that is, within a task
* submitted to *carb.tasking.plugin*. As such, it is possible to query information about the task, such as the context
* handle ( \ref ITasking::getTaskContext()) or access task-local storage ( \ref ITasking::getTaskStorage() /
* \ref ITasking::setTaskStorage()). However, **anything that could cause a task to yield is strictly prohibited**
* in these functions and will produce undefined behavior. This includes but is not limited to yielding, waiting on any
* task-aware synchronization primitive (i.e. locking a \ref Mutex), sleeping in a task-aware manner, suspending a task,
* etc.
*
* @warning *carb.tasking.plugin* queries for all IFiberEvents interfaces only during startup, during
* @ref ITasking::changeParameters() and @ref ITasking::reloadFiberEvents(). If a plugin is loaded which exports
* \c IFiberEvents then you **must** perform one of these methods to receive notifications about fiber events.
*
* @warning **DO NOT EVER** call the functions; only *carb.tasking.plugin* should be calling these functions. Receiving
* one of these function calls implies that *carb.tasking.plugin* is loaded, and these function calls can be coordinated
* with certain *carb.tasking.plugin* actions (reading task-specific data, for instance).
*
* @note Notification functions are called in the context of the thread which caused the fiber event.
*/
struct IFiberEvents
{
    // Declares the interface name and its semantic version (major 1, minor 0) so that
    // carb.tasking.plugin can locate all implementations of this reverse interface.
    CARB_PLUGIN_INTERFACE("carb::tasking::IFiberEvents", 1, 0)

    /**
     * Specifies that a fiber started or resumed execution on the calling thread.
     *
     * Specifies that the calling thread is now running fiber with ID @p fiberId until notifyFiberStop() is called on
     * the same thread.
     *
     * @note A thread switching fibers will always call notifyFiberStop() before calling notifyFiberStart() with the new
     * fiber ID.
     *
     * @param fiberId A unique identifier for a fiber.
     */
    void(CARB_ABI* notifyFiberStart)(const uint64_t fiberId);

    /**
     * Specifies that a fiber yielded execution on the calling thread. It may or may not restart again at some later
     * point, on the same thread or a different one.
     *
     * Specifies that the calling thread has yielded fiber with ID @p fiberId and is now running its own context.
     *
     * @param fiberId A unique identifier for a fiber.
     */
    void(CARB_ABI* notifyFiberStop)(const uint64_t fiberId);
};
} // namespace tasking
} // namespace carb
|
omniverse-code/kit/include/carb/tasking/TaskingUtils.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.tasking utilities.
#pragma once
#include "ITasking.h"
#include "../thread/Util.h"
#include <atomic>
#include <condition_variable> // for std::cv_status
#include <functional>
namespace carb
{
namespace tasking
{
class RecursiveSharedMutex;
/**
 * A busy-waiting mutual-exclusion lock built on a single atomic flag.
 *
 * Satisfies the C++ named requirements of <a
 * href="https://en.cppreference.com/w/cpp/named_req/Lockable">Lockable</a>, so it can be used with
 * std::lock_guard and friends.
 */
struct SpinMutex
{
public:
    /**
     * Default-constructs the lock in the unlocked state.
     */
    constexpr SpinMutex() noexcept = default;

    CARB_PREVENT_COPY_AND_MOVE(SpinMutex);

    /**
     * Busy-waits (with progressive backoff) until the lock is acquired.
     *
     * See § 30.4.1.2.1 in the C++11 standard.
     */
    void lock() noexcept
    {
        this_thread::spinWaitWithBackoff([this] { return try_lock(); });
    }

    /**
     * Makes a single attempt to acquire the lock.
     *
     * @returns `true` if the lock was acquired; `false` otherwise.
     */
    bool try_lock() noexcept
    {
        // Cheap relaxed read first so contended spinners don't hammer the cache line with RMW ops.
        if (m_flag.load(std::memory_order_relaxed))
            return false;
        return !m_flag.exchange(true, std::memory_order_acquire);
    }

    /**
     * Releases the lock; wait-free.
     *
     * See § 30.4.1.2.1 in the C++11 standard.
     */
    void unlock() noexcept
    {
        m_flag.store(false, std::memory_order_release);
    }

private:
    std::atomic_bool m_flag{};
};
/**
 * A busy-waiting reader/writer lock satisfying the C++ named requirements of <a
 * href="https://en.cppreference.com/w/cpp/named_req/SharedMutex">SharedMutex</a>.
 *
 * @warning This implementation is non-recursive.
 */
struct SpinSharedMutex
{
public:
    /**
     * Constructor.
     */
    constexpr SpinSharedMutex() = default;

    CARB_PREVENT_COPY_AND_MOVE(SpinSharedMutex);

    /**
     * Busy-waits until the exclusive lock is obtained.
     *
     * @warning It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
     */
    void lock()
    {
        for (;;)
        {
            if (try_lock())
                return;
            CARB_HARDWARE_PAUSE();
        }
    }

    /**
     * Makes a single attempt to obtain the exclusive lock, without spinning.
     *
     * @warning It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
     * @returns true if the mutex was exclusive-locked; false if no exclusive lock could be obtained.
     */
    bool try_lock()
    {
        // Transition 0 (unlocked) -> -1 (exclusive).
        int unlocked = 0;
        return m_state.compare_exchange_strong(unlocked, -1, std::memory_order_acquire, std::memory_order_relaxed);
    }

    /**
     * Releases the exclusive lock previously obtained by this thread/task.
     *
     * @warning It is undefined behavior to unlock a mutex that is not owned by the current thread or task.
     */
    void unlock()
    {
        CARB_ASSERT(m_state == -1);
        m_state.store(0, std::memory_order_release);
    }

    /**
     * Makes a single attempt to obtain a shared lock, without spinning.
     *
     * @warning It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
     * @returns true if the mutex was shared-locked; false if no shared lock could be obtained.
     */
    bool try_lock_shared()
    {
        int observed = m_state.load(std::memory_order_relaxed);
        if (observed < 0)
            return false; // exclusively held
        return m_state.compare_exchange_strong(observed, observed + 1, std::memory_order_acquire, std::memory_order_relaxed);
    }

    /**
     * Busy-waits until a shared lock is obtained.
     *
     * @warning It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
     */
    void lock_shared()
    {
        for (int observed = m_state.load(std::memory_order_relaxed);;)
        {
            if (observed < 0)
            {
                // A writer holds the lock; pause and re-sample.
                CARB_HARDWARE_PAUSE();
                observed = m_state.load(std::memory_order_relaxed);
                continue;
            }
            // On failure the CAS refreshes `observed` with the current value.
            if (m_state.compare_exchange_strong(observed, observed + 1, std::memory_order_acquire, std::memory_order_relaxed))
                return;
        }
    }

    /**
     * Releases a shared lock previously obtained by this thread/task.
     *
     * @warning It is undefined behavior to unlock a mutex that is not owned by the current thread or task.
     */
    void unlock_shared()
    {
        int prev = m_state.fetch_sub(1, std::memory_order_release);
        CARB_ASSERT(prev > 0);
        CARB_UNUSED(prev);
    }

private:
    // 0 - unlocked
    // > 0 - Shared lock count
    // -1 - Exclusive lock
    std::atomic<int> m_state{ 0 };
};
/**
* Wrapper for a carb::tasking::Counter
*/
class CounterWrapper
{
public:
/**
* Constructs a new CounterWrapper.
*
* @param target An optional (default:0) target value for the Counter to become signaled.
*/
CounterWrapper(uint32_t target = 0)
: m_counter(carb::getCachedInterface<ITasking>()->createCounterWithTarget(target))
{
}
/**
* Constructs a new CounterWrapper.
*
* @note Deprecated: The ITasking* parameter is no longer needed in this call.
* @param tasking The acquired ITasking interface.
* @param target An optional (default:0) target value for the Counter to become signaled.
*/
CARB_DEPRECATED("ITasking no longer needed.")
CounterWrapper(ITasking* tasking, uint32_t target = 0)
: m_counter(carb::getCachedInterface<ITasking>()->createCounterWithTarget(target))
{
CARB_UNUSED(tasking);
}
/**
* Destrutor
*
* @warning Destroying a Counter that is not signaled will assert in debug builds.
*/
~CounterWrapper()
{
carb::getCachedInterface<ITasking>()->destroyCounter(m_counter);
}
/**
* @returns true if the Counter is signaled; false otherwise
*/
CARB_DEPRECATED("The Counter interface is deprecated.") bool check() const
{
return try_wait();
}
/**
* @returns true if the Counter is signaled; false otherwise
*/
bool try_wait() const
{
return carb::getCachedInterface<ITasking>()->try_wait(m_counter);
}
/**
* Blocks the current thread or task in a fiber-safe way until the Counter becomes signaled.
*/
void wait() const
{
carb::getCachedInterface<ITasking>()->wait(m_counter);
}
/**
* Blocks the current thread or task in a fiber-safe way until the Counter becomes signaled or a period of time has
* elapsed.
*
* @param dur The amount of time to wait for.
* @returns true if the Counter is signaled; false if the time period elapsed.
*/
template <class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& dur) const
{
return carb::getCachedInterface<ITasking>()->wait_for(dur, m_counter);
}
/**
* Blocks the current thread or task in a fiber-safe way until the Counter becomes signaled or the clock reaches the
* given time point.
*
* @param tp The time point to wait until.
* @returns true if the Counter is signaled; false if the clock time is reached.
*/
template <class Clock, class Duration>
bool wait_until(const std::chrono::time_point<Clock, Duration>& tp) const
{
return carb::getCachedInterface<ITasking>()->wait_until(tp, m_counter);
}
/**
* Convertible to Counter*.
*/
operator Counter*() const
{
return m_counter;
}
/**
* Returns the acquired ITasking interface that was used to construct this object.
* @note Deprecated: Use carb::getCachedInterface instead.
*/
CARB_DEPRECATED("Use carb::getCachedInterface") ITasking* getTasking() const
{
return carb::getCachedInterface<ITasking>();
}
CARB_PREVENT_COPY_AND_MOVE(CounterWrapper);
private:
Counter* m_counter;
};
/**
 * A lightweight, fast counter that tracks a group of in-flight tasks.
 *
 * The TaskGroup blocks waiters while at least one task has entered it; it becomes signaled (empty)
 * once every task that entered has left.
 */
class TaskGroup
{
public:
    CARB_PREVENT_COPY_AND_MOVE(TaskGroup);

    /**
     * Constructs an empty TaskGroup.
     */
    constexpr TaskGroup() = default;

    /**
     * TaskGroup destructor.
     *
     * @warning It is an error to destroy a TaskGroup that is not empty. Doing so can result in memory corruption.
     */
    ~TaskGroup()
    {
        CARB_CHECK(empty(), "Destroying busy TaskGroup!");
    }

    /**
     * Reports (with high probability) whether the TaskGroup is empty.
     *
     * Because other tasks may enter or leave concurrently, the returned value can be stale by the
     * time the caller observes it.
     *
     * @returns `true` if there is high probability that the TaskGroup is empty (signaled); `false` otherwise.
     */
    bool empty() const
    {
        // Acquire ordering is required: a relaxed load would synchronize with nothing, allowing the
        // compiler to cache the value or hoist it out of a loop. Acquire forces synchronization with
        // every location that releases m_count.
        return m_count.load(std::memory_order_acquire) == 0;
    }

    /**
     * "Enters" the TaskGroup.
     *
     * @warning Every call to this function must be paired with leave(). It is generally better to use with().
     */
    void enter()
    {
        // Synchronizes-with all other locations releasing m_count.
        m_count.fetch_add(1, std::memory_order_acquire);
    }

    /**
     * "Leaves" the TaskGroup.
     *
     * @warning Every call to this function must be paired with an earlier enter() call. It is generally better to use
     * with().
     */
    void leave()
    {
        size_t prev = m_count.fetch_sub(1, std::memory_order_release);
        CARB_ASSERT(prev, "Mismatched enter()/leave() calls");
        if (prev == 1)
        {
            // This was the last task out; wake everyone blocked in wait()/try_wait_until().
            carb::getCachedInterface<ITasking>()->futexWakeup(m_count, UINT_MAX);
        }
    }

    /**
     * Returns `true` if the TaskGroup is empty (signaled) with high probability.
     *
     * @returns `true` if there is high probability that the TaskGroup is empty (signaled); `false` otherwise.
     */
    bool try_wait() const
    {
        return empty();
    }

    /**
     * Blocks the calling thread or task until the TaskGroup becomes empty.
     */
    void wait() const
    {
        // Synchronizes-with all other locations releasing m_count.
        size_t observed = m_count.load(std::memory_order_acquire);
        if (observed == 0)
            return;

        ITasking* tasking = carb::getCachedInterface<ITasking>();
        do
        {
            tasking->futexWait(m_count, observed);
            observed = m_count.load(std::memory_order_relaxed);
        } while (observed != 0);
    }

    /**
     * Blocks the calling thread or task until the TaskGroup becomes empty or the given duration elapses.
     *
     * @param dur The duration to wait for.
     * @returns `true` if the TaskGroup has become empty; `false` if the duration elapses.
     */
    template <class Rep, class Period>
    bool try_wait_for(std::chrono::duration<Rep, Period> dur)
    {
        auto deadline = std::chrono::steady_clock::now() + dur;
        return try_wait_until(deadline);
    }

    /**
     * Blocks the calling thread or task until the TaskGroup becomes empty or the given time is reached.
     *
     * @param when The time to wait until.
     * @returns `true` if the TaskGroup has become empty; `false` if the given time is reached.
     */
    template <class Clock, class Duration>
    bool try_wait_until(std::chrono::time_point<Clock, Duration> when)
    {
        // Synchronizes-with all other locations releasing m_count.
        size_t observed = m_count.load(std::memory_order_acquire);
        if (observed == 0)
            return true;

        ITasking* tasking = carb::getCachedInterface<ITasking>();
        do
        {
            if (!tasking->futexWaitUntil(m_count, observed, when))
                return false; // deadline reached
            observed = m_count.load(std::memory_order_relaxed);
        } while (observed != 0);
        return true;
    }

    /**
     * A helper function for entering the TaskGroup during a call to `invoke()` and leaving afterwards.
     *
     * @param args Arguments to pass to `carb::cpp::invoke`. The TaskGroup is entered (via \ref enter()) before the
     * invoke and left (via \ref leave()) when the invoke completes.
     * @returns the value returned by `carb::cpp::invoke`.
     */
    template <class... Args>
    auto with(Args&&... args)
    {
        enter();
        // Scope-exit guard ensures leave() runs even if the invoked callable throws.
        CARB_SCOPE_EXIT
        {
            leave();
        };
        return carb::cpp::invoke(std::forward<Args>(args)...);
    }

private:
    friend struct Tracker;
    friend struct RequiredObject;
    std::atomic_size_t m_count{ 0 };
};
/**
* Wrapper for a carb::tasking::Mutex that conforms to C++ Named Requirements of <a
* href="https://en.cppreference.com/w/cpp/named_req/Lockable">Lockable</a>.
*
* Non-recursive. If a recursive mutex is desired, use RecursiveMutexWrapper.
*/
class MutexWrapper
{
public:
/**
* Constructs a new MutexWrapper object
*/
MutexWrapper() : m_mutex(carb::getCachedInterface<ITasking>()->createMutex())
{
}
/**
* Constructs a new MutexWrapper object
* @note Deprecated: ITasking no longer needed.
*/
CARB_DEPRECATED("ITasking no longer needed.")
MutexWrapper(ITasking*) : m_mutex(carb::getCachedInterface<ITasking>()->createMutex())
{
}
/**
* Destructor
*
* @warning It is an error to destroy a mutex that is locked.
*/
~MutexWrapper()
{
carb::getCachedInterface<ITasking>()->destroyMutex(m_mutex);
}
/**
* Attempts to lock the mutex immediately.
*
* @warning It is an error to lock recursively. Use RecursiveSharedMutex if recursive locking is desired.
*
* @returns true if the mutex was locked; false otherwise
*/
bool try_lock()
{
return carb::getCachedInterface<ITasking>()->timedLockMutex(m_mutex, 0);
}
/**
* Locks the mutex, waiting until it becomes available.
*
* @warning It is an error to lock recursively. Use RecursiveSharedMutex if recursive locking is desired.
*/
void lock()
{
carb::getCachedInterface<ITasking>()->lockMutex(m_mutex);
}
/**
* Unlocks a mutex previously acquired with try_lock() or lock()
*
* @warning It is undefined behavior to unlock a mutex that is not owned by the current thread or task.
*/
void unlock()
{
carb::getCachedInterface<ITasking>()->unlockMutex(m_mutex);
}
/**
* Attempts to lock a mutex within a specified duration.
*
* @warning It is an error to lock recursively. Use RecursiveSharedMutex if recursive locking is desired.
*
* @param duration The duration to wait for the mutex to be available
* @returns true if the mutex was locked; false if the timeout period expired
*/
template <class Rep, class Period>
bool try_lock_for(const std::chrono::duration<Rep, Period>& duration)
{
return carb::getCachedInterface<ITasking>()->timedLockMutex(m_mutex, detail::convertDuration(duration));
}
/**
* Attempts to lock a mutex waiting until a specific clock time.
*
* @warning It is an error to lock recursively. Use RecursiveSharedMutex if recursive locking is desired.
*
* @param time_point The clock time to wait until.
* @returns true if the mutex was locked; false if the timeout period expired
*/
template <class Clock, class Duration>
bool try_lock_until(const std::chrono::time_point<Clock, Duration>& time_point)
{
return carb::getCachedInterface<ITasking>()->timedLockMutex(m_mutex, detail::convertAbsTime(time_point));
}
/**
* Convertible to Mutex*.
*/
operator Mutex*() const
{
return m_mutex;
}
/**
* Returns the acquired ITasking interface that was used to construct this object.
* @note Deprecated: Use carb::getCachedInterface instead.
*/
CARB_DEPRECATED("Use carb::getCachedInterface") ITasking* getTasking() const
{
return carb::getCachedInterface<ITasking>();
}
CARB_PREVENT_COPY_AND_MOVE(MutexWrapper);
private:
Mutex* m_mutex;
};
/**
* Wrapper for a recursive carb::tasking::Mutex that conforms to C++ Named Requirements of <a
* href="https://en.cppreference.com/w/cpp/named_req/Lockable">Lockable</a>.
*/
class RecursiveMutexWrapper
{
public:
/**
* Constructs a new RecursiveMutexWrapper object
*/
RecursiveMutexWrapper() : m_mutex(carb::getCachedInterface<ITasking>()->createRecursiveMutex())
{
}
/**
* Constructs a new RecursiveMutexWrapper object
* @note Deprecated: ITasking no longer needed.
*/
CARB_DEPRECATED("ITasking no longer needed.")
RecursiveMutexWrapper(ITasking*) : m_mutex(carb::getCachedInterface<ITasking>()->createRecursiveMutex())
{
}
/**
* Destructor
*
* @warning It is an error to destroy a mutex that is locked.
*/
~RecursiveMutexWrapper()
{
carb::getCachedInterface<ITasking>()->destroyMutex(m_mutex);
}
/**
* Attempts to lock the mutex immediately.
*
* @returns true if the mutex was locked or already owned by this thread/task; false otherwise. If true is returned,
* unlock() must be called to release the lock.
*/
bool try_lock()
{
return carb::getCachedInterface<ITasking>()->timedLockMutex(m_mutex, 0);
}
/**
* Locks the mutex, waiting until it becomes available. Call unlock() to release the lock.
*/
void lock()
{
carb::getCachedInterface<ITasking>()->lockMutex(m_mutex);
}
/**
* Unlocks a mutex previously acquired with try_lock() or lock()
*
* @note The unlock() function must be called for each successful lock.
* @warning It is undefined behavior to unlock a mutex that is not owned by the current thread or task.
*/
void unlock()
{
carb::getCachedInterface<ITasking>()->unlockMutex(m_mutex);
}
/**
* Attempts to lock a mutex within a specified duration.
*
* @param duration The duration to wait for the mutex to be available
* @returns true if the mutex was locked; false if the timeout period expired. If true is returned, unlock() must be
* called to release the lock.
*/
template <class Rep, class Period>
bool try_lock_for(const std::chrono::duration<Rep, Period>& duration)
{
return carb::getCachedInterface<ITasking>()->timedLockMutex(m_mutex, detail::convertDuration(duration));
}
/**
* Attempts to lock a mutex waiting until a specific clock time.
*
* @param time_point The clock time to wait until.
* @returns true if the mutex was locked; false if the timeout period expired. If true is returned, unlock() must be
* called to release the lock.
*/
template <class Clock, class Duration>
bool try_lock_until(const std::chrono::time_point<Clock, Duration>& time_point)
{
return carb::getCachedInterface<ITasking>()->timedLockMutex(m_mutex, detail::convertAbsTime(time_point));
}
/**
* Convertible to Mutex*.
*/
operator Mutex*() const
{
return m_mutex;
}
/**
* Returns the acquired ITasking interface that was used to construct this object.
* @note Deprecated: Use carb::getCachedInterface instead.
*/
CARB_DEPRECATED("Use carb::getCachedInterface") ITasking* getTasking() const
{
return carb::getCachedInterface<ITasking>();
}
CARB_PREVENT_COPY_AND_MOVE(RecursiveMutexWrapper);
private:
Mutex* m_mutex;
};
/**
* Wrapper for a carb::tasking::Semaphore
*
* @note SemaphoreWrapper can be used for @rstref{Throttling <tasking-throttling-label>} tasks.
*/
class SemaphoreWrapper
{
public:
/**
* Constructs a new SemaphoreWrapper object
*
* @param value The initial value of the semaphore (i.e. how many times acquire() can be called without blocking).
*/
SemaphoreWrapper(unsigned value) : m_sema(carb::getCachedInterface<ITasking>()->createSemaphore(value))
{
}
/**
* Constructs a new SemaphoreWrapper object
*
* @note Deprecated: ITasking no longer needed.
* @param value The initial value of the semaphore (i.e. how many times acquire() can be called without blocking).
*/
CARB_DEPRECATED("ITasking no longer needed.")
SemaphoreWrapper(ITasking*, unsigned value) : m_sema(carb::getCachedInterface<ITasking>()->createSemaphore(value))
{
}
/**
* Destructor
*/
~SemaphoreWrapper()
{
carb::getCachedInterface<ITasking>()->destroySemaphore(m_sema);
}
/**
* Increases the value of the semaphore, potentially unblocking any threads waiting in acquire().
*
* @param count The value to add to the Semaphore's value. That is, the number of threads to either unblock while
* waiting in acquire(), or to allow to call acquire() without blocking.
*/
void release(unsigned count = 1)
{
carb::getCachedInterface<ITasking>()->releaseSemaphore(m_sema, count);
}
/**
* Reduce the value of the Semaphore by one, potentially blocking if the count is already zero.
*
* @note Threads that are blocked by acquire() must be released by other threads calling release().
*/
void acquire()
{
carb::getCachedInterface<ITasking>()->waitSemaphore(m_sema);
}
/**
* Attempts to reduce the value of the Semaphore by one. If the Semaphore's value is zero, false is returned.
*
* @returns true if the count of the Semaphore was reduced by one; false if the count is already zero.
*/
bool try_acquire()
{
return carb::getCachedInterface<ITasking>()->timedWaitSemaphore(m_sema, 0);
}
/**
* Attempts to reduce the value of the Semaphore by one, waiting until the duration expires if the value is zero.
*
* @returns true if the count of the Semaphore was reduced by one; false if the duration expires.
*/
template <class Rep, class Period>
bool try_acquire_for(const std::chrono::duration<Rep, Period>& dur)
{
return carb::getCachedInterface<ITasking>()->timedWaitSemaphore(m_sema, detail::convertDuration(dur));
}
/**
* Attempts to reduce the value of the Semaphore by one, waiting until the given time point is reached if the value
* is zero.
*
* @returns true if the count of the Semaphore was reduced by one; false if the time point is reached by the clock.
*/
template <class Clock, class Duration>
bool try_acquire_until(const std::chrono::time_point<Clock, Duration>& tp)
{
return carb::getCachedInterface<ITasking>()->timedWaitSemaphore(m_sema, detail::convertAbsTime(tp));
}
/**
* Convertible to Semaphore*.
*/
operator Semaphore*() const
{
return m_sema;
}
/**
* Returns the acquired ITasking interface that was used to construct this object.
* @note Deprecated: Use carb::getCachedInterface instead.
*/
CARB_DEPRECATED("Use carb::getCachedInterface") ITasking* getTasking() const
{
return carb::getCachedInterface<ITasking>();
}
CARB_PREVENT_COPY_AND_MOVE(SemaphoreWrapper);
private:
Semaphore* m_sema;
};
/**
* Wrapper for a carb::tasking::SharedMutex that (mostly) conforms to C++ Named Requirements of SharedMutex.
*/
class SharedMutexWrapper
{
public:
/**
* Constructs a new SharedMutexWrapper object
*/
SharedMutexWrapper() : m_mutex(carb::getCachedInterface<ITasking>()->createSharedMutex())
{
}
/**
* Constructs a new SharedMutexWrapper object
* @note Deprecated: ITasking no longer needed.
*/
CARB_DEPRECATED("ITasking no longer needed.")
SharedMutexWrapper(ITasking*) : m_mutex(carb::getCachedInterface<ITasking>()->createSharedMutex())
{
}
/**
* Destructor
*
* @note It is an error to destroy a shared mutex that is locked.
*/
~SharedMutexWrapper()
{
carb::getCachedInterface<ITasking>()->destroySharedMutex(m_mutex);
}
/**
* Attempts to shared-lock the shared mutex immediately.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*
* @returns true if the mutex was shared-locked; false otherwise
*/
bool try_lock_shared()
{
return carb::getCachedInterface<ITasking>()->timedLockSharedMutex(m_mutex, 0);
}
/**
* Attempts to exclusive-lock the shared mutex immediately.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*
* @returns true if the mutex was shared-locked; false otherwise
*/
bool try_lock()
{
return carb::getCachedInterface<ITasking>()->timedLockSharedMutexExclusive(m_mutex, 0);
}
/**
* Attempts to exclusive-lock the shared mutex within a specified duration.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*
* @param duration The duration to wait for the mutex to be available
* @returns true if the mutex was exclusive-locked; false if the timeout period expired
*/
template <class Rep, class Period>
bool try_lock_for(const std::chrono::duration<Rep, Period>& duration)
{
return carb::getCachedInterface<ITasking>()->timedLockSharedMutexExclusive(
m_mutex, detail::convertDuration(duration));
}
/**
* Attempts to shared-lock the shared mutex within a specified duration.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*
* @param duration The duration to wait for the mutex to be available
* @returns true if the mutex was shared-locked; false if the timeout period expired
*/
template <class Rep, class Period>
bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& duration)
{
return carb::getCachedInterface<ITasking>()->timedLockSharedMutex(m_mutex, detail::convertDuration(duration));
}
/**
* Attempts to exclusive-lock the shared mutex until a specific clock time.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*
* @param time_point The clock time to wait until.
* @returns true if the mutex was exclusive-locked; false if the timeout period expired
*/
template <class Clock, class Duration>
bool try_lock_until(const std::chrono::time_point<Clock, Duration>& time_point)
{
return try_lock_for(time_point - Clock::now());
}
/**
* Attempts to shared-lock the shared mutex until a specific clock time.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*
* @param time_point The clock time to wait until.
* @returns true if the mutex was shared-locked; false if the timeout period expired
*/
template <class Clock, class Duration>
bool try_lock_shared_until(const std::chrono::time_point<Clock, Duration>& time_point)
{
return try_lock_shared_for(time_point - Clock::now());
}
/**
* Shared-locks the shared mutex, waiting until it becomes available.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*/
void lock_shared()
{
carb::getCachedInterface<ITasking>()->lockSharedMutex(m_mutex);
}
/**
* Unlocks a mutex previously shared-locked by this thread/task.
*
* @note It is undefined behavior to unlock a mutex that is not owned by the current thread or task.
*/
void unlock_shared()
{
carb::getCachedInterface<ITasking>()->unlockSharedMutex(m_mutex);
}
/**
* Exclusive-locks the shared mutex, waiting until it becomes available.
*
* @note It is an error to lock recursively or shared-lock when exclusive-locked, or vice versa.
*/
void lock()
{
carb::getCachedInterface<ITasking>()->lockSharedMutexExclusive(m_mutex);
}
/**
* Unlocks a mutex previously exclusive-locked by this thread/task.
*
* @note It is undefined behavior to unlock a mutex that is not owned by the current thread or task.
*/
void unlock()
{
carb::getCachedInterface<ITasking>()->unlockSharedMutex(m_mutex);
}
/**
* Convertible to SharedMutex*.
*/
operator SharedMutex*() const
{
return m_mutex;
}
/**
* Returns the acquired ITasking interface that was used to construct this object.
* @note Deprecated: Use carb::getCachedInterface instead.
*/
CARB_DEPRECATED("Use carb::getCachedInterface") ITasking* getTasking() const
{
return carb::getCachedInterface<ITasking>();
}
CARB_PREVENT_COPY_AND_MOVE(SharedMutexWrapper);
private:
SharedMutex* m_mutex;
};
/**
* Wrapper for carb::tasking::ConditionVariable
*/
class ConditionVariableWrapper
{
public:
/**
* Constructs a new ConditionVariableWrapper object
*/
ConditionVariableWrapper() : m_cv(carb::getCachedInterface<ITasking>()->createConditionVariable())
{
}
/**
* Constructs a new ConditionVariableWrapper object
* @note Deprecated: ITasking no longer needed.
*/
CARB_DEPRECATED("ITasking no longer needed.")
ConditionVariableWrapper(ITasking*) : m_cv(carb::getCachedInterface<ITasking>()->createConditionVariable())
{
}
/**
* Destructor
*
* @note It is an error to destroy a condition variable that has waiting threads.
*/
~ConditionVariableWrapper()
{
carb::getCachedInterface<ITasking>()->destroyConditionVariable(m_cv);
}
/**
* Waits until the condition variable is notified.
*
* @note @p m must be locked when calling this function. The mutex will be unlocked while waiting and re-locked
* before returning to the caller.
*
* @param m The mutex to unlock while waiting for the condition variable to be notified.
*/
void wait(Mutex* m)
{
carb::getCachedInterface<ITasking>()->waitConditionVariable(m_cv, m);
}
/**
* Waits until a predicate has been satisfied and the condition variable is notified.
*
* @note @p m must be locked when calling this function. The mutex will be locked when calling @p pred and when this
* function returns, but unlocked while waiting.
*
* @param m The mutex to unlock while waiting for the condition variable to be notified.
* @param pred A predicate that is called repeatedly. When it returns true, the function returns. If it returns
* false, @p m is unlocked and the thread/task waits until the condition variable is notified.
*/
template <class Pred>
void wait(Mutex* m, Pred&& pred)
{
carb::getCachedInterface<ITasking>()->waitConditionVariablePred(m_cv, m, std::forward<Pred>(pred));
}
/**
* Waits until the condition variable is notified or the specified duration expires.
*
* @note @p m must be locked when calling this function. The mutex will be unlocked while waiting and re-locked
* before returning to the caller.
*
* @param m The mutex to unlock while waiting for the condition variable to be notified.
* @param duration The amount of time to wait for.
* @returns `std::cv_status::no_timeout` if the condition variable was notified; `std::cv_status::timeout` if the
* timeout period expired.
*/
template <class Rep, class Period>
std::cv_status wait_for(Mutex* m, const std::chrono::duration<Rep, Period>& duration)
{
return carb::getCachedInterface<ITasking>()->timedWaitConditionVariable(
m_cv, m, detail::convertDuration(duration)) ?
std::cv_status::no_timeout :
std::cv_status::timeout;
}
/**
* Waits until a predicate is satisfied and the condition variable is notified, or the specified duration expires.
*
* @note @p m must be locked when calling this function. The mutex will be unlocked while waiting and re-locked
* before returning to the caller.
*
* @param m The mutex to unlock while waiting for the condition variable to be notified.
* @param duration The amount of time to wait for.
* @param pred A predicate that is called repeatedly. When it returns true, the function returns. If it returns
* false, @p m is unlocked and the thread/task waits until the condition variable is notified.
* @returns true if the predicate was satisfied; false if a timeout occurred.
*/
template <class Rep, class Period, class Pred>
bool wait_for(Mutex* m, const std::chrono::duration<Rep, Period>& duration, Pred&& pred)
{
return carb::getCachedInterface<ITasking>()->timedWaitConditionVariablePred(
m_cv, m, detail::convertDuration(duration), std::forward<Pred>(pred));
}
/**
* Waits until the condition variable is notified or the clock reaches the given time point.
*
* @note @p m must be locked when calling this function. The mutex will be unlocked while waiting and re-locked
* before returning to the caller.
*
* @param m The mutex to unlock while waiting for the condition variable to be notified.
* @param time_point The clock time to wait until.
* @returns `std::cv_status::no_timeout` if the condition variable was notified; `std::cv_status::timeout` if the
* timeout period expired.
*/
template <class Clock, class Duration>
std::cv_status wait_until(Mutex* m, const std::chrono::time_point<Clock, Duration>& time_point)
{
return carb::getCachedInterface<ITasking>()->timedWaitConditionVariable(
m_cv, m, detail::convertAbsTime(time_point)) ?
std::cv_status::no_timeout :
std::cv_status::timeout;
}
/**
* Waits until a predicate is satisfied and the condition variable is notified or the clock reaches the given time
* point.
*
* @note @p m must be locked when calling this function. The mutex will be unlocked while waiting and re-locked
* before returning to the caller.
*
* @param m The mutex to unlock while waiting for the condition variable to be notified.
* @param time_point The clock time to wait until.
* @param pred A predicate that is called repeatedly. When it returns true, the function returns. If it returns
* false, @p m is unlocked and the thread/task waits until the condition variable is notified.
* @returns true if the predicate was satisfied; false if a timeout occurred.
*/
template <class Clock, class Duration, class Pred>
bool wait_until(Mutex* m, const std::chrono::time_point<Clock, Duration>& time_point, Pred&& pred)
{
return carb::getCachedInterface<ITasking>()->timedWaitConditionVariablePred(
m_cv, m, detail::convertAbsTime(time_point), std::forward<Pred>(pred));
}
/**
* Notifies one waiting thread/task to wake and check the predicate (if applicable).
*/
void notify_one()
{
carb::getCachedInterface<ITasking>()->notifyConditionVariableOne(m_cv);
}
/**
* Notifies all waiting threads/tasks to wake and check the predicate (if applicable).
*/
void notify_all()
{
carb::getCachedInterface<ITasking>()->notifyConditionVariableAll(m_cv);
}
    /**
     * Implicit conversion to the underlying ConditionVariable* so the wrapper can be passed directly
     * to ITasking APIs that take the raw handle.
     */
    operator ConditionVariable*() const
    {
        return m_cv;
    }
    /**
     * Returns the cached ITasking interface.
     * @note Deprecated: Use carb::getCachedInterface instead. This no longer returns a stored member; it simply
     * forwards to the global cached-interface lookup.
     */
    CARB_DEPRECATED("Use carb::getCachedInterface") ITasking* getTasking() const
    {
        return carb::getCachedInterface<ITasking>();
    }
CARB_PREVENT_COPY_AND_MOVE(ConditionVariableWrapper);
private:
ConditionVariable* m_cv;
};
/**
 * When instantiated, begins tracking the passed Trackers. At destruction, tracking on the given Trackers is ended.
 *
 * This is similar to the manner in which ITasking::addTask() accepts Trackers and begins tracking them prior to the
 * task starting, and then leaves them when the task finishes. This class allows performing the same tracking behavior
 * without the overhead of a task.
 */
class ScopedTracking
{
public:
    /**
     * Default constructor. Produces an empty object that tracks nothing (ObjectType::eNone).
     */
    ScopedTracking() : m_tracker{ ObjectType::eNone, nullptr }
    {
    }
    /**
     * Constructor that accepts a Trackers object.
     * @param trackers The Trackers to begin tracking.
     */
    ScopedTracking(Trackers trackers);
    /**
     * Destructor. The Trackers provided to the constructor finish tracking when `this` is destroyed.
     */
    ~ScopedTracking();
    CARB_PREVENT_COPY(ScopedTracking);
    /**
     * Allows move-construct. The moved-from object is left empty (tracking nothing).
     */
    ScopedTracking(ScopedTracking&& rhs);
    /**
     * Allows move-assign. Implemented as a swap; see the inline definition below.
     */
    ScopedTracking& operator=(ScopedTracking&& rhs) noexcept;
private:
    // Handle returned by ITasking::beginTracking(); ObjectType::eNone means "not tracking".
    Object m_tracker;
};
//! Constructs a RequiredObject that waits on the given TaskGroup's internal counter.
inline constexpr RequiredObject::RequiredObject(const TaskGroup& tg)
    : Object{ ObjectType::eTaskGroup, const_cast<std::atomic_size_t*>(&tg.m_count) }
{
}
//! Pointer overload; a nullptr TaskGroup produces a null (no-op) RequiredObject.
inline constexpr RequiredObject::RequiredObject(const TaskGroup* tg)
    : Object{ ObjectType::eTaskGroup, tg ? const_cast<std::atomic_size_t*>(&tg->m_count) : nullptr }
{
}
//! Groups the given required objects into a single wait-for-all counter.
inline All::All(std::initializer_list<RequiredObject> il)
{
    // RequiredObject is layout-compatible with Object, so the list storage can be passed through directly.
    static_assert(sizeof(RequiredObject) == sizeof(Object), "Invalid assumption");
    m_counter = carb::getCachedInterface<ITasking>()->internalGroupObjects(ITasking::eAll, il.begin(), il.size());
}
//! Builds a wait-for-all group from a forward-iterator range of RequiredObject.
//! The range is gathered into contiguous storage so it can be handed to ITasking in one call.
template <class InputIt, std::enable_if_t<detail::IsForwardIter<InputIt, RequiredObject>::value, bool>>
inline All::All(InputIt begin, InputIt end)
{
    static_assert(sizeof(RequiredObject) == sizeof(Object), "Invalid assumption");
    std::vector<RequiredObject> gathered(begin, end);
    m_counter =
        carb::getCachedInterface<ITasking>()->internalGroupObjects(ITasking::eAll, gathered.data(), gathered.size());
}
//! Builds a wait-for-all group from a random-access range of RequiredObject.
template <class InputIt, std::enable_if_t<detail::IsRandomAccessIter<InputIt, RequiredObject>::value, bool>>
inline All::All(InputIt begin, InputIt end)
{
    static_assert(sizeof(RequiredObject) == sizeof(Object), "Invalid assumption");
    // Random-access iterators give us the count up front, so the objects can be staged on the stack
    // instead of heap-allocating a vector. CARB_STACK_ALLOC must remain in this (caller) frame.
    size_t const count = end - begin;
    RequiredObject* objects = CARB_STACK_ALLOC(RequiredObject, count);
    size_t index = 0;
    for (; begin != end; ++begin)
        objects[index++] = *begin;
    CARB_ASSERT(index == count);
    m_counter = carb::getCachedInterface<ITasking>()->internalGroupObjects(ITasking::eAll, objects, count);
}
//! Groups the given required objects into a single wait-for-any counter.
inline Any::Any(std::initializer_list<RequiredObject> il)
{
    // RequiredObject is layout-compatible with Object, so the list storage can be passed through directly.
    static_assert(sizeof(RequiredObject) == sizeof(Object), "Invalid assumption");
    m_counter = carb::getCachedInterface<ITasking>()->internalGroupObjects(ITasking::eAny, il.begin(), il.size());
}
//! Builds a wait-for-any group from a forward-iterator range of RequiredObject.
//! The range is gathered into contiguous storage so it can be handed to ITasking in one call.
template <class InputIt, std::enable_if_t<detail::IsForwardIter<InputIt, RequiredObject>::value, bool>>
inline Any::Any(InputIt begin, InputIt end)
{
    static_assert(sizeof(RequiredObject) == sizeof(Object), "Invalid assumption");
    std::vector<RequiredObject> gathered(begin, end);
    m_counter =
        carb::getCachedInterface<ITasking>()->internalGroupObjects(ITasking::eAny, gathered.data(), gathered.size());
}
//! Builds a wait-for-any group from a random-access range of RequiredObject.
template <class InputIt, std::enable_if_t<detail::IsRandomAccessIter<InputIt, RequiredObject>::value, bool>>
inline Any::Any(InputIt begin, InputIt end)
{
    static_assert(sizeof(RequiredObject) == sizeof(Object), "Invalid assumption");
    // Random-access iterators give us the count up front, so the objects can be staged on the stack
    // instead of heap-allocating a vector. CARB_STACK_ALLOC must remain in this (caller) frame.
    size_t const count = end - begin;
    RequiredObject* objects = CARB_STACK_ALLOC(RequiredObject, count);
    size_t index = 0;
    for (; begin != end; ++begin)
        objects[index++] = *begin;
    CARB_ASSERT(index == count);
    m_counter = carb::getCachedInterface<ITasking>()->internalGroupObjects(ITasking::eAny, objects, count);
}
//! Constructs a Tracker that reports completion through the given TaskGroup's internal counter.
inline Tracker::Tracker(TaskGroup& grp) : Object{ ObjectType::eTaskGroup, &grp.m_count }
{
}
//! Pointer overload; a nullptr TaskGroup produces a null (no-op) Tracker.
inline Tracker::Tracker(TaskGroup* grp) : Object{ ObjectType::eTaskGroup, grp ? &grp->m_count : nullptr }
{
}
//! Begins tracking the given Trackers; tracking ends when this object is destroyed.
inline ScopedTracking::ScopedTracking(Trackers trackers)
{
    Tracker const* trackerList = nullptr;
    size_t trackerCount = 0;
    trackers.output(trackerList, trackerCount);
    m_tracker = carb::getCachedInterface<ITasking>()->beginTracking(trackerList, trackerCount);
}
//! Ends tracking (if any) that was begun at construction.
inline ScopedTracking::~ScopedTracking()
{
    // Detach the held tracker while resetting the member to the empty state.
    Object held = std::exchange(m_tracker, { ObjectType::eNone, nullptr });
    if (held.type != ObjectType::eTrackerGroup)
        return;
    carb::getCachedInterface<ITasking>()->endTracking(held);
}
// Move-construct: steal rhs's tracker and leave rhs empty (eNone), so only one object ends tracking.
inline ScopedTracking::ScopedTracking(ScopedTracking&& rhs)
    : m_tracker(std::exchange(rhs.m_tracker, { ObjectType::eNone, nullptr }))
{
}
inline ScopedTracking& ScopedTracking::operator=(ScopedTracking&& rhs) noexcept
{
    // Swap rather than ending our tracking here: our previous tracker (now in rhs) finishes
    // when rhs is destroyed.
    std::swap(m_tracker, rhs.m_tracker);
    return *this;
}
} // namespace tasking
} // namespace carb
|
omniverse-code/kit/include/carb/memory/MemoryTrackerReplaceAllocation.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Defines.h"
#if CARB_PLATFORM_WINDOWS && defined CARB_MEMORY_TRACKER_ENABLED && defined CARB_MEMORY_TRACKER_MODE_REPLACE
# include <vcruntime_new.h>
# pragma warning(push)
# pragma warning(disable : 4595) // non-member operator new or delete functions may not be declared inline
/**
 * Replacement of the new/delete operators in C++ (REPLACE-mode memory tracking, Windows only).
 *
 * NOTE(review): these forward straight to malloc/free. The throwing `operator new` contract requires
 * never returning nullptr (it should throw std::bad_alloc or loop on new_handler); returning the raw
 * malloc() result deviates from that on OOM — presumably acceptable for this tracking build, but
 * confirm intent.
 */
inline void* operator new(size_t size)
{
    return malloc(size);
}
inline void operator delete(void* address)
{
    free(address);
}
inline void* operator new[](size_t size)
{
    return malloc(size);
}
inline void operator delete[](void* address)
{
    free(address);
}
/*
void* operator new(size_t size, const std::nothrow_t&)
{
return malloc(size);
}
void operator delete(void* address, const std::nothrow_t&)
{
free(address);
}
void* operator new[](size_t size, const std::nothrow_t&)
{
return malloc(size);
}
void operator delete[](void* address, const std::nothrow_t&)
{
free(address);
}*/
# pragma warning(pop)
#endif
|
omniverse-code/kit/include/carb/memory/Memory.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Defines.h"
#include "IMemoryTracker.h"
#if CARB_MEMORY_WORK_AS_PLUGIN
# define CARB_MEMORY_GLOBALS() CARB_MEMORY_TRACKER_GLOBALS()
/**
 * RAII helper: registers the memory tracker for this client at construction and deregisters it at destruction.
 */
class MemoryInitializerScoped
{
public:
    MemoryInitializerScoped()
    {
        carb::memory::registerMemoryTrackerForClient();
    }
    ~MemoryInitializerScoped()
    {
        carb::memory::deregisterMemoryTrackerForClient();
    }
};
#endif
#if defined(CARB_MEMORY_TRACKER_MODE_REPLACE)
inline void* mallocWithRecord(size_t size)
{
void* address = malloc(size);
if (address)
{
carb::memory::IMemoryTracker* tracker = carb::memory::getMemoryTracker();
if (tracker)
{
// Set allocationGroup to nullptr means using default allocation group(HEAP)
tracker->recordAllocation(nullptr, address, size);
}
}
return address;
}
//! Records a free with the memory tracker (if present) and releases the block back to the heap.
//! @param address Block previously returned by mallocWithRecord()/malloc(); may be nullptr.
inline void freeWithRecord(void* address)
{
    carb::memory::IMemoryTracker* tracker = carb::memory::getMemoryTracker();
    if (tracker)
    {
        // Set allocationGroup to nullptr means using default allocation group(HEAP)
        tracker->recordFree(nullptr, address);
    }
    // Fix: actually release the memory. Previously this only recorded the free, so every
    // operator delete / carbFree in CARB_MEMORY_TRACKER_MODE_REPLACE builds leaked the block
    // (mallocWithRecord() calls malloc(), so the symmetric free() must happen here).
    free(address);
}
// Replacement global new/delete that route through the tracking allocator in REPLACE mode.
// On Windows these must be `inline` (see the pragma-guarded header above); on other platforms
// they are out-of-line replacements.
# if CARB_PLATFORM_WINDOWS
inline void* operator new(size_t size) throw()
# else
void* operator new(size_t size) throw()
# endif
{
    return mallocWithRecord(size);
}
# if CARB_PLATFORM_WINDOWS
inline void operator delete(void* address) throw()
# else
void operator delete(void* address) throw()
# endif
{
    freeWithRecord(address);
}
// NOTE(review): the sized-deallocation signature is operator delete(void*, std::size_t).
// `unsigned long` matches size_t on LP64 Linux but is 32-bit on Win64, so on Windows this is
// not the sized-delete overload — confirm whether size_t was intended.
# if CARB_PLATFORM_WINDOWS
inline void operator delete(void* address, unsigned long) throw()
# else
void operator delete(void* address, unsigned long) throw()
# endif
{
    freeWithRecord(address);
}
# if CARB_PLATFORM_WINDOWS
inline void* operator new[](size_t size) throw()
# else
void* operator new[](size_t size) throw()
# endif
{
    return mallocWithRecord(size);
}
# if CARB_PLATFORM_WINDOWS
inline void operator delete[](void* address) throw()
# else
void operator delete[](void* address) throw()
# endif
{
    freeWithRecord(address);
}
// See the size_t note above; same concern applies to the array form.
# if CARB_PLATFORM_WINDOWS
inline void operator delete[](void* address, unsigned long) throw()
# else
void operator delete[](void* address, unsigned long) throw()
# endif
{
    freeWithRecord(address);
}
// Nothrow variants: returning nullptr on failure is the correct contract here.
void* operator new(size_t size, const std::nothrow_t&)
{
    return mallocWithRecord(size);
}
void operator delete(void* address, const std::nothrow_t&)
{
    freeWithRecord(address);
}
void* operator new[](size_t size, const std::nothrow_t&)
{
    return mallocWithRecord(size);
}
void operator delete[](void* address, const std::nothrow_t&)
{
    freeWithRecord(address);
}
#endif
inline void* _carbMalloc(size_t size, va_list args)
{
carb::memory::Context* context = va_arg(args, carb::memory::Context*);
carb::memory::IMemoryTracker* tracker = carb::memory::getMemoryTracker();
if (tracker && context)
tracker->pushContext(*context);
#if defined(CARB_MEMORY_TRACKER_MODE_REPLACE)
void* address = mallocWithRecord(size);
#else
void* address = malloc(size);
#endif
if (tracker && context)
tracker->popContext();
return address;
}
//! Tracked allocation entry point (see NV_MALLOC). Optional vararg: a carb::memory::Context*.
inline void* carbMalloc(size_t size, ...)
{
    va_list ap;
    va_start(ap, size);
    void* const result = _carbMalloc(size, ap);
    va_end(ap);
    return result;
}
//! Frees memory obtained from carbMalloc()/NV_MALLOC; delegates to freeWithRecord() in replace mode.
inline void carbFree(void* address)
{
#if defined(CARB_MEMORY_TRACKER_MODE_REPLACE)
    freeWithRecord(address);
#else
    free(address);
#endif
}
/**
 * Placement-style operator new used by NV_NEW: receives call-site file/line plus an optional
 * carb::memory::Context* vararg forwarded to the tracker.
 * @note @p file is currently unused (CARB_UNUSED); only the optional Context vararg affects tracking.
 */
inline void* operator new(size_t size, const char* file, int line, ...)
{
    CARB_UNUSED(file);
    va_list args;
    va_start(args, line);
    void* address = _carbMalloc(size, args);
    va_end(args);
    return address;
}
//! Array form of the file/line placement operator new above.
inline void* operator new[](size_t size, const char* file, int line, ...)
{
    CARB_UNUSED(file);
    va_list args;
    va_start(args, line);
    void* address = _carbMalloc(size, args);
    va_end(args);
    return address;
}
#define NV_MALLOC(size, ...) carbMalloc(size, ##__VA_ARGS__)
#define NV_FREE(p) carbFree(p)
#define NV_NEW(...) new (__FILE__, __LINE__, ##__VA_ARGS__)
#define NV_DELETE delete
#define NV_DELETE_ARRAY delete[]
|
omniverse-code/kit/include/carb/memory/PooledAllocator.h | // Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../container/LocklessQueue.h"
#include "../thread/Mutex.h"
#include <memory>
// Change this to 1 to enable pooled allocator leak checking. This captures a callstack and puts everything into an
// intrusive list.
#define CARB_POOLEDALLOC_LEAKCHECK 0
#if CARB_POOLEDALLOC_LEAKCHECK
# include "../extras/Debugging.h"
# include "../container/IntrusiveList.h"
#endif
#if CARB_DEBUG
# include "../logging/Log.h"
#endif
namespace carb
{
namespace memory
{
/**
* PooledAllocator implements the Allocator named requirements. It is thread-safe and (mostly) lockless. The given
* Allocator type must be thread-safe as well. Memory is never returned to the given Allocator until destruction.
*
* @param T The type created by this PooledAllocator
* @param Allocator The allocator to use for underlying memory allocation. Must be able to allocate many instances
* contiguously.
*/
template <class T, class Allocator = std::allocator<T>>
class PooledAllocator
{
public:
using pointer = T*;
using const_pointer = const T*;
using reference = T&;
using const_reference = const T&;
using void_pointer = void*;
using const_void_pointer = const void*;
using value_type = T;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
template <class U>
struct rebind
{
using other = PooledAllocator<U>;
};
PooledAllocator() : m_emo(ValueInitFirst{}), m_debugName(CARB_PRETTY_FUNCTION)
{
}
~PooledAllocator()
{
#if CARB_DEBUG
// Leak checking
size_type freeCount = 0;
m_emo.second.forEach([&freeCount](MemBlock*) { ++freeCount; });
size_t const totalCount =
m_bucketCount ? (size_t(1) << (m_bucketCount + kBucketShift)) - (size_t(1) << kBucketShift) : 0;
size_t const leaks = totalCount - freeCount;
if (leaks != 0)
{
CARB_LOG_ERROR("%s: leaked %zu items", m_debugName, leaks);
}
#endif
#if CARB_POOLEDALLOC_LEAKCHECK
m_list.clear();
#endif
// Deallocate everything
m_emo.second.popAll();
for (size_type i = 0; i != m_bucketCount; ++i)
{
m_emo.first().deallocate(m_buckets[i], size_t(1) << (i + kBucketShift));
}
}
pointer allocate(size_type n = 1)
{
CARB_CHECK(n <= 1); // cannot allocate more than 1 item simultaneously
MemBlock* p = m_emo.second.pop();
p = p ? p : _expand();
#if CARB_POOLEDALLOC_LEAKCHECK
size_t frames = extras::debugBacktrace(0, p->entry.callstack, CARB_COUNTOF(p->entry.callstack));
memset(p->entry.callstack + frames, 0, sizeof(void*) * (CARB_COUNTOF(p->entry.callstack) - frames));
new (&p->entry.link) decltype(p->entry.link){};
std::lock_guard<carb::thread::mutex> g(m_listMutex);
m_list.push_back(p->entry);
#endif
return reinterpret_cast<pointer>(p);
}
pointer allocate(size_type n, const_void_pointer p)
{
pointer mem = p ? pointer(p) : allocate(n);
return mem;
}
void deallocate(pointer p, size_type n = 1)
{
CARB_CHECK(n <= 1); // cannot free more than 1 item simultaneously
MemBlock* mb = reinterpret_cast<MemBlock*>(p);
#if CARB_POOLEDALLOC_LEAKCHECK
{
std::lock_guard<carb::thread::mutex> g(m_listMutex);
m_list.remove(mb->entry);
}
#endif
m_emo.second.push(mb);
}
size_type max_size() const
{
return 1;
}
private:
constexpr static size_type kBucketShift = 10; // First bucket contains 1<<10 items
constexpr static size_type kAlignment = ::carb_max(alignof(T), alignof(carb::container::LocklessQueueLink<void*>));
struct alignas(kAlignment) PoolEntry
{
T obj;
#if CARB_POOLEDALLOC_LEAKCHECK
void* callstack[32];
carb::container::IntrusiveListLink<PoolEntry> link;
#endif
};
struct NontrivialDummyType
{
constexpr NontrivialDummyType() noexcept
{
// Avoid zero-initialization when value initialized
}
};
struct alignas(kAlignment) MemBlock
{
union
{
NontrivialDummyType dummy{};
PoolEntry entry;
carb::container::LocklessQueueLink<MemBlock> m_link;
};
~MemBlock()
{
}
};
#if CARB_POOLEDALLOC_LEAKCHECK
carb::thread::mutex m_listMutex;
carb::container::IntrusiveList<PoolEntry, &PoolEntry::link> m_list;
#endif
// std::allocator<>::rebind<> has been deprecated in C++17 on mac.
CARB_IGNOREWARNING_GNUC_WITH_PUSH("-Wdeprecated-declarations")
using BaseAllocator = typename Allocator::template rebind<MemBlock>::other;
CARB_IGNOREWARNING_GNUC_POP
MemBlock* _expand()
{
std::lock_guard<Lock> g(m_mutex);
// If we get the lock, first check to see if another thread populated the buckets first
if (MemBlock* mb = m_emo.second.pop())
{
return mb;
}
size_t const bucket = m_bucketCount;
size_t const allocationCount = size_t(1) << (bucket + kBucketShift);
// Allocate from base. The underlying allocator may throw
MemBlock* mem = m_emo.first().allocate(allocationCount);
CARB_FATAL_UNLESS(mem, "PooledAllocator underlying allocation failed: Out of memory");
// If any further exceptions are thrown, deallocate `mem`.
CARB_SCOPE_EXCEPT
{
m_emo.first().deallocate(mem, allocationCount);
};
// Resize the number of buckets. This can throw if make_unique() fails.
auto newBuckets = std::make_unique<MemBlock*[]>(m_bucketCount + 1);
if (m_bucketCount++ > 0)
memcpy(newBuckets.get(), m_buckets.get(), sizeof(MemBlock*) * (m_bucketCount - 1));
m_buckets = std::move(newBuckets);
// Populate the new bucket
// Add entries (after the first) to the free list
m_emo.second.push(mem + 1, mem + allocationCount);
m_buckets[bucket] = mem;
// Return the first entry that we reserved for the caller
return mem;
}
using LocklessQueue = carb::container::LocklessQueue<MemBlock, &MemBlock::m_link>;
EmptyMemberPair<BaseAllocator, LocklessQueue> m_emo;
using Lock = carb::thread::mutex;
Lock m_mutex;
std::unique_ptr<MemBlock* []> m_buckets {};
size_t m_bucketCount{ 0 };
const char* const m_debugName;
};
} // namespace memory
} // namespace carb
|
omniverse-code/kit/include/carb/memory/IMemoryTracker.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "MemoryTrackerDefines.h"
#include "MemoryTrackerReplaceAllocation.h"
#include "MemoryTrackerTypes.h"
#if CARB_MEMORY_WORK_AS_PLUGIN
# include "../Framework.h"
#endif
#include "../Types.h"
namespace carb
{
namespace memory
{
/**
 * Defines a toolkit Memory Tracker, used to monitor/track memory usage/leak.
 *
 * NOTE(review): this is a versioned plugin interface (CARB_PLUGIN_INTERFACE) laid out as a table of
 * function pointers — presumably ABI-stable, so member order must not change; confirm before editing.
 */
struct IMemoryTracker
{
    CARB_PLUGIN_INTERFACE("carb::memory::IMemoryTracker", 1, 0)
    /**
     * Setting this number either in the debugger, or in code will result in causing
     * the memory allocator to break when this allocation is encountered.
     */
    intptr_t* breakOnAlloc;
    /**
     * Specify that the debugger signal should be triggered nth allocation within a context.
     * @param context The context to modify.
     * @param nAlloc Signal the debugger on the nth allocation within context. -1 disables
     * this feature.
     * This feature only respects the top of the Context stack.
     */
    void(CARB_ABI* contextBreakOnAlloc)(const Context& context, intptr_t nAlloc);
    /**
     * Makes the context active on the context stack for this thread.
     *
     * @param context The context to become active
     */
    void(CARB_ABI* pushContext)(const Context& context);
    /**
     * Pops the context on the top of the stack off for this thread.
     */
    void(CARB_ABI* popContext)();
    /**
     * Creates an allocation group.
     *
     * @param name The name of the memory address group.
     * @return The address group object.
     */
    AllocationGroup*(CARB_ABI* createAllocationGroup)(const char* name);
    /**
     * Destroys an allocation group.
     *
     * @param allocationGroup The address group to destroy
     */
    void(CARB_ABI* destroyAllocationGroup)(AllocationGroup* allocationGroup);
    /**
     * Records an allocation on behalf of a region.
     *
     * The context recorded is on the top of the context stack. Additionally, the backtrace
     * associated with this allocation is recorded from this call site.
     *
     * @param allocationGroup The allocationGroup to record the allocation into (nullptr selects the default group)
     * @param address The address that the allocation exists at
     * @param size The size of the allocation.
     */
    void(CARB_ABI* recordAllocation)(AllocationGroup* allocationGroup, const void* const address, size_t size);
    /**
     * Records an allocation on behalf of a region.
     *
     * Additionally, the backtrace associated with this allocation is recorded from this call
     * site.
     *
     * @param allocationGroup The allocationGroup to record the allocation into
     * @param context The context that the allocation is associated with.
     * @param address The address that the allocation exists at
     * @param size The size of the allocation.
     */
    void(CARB_ABI* recordAllocationWithContext)(AllocationGroup* allocationGroup,
                                                const Context& context,
                                                const void* const address,
                                                size_t size);
    /**
     * Records that an allocation that was previously recorded was released.
     *
     * @param allocationGroup The allocation group that the allocation was associated with.
     * @param address The address the allocation was associated with.
     */
    void(CARB_ABI* recordFree)(AllocationGroup* allocationGroup, const void* const address);
    /**
     * Creates a bookmark of the current state of the memory system.
     *
     * This is somewhat of a heavy-weight operation and should only be used at certain times
     * such as level load.
     *
     * @return A snapshot of the current state of the memory system.
     */
    Bookmark*(CARB_ABI* createBookmark)();
    /**
     * Destroys a memory bookmark.
     *
     * @param bookmark The bookmark to destroy.
     */
    void(CARB_ABI* destroyBookmark)(Bookmark* bookmark);
    /**
     * Get a basic summary of the current state of the memory system, that is of a low enough overhead that we could
     * put on an ImGui page that updates every frame.
     *
     * @return The Summary struct of current state.
     */
    Summary(CARB_ABI* getSummary)();
    /**
     * Generates a memory report.
     *
     * @param reportFlags The flags about the report.
     * @return nullptr if the report couldn't be generated, otherwise the report object; it is up to the caller to
     * release the report with destroyReport.
     */
    Report*(CARB_ABI* createReport)(ReportFlags reportFlags);
    /**
     * Generates a memory report, starting at a bookmark to now.
     *
     * @param reportFlags The flags about the report.
     * @param bookmark Any allocations before bookmark will be ignored in the report.
     * @return nullptr if the report couldn't be generated, otherwise the report object; it is up to the caller to
     * release the report with destroyReport.
     */
    Report*(CARB_ABI* createReportFromBookmark)(ReportFlags reportFlags, Bookmark* bookmark);
    /**
     * Frees underlying data for the report.
     *
     * @param report The report to free.
     */
    void(CARB_ABI* destroyReport)(Report* report);
    /**
     * Returns a pointer to the report data. The returned pointer can not be stored for persistence usage, and it will
     * be freed along with the report.
     *
     * @param report The report data to inspect.
     * @return The raw report data.
     */
    const char*(CARB_ABI* reportGetData)(Report* report);
    /**
     * Returns the number of leaks stored in a memory report.
     *
     * @param report The report to return the number of leaks for.
     * @return The number of leaks associated with the report.
     */
    size_t(CARB_ABI* getReportMemoryLeakCount)(const Report* report);
    /**
     * When exiting, memory tracker will create a memory leak report.
     * The report file name could be (from high priority to low):
     * - In command line arguments (top priority) as format: --memory.report.path
     * - Parameter in this function
     * - The default: ${WorkingDir}/memoryleak.json
     * @param fileName The file name (including full path) to save memory leak report when exiting
     */
    void(CARB_ABI* setReportFileName)(const char* fileName);
};
} // namespace memory
} // namespace carb
#if CARB_MEMORY_WORK_AS_PLUGIN
CARB_WEAKLINK carb::memory::IMemoryTracker* g_carbMemoryTracker;
# define CARB_MEMORY_TRACKER_GLOBALS()
#endif
namespace carb
{
namespace memory
{
#if CARB_MEMORY_WORK_AS_PLUGIN
//! Acquires the IMemoryTracker interface from the Framework and stores it in the weak-linked global.
inline void registerMemoryTrackerForClient()
{
    Framework* framework = getFramework();
    g_carbMemoryTracker = framework->acquireInterface<memory::IMemoryTracker>();
}
//! Clears the cached tracker pointer.
//! NOTE(review): the acquired interface is not released here — presumably the Framework manages its
//! lifetime; confirm.
inline void deregisterMemoryTrackerForClient()
{
    g_carbMemoryTracker = nullptr;
}
/**
 * Get the toolkit Memory Tracker
 * @return the memory tracker toolkit, or nullptr if registerMemoryTrackerForClient() has not been called
 */
inline IMemoryTracker* getMemoryTracker()
{
    return g_carbMemoryTracker;
}
#else
/**
* Get the toolkit Memory Tracker
* @return the memory tracker toolkit
*/
CARB_EXPORT memory::IMemoryTracker* getMemoryTracker();
#endif
/**
 * RAII Context helper
 *
 * This class uses RAII to automatically set a context as active and then release it.
 *
 * @code
 * {
 *     ScopedContext scoped(soundContext);
 *     // Allocate some sound resources
 * }
 * @endcode
 */
class ScopedContext
{
public:
    // NOTE(review): single-argument constructor is implicit; `explicit` would prevent accidental
    // conversions, but adding it now could break existing callers — confirm before changing.
    ScopedContext(const Context& context)
    {
        CARB_UNUSED(context);
#if CARB_MEMORY_TRACKER_ENABLED
        IMemoryTracker* tracker = getMemoryTracker();
        CARB_ASSERT(tracker);
        if (tracker)
            tracker->pushContext(context);
#endif
    }
    ~ScopedContext()
    {
#if CARB_MEMORY_TRACKER_ENABLED
        IMemoryTracker* tracker = getMemoryTracker();
        CARB_ASSERT(tracker);
        if (tracker)
            tracker->popContext();
#endif
    }
};
} // namespace memory
} // namespace carb
|
omniverse-code/kit/include/carb/memory/MemoryTrackerTypes.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
// ver: 0.1
//
#pragma once
#include <cstddef>
#include <cstdint>
namespace carb
{
namespace memory
{
/**
 * A context is a thin wrapper of a string pointer, as such it is up to the programmer
 * to ensure that the pointer is valid at the invocation.
 *
 * To minimize the possibility of error any API receiving the context should copy the
 * string rather than reference its pointer.
 */
class Context
{
    const char* m_name; // not owned; the caller must keep the string alive

public:
    //! Wraps (does not copy) the given context-name string.
    explicit Context(const char* contextName) : m_name{ contextName }
    {
    }
    //! Returns the wrapped string pointer exactly as passed to the constructor.
    const char* getContextName() const
    {
        return m_name;
    }
};
/**
* An address space is a type of memory that the user wishes to track. Normal
* allocation goes into the Global address space. This is used to track manual heaps, as
* well as resources that behave like memory but are not directly tied to the memory
* systems provided by the global heap. This can also be used to track an object who has
* unique id for the life-time of the object. Example: OpenGL Texture Ids
*
* Examples include GPU resources and Object Pools.
*/
struct AllocationGroup;
#define DEFAULT_ALLOCATION_GROUP_NAME ""
/**
* A bookmark is a point in time in the memory tracker, it allows the user to
* create a view of the memory between a bookmark and now.
*/
struct Bookmark;
/**
 * Flags controlling what a generated report contains (see IMemoryTracker::createReport).
 */
struct ReportFlag
{
    enum
    {
        eReportLeaks = 0x1, ///< Report any memory leaks as well.
        eSummary = 0x2, ///< Just a summary.
        eFull = eReportLeaks | eSummary, ///< Both leak details and the summary.
    };
};
typedef uint32_t ReportFlags; //!< Bitmask of ReportFlag values.
/**
 * This structure wraps up the data of the report. Opaque; accessed via IMemoryTracker::reportGetData().
 */
class Report;
/**
 * A Summary is a really simple report.
 * NOTE(review): field meanings inferred from names — confirm against the tracker implementation.
 */
struct Summary
{
    size_t allocationGroupCount; //!< Number of allocation groups.
    size_t allocationCount; //!< Number of recorded allocations.
    size_t allocationBytes; //!< Bytes recorded as allocated.
    size_t freeCount; //!< Number of recorded frees.
    size_t freeBytes; //!< Bytes recorded as freed.
};
/**
 * Identifies the allocation API that produced a recorded allocation.
 */
enum class MemoryType
{
    eMalloc, //!< malloc()
    eCalloc, //!< calloc()
    eRealloc, //!< realloc()
    eAlignedAlloc, //!< aligned_alloc()
    eStrdup, //!< strdup()
    eNew, //!< operator new
    eNewArray, //!< operator new[]
    eExternal, //!< recorded manually via recordAllocation() (presumably; confirm)
    eMemalign, // Linux only
    eValloc, // Linux only
    ePosixMemalign, // Linux only
    eHeapAlloc, //!< Windows HeapAlloc
    eHeapRealloc, //!< Windows HeapReAlloc
};
} // namespace memory
} // namespace carb
|
omniverse-code/kit/include/carb/memory/MemoryTrackerDefines.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Defines.h"
// In plugin mode, memory tracker is required to be loaded/unloaded as other plugins
// In this mode, always track memory allocation/free after loaded
#define CARB_MEMORY_WORK_AS_PLUGIN 1
// JMK 2021-09-13: disabling carb.memory as it can lead to shutdown issues. The hooks are not added or removed in a
// thread-safe way, which means that other threads can be in a trampoline or hook function when they are removed. This
// leads to potential crashes at shutdown.
#ifndef CARB_MEMORY_TRACKER_ENABLED
// # define CARB_MEMORY_TRACKER_ENABLED (CARB_DEBUG)
# define CARB_MEMORY_TRACKER_ENABLED 0
#endif
// Option on work mode for Windows
// Set to 1: Hook windows heap API (only for Windows)
// Set to 0: Replace malloc/free, new/delete
// Linux always uses replace mode
#define CARB_MEMORY_HOOK 1
#if CARB_PLATFORM_LINUX && CARB_MEMORY_TRACKER_ENABLED
# define CARB_MEMORY_TRACKER_MODE_REPLACE
#elif CARB_PLATFORM_WINDOWS && CARB_MEMORY_TRACKER_ENABLED
# if CARB_MEMORY_HOOK
# define CARB_MEMORY_TRACKER_MODE_HOOK
# else
# define CARB_MEMORY_TRACKER_MODE_REPLACE
# endif
#endif
// Option to add addition header before allocated memory
// See MemoryBlockHeader for header structure
#define CARB_MEMORY_ADD_HEADER 0
#if !CARB_MEMORY_ADD_HEADER
// If header not added, will verify the 6 of 8 bytes before allocated memory
// ---------------------------------
// | Y | Y | Y | Y | N | N | Y | Y | Allocated memory
// ---------------------------------
// Y means to verify, N means to ignore
// These 8 bytes should be part of heap chunk header
// During test, the 6 bytes will not changed before free while other 2 bytes may changed.
// Need investigate more for reason.
# define CARB_MEMORY_VERIFY_HEAP_CHUNK_HEADER 1
#else
# define CARB_MEMORY_VERIFY_HEAP_CHUNK_HEADER 0
#endif
|
omniverse-code/kit/include/carb/memory/ArenaAllocator.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Allocator that initially uses a memory arena first (typically on the stack) and then falls back to the heap.
#pragma once
#include "../Defines.h"
#include <memory>
namespace carb
{
namespace memory
{
/**
* An allocator that initially allocates from a memory arena (typically on the stack) and falls back to another
* allocator when that is exhausted.
*
* ArenaAllocator conforms to the C++ Named Requirement of <a
* href="https://en.cppreference.com/w/cpp/named_req/Allocator">Allocator</a>.
* @tparam T The type allocated by this allocator.
* @tparam FallbackAllocator The Allocator that is used when the arena is exhausted.
*/
template <class T, class FallbackAllocator = std::allocator<T>>
class ArenaAllocator
{
public:
//! \c T*
using pointer = typename std::allocator_traits<FallbackAllocator>::pointer;
//! `T const*`
using const_pointer = typename std::allocator_traits<FallbackAllocator>::const_pointer;
//! \c void*
using void_pointer = typename std::allocator_traits<FallbackAllocator>::void_pointer;
//! `void const*`
using const_void_pointer = typename std::allocator_traits<FallbackAllocator>::const_void_pointer;
//! \c T
using value_type = typename std::allocator_traits<FallbackAllocator>::value_type;
//! \c std::size_t
using size_type = typename std::allocator_traits<FallbackAllocator>::size_type;
//! \c std::ptrdiff_t
using difference_type = typename std::allocator_traits<FallbackAllocator>::difference_type;
//! Rebinds ArenaAllocator to a different type \c U
template <class U>
struct rebind
{
//! The rebound ArenaAllocator
using other = ArenaAllocator<U, typename FallbackAllocator::template rebind<U>::other>;
};
/**
* Default constructor. Only uses \c FallbackAllocator as no arena is given.
*/
ArenaAllocator() : m_members(ValueInitFirst{}, nullptr), m_current(nullptr), m_end(nullptr)
{
}
/**
* Constructs \c ArenaAllocator with a specific \c FallbackAllocator. Only uses \c FallbackAllocator as no arena is
* given.
*
* @param fallback A \c FallbackAllocator instance to copy.
*/
explicit ArenaAllocator(const FallbackAllocator& fallback)
: m_members(InitBoth{}, fallback, nullptr), m_current(nullptr), m_end(nullptr)
{
}
/**
* Constructs \c ArenaAllocator with an arena and optionally a specific \c FallbackAllocator.
*
* @warning It is the caller's responsibility to ensure that the given memory arena outlives \c *this and any other
* \ref ArenaAllocator which it may be moved to.
*
* @param begin A pointer to the beginning of the arena.
* @param end A pointer immediately past the end of the arena.
* @param fallback A \c FallbackAllocator instance to copy.
*/
ArenaAllocator(void* begin, void* end, const FallbackAllocator& fallback = FallbackAllocator())
: m_members(InitBoth{}, fallback, static_cast<uint8_t*>(begin)),
m_current(alignForward(m_members.second)),
m_end(static_cast<uint8_t*>(end))
{
}
/**
* Move constructor: constructs \c ArenaAllocator by moving from a different \c ArenaAllocator.
*
* @param other The \c ArenaAllocator to copy from.
*/
ArenaAllocator(ArenaAllocator&& other)
: m_members(InitBoth{}, std::move(other.m_members.first()), other.m_members.second),
m_current(other.m_current),
m_end(other.m_end)
{
// Prevent `other` from allocating memory from the arena. By adding 1 we put it past the end which prevents
// other->deallocate() from reclaiming the last allocation.
other.m_current = other.m_end + 1;
}
/**
* Copy constructor: constructs \c ArenaAllocator from a copy of a given \c ArenaAllocator.
*
* @note Even though \p other is passed via const-reference, the arena is transferred from \p other to `*this`.
* Further allocations from \p other will defer to the FallbackAllocator.
*
* @param other The \c ArenaAllocator to copy from.
*/
ArenaAllocator(const ArenaAllocator& other)
: m_members(InitBoth{}, other.m_members.first(), other.m_members.second),
m_current(other.m_current),
m_end(other.m_end)
{
// Prevent `other` from allocating memory from the arena. By adding 1 we put it past the end which prevents
// other->deallocate() from reclaiming the last allocation.
other.m_current = other.m_end + 1;
}
/**
* Copy constructor: constructs \c ArenaAllocator for type \c T from a copy of a given \c ArenaAllocator for type
* \c U.
*
* @note This does not copy the arena; that is retained by the original allocator.
*
* @param other The \c ArenaAllocator to copy from.
*/
template <class U, class UFallbackAllocator>
ArenaAllocator(const ArenaAllocator<U, UFallbackAllocator>& other)
: m_members(InitBoth{}, other.m_members.first(), other.m_members.second),
m_current(other.m_end + 1),
m_end(other.m_end)
{
// m_current is explicitly assigned to `other.m_end + 1` to prevent further allocations from the arena from
// *this and to prevent this->deallocate() from reclaiming the last allocation.
}
/**
* Allocates (but does not construct) memory for one or more instances of \c value_type.
*
* @param n The number of contiguous \c value_type instances to allocate. If the request cannot be serviced by the
* arena, the \c FallbackAllocator is used.
* @returns An uninitialized memory region that will fit \p n contiguous instances of \c value_type.
* @throws Memory Any exception that would be thrown by \c FallbackAllocator.
*/
pointer allocate(size_type n = 1)
{
if ((m_current + (sizeof(value_type) * n)) <= end())
{
pointer p = reinterpret_cast<pointer>(m_current);
m_current += (sizeof(value_type) * n);
return p;
}
return m_members.first().allocate(n);
}
/**
* Deallocates (but does not destruct) memory for one or more instances of \c value_type.
*
* @note If the memory came from the arena, the memory will not be available for reuse unless the memory is the
* most recent allocation from the arena.
* @param in The pointer previously returned from \ref allocate().
* @param n The same \c n value that was passed to \ref allocate() that produced \p in.
*/
void deallocate(pointer in, size_type n = 1)
{
uint8_t* p = reinterpret_cast<uint8_t*>(in);
if (p >= begin() && p < end())
{
if ((p + (sizeof(value_type) * n)) == m_current)
m_current -= (sizeof(value_type) * n);
}
else
m_members.first().deallocate(in, n);
}
private:
uint8_t* begin() const noexcept
{
return m_members.second;
}
uint8_t* end() const noexcept
{
return m_end;
}
static uint8_t* alignForward(void* p)
{
uint8_t* out = reinterpret_cast<uint8_t*>(p);
constexpr static size_t align = alignof(value_type);
size_t aligned = (size_t(out) + (align - 1)) & -(ptrdiff_t)align;
return out + (aligned - size_t(out));
}
template <class U, class UFallbackAllocator>
friend class ArenaAllocator;
mutable EmptyMemberPair<FallbackAllocator, uint8_t* /*begin*/> m_members;
mutable uint8_t* m_current;
mutable uint8_t* m_end;
};
//! Equality operator
//! @param lhs An allocator to compare
//! @param rhs An allocator to compare
//! @returns \c true if \p lhs and \p rhs can deallocate each other's allocations.
//! @note NOTE(review): this free function reads ArenaAllocator's private \c m_members, but the class only declares
//! other ArenaAllocator instantiations as friends -- confirm this compiles on all supported toolchains or add a
//! friend declaration for this operator.
template <class T, class U, class Allocator1, class Allocator2>
bool operator==(const ArenaAllocator<T, Allocator1>& lhs, const ArenaAllocator<U, Allocator2>& rhs)
{
    // Interchangeable iff they share the same arena start AND their fallback allocators compare equal.
    return (void*)lhs.m_members.second == (void*)rhs.m_members.second && lhs.m_members.first() == rhs.m_members.first();
}
//! Inequality operator
//! @param lhs An allocator to compare
//! @param rhs An allocator to compare
//! @returns the inverse of the equality operator.
template <class T, class U, class Allocator1, class Allocator2>
bool operator!=(const ArenaAllocator<T, Allocator1>& lhs, const ArenaAllocator<U, Allocator2>& rhs)
{
    // Defined strictly in terms of operator== so the two can never disagree.
    const bool equivalent = (lhs == rhs);
    return !equivalent;
}
} // namespace memory
} // namespace carb
|
omniverse-code/kit/include/carb/memory/Util.h | // Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Helper utilities for memory
#pragma once
#include "../Defines.h"
#if CARB_PLATFORM_LINUX
# include <unistd.h>
#endif
namespace carb
{
namespace memory
{
// Turn off optimization for testReadable() for Visual Studio, otherwise the read will be elided and it will always
// return true.
CARB_OPTIMIZE_OFF_MSC()
/**
* Tests if a memory word (size_t) can be read from an address without crashing.
*
* @note This is not a particularly efficient function and should not be depended on for performance.
*
* @param mem The address to attempt to read
* @returns `true` if a value could be read successfully, `false` if attempting to read the value would cause an access
* violation or SIGSEGV.
*/
CARB_ATTRIBUTE(no_sanitize_address) inline bool testReadable(const void* mem)
{
#if CARB_PLATFORM_WINDOWS
    // Use SEH to catch a read failure. This is very fast unless an exception occurs as no setup work is needed on
    // x86_64.
    __try
    {
        size_t s = *reinterpret_cast<const size_t*>(mem);
        CARB_UNUSED(s); // the value is irrelevant; only whether the read faults matters
        return true;
    }
    __except (1)
    {
        return false;
    }
#elif CARB_POSIX
    // The pipes trick: use the kernel to validate that the memory can be read. write() will return -1 with errno=EFAULT
    // if the memory is not readable.
    int pipes[2];
    CARB_FATAL_UNLESS(pipe(pipes) == 0, "Failed to create pipes");
    int ret = CARB_RETRY_EINTR(write(pipes[1], mem, sizeof(size_t)));
    // NOTE(review): errno is only meaningful when `ret == -1`; a short write would trip this check with a stale
    // errno. In practice a sizeof(size_t)-byte write to a fresh, empty pipe is all-or-nothing.
    CARB_FATAL_UNLESS(
        ret == sizeof(size_t) || errno == EFAULT, "Unexpected result from write(): {%d/%s}", errno, strerror(errno));
    // Close both ends of the temporary pipe before reporting the result.
    close(pipes[0]);
    close(pipes[1]);
    return ret == sizeof(size_t);
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
CARB_OPTIMIZE_ON_MSC()
/**
* Copies memory as via memmove, but returns false if a read access violation occurs while reading.
*
* @rst
* .. warning:: This function is designed for protection, not performance, and may be very slow to execute.
* @endrst
* @thread_safety This function is safe to call concurrently. However, this function makes no guarantees about the
* consistency of data copied when the data is modified while copied, only the attempting to read invalid memory
* will not result in an access violation.
*
* As with `memmove()`, the memory areas may overlap: copying takes place as though the bytes in @p source are first
* copied into a temporary array that does not overlap @p source or @p dest, and the bytes are then copied from the
* temporary array to @p dest.
*
* @param dest The destination buffer that will receive the copied bytes.
* @param source The source buffer to copy bytes from.
* @param len The number of bytes of @p source to copy to @p dest.
* @returns `true` if the memory was successfully copied. If `false` is returned, @p dest is in a valid but undefined
* state.
*/
CARB_ATTRIBUTE(no_sanitize_address) inline bool protectedMemmove(void* dest, const void* source, size_t len)
{
    // A null source can never be read; a null dest with len == 0 is harmless and falls through to success below.
    if (!source)
        return false;
#if CARB_PLATFORM_WINDOWS
    // Use SEH to catch a read failure. This is very fast unless an exception occurs as no setup work is needed on
    // x86_64.
    __try
    {
        memmove(dest, source, len);
        return true;
    }
    __except (1)
    {
        return false;
    }
#elif CARB_POSIX
    // Create a pipe and read the data through the pipe. The kernel will sanitize the reads.
    int pipes[2];
    if (pipe(pipes) != 0)
        return false;
    while (len != 0)
    {
        // Copy in chunks of at most 4096 bytes -- presumably kept at or below the pipe buffer capacity so that the
        // write() cannot block before the matching read() drains it; TODO confirm against PIPE_BUF/pipe capacity.
        ssize_t s = ::carb_min((ssize_t)len, (ssize_t)4096);
        if (CARB_RETRY_EINTR(write(pipes[1], source, s)) != s || CARB_RETRY_EINTR(read(pipes[0], dest, s)) != s)
            break; // faulting read/write: bail out; `len` stays non-zero so we report failure below
        len -= size_t(s);
        dest = static_cast<uint8_t*>(dest) + s;
        source = static_cast<const uint8_t*>(source) + s;
    }
    close(pipes[0]);
    close(pipes[1]);
    // len reaches 0 only if every chunk round-tripped through the pipe successfully.
    return len == 0;
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
/**
* Copies memory as via strncpy, but returns false if an access violation occurs while reading.
*
* @rst
* .. warning:: This function is designed for safety, not performance, and may be very slow to execute.
* .. warning:: The `source` and `dest` buffers may not overlap.
* @endrst
* @thread_safety This function is safe to call concurrently. However, this function makes no guarantees about the
* consistency of data copied when the data is modified while copied, only the attempting to read invalid memory
* will not result in an access violation.
* @param dest The destination buffer that will receive the memory. Must be at least @p n bytes in size.
* @param source The source buffer. Up to @p n bytes will be copied.
* @param n The maximum number of characters of @p source to copy to @p dest. If no `NUL` character was encountered in
* the first `n - 1` characters of `source`, then `dest[n - 1]` will be a `NUL` character. This is a departure
* from `strncpy()` but similar to `strncpy_s()`.
* @returns `true` if the memory was successfully copied; `false` otherwise. If `false` is returned, `dest` is in a
* valid but undefined state.
*/
CARB_ATTRIBUTE(no_sanitize_address) inline bool protectedStrncpy(char* dest, const char* source, size_t n)
{
    // Reject a null source or a zero-sized destination up front. The `n == 0` check was previously performed only on
    // the POSIX path; it is required on Windows as well, since `n - 1` below would otherwise underflow to SIZE_MAX,
    // making strnlen() scan unbounded memory and then write the terminator out of bounds.
    if (!source || n == 0)
        return false;
#if CARB_PLATFORM_WINDOWS
    // Use SEH to catch a read failure. This is very fast unless an exception occurs as no setup work is needed on
    // x86_64.
    __try
    {
        size_t len = strnlen(source, n - 1);
        memcpy(dest, source, len);
        dest[len] = '\0'; // always NUL-terminate, truncating if needed (like strncpy_s, unlike strncpy)
        return true;
    }
    __except (1)
    {
        return false;
    }
#elif CARB_POSIX
    // Create a pipe and read the data through the pipe. The kernel will sanitize the reads.
    // RAII wrapper so both pipe ends are closed on every return path.
    struct Pipes
    {
        bool valid;
        int fds[2];
        Pipes()
        {
            valid = pipe(fds) == 0;
        }
        ~Pipes()
        {
            if (valid)
            {
                close(fds[0]);
                close(fds[1]);
            }
        }
        int operator[](int p) const noexcept
        {
            return fds[p];
        }
    } pipes;
    if (!pipes.valid)
        return false;
    constexpr static size_t kBytes = sizeof(size_t);
    constexpr static size_t kMask = kBytes - 1;
    // Unaligned reads: copy one byte at a time until `source` reaches a word boundary.
    while (n != 0 && (size_t(source) & kMask) != 0)
    {
        if (CARB_RETRY_EINTR(write(pipes[1], source, 1)) != 1 || CARB_RETRY_EINTR(read(pipes[0], dest, 1)) != 1)
            return false;
        if (*dest == '\0')
            return true;
        ++source, ++dest, --n;
    }
    // Aligned reads: copy a word at a time.
    while (n >= kBytes)
    {
        CARB_ASSERT((size_t(source) & kMask) == 0);
        union
        {
            size_t value;
            char chars[kBytes];
        } u;
        if (CARB_RETRY_EINTR(write(pipes[1], source, kBytes)) != kBytes ||
            CARB_RETRY_EINTR(read(pipes[0], &u.value, kBytes)) != kBytes)
            return false;
        // Use the strlen bit trick to check if any bytes that make up a word are definitely not zero. False
        // positives (bytes >= 0x80) merely take the slower byte-wise path below; correctness is unaffected.
        if (CARB_UNLIKELY(((u.value - 0x0101010101010101) & 0x8080808080808080)))
        {
            // One of the bytes could be zero
            for (int i = 0; i != sizeof(size_t); ++i)
            {
                dest[i] = u.chars[i];
                if (!dest[i])
                    return true;
            }
        }
        else
        {
            memcpy(dest, u.chars, kBytes);
        }
        source += kBytes;
        dest += kBytes;
        n -= kBytes;
    }
    // Trailing reads: remaining bytes after the last full word.
    while (n != 0)
    {
        if (CARB_RETRY_EINTR(write(pipes[1], source, 1)) != 1 || CARB_RETRY_EINTR(read(pipes[0], dest, 1)) != 1)
            return false;
        if (*dest == '\0')
            return true;
        ++source, ++dest, --n;
    }
    // No NUL found within n characters: truncate by terminating the last byte (strncpy_s-style behavior).
    *(dest - 1) = '\0';
    return true;
#else
    CARB_UNSUPPORTED_PLATFORM();
#endif
}
} // namespace memory
} // namespace carb
|
omniverse-code/kit/include/carb/windowing/IGLContext.h | // Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Interface.h"
#include "../Types.h"
namespace carb
{
namespace windowing
{
struct GLContext;
/**
 * Defines a GL context interface for off-screen rendering.
 *
 * All members are ABI-stable function pointers (CARB_ABI) filled in by the plugin implementation; the member order
 * must not change.
 */
struct IGLContext
{
    CARB_PLUGIN_INTERFACE("carb::windowing::IGLContext", 1, 0)
    /**
     * Creates a context for OpenGL.
     *
     * @param width The width of the off-screen surface for the context.
     * @param height The height of the off-screen surface for the context.
     * @return The GL context created.
     */
    GLContext*(CARB_ABI* createContextOpenGL)(int width, int height);
    /**
     * Creates a context for OpenGL(ES).
     *
     * @param width The width of the off-screen surface for the context.
     * @param height The height of the off-screen surface for the context.
     * @return The GL context created.
     */
    GLContext*(CARB_ABI* createContextOpenGLES)(int width, int height);
    /**
     * Destroys a GL context.
     *
     * @param ctx The GL context to be destroyed.
     */
    void(CARB_ABI* destroyContext)(GLContext* ctx);
    /**
     * Makes the GL context current.
     *
     * After calling this you can make any GL function calls.
     *
     * @param ctx The GL context to be made current.
     */
    void(CARB_ABI* makeContextCurrent)(GLContext* ctx);
    /**
     * Try and resolve an OpenGL or OpenGL(es) procedure address from name.
     *
     * @param procName The name of procedure to load.
     * @return The address of procedure (presumably nullptr when the procedure cannot be resolved -- TODO confirm
     *         against the implementation).
     */
    void*(CARB_ABI* getProcAddress)(const char* procName);
};
} // namespace windowing
} // namespace carb
|
omniverse-code/kit/include/carb/windowing/WindowingBindingsPython.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "../BindingsPythonTypes.h"
#include "IGLContext.h"
#include "IWindowing.h"
#include <memory>
#include <string>
#include <vector>
namespace carb
{
namespace windowing
{
//! Empty tag type giving pybind11 a concrete type for the opaque carb::windowing cursor handle.
struct Cursor
{
};
//! Empty tag type giving pybind11 a concrete type for the opaque carb::windowing GL context handle.
struct GLContext
{
};
//! Empty tag type giving pybind11 a concrete type for the opaque carb::windowing monitor handle.
struct Monitor
{
};
struct ImagePy
{
int32_t width;
int32_t height;
py::bytes pixels;
ImagePy(int32_t _width, int32_t _height, py::bytes& _pixels) : width(_width), height(_height), pixels(_pixels)
{
}
};
//! Registers the carb.windowing python bindings (wrapper classes, window-hint constants, enums and interface
//! wrappers) on the given pybind11 module @p m.
inline void definePythonModule(py::module& m)
{
    using namespace carb;
    using namespace carb::windowing;
    m.doc() = "pybind11 carb.windowing bindings";
    // Opaque handle wrappers -- no methods; instances are only passed back into the interface functions.
    py::class_<Window>(m, "Window");
    py::class_<Cursor>(m, "Cursor");
    py::class_<GLContext>(m, "GLContext");
    py::class_<Monitor>(m, "Monitor");
    py::class_<ImagePy>(m, "Image")
        .def(py::init<int32_t, int32_t, py::bytes&>(), py::arg("width"), py::arg("height"), py::arg("pixels"));
    // Window hint bit flags (mirror the kWindowHint* constants).
    m.attr("WINDOW_HINT_NONE") = py::int_(kWindowHintNone);
    m.attr("WINDOW_HINT_NO_RESIZE") = py::int_(kWindowHintNoResize);
    m.attr("WINDOW_HINT_NO_DECORATION") = py::int_(kWindowHintNoDecoration);
    m.attr("WINDOW_HINT_NO_AUTO_ICONIFY") = py::int_(kWindowHintNoAutoIconify);
    m.attr("WINDOW_HINT_NO_FOCUS_ON_SHOW") = py::int_(kWindowHintNoFocusOnShow);
    m.attr("WINDOW_HINT_SCALE_TO_MONITOR") = py::int_(kWindowHintScaleToMonitor);
    m.attr("WINDOW_HINT_FLOATING") = py::int_(kWindowHintFloating);
    m.attr("WINDOW_HINT_MAXIMIZED") = py::int_(kWindowHintMaximized);
    py::enum_<CursorStandardShape>(m, "CursorStandardShape")
        .value("ARROW", CursorStandardShape::eArrow)
        .value("IBEAM", CursorStandardShape::eIBeam)
        .value("CROSSHAIR", CursorStandardShape::eCrosshair)
        .value("HAND", CursorStandardShape::eHand)
        .value("HORIZONTAL_RESIZE", CursorStandardShape::eHorizontalResize)
        .value("VERTICAL_RESIZE", CursorStandardShape::eVerticalResize);
    py::enum_<CursorMode>(m, "CursorMode")
        .value("NORMAL", CursorMode::eNormal)
        .value("HIDDEN", CursorMode::eHidden)
        .value("DISABLED", CursorMode::eDisabled);
    py::enum_<InputMode>(m, "InputMode")
        .value("STICKY_KEYS", InputMode::eStickyKeys)
        .value("STICKY_MOUSE_BUTTONS", InputMode::eStickyMouseButtons)
        .value("LOCK_KEY_MODS", InputMode::eLockKeyMods)
        .value("RAW_MOUSE_MOTION", InputMode::eRawMouseMotion);
    defineInterfaceClass<IWindowing>(m, "IWindowing", "acquire_windowing_interface")
        .def("create_window",
             [](const IWindowing* iface, int width, int height, const char* title, bool fullscreen, int hints) {
                 WindowDesc desc = {};
                 desc.width = width;
                 desc.height = height;
                 desc.title = title;
                 desc.fullscreen = fullscreen;
                 desc.hints = hints;
                 return iface->createWindow(desc);
             },
             py::arg("width"), py::arg("height"), py::arg("title"), py::arg("fullscreen"),
             py::arg("hints") = kWindowHintNone, py::return_value_policy::reference)
        .def("destroy_window", wrapInterfaceFunction(&IWindowing::destroyWindow))
        .def("show_window", wrapInterfaceFunction(&IWindowing::showWindow))
        .def("hide_window", wrapInterfaceFunction(&IWindowing::hideWindow))
        .def("get_window_width", wrapInterfaceFunction(&IWindowing::getWindowWidth))
        .def("get_window_height", wrapInterfaceFunction(&IWindowing::getWindowHeight))
        .def("get_window_position", wrapInterfaceFunction(&IWindowing::getWindowPosition))
        .def("set_window_position", wrapInterfaceFunction(&IWindowing::setWindowPosition))
        .def("set_window_title", wrapInterfaceFunction(&IWindowing::setWindowTitle))
        .def("set_window_opacity", wrapInterfaceFunction(&IWindowing::setWindowOpacity))
        .def("get_window_opacity", wrapInterfaceFunction(&IWindowing::getWindowOpacity))
        .def("set_window_fullscreen", wrapInterfaceFunction(&IWindowing::setWindowFullscreen))
        .def("is_window_fullscreen", wrapInterfaceFunction(&IWindowing::isWindowFullscreen))
        .def("resize_window", wrapInterfaceFunction(&IWindowing::resizeWindow))
        .def("focus_window", wrapInterfaceFunction(&IWindowing::focusWindow))
        .def("is_window_focused", wrapInterfaceFunction(&IWindowing::isWindowFocused))
        .def("maximize_window", wrapInterfaceFunction(&IWindowing::maximizeWindow))
        .def("minimize_window", wrapInterfaceFunction(&IWindowing::minimizeWindow))
        .def("restore_window", wrapInterfaceFunction(&IWindowing::restoreWindow))
        .def("is_window_maximized", wrapInterfaceFunction(&IWindowing::isWindowMaximized))
        .def("is_window_minimized", wrapInterfaceFunction(&IWindowing::isWindowMinimized))
        .def("should_window_close", wrapInterfaceFunction(&IWindowing::shouldWindowClose))
        .def("set_window_should_close", wrapInterfaceFunction(&IWindowing::setWindowShouldClose))
        .def("get_window_user_pointer", wrapInterfaceFunction(&IWindowing::getWindowUserPointer))
        .def("set_window_user_pointer", wrapInterfaceFunction(&IWindowing::setWindowUserPointer))
        // NOTE(review): this binding is misnamed -- it wraps the *getter* IWindowing::getWindowContentScale. It is
        // kept under the old name for backward compatibility with existing scripts; prefer
        // "get_window_content_scale" below.
        .def("set_window_content_scale", wrapInterfaceFunction(&IWindowing::getWindowContentScale))
        // Correctly named binding for the same getter.
        .def("get_window_content_scale", wrapInterfaceFunction(&IWindowing::getWindowContentScale))
        .def("get_native_display", wrapInterfaceFunction(&IWindowing::getNativeDisplay))
        .def("get_native_window", wrapInterfaceFunction(&IWindowing::getNativeWindow), py::return_value_policy::reference)
        .def("set_input_mode", wrapInterfaceFunction(&IWindowing::setInputMode))
        .def("get_input_mode", wrapInterfaceFunction(&IWindowing::getInputMode))
        .def("update_input_devices", wrapInterfaceFunction(&IWindowing::updateInputDevices))
        .def("poll_events", wrapInterfaceFunction(&IWindowing::pollEvents))
        .def("wait_events", wrapInterfaceFunction(&IWindowing::waitEvents))
        .def("get_keyboard", wrapInterfaceFunction(&IWindowing::getKeyboard), py::return_value_policy::reference)
        .def("get_mouse", wrapInterfaceFunction(&IWindowing::getMouse), py::return_value_policy::reference)
        .def("create_cursor_standard", wrapInterfaceFunction(&IWindowing::createCursorStandard),
             py::return_value_policy::reference)
        .def("create_cursor",
             [](IWindowing* windowing, ImagePy& imagePy, int32_t xhot, int32_t yhot) {
                 // Borrow the pixel bytes from the python object for the duration of the call.
                 py::buffer_info info(py::buffer(imagePy.pixels).request());
                 uint8_t* data = reinterpret_cast<uint8_t*>(info.ptr);
                 Image image{ imagePy.width, imagePy.height, data };
                 return windowing->createCursor(image, xhot, yhot);
             },
             py::return_value_policy::reference)
        .def("destroy_cursor", wrapInterfaceFunction(&IWindowing::destroyCursor))
        .def("set_cursor", wrapInterfaceFunction(&IWindowing::setCursor))
        .def("set_cursor_mode", wrapInterfaceFunction(&IWindowing::setCursorMode))
        .def("get_cursor_mode", wrapInterfaceFunction(&IWindowing::getCursorMode))
        .def("set_cursor_position", wrapInterfaceFunction(&IWindowing::setCursorPosition))
        .def("get_cursor_position", wrapInterfaceFunction(&IWindowing::getCursorPosition))
        .def("set_clipboard", wrapInterfaceFunction(&IWindowing::setClipboard))
        .def("get_clipboard", wrapInterfaceFunction(&IWindowing::getClipboard))
        .def("get_monitors",
             [](IWindowing* iface) {
                 // Convert the C array of monitor pointers into a python tuple.
                 size_t monitorCount;
                 const Monitor** monitors = iface->getMonitors(&monitorCount);
                 py::tuple tuple(monitorCount);
                 for (size_t i = 0; i < monitorCount; ++i)
                 {
                     tuple[i] = monitors[i];
                 }
                 return tuple;
             })
        .def("get_monitor_position", wrapInterfaceFunction(&IWindowing::getMonitorPosition))
        .def("get_monitor_work_area",
             [](IWindowing* iface, Monitor* monitor) {
                 // Returns (position, size) as a 2-tuple.
                 Int2 pos, size;
                 iface->getMonitorWorkArea(monitor, &pos, &size);
                 py::tuple tuple(2);
                 tuple[0] = pos;
                 tuple[1] = size;
                 return tuple;
             })
        .def("set_window_icon", [](IWindowing* windowing, Window* window, ImagePy& imagePy) {
            py::buffer_info info(py::buffer(imagePy.pixels).request());
            uint8_t* data = reinterpret_cast<uint8_t*>(info.ptr);
            Image image{ imagePy.width, imagePy.height, data };
            windowing->setWindowIcon(window, image);
        });
    defineInterfaceClass<IGLContext>(m, "IGLContext", "acquire_gl_context_interface")
        .def("create_context_opengl",
             [](const IGLContext* iface, int width, int height) { return iface->createContextOpenGL(width, height); },
             py::arg("width"), py::arg("height"), py::return_value_policy::reference)
        .def("create_context_opengles",
             [](const IGLContext* iface, int width, int height) { return iface->createContextOpenGLES(width, height); },
             py::arg("width"), py::arg("height"), py::return_value_policy::reference)
        .def("destroy_context", wrapInterfaceFunction(&IGLContext::destroyContext))
        .def("make_context_current", wrapInterfaceFunction(&IGLContext::makeContextCurrent));
}
} // namespace windowing
} // namespace carb
|
omniverse-code/kit/include/carb/windowing/IWindowing.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Interface.h"
#include "../Types.h"
namespace carb
{
namespace input
{
struct Keyboard;
struct Mouse;
struct Gamepad;
} // namespace input
namespace windowing
{
struct Window; //!< Opaque window handle.
struct Cursor; //!< Opaque cursor handle.
struct Monitor; //!< Opaque monitor handle.
//! Event kinds reported to \ref OnMonitorChangeFn when the set of connected monitors changes.
enum class MonitorChangeEvent : uint32_t
{
    eUnknown, ///< The change could not be classified.
    eConnected, ///< A monitor was connected.
    eDisconnected ///< A monitor was disconnected.
};
//! Callback: a window was moved to position (x, y).
typedef void (*OnWindowMoveFn)(Window* window, int x, int y, void* userData);
//! Callback: a window was resized to (width, height).
typedef void (*OnWindowResizeFn)(Window* window, int width, int height, void* userData);
//! Callback: `count` file paths were dropped onto a window.
typedef void (*OnWindowDropFn)(Window* window, const char** paths, int count, void* userData);
//! Callback: a window was requested to close.
typedef void (*OnWindowCloseFn)(Window* window, void* userData);
//! Callback: a window's content scale changed to (scaleX, scaleY).
typedef void (*OnWindowContentScaleFn)(Window* window, float scaleX, float scaleY, void* userData);
//! Callback: a window gained or lost input focus.
typedef void (*OnWindowFocusFn)(Window* window, bool isFocused, void* userData);
//! Callback: a window was maximized or restored.
typedef void (*OnWindowMaximizeFn)(Window* window, bool isMaximized, void* userData);
//! Callback: a window was minimized or restored.
typedef void (*OnWindowMinimizeFn)(Window* window, bool isMinimized, void* userData);
//! Callback: a monitor was connected or disconnected (see \ref MonitorChangeEvent).
typedef void (*OnMonitorChangeFn)(const Monitor* monitor, MonitorChangeEvent evt);
//! Bitmask combining zero or more of the kWindowHint* flags below.
typedef uint32_t WindowHints;
constexpr WindowHints kWindowHintNone = 0; ///< No special hints.
constexpr WindowHints kWindowHintNoResize = 1 << 0; ///< The window cannot be resized by the user.
constexpr WindowHints kWindowHintNoDecoration = 1 << 1; ///< The window has no decorations (title bar, border, etc.).
constexpr WindowHints kWindowHintNoAutoIconify = 1 << 2; ///< Do not auto-iconify the window.
constexpr WindowHints kWindowHintNoFocusOnShow = 1 << 3; ///< Do not give the window focus when shown.
constexpr WindowHints kWindowHintScaleToMonitor = 1 << 4; ///< Scale the window content to the monitor content scale.
constexpr WindowHints kWindowHintFloating = 1 << 5; ///< The window floats above other windows.
constexpr WindowHints kWindowHintMaximized = 1 << 6; ///< The window starts maximized.
/**
 * Descriptor for how a window is to be created.
 */
struct WindowDesc
{
    int width; ///< The initial window width.
    int height; ///< The initial window height.
    const char* title; ///< The initial title of the window.
    bool fullscreen; ///< Should the window be initialized in fullscreen mode.
    WindowHints hints; ///< Initial window hints / attributes.
};
/**
 * Defines cursor standard shapes.
 */
enum class CursorStandardShape : uint32_t
{
    eArrow, ///< The regular arrow cursor shape.
    eIBeam, ///< The text input I-beam cursor shape.
    eCrosshair, ///< The crosshair shape.
    eHand, ///< The hand shape
    eHorizontalResize, ///< The horizontal resize arrow shape.
    eVerticalResize ///< The vertical resize arrow shape.
};
//! Cursor visibility/confinement behavior.
enum class CursorMode : uint32_t
{
    eNormal, ///< Cursor visible and behaving normally.
    eHidden, ///< Cursor invisible when over the content area of window but does not restrict the cursor from leaving.
    eDisabled, ///< Hides and grabs the cursor, providing virtual and unlimited cursor movement. This is useful
               ///< for implementing for example 3D camera controls.
};
//! Input behaviors configurable via set_input_mode/get_input_mode.
enum class InputMode : uint32_t
{
    eStickyKeys, ///< Config sticky key.
    eStickyMouseButtons, ///< Config sticky mouse button.
    eLockKeyMods, ///< Config lock key modifier bits.
    eRawMouseMotion ///< Config raw mouse motion.
};
//! Describes a monitor video mode.
struct VideoMode
{
    int width; ///< The width, in screen coordinates, of the video mode.
    int height; ///< The height, in screen coordinates, of the video mode.
    int redBits; ///< The bit depth of the red channel of the video mode.
    int greenBits; ///< The bit depth of the green channel of the video mode.
    int blueBits; ///< The bit depth of the blue channel of the video mode.
    int refreshRate; ///< The refresh rate, in Hz, of the video mode.
};
/**
 * This describes a single 2D image. See the documentation for each related function what the expected pixel format is.
 */
struct Image
{
    int32_t width; ///< The width, in pixels, of this image.
    int32_t height; ///< The height, in pixels, of this image.
    uint8_t* pixels; ///< The pixel data of this image, arranged left-to-right, top-to-bottom.
};
/**
 * Defines a windowing interface.
 */
struct IWindowing
{
    CARB_PLUGIN_INTERFACE("carb::windowing::IWindowing", 1, 4)

    /**
     * Creates a window.
     *
     * @param desc The descriptor for the window.
     * @return The window created.
     */
    Window*(CARB_ABI* createWindow)(const WindowDesc& desc);

    /**
     * Destroys a window.
     *
     * @param window The window to be destroyed.
     */
    void(CARB_ABI* destroyWindow)(Window* window);

    /**
     * Shows a window making it visible.
     *
     * @param window The window to use.
     */
    void(CARB_ABI* showWindow)(Window* window);

    /**
     * Hides a window making it hidden.
     *
     * @param window The window to use.
     */
    void(CARB_ABI* hideWindow)(Window* window);

    /**
     * Gets the current window width.
     *
     * @param window The window to use.
     * @return The current window width.
     */
    uint32_t(CARB_ABI* getWindowWidth)(Window* window);

    /**
     * Gets the current window height.
     *
     * @param window The window to use.
     * @return The current window height.
     */
    uint32_t(CARB_ABI* getWindowHeight)(Window* window);

    /**
     * Gets the current window position.
     *
     * @param window The window to use.
     * @return The current window position.
     */
    Int2(CARB_ABI* getWindowPosition)(Window* window);

    /**
     * Sets the current window position.
     *
     * @param window The window to use.
     * @param position The position to set the window to.
     */
    void(CARB_ABI* setWindowPosition)(Window* window, const Int2& position);

    /**
     * Sets the window title.
     *
     * @param window The window to use.
     * @param title The window title to be set (as a utf8 string)
     */
    void(CARB_ABI* setWindowTitle)(Window* window, const char* title);

    /**
     * Sets the window opacity.
     *
     * @param window The window to use.
     * @param opacity The window opacity. 1.0f is fully opaque. 0.0 is fully transparent.
     */
    void(CARB_ABI* setWindowOpacity)(Window* window, float opacity);

    /**
     * Gets the window opacity.
     *
     * @param window The window to use.
     * @return The window opacity. 1.0f is fully opaque. 0.0 is fully transparent.
     */
    float(CARB_ABI* getWindowOpacity)(Window* window);

    /**
     * Sets the window into fullscreen or windowed mode.
     *
     * @param window The window to use.
     * @param fullscreen true to be set to fullscreen, false to be set to windowed.
     */
    void(CARB_ABI* setWindowFullscreen)(Window* window, bool fullscreen);

    /**
     * Determines if the window is in fullscreen mode.
     *
     * @param window The window to use.
     * @return true if the window is in fullscreen mode, false if in windowed mode.
     */
    bool(CARB_ABI* isWindowFullscreen)(Window* window);

    /**
     * Sets the function for handling resize events.
     *
     * @param window The window to use.
     * @param onWindowResize The function callback to handle resize events on the window.
     * @param userData Opaque user data pointer passed back to the callback.
     */
    void(CARB_ABI* setWindowResizeFn)(Window* window, OnWindowResizeFn onWindowResize, void* userData);

    /**
     * Resizes the window.
     *
     * @param window The window to resize.
     * @param width The width to resize to.
     * @param height The height to resize to.
     */
    void(CARB_ABI* resizeWindow)(Window* window, int width, int height);

    /**
     * Set the window in focus.
     *
     * @param window The window to use.
     */
    void(CARB_ABI* focusWindow)(Window* window);

    /**
     * Sets the function for handling window focus events.
     *
     * @param window The window to use.
     * @param onWindowFocusFn The function callback to handle focus events on the window.
     * @param userData Opaque user data pointer passed back to the callback.
     */
    void(CARB_ABI* setWindowFocusFn)(Window* window, OnWindowFocusFn onWindowFocusFn, void* userData);

    /**
     * Determines if the window is in focus.
     *
     * @param window The window to use.
     * @return true if the window is in focus, false if it is not.
     */
    bool(CARB_ABI* isWindowFocused)(Window* window);

    /**
     * Sets the function for handling window minimize events.
     *
     * @param window The window to use.
     * @param onWindowMinimizeFn The function callback to handle minimize events on the window.
     * @param userData Opaque user data pointer passed back to the callback.
     */
    void(CARB_ABI* setWindowMinimizeFn)(Window* window, OnWindowMinimizeFn onWindowMinimizeFn, void* userData);

    /**
     * Determines if the window is minimized.
     *
     * @param window The window to use.
     * @return true if the window is minimized, false if it is not.
     */
    bool(CARB_ABI* isWindowMinimized)(Window* window);

    /**
     * Sets the function for handling drag-n-drop events.
     *
     * @param window The window to use.
     * @param onWindowDrop The function callback to handle drop events on the window.
     * @param userData Opaque user data pointer passed back to the callback.
     */
    void(CARB_ABI* setWindowDropFn)(Window* window, OnWindowDropFn onWindowDrop, void* userData);

    /**
     * Sets the function for handling window close events.
     *
     * @param window The window to use.
     * @param onWindowClose The function callback to handle window close events.
     * @param userData Opaque user data pointer passed back to the callback.
     */
    void(CARB_ABI* setWindowCloseFn)(Window* window, OnWindowCloseFn onWindowClose, void* userData);

    /**
     * Determines if the user has attempted to close the window.
     *
     * @param window The window to use.
     * @return true if the user has attempted to close the window, false if still open.
     */
    bool(CARB_ABI* shouldWindowClose)(Window* window);

    /**
     * Hints to the window that it should close.
     *
     * @param window The window to use.
     * @param value true to request the window to close, false to request it not to close.
     */
    void(CARB_ABI* setWindowShouldClose)(Window* window, bool value);

    /**
     * This function returns the current value of the user-defined pointer of the specified window.
     * The initial value is nullptr.
     *
     * @param window The window to use.
     * @return the current value of the user-defined pointer of the specified window.
     */
    void*(CARB_ABI* getWindowUserPointer)(Window* window);

    /**
     * This function sets the user-defined pointer of the specified window.
     * The current value is retained until the window is destroyed. The initial value is nullptr.
     *
     * @param window The window to use.
     * @param pointer The new pointer value.
     */
    void(CARB_ABI* setWindowUserPointer)(Window* window, void* pointer);

    /**
     * Sets the function for handling content scale events.
     *
     * @param window The window to use.
     * @param onWindowContentScale The function callback to handle content scale events on the window.
     * @param userData Opaque user data pointer passed back to the callback.
     */
    void(CARB_ABI* setWindowContentScaleFn)(Window* window, OnWindowContentScaleFn onWindowContentScale, void* userData);

    /**
     * Retrieves the content scale for the specified monitor.
     *
     * @param window The window to use.
     * @return The content scale of the window.
     */
    Float2(CARB_ABI* getWindowContentScale)(Window* window);

    /**
     * Gets the native display handle.
     *
     * windows = nullptr
     * linux = ::Display*
     *
     * @param window The window to use.
     * @return The native display handle.
     */
    void*(CARB_ABI* getNativeDisplay)(Window* window);

    /**
     * Gets the native window handle.
     *
     * windows = ::HWND
     * linux = ::Window
     *
     * @param window The window to use.
     * @return The native window handle.
     */
    void*(CARB_ABI* getNativeWindow)(Window* window);

    /**
     * Sets an input mode option for the specified window.
     *
     * @param window The window to set input mode.
     * @param mode The mode to set.
     * @param enabled The new value the mode @p mode should be changed to.
     */
    void(CARB_ABI* setInputMode)(Window* window, InputMode mode, bool enabled);

    /**
     * Gets the value of a input mode option for the specified window.
     *
     * @param window The window to get input mode value.
     * @param mode The input mode to get value from.
     * @return The input mode value associated with the window.
     */
    bool(CARB_ABI* getInputMode)(Window* window, InputMode mode);

    /**
     * Updates input device states.
     */
    void(CARB_ABI* updateInputDevices)();

    /**
     * Polls and processes only those events that have already been received and then returns immediately.
     */
    void(CARB_ABI* pollEvents)();

    /**
     * Puts the calling thread to sleep until at least one event has been received.
     */
    void(CARB_ABI* waitEvents)();

    /**
     * Gets the logical keyboard associated with the window.
     *
     * @param window The window to use.
     * @return The keyboard.
     */
    input::Keyboard*(CARB_ABI* getKeyboard)(Window* window);

    /**
     * Gets the logical mouse associated with the window.
     *
     * @param window The window to use.
     * @return The mouse.
     */
    input::Mouse*(CARB_ABI* getMouse)(Window* window);

    /**
     * Creates a cursor with a standard shape, that can be set for a window with @ref setCursor.
     *
     * Use @ref destroyCursor to destroy cursors.
     *
     * @param shape The standard shape of cursor to be created.
     * @return A new cursor ready to use or nullptr if an error occurred.
     */
    Cursor*(CARB_ABI* createCursorStandard)(CursorStandardShape shape);

    /**
     * Destroys a cursor previously created with @ref createCursorStandard.
     * If the specified cursor is current for any window, that window will be reverted to the default cursor.
     *
     * @param cursor the cursor object to destroy.
     */
    void(CARB_ABI* destroyCursor)(Cursor* cursor);

    /**
     * Sets the cursor image to be used when the cursor is over the content area of the specified window.
     *
     * @param window The window to set the cursor for.
     * @param cursor The cursor to set, or nullptr to switch back to the default arrow cursor.
     */
    void(CARB_ABI* setCursor)(Window* window, Cursor* cursor);

    /**
     * Sets cursor mode option for the specified window.
     *
     * @param window The window to set cursor mode.
     * @param mode The mouse mode to set to.
     */
    void(CARB_ABI* setCursorMode)(Window* window, CursorMode mode);

    /**
     * Gets cursor mode option for the specified window.
     *
     * @param window The window to get cursor mode.
     * @return The mouse mode associated with the window.
     */
    CursorMode(CARB_ABI* getCursorMode)(Window* window);

    /**
     * Sets cursor position relative to the window.
     *
     * @param window The window to use.
     * @param position The x/y coordinates relative to the window.
     */
    void(CARB_ABI* setCursorPosition)(Window* window, const Int2& position);

    /**
     * Gets cursor position relative to the window.
     *
     * @param window The window to use.
     * @return The x/y coordinates relative to the window.
     */
    Int2(CARB_ABI* getCursorPosition)(Window* window);

    /**
     * The set clipboard function, which expects a Window and text.
     *
     * @param window The window that contains a glfwWindow
     * @param text The text to set to the clipboard
     */
    void(CARB_ABI* setClipboard)(Window* window, const char* text);

    /**
     * Gets the clipboard text.
     *
     * @param window The window that contains a glfwWindow
     * @return The text from the clipboard
     */
    const char*(CARB_ABI* getClipboard)(Window* window);

    /**
     * Sets the monitors callback function for configuration changes
     *
     * The onMonitorChange function callback will occur when monitors are changed.
     * Current changes that can occur are connected/disconnected.
     *
     * @param onMonitorChange The callback function when monitors change.
     */
    void(CARB_ABI* setMonitorsChangeFn)(OnMonitorChangeFn onMonitorChange);

    /**
     * Gets the primary monitor.
     *
     * A Monitor object represents a currently connected monitor and is represented as a pointer
     * to the opaque native monitor. Monitor objects cannot be created or destroyed by the application
     * and retain their addresses until the monitors they represent are disconnected.
     *
     * @return The primary monitor.
     */
    const Monitor*(CARB_ABI* getMonitorPrimary)();

    /**
     * Gets the enumerated monitors.
     *
     * This represents a currently connected monitors and is represented as a pointer
     * to the opaque native monitor. Monitors cannot be created or destroyed
     * and retain their addresses until the monitors are disconnected.
     *
     * Use @ref setMonitorsChangeFn to know when a monitor is disconnected.
     *
     * @param monitorCount The returned number of monitors enumerated.
     * @return The enumerated monitors.
     */
    const Monitor**(CARB_ABI* getMonitors)(size_t* monitorCount);

    /**
     * Gets the human read-able monitor name.
     *
     * The name pointer returned is only valid for the life of the Monitor.
     * When the Monitor is disconnected, the name pointer becomes invalid.
     *
     * Use @ref setMonitorsChangeFn to know when a monitor is disconnected.
     *
     * @param monitor The monitor to use.
     * @return The human read-able monitor name. Pointer returned is owned by monitor.
     */
    const char*(CARB_ABI* getMonitorName)(const Monitor* monitor);

    /**
     * Gets a monitors physical size in millimeters.
     *
     * The size returned is only valid for the life of the Monitor.
     * When the Monitor is disconnected, the size becomes invalid.
     *
     * Use @ref setMonitorsChangeFn to know when a monitor is disconnected.
     *
     * @param monitor The monitor to use.
     * @return The monitor physical size, in millimeters.
     */
    Int2(CARB_ABI* getMonitorPhysicalSize)(const Monitor* monitor);

    /**
     * Gets a monitors current video mode.
     *
     * The pointer returned is only valid for the life of the Monitor.
     * When the Monitor is disconnected, the pointer becomes invalid.
     *
     * Use @ref setMonitorsChangeFn to know when a monitor is disconnected.
     *
     * @param monitor The monitor to use.
     * @return The video mode.
     */
    const VideoMode*(CARB_ABI* getMonitorVideoMode)(const Monitor* monitor);

    /**
     * Gets a monitors virtual position.
     *
     * The position returned is only valid for the life of the Monitor.
     * When the Monitor is disconnected, the position becomes invalid.
     *
     * Use @ref setMonitorsChangeFn to know when a monitor is disconnected.
     *
     * @param monitor The monitor to use.
     * @return The monitor virtual position.
     */
    Int2(CARB_ABI* getMonitorPosition)(const Monitor* monitor);

    /**
     * Gets a monitors content scale.
     *
     * The content scale is the ratio between the current DPI and the platform's default DPI.
     * This is especially important for text and any UI elements. If the pixel dimensions of
     * your UI scaled by this look appropriate on your machine then it should appear at a
     * reasonable size on other machines regardless of their DPI and scaling settings.
     * This relies on the system DPI and scaling settings being somewhat correct.
     *
     * The content scale returned is only valid for the life of the Monitor.
     * When the Monitor is disconnected, the content scale becomes invalid.
     *
     * Use @ref setMonitorsChangeFn to know when a monitor is disconnected.
     *
     * @param monitor The monitor to use.
     * @return The monitor content scale (dpi).
     */
    Float2(CARB_ABI* getMonitorContentScale)(const Monitor* monitor);

    /**
     * Gets a monitors work area.
     *
     * The area of a monitor not occupied by global task bars or
     * menu bars is the work area
     *
     * The work area returned is only valid for the life of the Monitor.
     * When the Monitor is disconnected, the work area becomes invalid.
     *
     * Use @ref setMonitorsChangeFn to know when a monitor is disconnected.
     *
     * @param monitor The monitor to use.
     * @param positionOut The returned position.
     * @param sizeOut The returned size.
     */
    void(CARB_ABI* getMonitorWorkArea)(const Monitor* monitor, Int2* positionOut, Int2* sizeOut);

    /**
     * Sets the function for handling move events. Must be called on a main thread.
     *
     * @param window The window to use (shouldn't be nullptr).
     * @param onWindowMove The function callback to handle move events on the window (can be nullptr).
     * @param userData User-specified pointer to the data. Lifetime and value can be anything.
     */
    void(CARB_ABI* setWindowMoveFn)(Window* window, OnWindowMoveFn onWindowMove, void* userData);

    /**
     * Determines if the window is floating (or always-on-top).
     *
     * @param window The window to use.
     * @return true if the window is floating.
     */
    bool(CARB_ABI* isWindowFloating)(Window* window);

    /**
     * Sets the window into floating (always-on-top) or regular mode.
     *
     * @param window The window to use.
     * @param isFloating true to be set to floating (always-on-top), false to be set to regular.
     */
    void(CARB_ABI* setWindowFloating)(Window* window, bool isFloating);

    /**
     * Creates a new custom cursor image that can be set for a window with @ref setCursor. The cursor can be destroyed
     * with @ref destroyCursor.
     *
     * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight bits per channel with the red channel
     * first. They are arranged canonically as packed sequential rows, starting from the top-left corner.
     *
     * The cursor hotspot is specified in pixels, relative to the upper-left corner of the cursor image. Like all other
     * coordinate systems in GLFW, the X-axis points to the right and the Y-axis points down.
     *
     * @param image The desired cursor image.
     * @param xhot The desired x-coordinate, in pixels, of the cursor hotspot.
     * @param yhot The desired y-coordinate, in pixels, of the cursor hotspot.
     * @return created cursor, or nullptr if error occurred.
     */
    Cursor*(CARB_ABI* createCursor)(const Image& image, int32_t xhot, int32_t yhot);

    /**
     * Maximize the window.
     *
     * @param window The window to use.
     */
    void(CARB_ABI* maximizeWindow)(Window* window);

    /**
     * Minimize the window.
     *
     * @param window The window to use.
     */
    void(CARB_ABI* minimizeWindow)(Window* window);

    /**
     * Restore the window.
     *
     * @param window The window to use.
     */
    void(CARB_ABI* restoreWindow)(Window* window);

    /**
     * Sets the function for handling window maximize events.
     *
     * @param window The window to use.
     * @param onWindowMaximizeFn The function callback to handle maximize events on the window.
     * @param userData Opaque user data pointer passed back to the callback.
     */
    void(CARB_ABI* setWindowMaximizeFn)(Window* window, OnWindowMaximizeFn onWindowMaximizeFn, void* userData);

    /**
     * Determines if the window is maximized.
     *
     * @param window The window to use.
     * @return true if the window is maximized, false if it is not.
     */
    bool(CARB_ABI* isWindowMaximized)(Window* window);

    /**
     * This function sets the icon of the specified window.
     *
     * This function will do nothing when passed an invalid image, i.e. image.width == 0 or
     * image.height == 0 or image.pixels == nullptr
     *
     * The image.pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight
     * bits per channel with the red channel first. They are arranged canonically
     * as packed sequential rows, starting from the top-left corner.
     *
     * The desired image sizes vary depending on platform and system settings.
     * The selected images will be rescaled as needed. Good sizes include 16x16,
     * 32x32 and 48x48.
     *
     * @param window The window to use.
     * @param image The desired icon image.
     */
    void(CARB_ABI* setWindowIcon)(Window* window, const Image& image);
};
} // namespace windowing
} // namespace carb
|
omniverse-code/kit/include/carb/dictionary/DictionaryUtils.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief Utility helper functions for common dictionary operations.
#pragma once
#include "../Framework.h"
#include "../InterfaceUtils.h"
#include "../datasource/IDataSource.h"
#include "../extras/CmdLineParser.h"
#include "../filesystem/IFileSystem.h"
#include "../logging/Log.h"
#include "IDictionary.h"
#include "ISerializer.h"
#include <algorithm>
#include <string>
namespace carb
{
/** Namespace for @ref carb::dictionary::IDictionary related interfaces and helpers. */
namespace dictionary
{
/** Helper function to retrieve the IDictionary interface.
 *
 * @returns The cached @ref carb::dictionary::IDictionary interface.  The returned
 *          pointer remains cached until the plugin is unloaded.
 */
inline IDictionary* getCachedDictionaryInterface()
{
    IDictionary* const dictionary = getCachedInterface<IDictionary>();
    return dictionary;
}
/** Prototype for a callback function used to walk items in a dictionary.
 *
 * @tparam ElementData An arbitrary data type used as both a parameter and the return
 *                     value of the callback.  The callback itself is assumed to know
 *                     how to interpret and use this value.
 * @param[in] srcItem     The current item being visited.  This will never be `nullptr`.
 * @param[in] elementData An arbitrary data object passed into the callback by the caller of
 *                        walkDictionary().  The callback is assumed to know how to
 *                        interpret and use this value.
 * @param[in] userData    An opaque data object passed by the caller of walkDictionary().
 *                        The callback is assumed to know how to interpret and use
 *                        this object.
 * @returns An \a ElementData object or value to pass back to the dictionary walker.  When
 *          the callback returns from passing in a new dictionary value (ie: a child of the
 *          original dictionary), this value is stored and passed on to following callbacks.
 */
template <typename ElementData>
using OnItemFn = ElementData (*)(const Item* srcItem, ElementData elementData, void* userData);
/** Prototype for a callback function used to walk children in a dictionary.
 *
 * @tparam ItemPtrType The data type of the dictionary item in the dictionary being walked.
 *                     This should be either `Item` or `const Item`.
 * @param[in] dict The @ref IDictionary interface being used to access the items in the
 *                 dictionary during the walk.  This must not be `nullptr`.
 * @param[in] item The dictionary item to retrieve one of the child items from.  This
 *                 must not be `nullptr`.  This is assumed to be an item of type
 *                 @ref ItemType::eDictionary.
 * @param[in] idx  The zero based index of the child item of @p item to retrieve.  This
 *                 is expected to be within the range of the number of children in the
 *                 given dictionary item.
 * @returns The child item at the requested index in the given dictionary item @p item.  Returns
 *          `nullptr` if the given index is out of range of the number of children in the given
 *          dictionary item.
 *
 * @remarks This callback provides a way to control the order in which the items in a dictionary
 *          are walked.  A basic implementation is provided below.
 */
template <typename ItemPtrType>
inline ItemPtrType* getChildByIndex(IDictionary* dict, ItemPtrType* item, size_t idx);
/** Specialization for the getChildByIndex() callback that implements a simple retrieval of the
 * requested child item using @ref IDictionary::getItemChildByIndex().
 *
 * @sa getChildByIndex(IDictionary*,ItemPtrType*,size_t).
 */
template <>
inline const Item* getChildByIndex(IDictionary* dict, const Item* item, size_t idx)
{
    const Item* const child = dict->getItemChildByIndex(item, idx);
    return child;
}
/** Mode names for the ways to walk the requested dictionary. */
enum class WalkerMode
{
    /** When walking the dictionary, include the root item itself. */
    eIncludeRoot,

    /** When walking the dictionary, skip the root item and start the enumeration with
     *  the immediate children of the root item.
     */
    eSkipRoot
};
/** Walk a dictionary item to enumerate all of its values.
 *
 * @tparam ElementData                The data type for the per-item element data that is maintained
 *                                    during the walk.  This can be used for example to track which
 *                                    level of the dictionary a given item is at by using an `int`
 *                                    type here.
 * @tparam OnItemFnType               The type for the @p onItemFn callback function.
 * @tparam ItemPtrType                The type used for the item type in the @p GetChildByIndexFuncType
 *                                    callback.  This must either be `const Item` or `Item`.  This
 *                                    defaults to `const Item`.  If a non-const type is used here
 *                                    it is possible that items' values could be modified during the
 *                                    walk.  Using a non-const value is discouraged however since it
 *                                    can lead to unsafe use or undefined behavior.
 * @tparam GetChildByIndexFuncType    The type for the @p getChildByIndexFunc callback function.
 * @param[in] dict                The @ref IDictionary interface to use to access the items
 *                                in the dictionary.  This must not be `nullptr`.  This must
 *                                be the same interface that was originally used to create
 *                                the dictionary @p root being walked.
 * @param[in] walkerMode          The mode to walk the given dictionary in.
 * @param[in] root                The root dictionary to walk.  This must not be `nullptr`.
 * @param[in] rootElementData     The user specified element data value that is to be associated
 *                                with the @p root element.  This value can be changed during
 *                                the walk by the @p onItemFn callback function.
 * @param[in] onItemFn            The callback function that is performed for each value in the
 *                                given dictionary.  The user specified element data value can
 *                                be modified on each non-leaf item.  This modified element data
 *                                value is then passed to all further children of the given
 *                                item.  The element data value returned for leaf items is
 *                                discarded.  This must not be `nullptr`.
 * @param[in] userData            Opaque user data object that is passed to each @p onItemFn
 *                                callback.  The caller is responsible for knowing how to
 *                                interpret and access this value.
 * @param[in] getChildByIndexFunc Callback function to enumerate the children of a given
 *                                item in the dictionary being walked.  This must not be
 *                                `nullptr`.  This can be used to either control the order
 *                                in which the child items are enumerated (ie: sort them
 *                                before returning), or to return them as non-const objects
 *                                so that the item's value can be changed during enumeration.
 *                                Attempting to insert or remove items by using a non-const
 *                                child enumerator is unsafe and will generally result in
 *                                undefined behavior.
 * @returns No return value.
 *
 * @remarks This walks a dictionary and enumerates all of its values of all types.  This
 *          includes even @ref ItemType::eDictionary items.  Non-leaf items in the walk will
 *          be passed to the @p onItemFn callback before walking through its children.
 *          The @p getChildByIndexFunc callback function can be used to control the order in
 *          which the children of each level of the dictionary are enumerated.  The default
 *          implementation simply enumerates the items in the order they are stored in (which
 *          is generally arbitrary).  The dictionary's full tree is walked in a depth first
 *          manner so sibling items are not guaranteed to be enumerated consecutively.
 *
 * @thread_safety This function is thread safe as long as nothing else is concurrently modifying
 *                the dictionary being walked.  It is the caller's responsibility to ensure that
 *                neither the dictionary nor any of its children will be modified until the walk
 *                is complete.
 */
template <typename ElementData,
          typename OnItemFnType,
          typename ItemPtrType = const Item,
          typename GetChildByIndexFuncType CARB_NO_DOC(= decltype(getChildByIndex<ItemPtrType>))>
inline void walkDictionary(IDictionary* dict,
                           WalkerMode walkerMode,
                           ItemPtrType* root,
                           ElementData rootElementData,
                           OnItemFnType onItemFn,
                           void* userData,
                           GetChildByIndexFuncType getChildByIndexFunc = getChildByIndex<ItemPtrType>)
{
    if (!root)
    {
        return;
    }

    // One pending entry on the explicit depth-first stack.
    struct ValueToParse
    {
        ItemPtrType* srcItem; // the item still to be visited.
        ElementData elementData; // the element data inherited from the item's parent.
    };

    std::vector<ValueToParse> valuesToParse;
    valuesToParse.reserve(100); // avoid early reallocations for typical dictionary sizes.

    // Children are pushed in reverse index order so that popping from the back of the
    // stack visits them in their natural (ascending index) order.
    if (walkerMode == WalkerMode::eSkipRoot)
    {
        size_t numChildren = dict->getItemChildCount(root);
        for (size_t chIdx = 0; chIdx < numChildren; ++chIdx)
        {
            valuesToParse.push_back({ getChildByIndexFunc(dict, root, numChildren - chIdx - 1), rootElementData });
        }
    }
    else
    {
        valuesToParse.push_back({ root, rootElementData });
    }

    while (!valuesToParse.empty())
    {
        // Copy the entry and pop it immediately so no reference into the vector is held
        // while more entries may be pushed below (push_back can reallocate).
        const ValueToParse valueToParse = valuesToParse.back();
        valuesToParse.pop_back();

        ItemPtrType* curItem = valueToParse.srcItem;
        if (dict->getItemType(curItem) == ItemType::eDictionary)
        {
            // Non-leaf item: visit it first, then queue its children with the (possibly
            // updated) element data value returned by the callback.
            size_t numChildren = dict->getItemChildCount(curItem);
            ElementData elementData = onItemFn(curItem, valueToParse.elementData, userData);
            for (size_t chIdx = 0; chIdx < numChildren; ++chIdx)
            {
                valuesToParse.push_back({ getChildByIndexFunc(dict, curItem, numChildren - chIdx - 1), elementData });
            }
        }
        else
        {
            // Leaf item: the callback's return value is intentionally discarded.
            onItemFn(curItem, valueToParse.elementData, userData);
        }
    }
}
/** Attempts to retrieve the name of an item from a given path in a dictionary.
 *
 * @param[in] dict     The @ref IDictionary interface to use to access the items in the
 *                     dictionary.  This must not be `nullptr`.  This must be the same
 *                     interface that was originally used to create the dictionary
 *                     @p baseItem.
 * @param[in] baseItem The base item to retrieve the item name relative to.  This is expected
 *                     to contain the child path @p path.  This may not be `nullptr`.
 * @param[in] path     The item path relative to @p baseItem that indicates where to find the
 *                     item whose name should be retrieved.  This may be `nullptr` to retrieve
 *                     the name of @p baseItem itself.
 * @returns A string containing the name of the item at the given path relative to @p baseItem
 *          if it exists.  Returns an empty string if no item could be found at the requested
 *          path or a string buffer could not be allocated for its name.
 *
 * @thread_safety This call is thread safe.
 */
inline std::string getStringFromItemName(const IDictionary* dict, const Item* baseItem, const char* path = nullptr)
{
    const Item* item = dict->getItem(baseItem, path);
    if (!item)
    {
        return std::string();
    }

    const char* itemNameBuf = dict->createStringBufferFromItemName(item);
    if (!itemNameBuf)
    {
        // Buffer allocation failed; honor the documented contract of returning an empty
        // string instead of invoking undefined behavior by constructing from nullptr.
        return std::string();
    }

    std::string returnString = itemNameBuf;
    dict->destroyStringBuffer(itemNameBuf);
    return returnString;
}
/** Attempts to retrieve the value of an item from a given path in a dictionary.
 *
 * @param[in] dict     The @ref IDictionary interface to use to access the items in the
 *                     dictionary.  This must not be `nullptr`.  This must be the same
 *                     interface that was originally used to create the dictionary
 *                     @p baseItem.
 * @param[in] baseItem The base item to retrieve the item value relative to.  This is expected
 *                     to contain the child path @p path.  This may not be `nullptr`.
 * @param[in] path     The item path relative to @p baseItem that indicates where to find the
 *                     item whose value should be retrieved.  This may be `nullptr` to retrieve
 *                     the value of @p baseItem itself.
 * @returns A string containing the value of the item at the given path relative to @p baseItem
 *          if it exists.  If the requested item was not of type @ref ItemType::eString, the
 *          value will be converted to a string as best it can.  Returns an empty string if no
 *          item could be found at the requested path or a string buffer could not be allocated
 *          for its value.
 *
 * @thread_safety This call is thread safe.
 */
inline std::string getStringFromItemValue(const IDictionary* dict, const Item* baseItem, const char* path = nullptr)
{
    const Item* item = dict->getItem(baseItem, path);
    if (!item)
    {
        return std::string();
    }

    const char* stringBuf = dict->createStringBufferFromItemValue(item);
    if (!stringBuf)
    {
        // Buffer allocation failed; honor the documented contract of returning an empty
        // string instead of invoking undefined behavior by constructing from nullptr.
        return std::string();
    }

    std::string returnString = stringBuf;
    dict->destroyStringBuffer(stringBuf);
    return returnString;
}
/** Attempts to retrieve an array of string values from a given dictionary path.
 *
 * @param[in] dict     The @ref IDictionary interface to use to access the items in the
 *                     dictionary.  This must not be `nullptr`.  This must be the same
 *                     interface that was originally used to create the dictionary
 *                     @p baseItem.
 * @param[in] baseItem The base item to retrieve the item values relative to.  This is expected
 *                     to contain the child path @p path.  This may not be `nullptr`.
 * @param[in] path     The item path relative to @p baseItem that indicates where to find the
 *                     item whose value should be retrieved.  This may be `nullptr` to retrieve
 *                     the values of @p baseItem itself.  The value at this path is expected to
 *                     be an array of strings.
 * @returns A vector of string values for the array at the path @p path relative to @p baseItem.
 *          If the given path is not an array item, a vector containing a single value will be
 *          returned.  If @p path points to an item that is an array of something other than
 *          strings, a vector of empty strings will be returned instead.
 *
 * @thread_safety This call in itself is thread safe, however the retrieved array may contain
 *                unexpected or incorrect values if another thread is modifying the same item
 *                in the dictionary simultaneously.
 */
inline std::vector<std::string> getStringArray(const IDictionary* dict, const Item* baseItem, const char* path)
{
    const Item* itemAtKey = dict->getItem(baseItem, path);
    std::vector<std::string> stringArray(dict->getArrayLength(itemAtKey));
    for (size_t i = 0; i < stringArray.size(); i++)
    {
        // getStringBufferAt() may return nullptr for non-string elements; leave the entry
        // as an empty string in that case (per the documented contract) instead of
        // invoking undefined behavior by assigning nullptr to std::string.
        const char* buffer = dict->getStringBufferAt(itemAtKey, i);
        if (buffer)
        {
            stringArray[i] = buffer;
        }
    }
    return stringArray;
}
/** Attempts to retrieve an array of string values from a given dictionary path.
*
* @param[in] dict The @ref IDictionary interface to use to access the items in the
* dictionary. This must not be `nullptr`. This must be the same
* interface that was originally used to create the dictionary
* @p baseItem.
* @param[in] item The base item to retrieve the item values relative to. This is expected
* to contain the child path @p path. This may not be `nullptr`.
* @returns A vector of string values for the array at the path @p path relative to @p baseItem.
* If the given path is not an array item, a vector containing a single value will be
* returned. If @p path points to an item that is an array of something other than
* strings, a vector of empty strings will be returned instead.
*
* @thread_safety This call in itself is thread safe, however the retrieved array may contain
* unexpected or incorrect values if another thread is modifying the same item
* in the dictionary simultaneously.
*/
inline std::vector<std::string> getStringArray(const IDictionary* dict, const Item* item)
{
    // Convenience overload: read the string array stored on @p item itself rather
    // than at a child path.
    return getStringArray(dict, item, /*path=*/nullptr);
}
/** Sets an array of values at a given path relative to a dictionary item.
*
* @param[in] dict The @ref IDictionary interface to use to access the items in the
* dictionary. This must not be `nullptr`. This must be the same
* interface that was originally used to create the dictionary
* @p baseItem.
* @param[in] baseItem The base item to act as the root of where to set the values relative
* to. This is expected to contain the child path @p path. This may not
* be `nullptr`.
* @param[in] path The path to the item to set the array of strings in. This path must
* either already exist as an array of strings or be an empty item in the
* dictionary. This may be `nullptr` to set the string array into the
* item @p baseItem itself.
* @param[in] stringArray The array of strings to set in the dictionary. This may contain a
* different number of items than the existing array. If the number of
* items differs, this new array of values will replace the existing
* item at @p path entirely. If the count is the same as the previous
* item, values will simply be replaced.
* @returns No return value.
*
* @thread_safety This call itself is thread safe as long as no other call is trying to
* concurrently modify the same item in the dictionary. Results are undefined
* if another thread is modifying the same item in the dictionary. Similarly,
* undefined behavior may result if another thread is concurrently trying to
* retrieve the items from this same dictionary.
*/
inline void setStringArray(IDictionary* dict, Item* baseItem, const char* path, const std::vector<std::string>& stringArray)
{
    Item* itemAtKey = dict->getItemMutable(baseItem, path);
    // If an item of any concrete type already exists at this path (eCount means "no
    // item / no type"), destroy it first so the new array replaces it wholesale rather
    // than merging with stale elements.
    if (dict->getItemType(itemAtKey) != dictionary::ItemType::eCount)
    {
        dict->destroyItem(itemAtKey);
    }
    // NOTE(review): `itemAtKey` is passed to setStringAt() after destroyItem() above.
    // This relies on the IDictionary implementation tolerating (or re-creating) the
    // destroyed item handle — confirm against the IDictionary plugin's semantics.
    for (size_t i = 0, stringCount = stringArray.size(); i < stringCount; ++i)
    {
        dict->setStringAt(itemAtKey, i, stringArray[i].c_str());
    }
}
/** Sets an array of values at a given path relative to a dictionary item.
*
* @param[in] dict The @ref IDictionary interface to use to access the items in the
* dictionary. This must not be `nullptr`. This must be the same
* interface that was originally used to create the dictionary
* @p baseItem.
* @param[in] item The base item to act as the root of where to set the values relative
* to. This is expected to contain the child path @p path. This may not
* be `nullptr`.
* @param[in] stringArray The array of strings to set in the dictionary. This may contain a
* different number of items than the existing array. If the number of
* items differs, this new array of values will replace the existing
* item at @p path entirely. If the count is the same as the previous
* item, values will simply be replaced.
* @returns No return value.
*
* @thread_safety This call itself is thread safe as long as no other call is trying to
* concurrently modify the same item in the dictionary. Results are undefined
* if another thread is modifying the same item in the dictionary. Similarly,
* undefined behavior may result if another thread is concurrently trying to
* retrieve the items from this same dictionary.
*/
inline void setStringArray(IDictionary* dict, Item* item, const std::vector<std::string>& stringArray)
{
    // Convenience overload: write the array directly onto @p item (no child path).
    setStringArray(dict, item, nullptr, stringArray);
}
/** Attempts to set a value in a dictionary with an attempt to detect the value type.
*
* @param[in] id The @ref IDictionary interface to use to access the items in the
* dictionary. This must not be `nullptr`. This must be the same
* interface that was originally used to create the dictionary
* @p dict.
* @param[in] dict The base item to act as the root of where to set the value relative
* to. This may not be `nullptr`.
* @param[in] path The path to the item to set the value in. This path does not need to
* exist yet in the dictionary. This may be `nullptr` to create the new
* value in the @p dict item itself.
* @param[in] value The new value to set expressed as a string. An attempt will be made to
* detect the type of the data from the contents of the string. If there
* are surrounding quotation marks, it will be treated as a string. If the
* value is a case insensitive variant on `FALSE` or `TRUE`, it will be
* treated as a boolean value. If the value fully converts to an integer or
* floating point value, it will be treated as those types. Otherwise the
* value is stored unmodified as a string.
* @returns No return value.
*
* @thread_safety This call is thread safe.
*/
inline void setDictionaryElementAutoType(IDictionary* id, Item* dict, const std::string& path, const std::string& value)
{
    if (!path.empty())
    {
        // We should validate that provided path is a proper path but for now we just use it
        //
        // Simple rules to support basic values:
        // if the value starts and ends with quotes (" or ') then it's the string inside the quotes
        // else if we can parse the value as a bool, int or float then we read it
        // according to the type. Otherwise we consider it to be a string.
        // Special case, if the string is empty, write an empty string early
        if (value.empty())
        {
            constexpr const char* kEmptyString = "";
            id->makeStringAtPath(dict, path.c_str(), kEmptyString);
            return;
        }
        if (value.size() > 1 &&
            ((value.front() == '"' && value.back() == '"') || (value.front() == '\'' && value.back() == '\'')))
        {
            // string value - chop off quotes
            id->makeStringAtPath(dict, path.c_str(), value.substr(1, value.size() - 2).c_str());
            return;
        }
        // Convert the value to upper case to simplify checks.
        // Note: passing a negative char (e.g. a UTF-8 byte where char is signed) to
        // ::toupper() is undefined behavior, so widen through unsigned char first.
        std::string uppercaseValue = value;
        std::transform(value.begin(), value.end(), uppercaseValue.begin(), [](const char c) {
            return static_cast<char>(::toupper(static_cast<unsigned char>(c)));
        });
        // let's see if it's a boolean
        if (uppercaseValue == "TRUE")
        {
            id->makeBoolAtPath(dict, path.c_str(), true);
            return;
        }
        if (uppercaseValue == "FALSE")
        {
            id->makeBoolAtPath(dict, path.c_str(), false);
            return;
        }
        // let's see if it's an integer
        size_t valueLen = value.length();
        char* endptr;
        // Use a radix of 0 to allow for decimal, octal, and hexadecimal values to all be parsed.
        const long long int valueAsInt = strtoll(value.c_str(), &endptr, 0);
        if (endptr - value.c_str() == (ptrdiff_t)valueLen)
        {
            id->makeInt64AtPath(dict, path.c_str(), valueAsInt);
            return;
        }
        // let's see if it's a float
        const double valueAsFloat = strtod(value.c_str(), &endptr);
        if (endptr - value.c_str() == (ptrdiff_t)valueLen)
        {
            id->makeFloat64AtPath(dict, path.c_str(), valueAsFloat);
            return;
        }
        // none of the above matched; store the value unmodified as a string
        id->makeStringAtPath(dict, path.c_str(), value.c_str());
    }
}
/** Sets a series of values in a dictionary based on keys and values in a map object.
*
* @param[in] id The @ref IDictionary interface to use to access the items in the
* dictionary. This must not be `nullptr`. This must be the same
* interface that was originally used to create the dictionary
* @p dict.
* @param[in] dict The base item to act as the root of where to set the values relative
* to. This may not be `nullptr`.
* @param[in] mapping A map containing item paths (as the map keys) and their values to be
* set in the dictionary.
* @returns No return value.
*
* @remarks This takes a map of path and value pairs and sets those values into the given
* dictionary @p dict. Each entry in the map identifies a potential new value to
* create in the dictionary. The paths to each of the new values do not have to
* already exist in the dictionary. The new items will be created as needed. If
* a given path already exists in the dictionary, its value is replaced with the
* one from the map. All values will attempt to auto-detect their type based on
* the content of the string value. See setDictionaryElementAutoType() for more
* info on how the types are detected.
*
* @note If the map contains entries for an array and the array also exists in the dictionary,
* the resulting dictionary could have more or fewer elements in the array entries if the
* map either contained fewer items than the previous array's size or contained a
* non-consecutive set of numbered elements in the array. If the array already exists in
* the dictionary, it will not be destroyed or removed before adding the new values.
*
* @thread_safety This itself operation is thread safe, but a race condition may still exist
* if multiple threads are trying to set the values for the same set of items
* simultaneously. The operation will succeed, but the value that gets set in
* each item in the end is undefined.
*/
inline void setDictionaryFromStringMapping(IDictionary* id, Item* dict, const std::map<std::string, std::string>& mapping)
{
    // Each map entry is a (path, value) pair; the value's type is auto-detected
    // per entry by setDictionaryElementAutoType().
    for (const auto& entry : mapping)
    {
        setDictionaryElementAutoType(id, dict, entry.first, entry.second);
    }
}
/** Parses a set of command line arguments for dictionary items arguments and sets them.
*
* @param[in] id The @ref IDictionary interface to use to access the items in the
* dictionary. This must not be `nullptr`. This must be the same
* interface that was originally used to create the dictionary
* @p dict.
* @param[in] dict The base item to act as the root of where to set the values relative
* to. This may not be `nullptr`.
* @param[in] argv The Unix style argument array for the command line to the process. This
* must not be `nullptr`. The first entry in this array is expected to be
* the process's name. Only the arguments starting with @p prefix will be
* parsed here.
* @param[in] argc The Unix style argument count for the total number of items in @p argv
* to parse.
* @param[in] prefix A string indicating the prefix of arguments that should be parsed by this
* operation. This may not be `nullptr`. Arguments that do not start with
* this prefix will simply be ignored.
* @returns No return value.
*
* @remarks This parses command line arguments to find ones that should be added to a settings
* dictionary. Only arguments beginning with the given prefix will be added. The
* type of each individual item added to the dictionary will be automatically detected
* based on the same criteria used for setDictionaryElementAutoType().
*
* @thread_safety This operation itself is thread safe, but a race condition may still exist
* if multiple threads are trying to set the values for the same set of items
* simultaneously. The operation will succeed, but the value that gets set in
* each item in the end is undefined.
*/
inline void setDictionaryFromCmdLine(IDictionary* id, Item* dict, char** argv, int argc, const char* prefix = "--/")
{
    // Extract only the arguments beginning with @p prefix, then apply the resulting
    // (path, value) pairs to the dictionary with automatic type detection.
    carb::extras::CmdLineParser parser(prefix);
    parser.parse(argv, argc);
    setDictionaryFromStringMapping(id, dict, parser.getOptions());
}
/** Parses a string representation of an array and sets it relative to a dictionary path.
*
* @param[in] dictionaryInterface The @ref IDictionary interface to use to access the items in
* the dictionary. This must not be `nullptr`. This must be the
* same interface that was originally used to create the
* dictionary @p targetDictionary.
* @param[in] targetDictionary The base item to act as the root of where to set the values
* relative to. This may not be `nullptr`.
* @param[in] elementPath The path to the item to set the values in. This path does not
* need to exist yet in the dictionary. This may be an empty
* string to create the new values in the @p dict item itself.
* Any item at this path will be completely overwritten by this
* operation.
* @param[in] elementValue The string containing the values to parse for the new array
* value. These are expected to be expressed in the format
* "[<value1>, <value2>, <value3>, ...]" (ie: all values enclosed
* in a single set of square brackets with each individual value
* separated by commas). Individual value strings may not
* contain a comma (even if escaped or surrounded by quotation
* marks) otherwise they will be seen as separate values and
* likely not set appropriately in the dictionary. This must
* not be an empty string and must contain at least the square
* brackets at either end of the string.
* @returns No return value.
*
* @remarks This parses an array of values from a string into a dictionary array item. The array
* string is expected to have the format "[<value1>, <value2>, <value3>, ...]". Quoted
* values are not respected if they contain internal commas (the comma is still seen as
* a value separator in this case). Each value parsed from the array will be set in the
* dictionary item with its data type detected from its content. This detection is done
* in the same manner as in setDictionaryElementAutoType().
*
* @thread_safety This call itself is thread safe. However, if another thread is simultaneously
* attempting to modify, retrieve, or delete items or values in the same branch of
* the dictionary, the results may be undefined.
*/
inline void setDictionaryArrayElementFromStringValue(dictionary::IDictionary* dictionaryInterface,
                                                     dictionary::Item* targetDictionary,
                                                     const std::string& elementPath,
                                                     const std::string& elementValue)
{
    // Nothing to do without a destination path.
    if (elementPath.empty())
    {
        return;
    }
    // Precondition: the value must be at least "[]" with enclosing square brackets.
    // NOTE(review): this is only checked in debug builds; in release an ill-formed
    // @p elementValue reaches the substr() calls below unchecked.
    CARB_ASSERT(elementValue.size() >= 2 && elementValue.front() == '[' && elementValue.back() == ']');
    // Force delete item if it exists before creating a new array
    dictionary::Item* arrayItem = dictionaryInterface->getItemMutable(targetDictionary, elementPath.c_str());
    if (arrayItem)
    {
        dictionaryInterface->destroyItem(arrayItem);
    }
    // Creating a new dictionary element at the required path
    arrayItem = dictionaryInterface->makeDictionaryAtPath(targetDictionary, elementPath.c_str());
    // Setting necessary flag to make it a proper empty array
    // This will result in correct item replacement in case of dictionary merging
    dictionaryInterface->setItemFlag(arrayItem, dictionary::ItemFlag::eUnitSubtree, true);
    // Skip initial and the last square brackets and consider all elements separated by commas one by one
    // For each value create corresponding new path including index
    // Ex. "/some/path=[10,20]" will be processed as "/some/path/0=10" and "/some/path/1=20"
    // (Commas are always treated as separators, even inside quotes — see the doc comment.)
    const std::string commonElementPath = elementPath + '/';
    size_t curElementIndex = 0;
    // Helper adds provided value into the dictionary and increases index for the next addition
    auto dictElementAddHelper = [&](std::string value) {
        carb::extras::trimStringInplace(value);
        // Processing only non empty strings, empty string values should be stated as "": [ "a", "", "b" ]
        if (value.empty())
        {
            CARB_LOG_WARN(
                "Encountered and skipped an empty value for dictionary array element '%s' while parsing value '%s'",
                elementPath.c_str(), elementValue.c_str());
            return;
        }
        carb::dictionary::setDictionaryElementAutoType(
            dictionaryInterface, targetDictionary, commonElementPath + std::to_string(curElementIndex), value);
        ++curElementIndex;
    };
    // Start scanning just past the opening '['.
    std::string::size_type curValueStartPos = 1;
    // Add comma-separated values (except for the last one)
    for (std::string::size_type curCommaPos = elementValue.find(',', curValueStartPos);
         curCommaPos != std::string::npos; curCommaPos = elementValue.find(',', curValueStartPos))
    {
        dictElementAddHelper(elementValue.substr(curValueStartPos, curCommaPos - curValueStartPos));
        curValueStartPos = curCommaPos + 1;
    }
    // Now only the last value is left for addition (everything up to the closing ']').
    std::string lastValue = elementValue.substr(curValueStartPos, elementValue.size() - curValueStartPos - 1);
    carb::extras::trimStringInplace(lastValue);
    // Do nothing if it's just a trailing comma: [ 1, 2, 3, ]
    if (!lastValue.empty())
    {
        carb::dictionary::setDictionaryElementAutoType(
            dictionaryInterface, targetDictionary, commonElementPath + std::to_string(curElementIndex), lastValue);
    }
}
/** Attempts to read the contents of a file into a dictionary.
*
* @param[in] serializer The @ref ISerializer interface to use to parse the data in the file.
* This serializer must be able to parse the assumed format of the named
* file (ie: JSON or TOML). This must not be `nullptr`. Note that this
* interface will internally depend on only a single implementation of
* the @ref IDictionary interface having been loaded. If multiple
* implementations are available, this operation will be likely to
* result in undefined behavior.
* @param[in] filename The name of the file to parse in the format assumed by the serializer
* interface @p serializer. This must not be `nullptr`.
* @returns A new dictionary item containing the information parsed from the file @p filename if
* it is successfully opened, read, and parsed. When no longer needed, this must be
* passed to @ref IDictionary::destroyItem() to destroy it. Returns `nullptr` if the
* file does not exit, could not be opened, or a parsing error occurred.
*
* @remarks This attempts to parse a dictionary data from a file. The format that the file's
* data is parsed from will be implied by the specific implementation of the @ref
* ISerializer interface that is passed in. The data found in the file will be parsed
* into a full dictionary hierarchy if it is parsed correctly. If the file contains
* a syntax error, the specific result (ie: full failure vs partial success) is
* determined by the specific behavior of the @ref ISerializer interface.
*
* @thread_safety This operation is thread safe.
*/
inline Item* createDictionaryFromFile(ISerializer* serializer, const char* filename)
{
    carb::filesystem::IFileSystem* fs = carb::getCachedInterface<carb::filesystem::IFileSystem>();
    auto file = fs->openFileToRead(filename);
    if (!file)
        return nullptr;
    const size_t fileSize = fs->getFileSize(file);
    const size_t contentLen = fileSize + 1; // +1 for the NUL terminator appended below
    std::unique_ptr<char[]> heap;
    char* content;
    if (contentLen <= 4096)
    {
        // Small files are read into stack memory to avoid a heap allocation.
        content = CARB_STACK_ALLOC(char, contentLen);
    }
    else
    {
        heap.reset(new char[contentLen]);
        content = heap.get();
    }
    // Read at most fileSize bytes. Requesting contentLen bytes could fill the entire
    // buffer if the file grew after getFileSize(), and the NUL-terminator store below
    // would then write one byte past the end of the buffer.
    const size_t readBytes = fs->readFileChunk(file, content, fileSize);
    fs->closeFile(file);
    if (readBytes != fileSize)
    {
        CARB_LOG_ERROR("Only read %zu bytes of a total of %zu bytes from file '%s'", readBytes, fileSize, filename);
    }
    // NUL terminate
    content[readBytes] = '\0';
    return serializer->createDictionaryFromStringBuffer(content, readBytes, fDeserializerOptionInSitu);
}
/** Writes the contents of a dictionary to a file.
*
* @param[in] serializer The @ref ISerializer interface to use to format the dictionary
* data before writing it to file. This may not be `nullptr`.
* @param[in] dictionary The dictionary root item to format and write to file. This
* not be `nullptr`. This must have been created by the same
* @ref IDictionary interface that @p serializer uses internally.
* @param[in] filename The name of the file to write the formatted dictionary data
* to. This file will be unconditionally overwritten. It is the
* caller's responsibility to ensure any previous file at this
* location can be safely overwritten. This must not be
* `nullptr`.
* @param[in] serializerOptions Option flags passed to the @p serializer interface when
* formatting the dictionary data.
* @returns No return value.
*
* @remarks This formats the contents of a dictionary to a string and writes it to a file. The
* file will be formatted according to the serializer interface that is used. The
* extra flag options passed to the serializer in @p serializerOptions control the
* specifics of the formatting. The formatted data will be written to the file as
* long as it can be opened successfully for writing.
*
* @thread_safety This operation itself is thread safe. However, if another thread is attempting
* to modify the dictionary @p dictionary at the same time, a race condition may
* exist and undefined behavior could occur. It won't crash but the written data
* may be unexpected.
*/
inline void saveFileFromDictionary(ISerializer* serializer,
                                   const dictionary::Item* dictionary,
                                   const char* filename,
                                   SerializerOptions serializerOptions)
{
    const char* serializedString = serializer->createStringBufferFromDictionary(dictionary, serializerOptions);
    // Serialization can fail and return nullptr; calling strlen(nullptr) below would be
    // undefined behavior.
    if (serializedString == nullptr)
    {
        CARB_LOG_ERROR("failed to serialize the dictionary - unable to save it to file '%s'", filename);
        return;
    }
    filesystem::IFileSystem* fs = getFramework()->acquireInterface<filesystem::IFileSystem>();
    filesystem::File* sFile = fs->openFileToWrite(filename);
    if (sFile == nullptr)
    {
        CARB_LOG_ERROR("failed to open file '%s' - unable to save the dictionary", filename);
        // Don't leak the serialized buffer on the early-return path.
        serializer->destroyStringBuffer(serializedString);
        return;
    }
    fs->writeFileChunk(sFile, serializedString, strlen(serializedString));
    fs->closeFile(sFile);
    serializer->destroyStringBuffer(serializedString);
}
/** Writes a dictionary to a string.
*
* @param[in] c The dictionary to be serialized. This must not be `nullptr`.
* This dictionary must have been created by the same dictionary
* interface that the serializer will use. The serializer that is
* used controls how the string is formatted.
* @param[in] serializerName The name of the serializer plugin to use. This must be the name
* of the serializer plugin to be potentially loaded and used. For
* example, "carb.dictionary.serializer-json.plugin" to serialize to
* use the JSON serializer. The serializer plugin must already be
* known to the framework. This may be `nullptr` to pick the first
* or best serializer plugin instead. If the serializer plugin with
* this name cannot be found or the @ref ISerializer interface
* cannot be acquired from it, the first loaded serializer interface
* will be acquired and used instead.
* @returns A string containing the human readable contents of the dictionary @p c. If the
* dictionary failed to be written, an empty string will be returned but the operation
* will still be considered successful. The dictionary will always be formatted using
* the @ref fSerializerOptionMakePretty flag so that it is as human-readable as
* possible.
*
* @thread_safety This operation itself is thread safe. However, if the dictionary @p c is
* being modified concurrently by another thread, the output contents may be
* unexpected.
*/
inline std::string dumpToString(const dictionary::Item* c, const char* serializerName = nullptr)
{
    std::string serializedDictionary;
    Framework* framework = carb::getFramework();
    dictionary::ISerializer* configSerializer = nullptr;
    // First, try to acquire interface with provided plugin name, if any
    if (serializerName)
    {
        configSerializer = framework->tryAcquireInterface<dictionary::ISerializer>(serializerName);
    }
    // If not available, or plugin name is not provided, try to acquire any serializer interface
    if (!configSerializer)
    {
        configSerializer = framework->tryAcquireInterface<dictionary::ISerializer>();
    }
    // If no serializer plugin is loaded at all, fail gracefully with an empty string
    // instead of dereferencing a null interface pointer.
    if (!configSerializer)
    {
        return serializedDictionary;
    }
    const char* configString =
        configSerializer->createStringBufferFromDictionary(c, dictionary::fSerializerOptionMakePretty);
    if (configString != nullptr)
    {
        serializedDictionary = configString;
        configSerializer->destroyStringBuffer(configString);
    }
    return serializedDictionary;
}
/** Retrieves the full path to dictionary item from its top-most ancestor.
*
* @param[in] dict The @ref IDictionary interface to use to retrieve the full path to the
* requested dictionary item. This must not be `nullptr`.
* @param[in] item The dictionary item to retrieve the full path to. This may not be `nullptr`.
* This item must have been created by the same @ref IDictionary interface passed
* in as @p dict.
* @returns A string containing the full path to the dictionary item @p item. This path will be
* relative to its top-most ancestor. On failure, an empty string is returned.
*
* @thread_safety This operation itself is thread safe. However, if the item or its chain of
* ancestors is being modified concurrently, undefined behavior may result.
*/
inline std::string getItemFullPath(dictionary::IDictionary* dict, const carb::dictionary::Item* item)
{
    if (!item)
    {
        return std::string();
    }
    // Walk up to the root, collecting each ancestor's name (leaf first).
    std::vector<const char*> names;
    for (const carb::dictionary::Item* cur = item; cur; cur = dict->getItemParent(cur))
    {
        names.push_back(dict->getItemName(cur));
    }
    // Pre-size the output: one '/' separator plus the name length for each element.
    size_t required = 0;
    for (const char* name : names)
    {
        required += 1;
        if (name)
        {
            required += std::strlen(name);
        }
    }
    std::string fullPath;
    fullPath.reserve(required);
    // Emit root-first, i.e. in reverse of the collection order above.
    for (auto rit = names.rbegin(); rit != names.rend(); ++rit)
    {
        fullPath += '/';
        if (*rit)
        {
            fullPath += *rit;
        }
    }
    return fullPath;
}
/** Helper function to convert a data type to a corresponding dictionary item type.
 *
 * @tparam Type The primitive data type to convert to a dictionary item type. This operation
 *              is undefined for types other than the handful of primitive types it is
 *              explicitly specialized for. If another data type is used here, a link
 *              error will occur.
 * @returns The dictionary item type corresponding to the templated primitive data type.
 *
 * @thread_safety This operation is thread safe.
 */
template <typename Type>
inline ItemType toItemType();
/** Specialization for an `int32_t` item value.
 * @copydoc toItemType().
 */
template <>
inline ItemType toItemType<int32_t>()
{
    return ItemType::eInt;
}
/** Specialization for an `int64_t` item value.
 * @copydoc toItemType().
 */
template <>
inline ItemType toItemType<int64_t>()
{
    return ItemType::eInt;
}
/** Specialization for a `float` item value.
 * @copydoc toItemType().
 */
template <>
inline ItemType toItemType<float>()
{
    return ItemType::eFloat;
}
/** Specialization for a `double` item value.
 * @copydoc toItemType().
 */
template <>
inline ItemType toItemType<double>()
{
    return ItemType::eFloat;
}
/** Specialization for a `bool` item value.
 * @copydoc toItemType().
 */
template <>
inline ItemType toItemType<bool>()
{
    return ItemType::eBool;
}
/** Specialization for a `char*` item value.
 * @copydoc toItemType().
 */
template <>
inline ItemType toItemType<char*>()
{
    return ItemType::eString;
}
/** Specialization for a `const char*` item value.
 * @copydoc toItemType().
 */
template <>
inline ItemType toItemType<const char*>()
{
    return ItemType::eString;
}
/** Unsubscribes all items in a dictionary tree from change notifications.
*
* @param[in] dict The @ref IDictionary interface to use when walking the dictionary. This must
* not be `nullptr`. This must be the same @ref IDictionary interface that was
* used to create the dictionary item @p item.
* @param[in] item The dictionary item to unsubscribe all nodes from change notifications. This
* must not be `nullptr`. Each item in this dictionary's tree will have all of
* its tree and node change subscriptions removed.
* @returns No return value.
*
* @remarks This removes all change notification subscriptions for an entire tree in a
* dictionary. This should only be used as a last cleanup effort to prevent potential
* shutdown crashes since it will even remove subscriptions that the caller didn't
* necessarily setup.
*
* @thread_safety This operation is thread safe.
*/
inline void unsubscribeTreeFromAllEvents(IDictionary* dict, Item* item)
{
    // Visitor invoked on every node: remove both node-level and subtree-level
    // change subscriptions. The element data is passed through unchanged.
    auto removeSubscriptions = [](Item* node, uint32_t elementData, void* context) -> uint32_t {
        IDictionary* iface = static_cast<IDictionary*>(context);
        iface->unsubscribeItemFromNodeChangeEvents(node);
        iface->unsubscribeItemFromTreeChangeEvents(node);
        return elementData;
    };
    // Mutable child accessor: the visitor needs non-const items to unsubscribe them.
    const auto mutableChildGetter = [](IDictionary* iface, Item* parent, size_t index) {
        return iface->getItemChildByIndexMutable(parent, index);
    };
    walkDictionary(dict, WalkerMode::eIncludeRoot, item, 0, removeSubscriptions, dict, mutableChildGetter);
}
/** Helper function for IDictionary::update() that ensures arrays are properly overwritten.
*
* @param[in] dstItem The destination dictionary item for the merge operation. This
* may be `nullptr` if the destination item doesn't already exist
* in the tree (ie: a new item is being merged into the tree).
* @param[in] dstItemType The data type of the item @p dstItem. If @p dstItem is
* `nullptr`, this will be @ref ItemType::eCount.
* @param[in] srcItem The source dictionary item for the merge operation. This will
* never be `nullptr`. This will be the new value or node that
* is being merged into the dictionary tree.
* @param[in] srcItemType The data type of the item @p srcItem.
* @param[in] dictionaryInterface The @ref IDictionary interface to use when merging the new
* item into the dictionary tree. This is expected to be passed
* into the @a userData parameter for IDictionary::update().
* @returns an @ref UpdateAction value indicating how the merge operation should proceed.
*
* @remarks This is intended to be used as the @ref OnUpdateItemFn callback function for the
* @ref IDictionary::update() function when the handling of merging array items is
* potentially needed. When this is used, the @ref IDictionary interface object must
* be passed into the @a userData parameter of IDictionary::update().
*
* @thread_safety This operation is thread safe. However, this call isn't expected to be used
* directly, but rather through the IDictionary::update() function. The overall
* thread safety of that operation should be noted instead.
*/
inline UpdateAction overwriteOriginalWithArrayHandling(
    const Item* dstItem, ItemType dstItemType, const Item* srcItem, ItemType srcItemType, void* dictionaryInterface)
{
    CARB_UNUSED(dstItemType, srcItemType);
    // With no existing destination item or no dictionary interface available there is
    // nothing special to decide; fall back to a plain overwrite.
    if (!dstItem || !dictionaryInterface)
    {
        return carb::dictionary::UpdateAction::eOverwrite;
    }
    auto* dictInt = static_cast<carb::dictionary::IDictionary*>(dictionaryInterface);
    // Source items flagged as "unit subtrees" (e.g. arrays) must replace the destination
    // wholesale so stale trailing elements of a longer destination array don't survive.
    return dictInt->getItemFlag(srcItem, carb::dictionary::ItemFlag::eUnitSubtree) ?
               carb::dictionary::UpdateAction::eReplaceSubtree :
               carb::dictionary::UpdateAction::eOverwrite;
}
} // namespace dictionary
} // namespace carb
|
omniverse-code/kit/include/carb/dictionary/DictionaryBindingsPython.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "../cpp/Optional.h"
#include "DictionaryUtils.h"
#include "IDictionary.h"
#include "ISerializer.h"
#include <memory>
#include <vector>
namespace carb
{
namespace dictionary
{
// Opaque stand-in for carb::dictionary::Item used by the Python bindings. The real
// definition lives in the C++ plugin; Python code only ever holds pointers to it.
struct Item
{
};
} // namespace dictionary
} // namespace carb
namespace carb
{
namespace dictionary
{
// Converts a std::vector into a Python tuple whose i-th element is a copy of v[i].
template <typename T>
inline py::tuple toTuple(const std::vector<T>& v)
{
    const size_t count = v.size();
    py::tuple result(count);
    for (size_t idx = 0; idx < count; ++idx)
    {
        result[idx] = v[idx];
    }
    return result;
}
// Recursively converts a dictionary item (or the item at @p path below it) into the
// corresponding Python object: int, float, bool, str, tuple/list (for array items),
// or dict (for non-array dictionary items); anything else becomes None.
// Prerequisites: dictionary lock must be held followed by GIL
inline py::object getPyObjectLocked(dictionary::ScopedRead& lock,
                                    const dictionary::IDictionary* idictionary,
                                    const Item* baseItem,
                                    const char* path = "")
{
    CARB_ASSERT(baseItem);
    // An empty/null path means "convert baseItem itself".
    const Item* item = path && *path != '\0' ? idictionary->getItem(baseItem, path) : baseItem;
    ItemType itemType = idictionary->getItemType(item);
    switch (itemType)
    {
    case ItemType::eInt:
    {
        return py::int_(idictionary->getAsInt64(item));
    }
    case ItemType::eFloat:
    {
        return py::float_(idictionary->getAsFloat64(item));
    }
    case ItemType::eBool:
    {
        return py::bool_(idictionary->getAsBool(item));
    }
    case ItemType::eString:
    {
        return py::str(getStringFromItemValue(idictionary, item));
    }
    case ItemType::eDictionary:
    {
        // A non-zero array length means this dictionary item is an array.
        size_t const arrayLength = idictionary->getArrayLength(item);
        if (arrayLength > 0)
        {
            py::tuple v(arrayLength);
            bool needsList = false;
            for (size_t idx = 0; idx != arrayLength; ++idx)
            {
                // Recurse for each element; nested containers are converted too.
                v[idx] = getPyObjectLocked(lock, idictionary, idictionary->getItemChildByIndex(item, idx));
                if (py::isinstance<py::dict>(v[idx]))
                {
                    // The old code would return a list of dictionaries, but a tuple of everything else. *shrug*
                    needsList = true;
                }
            }
            if (needsList)
            {
                return py::list(std::move(v));
            }
            return v;
        }
        else
        {
            // Non-array dictionary: build a Python dict keyed by child item names.
            size_t childCount = idictionary->getItemChildCount(item);
            py::dict v;
            for (size_t idx = 0; idx < childCount; ++idx)
            {
                const dictionary::Item* childItem = idictionary->getItemChildByIndex(item, idx);
                if (childItem)
                {
                    v[idictionary->getItemName(childItem)] = getPyObjectLocked(lock, idictionary, childItem);
                }
            }
            return v;
        }
    }
    default:
        return py::none();
    }
}
// Converts the item at `path` under `baseItem` to a Python object, handling all locking.
// Returns None when `baseItem` is null.
inline py::object getPyObject(const dictionary::IDictionary* idictionary, const Item* baseItem, const char* path = "")
{
    if (!baseItem)
    {
        return py::none();
    }
    // We need both the dictionary lock and the GIL, but we should take the GIL last, so release the GIL temporarily,
    // grab the dictionary lock and then re-lock the GIL by resetting the optional<>.
    cpp::optional<py::gil_scoped_release> nogil{ cpp::in_place };
    dictionary::ScopedRead readLock(*idictionary, baseItem);
    nogil.reset();
    return getPyObjectLocked(readLock, idictionary, baseItem, path);
}
// Writes a Python value into the dictionary at `path` relative to `baseItem`, replacing
// whatever was there. Supported types: bool, int, float, str, tuple/list (stored as an
// array-style dictionary) and dict (stored as a map-style dictionary, recursively).
// Unsupported element types inside sequences are skipped with a warning.
inline void setPyObject(dictionary::IDictionary* idictionary, Item* baseItem, const char* path, const py::handle& value)
{
    // Helper: writes every key/value pair of a Python dict as children of `path` by
    // recursing through setPyObject() for full per-value type dispatch.
    auto createDict = [](dictionary::IDictionary* idictionary, Item* baseItem, const char* path, const py::handle& value) {
        py::dict valueDict = value.cast<py::dict>();
        for (auto kv : valueDict)
        {
            std::string basePath = path ? path : "";
            if (!basePath.empty())
                basePath = basePath + "/";
            std::string subPath = basePath + kv.first.cast<std::string>().c_str();
            setPyObject(idictionary, baseItem, subPath.c_str(), kv.second);
        }
    };
    // For each scalar: extract the value while the GIL is held, then release the GIL for
    // the dictionary write. bool is tested before int because a Python bool is also an
    // instance of int.
    if (py::isinstance<py::bool_>(value))
    {
        auto val = value.cast<bool>();
        py::gil_scoped_release nogil;
        idictionary->makeBoolAtPath(baseItem, path, val);
    }
    else if (py::isinstance<py::int_>(value))
    {
        auto val = value.cast<int64_t>();
        py::gil_scoped_release nogil;
        idictionary->makeInt64AtPath(baseItem, path, val);
    }
    else if (py::isinstance<py::float_>(value))
    {
        auto val = value.cast<double>();
        py::gil_scoped_release nogil;
        idictionary->makeFloat64AtPath(baseItem, path, val);
    }
    else if (py::isinstance<py::str>(value))
    {
        auto val = value.cast<std::string>();
        py::gil_scoped_release nogil;
        idictionary->makeStringAtPath(baseItem, path, val.c_str());
    }
    else if (py::isinstance<py::tuple>(value) || py::isinstance<py::list>(value))
    {
        // Sequences become an array-style dictionary: clear any existing children first,
        // then set each element by index.
        Item* item;
        py::sequence valueSeq = value.cast<py::sequence>();
        {
            py::gil_scoped_release nogil;
            item = idictionary->makeDictionaryAtPath(baseItem, path);
            idictionary->deleteChildren(item);
        }
        for (size_t idx = 0, valueSeqSize = valueSeq.size(); idx < valueSeqSize; ++idx)
        {
            py::object valueSeqElement = valueSeq[idx];
            if (py::isinstance<py::bool_>(valueSeqElement))
            {
                auto val = valueSeqElement.cast<bool>();
                py::gil_scoped_release nogil;
                idictionary->setBoolAt(item, idx, val);
            }
            else if (py::isinstance<py::int_>(valueSeqElement))
            {
                auto val = valueSeqElement.cast<int64_t>();
                py::gil_scoped_release nogil;
                idictionary->setInt64At(item, idx, val);
            }
            else if (py::isinstance<py::float_>(valueSeqElement))
            {
                auto val = valueSeqElement.cast<double>();
                py::gil_scoped_release nogil;
                idictionary->setFloat64At(item, idx, val);
            }
            else if (py::isinstance<py::str>(valueSeqElement))
            {
                auto val = valueSeqElement.cast<std::string>();
                py::gil_scoped_release nogil;
                idictionary->setStringAt(item, idx, val.c_str());
            }
            else if (py::isinstance<py::dict>(valueSeqElement))
            {
                // NOTE(review): unlike createDict above, this does not guard against an
                // empty `path`, so it can produce a leading-slash path such as "/0" --
                // presumably accepted by the dictionary path lookup; verify.
                std::string basePath = path ? path : "";
                std::string elemPath = basePath + "/" + std::to_string(idx);
                createDict(idictionary, baseItem, elemPath.c_str(), valueSeqElement);
            }
            else
            {
                CARB_LOG_WARN("Unknown type in sequence being written to item");
            }
        }
    }
    else if (py::isinstance<py::dict>(value))
    {
        createDict(idictionary, baseItem, path, value);
    }
}
// Returns the cached IDictionary interface used by all of the binding helpers below.
inline carb::dictionary::IDictionary* getDictionary()
{
    return getCachedInterfaceForBindings<carb::dictionary::IDictionary>();
}
// Registers all carb.dictionary bindings on the given module: the ItemType/UpdateAction
// enums, the Item wrapper class, the IDictionary and ISerializer interfaces, and the
// TOML/JSON serializer accessor functions.
inline void definePythonModule(py::module& m)
{
    using namespace carb;
    using namespace carb::dictionary;
    m.doc() = "pybind11 carb.dictionary bindings";
    py::enum_<ItemType>(m, "ItemType")
        .value("BOOL", ItemType::eBool)
        .value("INT", ItemType::eInt)
        .value("FLOAT", ItemType::eFloat)
        .value("STRING", ItemType::eString)
        .value("DICTIONARY", ItemType::eDictionary)
        .value("COUNT", ItemType::eCount);
    // NOTE(review): UpdateAction::eReplaceSubtree is not exposed to Python here --
    // presumably intentional; verify.
    py::enum_<UpdateAction>(m, "UpdateAction").value("OVERWRITE", UpdateAction::eOverwrite).value("KEEP", UpdateAction::eKeep);
    // Python wrapper for dictionary items. Most methods fetch the cached IDictionary and
    // release the GIL before touching the (internally locked) dictionary.
    py::class_<Item>(m, "Item")
        .def("__getitem__", [](const Item& self, const char* path) { return getPyObject(getDictionary(), &self, path); })
        .def("__setitem__",
             [](Item& self, const char* path, py::object value) { setPyObject(getDictionary(), &self, path, value); })
        .def("__len__", [](Item& self) { return getDictionary()->getItemChildCount(&self); },
             py::call_guard<py::gil_scoped_release>())
        .def("get",
             [](const Item& self, const char* path, py::object defaultValue) {
                 py::object v = getPyObject(getDictionary(), &self, path);
                 return v.is_none() ? defaultValue : v;
             })
        .def("get_key_at",
             [](const Item& self, size_t index) -> py::object {
                 // Copy the child name out while the read lock is held (GIL released),
                 // then build the py::str only after the GIL is re-acquired.
                 cpp::optional<std::string> name;
                 {
                     py::gil_scoped_release nogil;
                     dictionary::ScopedRead readlock(*getDictionary(), &self);
                     auto child = getDictionary()->getItemChildByIndex(&self, index);
                     if (child)
                         name.emplace(getDictionary()->getItemName(child));
                 }
                 if (name)
                     return py::str(name.value());
                 return py::none();
             })
        .def("__contains__",
             [](const Item& self, py::object value) -> bool {
                 auto name = value.cast<std::string>();
                 py::gil_scoped_release nogil;
                 dictionary::ScopedRead readlock(*getDictionary(), &self);
                 // Only dictionary-typed items can contain keys.
                 ItemType type = getDictionary()->getItemType(&self);
                 if (type != ItemType::eDictionary)
                     return false;
                 return getDictionary()->getItem(&self, name.c_str()) != nullptr;
             })
        .def("get_keys",
             [](const Item& self) {
                 IDictionary* idictionary = getDictionary();
                 dictionary::ScopedRead readlock(*idictionary, &self);
                 std::vector<std::string> keys(idictionary->getItemChildCount(&self));
                 for (size_t i = 0; i < keys.size(); i++)
                 {
                     const Item* child = idictionary->getItemChildByIndex(&self, i);
                     if (child)
                         keys[i] = idictionary->getItemName(child);
                 }
                 return keys;
             },
             py::call_guard<py::gil_scoped_release>())
        .def("clear", [](Item& self) { getDictionary()->deleteChildren(&self); },
             py::call_guard<py::gil_scoped_release>())
        .def("get_dict", [](Item& self) { return getPyObject(getDictionary(), &self, nullptr); })
        .def("__str__", [](Item& self) { return py::str(getPyObject(getDictionary(), &self, nullptr)); })
        .def("__repr__", [](Item& self) {
            return py::str("carb.dictionary.Item({0})").format(getPyObject(getDictionary(), &self, nullptr));
        });
    // Adapts Python callables to the OnUpdateItemFn signature used by IDictionary::update().
    using UpdateFunctionWrapper =
        ScriptCallbackRegistryPython<void*, dictionary::UpdateAction, const dictionary::Item*, dictionary::ItemType,
                                     const dictionary::Item*, dictionary::ItemType>;
    defineInterfaceClass<IDictionary>(m, "IDictionary", "acquire_dictionary_interface")
        .def("get_dict_copy", getPyObject,
             R"(
Creates python object from the supplied dictionary at path (supplied item is unchanged). Item is calculated
via the path relative to the base item.
Args:
    base_item: The base item.
    path: Path, relative to the base item - to the item
Returns:
    Python object with copies of the item data.)",
             py::arg("base_item"), py::arg("path") = "")
        .def("get_item", wrapInterfaceFunction(&IDictionary::getItem), py::arg("base_item"), py::arg("path") = "",
             py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
        .def("get_item_mutable", wrapInterfaceFunction(&IDictionary::getItemMutable), py::arg("base_item"),
             py::arg("path") = "", py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
        .def("get_item_child_count", wrapInterfaceFunction(&IDictionary::getItemChildCount),
             py::call_guard<py::gil_scoped_release>())
        .def("get_item_child_by_index", wrapInterfaceFunction(&IDictionary::getItemChildByIndex),
             py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
        .def("get_item_child_by_index_mutable", wrapInterfaceFunction(&IDictionary::getItemChildByIndexMutable),
             py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
        .def("get_item_parent", wrapInterfaceFunction(&IDictionary::getItemParent), py::return_value_policy::reference,
             py::call_guard<py::gil_scoped_release>())
        .def("get_item_parent_mutable", wrapInterfaceFunction(&IDictionary::getItemParentMutable),
             py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
        .def("get_item_type", wrapInterfaceFunction(&IDictionary::getItemType), py::call_guard<py::gil_scoped_release>())
        .def("get_item_name",
             [](const dictionary::IDictionary* idictionary, const Item* baseItem, const char* path) {
                 return getStringFromItemName(idictionary, baseItem, path);
             },
             py::arg("base_item"), py::arg("path") = "", py::call_guard<py::gil_scoped_release>())
        .def("create_item",
             [](const dictionary::IDictionary* idictionary, const py::object& item, const char* path,
                dictionary::ItemType itemType) {
                 // Python `None` maps to a null parent item.
                 Item* p = item.is_none() ? nullptr : item.cast<Item*>();
                 py::gil_scoped_release nogil;
                 return idictionary->createItem(p, path, itemType);
             },
             py::return_value_policy::reference)
        .def("is_accessible_as", wrapInterfaceFunction(&IDictionary::isAccessibleAs),
             py::call_guard<py::gil_scoped_release>())
        .def("is_accessible_as_array_of", wrapInterfaceFunction(&IDictionary::isAccessibleAsArrayOf),
             py::call_guard<py::gil_scoped_release>())
        .def("get_array_length", wrapInterfaceFunction(&IDictionary::getArrayLength),
             py::call_guard<py::gil_scoped_release>())
        .def("get_preferred_array_type", wrapInterfaceFunction(&IDictionary::getPreferredArrayType),
             py::call_guard<py::gil_scoped_release>())
        .def("get_as_int", wrapInterfaceFunction(&IDictionary::getAsInt64), py::call_guard<py::gil_scoped_release>())
        .def("set_int", wrapInterfaceFunction(&IDictionary::setInt64), py::call_guard<py::gil_scoped_release>())
        .def("get_as_float", wrapInterfaceFunction(&IDictionary::getAsFloat64), py::call_guard<py::gil_scoped_release>())
        .def("set_float", wrapInterfaceFunction(&IDictionary::setFloat64), py::call_guard<py::gil_scoped_release>())
        .def("get_as_bool", wrapInterfaceFunction(&IDictionary::getAsBool), py::call_guard<py::gil_scoped_release>())
        .def("set_bool", wrapInterfaceFunction(&IDictionary::setBool), py::call_guard<py::gil_scoped_release>())
        .def("get_as_string",
             [](const dictionary::IDictionary* idictionary, const Item* baseItem, const char* path) {
                 return getStringFromItemValue(idictionary, baseItem, path);
             },
             py::arg("base_item"), py::arg("path") = "", py::call_guard<py::gil_scoped_release>())
        .def("set_string",
             [](dictionary::IDictionary* idictionary, Item* item, const std::string& str) {
                 idictionary->setString(item, str.c_str());
             },
             py::call_guard<py::gil_scoped_release>())
        .def("get", &getPyObject, py::arg("base_item"), py::arg("path") = "")
        // NOTE(review): a required arg follows a defaulted one here; pybind11 accepts
        // this signature, but `value` must always be supplied by the caller -- verify.
        .def("set", &setPyObject, py::arg("item"), py::arg("path") = "", py::arg("value"))
        .def("set_int_array",
             [](const dictionary::IDictionary* idictionary, Item* item, const std::vector<int64_t>& v) {
                 idictionary->setInt64Array(item, v.data(), v.size());
             },
             py::call_guard<py::gil_scoped_release>())
        .def("set_float_array",
             [](const dictionary::IDictionary* idictionary, Item* item, const std::vector<double>& v) {
                 idictionary->setFloat64Array(item, v.data(), v.size());
             },
             py::call_guard<py::gil_scoped_release>())
        .def("set_bool_array",
             [](const dictionary::IDictionary* idictionary, Item* item, const std::vector<bool>& v) {
                 if (v.size() == 0)
                     return;
                 // std::vector<bool> is bit-packed, so copy into a contiguous bool buffer first.
                 bool* pbool = CARB_STACK_ALLOC(bool, v.size());
                 for (size_t i = 0; i != v.size(); ++i)
                     pbool[i] = v[i];
                 idictionary->setBoolArray(item, pbool, v.size());
             },
             py::call_guard<py::gil_scoped_release>())
        .def("set_string_array",
             [](const dictionary::IDictionary* idictionary, Item* item, const std::vector<std::string>& v) {
                 if (v.size() == 0)
                     return;
                 // Build a transient array of C-string pointers into the vector's elements.
                 const char** pstr = CARB_STACK_ALLOC(const char*, v.size());
                 for (size_t i = 0; i != v.size(); ++i)
                     pstr[i] = v[i].c_str();
                 idictionary->setStringArray(item, pstr, v.size());
             },
             py::call_guard<py::gil_scoped_release>())
        .def("destroy_item", wrapInterfaceFunction(&IDictionary::destroyItem), py::call_guard<py::gil_scoped_release>())
        .def("update",
             [](dictionary::IDictionary* idictionary, dictionary::Item* dstItem, const char* dstPath,
                const dictionary::Item* srcItem, const char* srcPath, const py::object& updatePolicy) {
                 // The policy may be an UpdateAction enum value or a Python callable.
                 if (py::isinstance<dictionary::UpdateAction>(updatePolicy))
                 {
                     dictionary::UpdateAction updatePolicyEnum = updatePolicy.cast<dictionary::UpdateAction>();
                     py::gil_scoped_release nogil;
                     if (updatePolicyEnum == dictionary::UpdateAction::eOverwrite)
                     {
                         idictionary->update(dstItem, dstPath, srcItem, srcPath, dictionary::overwriteOriginal, nullptr);
                     }
                     else if (updatePolicyEnum == dictionary::UpdateAction::eKeep)
                     {
                         idictionary->update(dstItem, dstPath, srcItem, srcPath, dictionary::keepOriginal, nullptr);
                     }
                     else
                     {
                         CARB_LOG_ERROR("Unknown update policy type");
                     }
                 }
                 else
                 {
                     // Wrap the Python callable so it can serve as the update policy function.
                     const UpdateFunctionWrapper::FuncT updateFn =
                         updatePolicy.cast<const UpdateFunctionWrapper::FuncT>();
                     py::gil_scoped_release nogil;
                     idictionary->update(
                         dstItem, dstPath, srcItem, srcPath, UpdateFunctionWrapper::call, (void*)&updateFn);
                 }
             })
        .def("readLock", wrapInterfaceFunction(&IDictionary::readLock), py::call_guard<py::gil_scoped_release>())
        .def("writeLock", wrapInterfaceFunction(&IDictionary::writeLock), py::call_guard<py::gil_scoped_release>())
        .def("unlock", wrapInterfaceFunction(&IDictionary::unlock), py::call_guard<py::gil_scoped_release>());
    carb::defineInterfaceClass<ISerializer>(m, "ISerializer", "acquire_serializer_interface")
        .def("create_dictionary_from_file", &createDictionaryFromFile, py::arg("path"),
             py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
        .def("create_dictionary_from_string_buffer",
             [](ISerializer* self, std::string val) {
                 // `val` is a by-value copy, so destructive in-situ parsing is safe here.
                 return self->createDictionaryFromStringBuffer(
                     val.data(), val.size(), carb::dictionary::fDeserializerOptionInSitu);
             },
             py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>())
        .def("create_string_buffer_from_dictionary",
             [](ISerializer* self, const carb::dictionary::Item* dictionary, SerializerOptions serializerOptions) {
                 const char* buf = self->createStringBufferFromDictionary(dictionary, serializerOptions);
                 std::string ret = buf; // Copy
                 self->destroyStringBuffer(buf);
                 return ret;
             },
             py::arg("item"), py::arg("ser_options") = 0, py::call_guard<py::gil_scoped_release>())
        .def("save_file_from_dictionary", &saveFileFromDictionary, py::arg("dict"), py::arg("path"),
             py::arg("options") = 0, py::call_guard<py::gil_scoped_release>());
    // Serializer interfaces are acquired once and cached for the life of the process.
    m.def("get_toml_serializer",
          []() {
              static ISerializer* s_serializer =
                  carb::getFramework()->acquireInterface<ISerializer>("carb.dictionary.serializer-toml.plugin");
              return s_serializer;
          },
          py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>());
    m.def("get_json_serializer",
          []() {
              static ISerializer* s_serializer =
                  carb::getFramework()->acquireInterface<ISerializer>("carb.dictionary.serializer-json.plugin");
              return s_serializer;
          },
          py::return_value_policy::reference, py::call_guard<py::gil_scoped_release>());
}
} // namespace dictionary
} // namespace carb
|
omniverse-code/kit/include/carb/dictionary/ISerializer.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief Interface to handle serializing data from a file format into an
//! @ref carb::dictionary::IDictionary item. This interface is
//! currently implemented in two plugins, each offering a different
//! input/output format - JSON and TOML. The plugins are called
//! `carb.dictionary.serializer-json.plugin` and
//! `carb.dictionary.serializer-toml.plugin`. The caller must ensure
//! they are using the appropriate one for their needs when loading,
//! the plugin, acquiring the interface, and performing serialization
//! operations (both to and from strings).
#pragma once
#include "../Framework.h"
#include "../Interface.h"
#include "../datasource/IDataSource.h"
#include "../dictionary/IDictionary.h"
#include <cstdint>
namespace carb
{
/** Namespace for @ref carb::dictionary::IDictionary related interfaces and helpers. */
namespace dictionary
{
/** Base type for flags for the ISerializer::createStringBufferFromDictionary() function. */
using SerializerOptions = uint32_t;
/** Flags to affect the behavior of the ISerializer::createStringBufferFromDictionary()
 * function. Zero or more flags may be combined with the `|` operator. Use `0` to
 * specify no flags.
 * @{
 */
/** Flag to indicate that the generated string should include the name of the root node of the
 * dictionary that is being serialized. If this flag is not used, the name of the root node
 * will be skipped and only the children of the node will be serialized. This is only used
 * when serializing to JSON with the `carb.dictionary.serializer-json.plugin` plugin. This
 * flag will be ignored otherwise.
 */
constexpr SerializerOptions fSerializerOptionIncludeDictionaryName = 1;
/** Flag to indicate that the generated string should be formatted to be human readable and
 * look 'pretty'. If this flag is not used, the default behavior is to format the string
 * as compactly as possible with the aim of it being machine consumable. This flag may be
 * used for both the JSON and TOML serializer plugins.
 */
constexpr SerializerOptions fSerializerOptionMakePretty = (1 << 1);
/** Flag to indicate that if an empty dictionary item is found while walking the dictionary
 * that is being serialized, it should be represented by an empty array. If this flag is
 * not used, the default behavior is to write out the empty dictionary item as an empty
 * object. This flag may be used for both the JSON and TOML serializer plugins.
 */
constexpr SerializerOptions fSerializerOptionEmptyDictionaryIsArray = (1 << 2);
/** Flag to indicate that the JSON serializer should write out infinity and NaN floating point
 * values as a null object. If this flag is not used, the default behavior is to write out
 * the value as a special string that can later be serialized back to an infinite value by
 * the same serializer plugin. This flag is only used in the JSON serializer plugin since
 * infinite values are not supported by the JSON standard itself.
 */
constexpr SerializerOptions fSerializerOptionSerializeInfinityAsNull = (1 << 3);
/** Deprecated flag name for @ref fSerializerOptionIncludeDictionaryName. This flag should
 * no longer be used for new code. Please use @ref fSerializerOptionIncludeDictionaryName
 * instead.
 */
CARB_DEPRECATED("Use fSerializerOptionIncludeDictionaryName instead.")
constexpr SerializerOptions fSerializerOptionIncludeCollectionName = fSerializerOptionIncludeDictionaryName;
/** @} */
/** Deprecated serializer option flag names. Please use the `fSerializerOption*` flags instead.
 * @{
 */
/** Deprecated flag. Please use @ref fSerializerOptionIncludeDictionaryName instead. */
CARB_DEPRECATED("Use fSerializerOptionIncludeDictionaryName instead.")
constexpr SerializerOptions kSerializerOptionIncludeDictionaryName = fSerializerOptionIncludeDictionaryName;
/** Deprecated flag. Please use @ref fSerializerOptionMakePretty instead. */
CARB_DEPRECATED("Use fSerializerOptionMakePretty instead.")
constexpr SerializerOptions kSerializerOptionMakePretty = fSerializerOptionMakePretty;
/** Deprecated flag. Please use @ref fSerializerOptionEmptyDictionaryIsArray instead. */
CARB_DEPRECATED("Use fSerializerOptionEmptyDictionaryIsArray instead.")
constexpr SerializerOptions kSerializerOptionEmptyDictionaryIsArray = fSerializerOptionEmptyDictionaryIsArray;
/** Deprecated flag. Please use @ref fSerializerOptionSerializeInfinityAsNull instead. */
CARB_DEPRECATED("Use fSerializerOptionSerializeInfinityAsNull instead.")
constexpr SerializerOptions kSerializerOptionSerializeInfinityAsNull = fSerializerOptionSerializeInfinityAsNull;
/** Deprecated flag. Please use @ref fSerializerOptionIncludeDictionaryName instead. */
CARB_DEPRECATED("Use fSerializerOptionIncludeDictionaryName instead.")
constexpr SerializerOptions kSerializerOptionIncludeCollectionName = fSerializerOptionIncludeDictionaryName;
/** @} */
//! Flags for deserializing a string (for @ref ISerializer::createDictionaryFromStringBuffer())
using DeserializerOptions = uint32_t;
//! Default value for @ref DeserializerOptions that specifies no options.
constexpr DeserializerOptions kDeserializerOptionNone = 0;
//! Flag that indicates that the `const char* string` value can actually be considered as `char*` and treated
//! destructively (allow in-situ modification by the deserializer).
constexpr DeserializerOptions fDeserializerOptionInSitu = (1 << 0);
/** Interface intended to serialize dictionary objects to and from plain C strings. Each
* implementation of this interface is intended to handle a different format for the string
* data. The current implementations include support for JSON and TOML strings. It is left
* as an exercise for the caller to handle reading the string from a file before serializing
* or writing it to a file after serializing. Each implementation is assumed that it will be
* passed a dictionary item object has been created by the @ref carb::dictionary::IDictionary
* interface implemented by the `carb.dictionary.plugin` plugin or that the only IDictionary
* interface that can be acquired is also the one that created the item object.
*
* @note If multiple plugins that implement IDictionary are loaded, behavior may be undefined.
*/
struct ISerializer
{
    CARB_PLUGIN_INTERFACE("carb::dictionary::ISerializer", 1, 1)

    //! @private
    //! Deprecated ABI slot retained for layout compatibility; use
    //! createDictionaryFromStringBuffer() instead.
    CARB_DEPRECATED("use the new createDictionaryFromStringBuffer")
    dictionary::Item*(CARB_ABI* deprecatedCreateDictionaryFromStringBuffer)(const char* serializedString);

    /** Creates a new string representation of a dictionary.
     *
     * @param[in] dictionary The dictionary to be serialized. This must not be
     * `nullptr` but may be an empty dictionary. The entire
     * contents of the dictionary and all its children will be
     * serialized to the output string.
     * @param[in] serializerOptions Option flags to control how the output string is created.
     * These flags can affect both the formatting and the content
     * of the string.
     * @returns On success, this returns a string containing the serialized dictionary. When
     * this string is no longer needed, it must be destroyed using a call to
     * ISerializer::destroyStringBuffer().
     *
     * On failure, `nullptr` is returned. This call can fail if an allocation error
     * occurs, if a bad dictionary item object is encountered, or an error occurs
     * formatting the output to the string.
     */
    const char*(CARB_ABI* createStringBufferFromDictionary)(const dictionary::Item* dictionary,
                                                            SerializerOptions serializerOptions);

    /** Destroys a string buffer returned from ISerializer::createStringBufferFromDictionary().
     *
     * @param[in] serializedString The string buffer to be destroyed. This must have been
     * returned from a previous successful call to
     * createStringBufferFromDictionary().
     * @returns No return value.
     */
    void(CARB_ABI* destroyStringBuffer)(const char* serializedString);

    //! @private
    dictionary::Item*(CARB_ABI* internalCreateDictionaryFromStringBuffer)(const char* string,
                                                                          size_t len,
                                                                          DeserializerOptions options);

    /** Creates a new dictionary object from the contents of a string.
     *
     * @param[in] string The string containing the data to be serialized into a new
     * dictionary object. This is assumed to be in the format that
     * is supported by this specific interface object's
     * implementation (for example, JSON or TOML for the default
     * built-in implementations). If this string is not formatted
     * correctly for the implementation, the operation will fail. Must
     * be `NUL`-terminated even if the length is known.
     * @param[in] len The length of the string data, if known. If not known, provide `size_t(-1)`
     * as the length, which is also the default. The length should not include the
     * `NUL` terminator.
     * @param[in] options Options, if any, to pass to the deserializer. If no options are desired,
     * pass @ref kDeserializerOptionNone.
     * @returns On success, this returns a new dictionary item object containing the serialized
     * data from the string. When this dictionary is no longer needed, it must be
     * destroyed using the carb::dictionary::IDictionary::destroyItem() function.
     *
     * On failure, `nullptr` is returned. This call can fail if the input string is not
     * in the correct format (ie: in TOML format when using the JSON serializer or
     * vice versa), if the string is malformed, or has a syntax error in it.
     */
    // Inline convenience wrapper over the internal ABI entry point above.
    dictionary::Item* createDictionaryFromStringBuffer(const char* string,
                                                       size_t len = size_t(-1),
                                                       DeserializerOptions options = kDeserializerOptionNone)
    {
        return internalCreateDictionaryFromStringBuffer(string, len, options);
    }
};
} // namespace dictionary
} // namespace carb
|
omniverse-code/kit/include/carb/dictionary/IDictionary.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief carb.dictionary interface definition file.
#pragma once
#include "../Framework.h"
#include "../Interface.h"
#include "../Types.h"
#include "../extras/Hash.h"
#include "../cpp/StringView.h"
#include "../../omni/String.h"
#include <cstdint>
namespace carb
{
namespace dictionary
{
/**
 * Supported item types. Other types need to be converted from the string item.
 */
enum class ItemType
{
    eBool, //!< Boolean type
    eInt, //!< 64-bit integer type
    eFloat, //!< 64-bit floating-point type
    eString, //!< String type
    eDictionary, //!< Dictionary type (may act as either an array type or a map type)
    eCount //!< Number of ItemTypes, not a valid item type
};
//! Structure used in opaque pointers to each dictionary node
struct Item DOXYGEN_EMPTY_CLASS;
//! Actions that may be returned by OnUpdateItemFn.
//! @see IDictionary::update() OnUpdateItemFn
enum class UpdateAction
{
    eOverwrite, //!< The target item should be overwritten by the source item
    eKeep, //!< The target item should be retained, ignoring the source item
    eReplaceSubtree //!< The entire subtree should be replaced (dictionary source item only)
};
//! Item flags that can be specified by the user.
//! @see IDictionary::getItemFlag() IDictionary::setItemFlag() IDictionary::copyItemFlags()
enum class ItemFlag
{
    //! Indicates that this \ref Item is a subtree; used by
    //! \ref overwriteOriginalWithArrayHandling() to request whole-subtree replacement
    //! (\ref UpdateAction::eReplaceSubtree) during IDictionary::update().
    eUnitSubtree,
};
/**
 * Function that will tell whether the merger should overwrite the destination item
 * with the source item. dstItem could be nullptr, meaning that the destination item
 * doesn't exist. This function will be triggered not only for the leaf item, but also
 * for the intermediate eDictionary items that need to be created.
 *
 * @param dstItem The destination item; nullptr if it does not exist yet.
 * @param dstItemType The type of the destination item.
 * @param srcItem The source item being merged in.
 * @param srcItemType The type of the source item.
 * @param userData The opaque user data pointer supplied to IDictionary::update().
 * @return The \ref UpdateAction the merger should take for this item.
 * @see IDictionary::update()
 */
typedef UpdateAction (*OnUpdateItemFn)(
    const Item* dstItem, ItemType dstItemType, const Item* srcItem, ItemType srcItemType, void* userData);
/**
* Note that this function does not properly handle overwriting of arrays due to
* overwriting array being shorter, potentially leaving part of the older array in-place
* after the merge
* Use \ref overwriteOriginalWithArrayHandling() if dictionaries are expected to contain array data.
* @see IDictionary::update()
*/
inline UpdateAction overwriteOriginal(
    const Item* dstItem, ItemType dstItemType, const Item* srcItem, ItemType srcItemType, void* userData)
{
    // Unconditional policy: the source always wins, so none of the inputs are consulted.
    (void)dstItem;
    (void)dstItemType;
    (void)srcItem;
    (void)srcItemType;
    (void)userData;
    return UpdateAction::eOverwrite;
}
/**
* Function that indicates that the merger should retain the existing destination item.
* @see IDictionary::update()
*/
inline UpdateAction keepOriginal(
    const Item* dstItem, ItemType dstItemType, const Item* srcItem, ItemType srcItemType, void* userData)
{
    CARB_UNUSED(dstItemType, srcItem, srcItemType, userData);
    // A missing destination must still be created (overwrite); an existing one is kept.
    return dstItem ? UpdateAction::eKeep : UpdateAction::eOverwrite;
}
//! Alias for \ref overwriteOriginal(); a ready-made \ref OnUpdateItemFn constant.
constexpr OnUpdateItemFn kUpdateItemOverwriteOriginal = overwriteOriginal;
//! Alias for \ref keepOriginal(); a ready-made \ref OnUpdateItemFn constant.
constexpr OnUpdateItemFn kUpdateItemKeepOriginal = keepOriginal;
//! Opaque value representing a subscription.
//! @see IDictionary::subscribeToNodeChangeEvents() IDictionary::subscribeToTreeChangeEvents()
struct SubscriptionId DOXYGEN_EMPTY_CLASS;
//! Type of a change passed to a subscription callback.
//! Passed as the `eventType` argument of \ref OnNodeChangeEventFn and \ref OnTreeChangeEventFn.
//! @see IDictionary::subscribeToNodeChangeEvents() IDictionary::subscribeToTreeChangeEvents() OnNodeChangeEventFn
//! OnTreeChangeEventFn
enum class ChangeEventType
{
    eCreated, //!< An \ref Item was created.
    eChanged, //!< An \ref Item was changed.
    eDestroyed //!< An \ref Item was destroyed.
};
/**
 * A callback that, once registered with subscribeToNodeChangeEvents(), receives callbacks when Items change.
 *
 * @note The callbacks happen in the context of the thread performing the change. It is safe to call back into the
 * IDictionary and unsubscribe or otherwise make changes. For \ref ChangeEventType::eCreated and
 * \ref ChangeEventType::eChanged types, no internal locks are held; for \ref ChangeEventType::eDestroyed internal locks
 * are held which can cause thread-synchronization issues with locking order.
 *
 * @param changedItem The \ref Item that is changing
 * @param eventType The event occurring on \p changedItem
 * @param userData The user data given to \ref IDictionary::subscribeToNodeChangeEvents()
 */
using OnNodeChangeEventFn = void (*)(const Item* changedItem, ChangeEventType eventType, void* userData);
/**
 * A callback that, once registered with subscribeToTreeChangeEvents(), receives callbacks when Items change.
 *
 * @note The callbacks happen in the context of the thread performing the change. It is safe to call back into the
 * IDictionary and unsubscribe or otherwise make changes. For \ref ChangeEventType::eCreated and
 * \ref ChangeEventType::eChanged types, no internal locks are held; for \ref ChangeEventType::eDestroyed internal locks
 * are held which can cause thread-synchronization issues with locking order.
 *
 * @param treeItem The tree \ref Item given to \ref IDictionary::subscribeToTreeChangeEvents()
 * @param changedItem The \ref Item that is changing. May be \p treeItem or a descendant.
 * @param eventType The event occurring on \p changedItem
 * @param userData The user data given to \ref IDictionary::subscribeToTreeChangeEvents()
 */
using OnTreeChangeEventFn = void (*)(const Item* treeItem,
                                     const Item* changedItem,
                                     ChangeEventType eventType,
                                     void* userData);
/**
* DOM-style dictionary (keeps the whole structure in-memory).
*
* In most functions, item is specified using the relative root index and path from
* the relative root. Path can be nullptr, meaning that baseItem will be considered
* a specified item.
*
* @thread_safety
* IDictionary functions are thread-safe unless otherwise indicated. Where possible, a shared ("read") lock is held so
* that multiple threads may query data from a \ref carb::dictionary::Item without blocking each other. Functions that
* contain `Mutable` in the name, and functions that exchange non-const \ref carb::dictionary::Item pointers will hold
* an exclusive ("write") lock, which will block any other threads attempting to perform read/write operations on the
* \ref carb::dictionary::Item. These locks are held at the true-root level of the Item hierarchy which ensures safety
* across that root Item's hierarchy (see \ref IDictionary::createItem).
*
* In some cases, a read or write lock must be held across multiple function calls. An example of this would be a call
* to \ref getItemChildCount() followed by one or more calls to \ref getItemChildByIndex(). Since you would not want
* another thread to change the dictionary between the two calls, you must use a lock to keep the state consistent. In
* this case \ref ScopedWrite and \ref ScopedRead exist to maintain a lock across multiple calls.
*
* @par Ordering
* Dictionary items of type \ref ItemType::eDictionary can function as either an array type (sequential integer keys
* starting with `0`), or a map type. For map types, dictionary attempts to order child items in the order that they
* were created, provided that the items were created by \ref createItem(), \ref update(), or \ref duplicateItem() (the
* "creation functions"). This order is reflected when retrieving child items via \ref getItemChildByIndex(). Performing
* one of the creation functions on a key that already exists will move it to the end of the order, but merely setting a
* value on an existing item will not change its order.
*
* @par Subscriptions
* Dictionary \ref Item objects may have subscriptions for notification of changes, either for an individual \ref Item
* (\ref subscribeToNodeChangeEvents), or a sub-tree (\ref subscribeToTreeChangeEvents). Subscriptions are called in the
* context of the thread that triggered the change, and only once that thread has fully released the lock for the
 * dictionary hierarchy that contains the changed \ref Item (since locks are held at the true-root level). Subscription
* callbacks also follow the principles of Basic Callback Hygiene:
* 1. \ref unsubscribeToChangeEvents may be called from within the callback to unregister the called subscription or any
* other subscription.
* 2. Unregistering the subscription ensures that it will never be called again, and any calls in process on another
* thread will complete before \ref unsubscribeToChangeEvents returns.
* 3. The true-root level lock is not held while the callback is called, but may be temporarily taken for API calls
* within the callback.
*/
struct IDictionary
{
// Version 1.0: Initial
// Version 1.1: Ordering guarantees for createItem(), update(), duplicateItem().
CARB_PLUGIN_INTERFACE("carb::dictionary::IDictionary", 1, 1)
/**
* Returns opaque pointer to read-only item.
*
* @param baseItem Base item to apply path from (required)
* @param path Child path, separated with forward slash ('/'), can be nullptr
* @return Opaque item pointer if the item is valid and present, or nullptr otherwise
*/
const Item*(CARB_ABI* getItem)(const Item* baseItem, const char* path);
/**
* Returns opaque pointer to mutable item.
*
* @param baseItem Base item to apply path from (required)
* @param path Child path, separated with forward slash ('/'), can be nullptr
* @return Opaque item pointer if the item is valid and present, or nullptr otherwise
*/
Item*(CARB_ABI* getItemMutable)(Item* baseItem, const char* path);
/**
* Returns number of children that belong to the specified item, if this item
* is a dictionary. Returns 0 if item is not a dictionary, or doesn't exist.
*
* @param item Item to query number of children from.
* @return Number of children if applicable, 0 otherwise.
*/
size_t(CARB_ABI* getItemChildCount)(const Item* item);
/**
* Returns opaque pointer to a read-only child item by its index. Mostly for dynamic dictionary processing.
* This function is different from getItemAt function, in a sense that this function doesn't work with
* the array view of the supplied item - for example if the item has children named "A", "B", "0", this
* function will return all of them in an undefined succession. While getItemAt functions work only with
* items which has array-like names (e.g. "0", "1", "2", etc.).
*
* @param item Item to query child from.
* @param childIndex Child index.
* @return Opaque const item pointer if the item and index are valid, or nullptr otherwise.
*/
const Item*(CARB_ABI* getItemChildByIndex)(const Item* item, size_t childIndex);
/**
* Returns opaque pointer to a mutable child item by its index. Mostly for dynamic dictionary processing.
* This function is different from getItemAtMutable function, in a sense that this function doesn't work with
* the array view of the supplied item - for example if the item has children named "A", "B", "0", this
* function will return all of them in an undefined succession. While getItemAt functions work only with
* items which has array-like names (e.g. "0", "1", "2", etc.).
*
* @param item Item to query child from.
* @param childIndex Child index.
* @return Opaque item pointer if the item and index are valid, or nullptr otherwise.
*/
Item*(CARB_ABI* getItemChildByIndexMutable)(Item* item, size_t childIndex);
/**
* Returns read-only parent item, or nullptr if the supplied item is true root item.
*
* @param item Item to get parent for.
* @return Opaque item pointer if the item is valid and has parent, or nullptr otherwise.
*/
const Item*(CARB_ABI* getItemParent)(const Item* item);
/**
* Returns opaque pointer to a mutable parent item, or nullptr if the supplied item is true root item.
*
* @param item Item to get parent for.
* @return Opaque item pointer if the item is valid and has parent, or nullptr otherwise.
*/
Item*(CARB_ABI* getItemParentMutable)(Item* item);
/**
* Returns original item type. If the item is not a valid item, returns eCount.
*
* @param item \ref dictionary::Item
* @return Original item type if item is valid, eCount otherwise.
*/
ItemType(CARB_ABI* getItemType)(const Item* item);
/**
 * Securely creates a string buffer filled with the item name.
*
* @note Please use \ref destroyStringBuffer() to free the created buffer.
*
* @param item \ref dictionary::Item
* @return Pointer to the created item name buffer if applicable, nullptr otherwise.
*/
const char*(CARB_ABI* createStringBufferFromItemName)(const Item* item);
/**
* Returns pointer to an item name, if the item is valid.
* Dangerous function which only guarantees safety of the data when item is not changing.
*
* @param item \ref dictionary::Item
* @return Pointer to an internal item name string if the item is valid, nullptr otherwise.
*/
const char*(CARB_ABI* getItemName)(const Item* item);
/**
* Creates item, and all the required items along the path if necessary.
* If baseItem supplied is nullptr, the created item is created as a true root.
*
* @param baseItem Base \ref Item to apply path from. Passing \c nullptr means that the created item will be a true
* root (i.e. has no parent \ref Item).
* @param path Path to the new item.
* @param itemType \ref ItemType of the \ref Item to create.
* @return Opaque \ref Item pointer if it was successfully created, or \c nullptr otherwise.
*/
Item*(CARB_ABI* createItem)(Item* baseItem, const char* path, ItemType itemType);
/**
* Checks if the item could be accessible as the provided type, either directly, or via a cast.
*
* Generally this means for a given \ref ItemType to return `true` for \p item:
* * \ref ItemType::eDictionary -- \p item must be of type \ref ItemType::eDictionary.
* * \ref ItemType::eString -- \p item must be of any type \a except \ref ItemType::eDictionary.
* * \ref ItemType::eInt -- \p item must be of type \ref ItemType::eInt, \ref ItemType::eFloat, or
 *   \ref ItemType::eBool, or \ref ItemType::eString that contains only a string representation of an integer (in
 *   decimal, hex or octal, as convertible by `strtoll()`) or a floating-point number (as convertible by
* `strtod()`).
* * \ref ItemType::eFloat -- \p item must be of type \ref ItemType::eFloat, \ref ItemType::eInt or
* \ref ItemType::eBool, or \ref ItemType::eString that contains only a string representation of a floating point
* number (as convertible by `strtod()`).
* * \ref ItemType::eBool -- \p item must be of type \ref ItemType::eBool, \ref ItemType::eInt, or
* \ref ItemType::eFloat, or \ref ItemType::eString that either contains only case-insensitive versions of
* `"true"` or `"false"` or only a string representation of a floating point number (as convertible by
* `strtod()`).
*
* @param itemType Item type to check for.
* @param item \ref dictionary::Item
* @return \c true if accessible (see above); \c false otherwise.
*/
bool(CARB_ABI* isAccessibleAs)(ItemType itemType, const Item* item);
/**
* Attempts to get the supplied item as integer, either directly or via a cast.
*
* @param[in] item The item to retrieve the integer value from.
* @return The 64 bit integer value of @p item. This will be converted from
* the existing type if this \ref Item is not a 64 bit integer.
* For an \ref Item of type `int32_t`, `float` or `double`,
* this conversion will be a direct cast.
* Note that overly large `double` values convert to `INT64_MIN`
* on x86_64.
* For an \ref Item of string type, the string data will be interpreted
* as a number.
* If the string is not a valid number, 0 will be returned.
* If the string is a valid number, but exceeds the range of an `int64_t`,
* it will be parsed as a `double` and converted to an `int64_t`.
*
* @note carb.dictionary.serializer-toml.plugin cannot handle integers that
* exceed `INT64_MAX`.
*
* @note When carb.dictionary.serializer-json.plugin reads integers exceeding `UINT64_MAX`,
* it will store the result as a double, so you may get unexpected 64 bit integer values.
* Unsigned 64 bit values are still retrievable with full precision though;
* they will just be wrapped around when returned from this function.
*/
int64_t(CARB_ABI* getAsInt64)(const Item* item);
/**
* Sets the integer value for the supplied item. If an item was already present,
* changes its original type to eInt. If the present item is a eDictionary item,
* destroys all its children.
* If the item doesn't exist, creates eInt item, and all the required items along
* the path if necessary.
*
* @param item \ref dictionary::Item
* @param value Integer value that will be set to the supplied item.
*/
void(CARB_ABI* setInt64)(Item* item, int64_t value);
/**
* Attempts to get the supplied item as 32-bit integer, either directly or via a cast.
*
* @warning The value is truncated by casting to 32-bits. In debug builds, an assert occurs if the value read from
* the item would be truncated.
* @param[in] item The item to retrieve the integer value from.
 * @return The 32 bit integer value of @p item. This will be converted from
 *         the existing type if this \ref Item is not an integer, then truncated to 32 bits.
* The conversion semantics are the same as for getAsInt64().
* Note that the string conversion behavior has the same clamping
* limits as getAsInt64(), which may result in unexpected wraparound
* behavior; you should use getAsInt64() instead if you may be reading
* string values.
*/
int32_t getAsInt(const Item* item);
/**
* Sets the 32-bit integer value for the supplied item. If an item was already present,
* changes its original type to eInt. If the present item is a eDictionary item,
* destroys all its children.
* If the item doesn't exist, creates eInt item, and all the required items along
* the path if necessary.
*
* @param item \ref dictionary::Item
* @param value Integer value that will be set to the supplied item.
*/
void setInt(Item* item, int32_t value);
/**
* Helper function that sets the value to an item at path. Creates item at path if
* it doesn't exist.
*
* @param baseItem Base item to apply path from.
* @param path Path.
* @param value Integer value that will be set to the supplied item.
*/
Item* makeInt64AtPath(Item* baseItem, const char* path, int64_t value);
/**
* Helper function that sets the value to an item at path. Creates item at path if
* it doesn't exist.
*
* @param baseItem Base item to apply path from.
* @param path Path.
* @param value Integer value that will be set to the supplied item.
*/
Item* makeIntAtPath(Item* baseItem, const char* path, int32_t value);
/**
* Attempts to get the supplied item as float, either directly or via a cast.
*
* @param[in] item The item to retrieve the floating point value from.
* @return The 64 bit floating point value of @p item.
* This will be converted from the existing type if this \ref Item
* is not a `double`.
* For an \ref Item of type `int32_t`, `int64_t` or `float`,
* this conversion will be a direct cast.
* For an \ref Item of string type, the string data will be interpreted
* as a number.
* If the string is not a valid number, 0 will be returned.
* If the string is a valid number, but exceeds the range of a `double`
* `INFINITY` or `-INFINITY` will be returned.
* Some precision may be lost on overly precise strings.
*/
double(CARB_ABI* getAsFloat64)(const Item* item);
/**
* Sets the floating point value for the supplied item. If an item was already present,
* changes its original type to eFloat. If the present item is a eDictionary item,
* destroys all its children.
* If the item doesn't exist, creates eFloat item, and all the required items along
* the path if necessary.
*
* @param item \ref dictionary::Item
* @param value Floating point value that will be set to the supplied item.
*/
void(CARB_ABI* setFloat64)(Item* item, double value);
/**
* Attempts to get the supplied item as 32-bit float, either directly or via a cast.
*
* @param[in] item The item to retrieve the floating point value from.
 * @return The 32 bit floating point value of @p item.
 *         This will be converted from the existing type if this \ref Item
 *         is not a `float`.
* The conversion semantics are the same as with getAsFloat64().
*/
float getAsFloat(const Item* item);
/**
* Sets the 32-bit floating point value for the supplied item. If an item was already present,
* changes its original type to eFloat. If the present item is a eDictionary item,
* destroys all its children.
* If the item doesn't exist, creates eFloat item, and all the required items along
* the path if necessary.
*
* @param item \ref dictionary::Item
* @param value Floating point value that will be set to the supplied item.
*/
void setFloat(Item* item, float value);
/**
* Helper function that sets the value to an item at path. Creates item at path if
* it doesn't exist.
*
* @param baseItem Base item to apply path from.
* @param path Path.
* @param value Float value that will be set to the supplied item.
*/
Item* makeFloat64AtPath(Item* baseItem, const char* path, double value);
/**
* Helper function that sets the value to an item at path. Creates item at path if
* it doesn't exist.
*
* @param baseItem Base item to apply path from.
* @param path Path.
* @param value Float value that will be set to the supplied item.
*/
Item* makeFloatAtPath(Item* baseItem, const char* path, float value);
/**
* Attempts to get the supplied item as eBool, either directly or via a cast.
*
* @param item \ref dictionary::Item
* @return Boolean value, either value directly or cast from the real item type.
*/
bool(CARB_ABI* getAsBool)(const Item* item);
/**
* Sets the boolean value for the supplied item. If an item was already present,
* changes its original type to eBool. If the present item is a eDictionary item,
* destroys all its children.
* If the item doesn't exist, creates eBool item, and all the required items along
* the path if necessary.
*
* @param item \ref dictionary::Item
* @param value Boolean value that will be set to the supplied item.
*/
void(CARB_ABI* setBool)(Item* item, bool value);
/**
* Helper function that sets the value to an item at path. Creates item at path if
* it doesn't exist.
*
* @param baseItem Base item to apply path from.
* @param path Path.
* @param value Bool value that will be set to the supplied item.
*/
Item* makeBoolAtPath(Item* baseItem, const char* path, bool value);
//! @private
const char*(CARB_ABI* internalCreateStringBufferFromItemValue)(const Item* item, size_t* pStringLen);
/**
* Attempts to create a new string buffer with a value, either the real string value or a string resulting
* from converting the item value to a string.
*
* @note Please use \ref destroyStringBuffer() to free the created buffer.
*
* @param item \ref dictionary::Item
* @param pStringLen (optional) Receives the length of the string. This can be useful if the string contains NUL
* characters (i.e. byte data).
* @return Pointer to the created string buffer if applicable, nullptr otherwise.
*/
const char* createStringBufferFromItemValue(const Item* item, size_t* pStringLen = nullptr) const
{
    // Forward to the ABI entry point. The returned buffer is owned by the caller and
    // must be released with destroyStringBuffer().
    const char* buffer = internalCreateStringBufferFromItemValue(item, pStringLen);
    return buffer;
}
//! @private
const char*(CARB_ABI* internalGetStringBuffer)(const Item* item, size_t* pStringLen);
/**
* Returns internal raw data pointer to the string value of an item. Thus, doesn't perform any
* conversions.
*
* @warning This function returns the internal string buffer. Another thread may change the setting value which can
* cause the string buffer to be destroyed. It is recommended to take a \ref ScopedRead lock around calling this
* function and using the return value.
*
* @param item \ref dictionary::Item
* @param pStringLen (optional) Receives the length of the string. This can be useful if the string contains NUL
* characters (i.e. byte data).
* @return Raw pointer to the internal string value if applicable, nullptr otherwise.
*/
const char* getStringBuffer(const Item* item, size_t* pStringLen = nullptr)
{
return internalGetStringBuffer(item, pStringLen);
}
//! @private
void(CARB_ABI* internalSetString)(Item* item, const char* value, size_t stringLen);
/**
* Sets a string value for the supplied item.
*
* If @p item is not of type `eString` it is changed to be `eString`. Change notifications for
* subscriptions are queued. The given string is then set in the item at @p path. If @p value is \c nullptr, the
* string item will store an empty string.
*
* @param item \ref dictionary::Item
* @param value String value that will be set to the supplied item.
* @param stringLen (optional) The length of the string at \p value to copy. This can be useful if only a portion
* of the string should be copied, or if \p value contains NUL characters (i.e. byte data). The default value of
* size_t(-1) treats \p value as a NUL-terminated string.
*/
void setString(Item* item, const char* value, size_t stringLen = size_t(-1)) const
{
    // Forward to the ABI entry point; a stringLen of size_t(-1) means `value` is treated
    // as a NUL-terminated string, otherwise exactly stringLen bytes are copied.
    internalSetString(item, value, stringLen);
}
/**
* Helper function that sets the value to an item at path. Creates item at path if
* it doesn't exist.
*
* @param baseItem Base item to apply path from.
* @param path Path.
* @param value String value that will be set to the supplied item.
* @param stringLen (optional) The length of the string at \p value to copy. This can be useful if only a portion
* of the string should be copied, or if \p value contains NUL characters (i.e. byte data). The default value of
* size_t(-1) treats \p value as a NUL-terminated string.
* @return The item at the given \p path
*/
Item* makeStringAtPath(Item* baseItem, const char* path, const char* value, size_t stringLen = size_t(-1));
/**
* Helper function that ensures the item at path is a dictionary. Creates item at path if
* it doesn't exist.
*
* @param parentItem Base item to apply path from.
* @param path Path.
*/
Item* makeDictionaryAtPath(Item* parentItem, const char* path);
/**
* Reads the value from an Item, converting if necessary.
* @tparam T The type of item to read. Must be a supported type.
* @param item The \ref Item to read from.
* @returns The value read from the \ref Item.
*/
template <typename T>
T get(const dictionary::Item* item);
/**
* Reads the value from an Item, converting if necessary.
* @tparam T The type of item to read. Must be a supported type.
* @param baseItem The base \ref Item to read from. Must not be `nullptr`.
* @param path The path from \p baseItem to determine the child \ref Item. May be an empty string.
* @returns The value read from the \ref Item.
*/
template <typename T>
T get(const dictionary::Item* baseItem, const char* path);
/**
* Constructs a new Item with the given value or changes an existing item to hold the given value.
* @tparam T The type of the value stored. Must be a supported type.
* @param baseItem The base \ref Item to read from. Must not be `nullptr`.
* @param path The path from \p baseItem where the child \ref Item will be constructed.
* @param value The value to assign to the new \ref Item.
*/
template <typename T>
void makeAtPath(dictionary::Item* baseItem, const char* path, T value);
/**
* Checks if the item could be accessible as array.
*
* @param item \ref dictionary::Item
* @return \c true if accessible, that is, all child items have names which are contiguous non-negative integers
* starting with zero; \c false otherwise or if \p item has no children (empty dictionary).
*/
bool(CARB_ABI* isAccessibleAsArray)(const Item* item);
/**
* Checks if the item could be accessible as the array of items of provided type,
* either directly, or via a cast of all elements.
*
* For this function to return `true`, \ref isAccessibleAsArray must be satisfied, as well as each child item must
* satisfy \ref isAccessibleAs for the given \p itemType. Empty dictionaries and non-dictionary types of \p item
* will always return `false`.
*
* @param itemType Item type to check for.
* @param item \ref dictionary::Item
* @return True if a valid array and with all items accessible (see above), or false otherwise.
*/
bool(CARB_ABI* isAccessibleAsArrayOf)(ItemType itemType, const Item* item);
/**
 * Checks if all the children of the item have array-style indices. If yes, returns the number
* of children (array elements).
*
* @param item \ref dictionary::Item
* @return Non-zero number of array elements if applicable (implies \ref isAccessibleAsArray), or 0 otherwise.
*/
size_t(CARB_ABI* getArrayLength)(const Item* item);
/**
* Runs through all the array elements and infers a type that is most suitable for the
* array.
*
* The rules are thus:
* - Strings attempt to convert to float or bool if possible (see \ref isAccessibleAs for possibilities).
* - The converted type of the first element is the initial type.
* - If the initial type is a \ref ItemType::eBool and later elements can be converted to \ref ItemType::eBool
* without losing precision, \ref ItemType::eBool is kept. (String variants of "true"/"false", or values exactly
* equal to 0/1)
* - Elements of type \ref ItemType::eFloat can convert to \ref ItemType::eInt if they don't lose precision.
*
* @param item \ref dictionary::Item
* @return Item type that is most suitable for the array, or \ref ItemType::eCount on failure.
*/
ItemType(CARB_ABI* getPreferredArrayType)(const Item* item);
/**
* Attempts to read an array item as a 64-bit integer.
*
* Attempts to get the child item as integer, either directly or via a cast, considering the item at path
* to be an array, and using the supplied index to access its child.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @return Integer value, either value directly or cast from the real item type. Zero is returned on conversion
* failure.
*/
int64_t(CARB_ABI* getAsInt64At)(const Item* item, size_t index);
/**
* Set an integer value in an array item.
*
* Sets the integer value for the supplied item. If \p item is not an `eDictionary`, \p item is changed to an
* `eDictionary` and notifications are queued. \p index is used as the name of the child item.
* If the child item already existed and matches type `eInt` it is updated with the new value, otherwise any
* previous child item is destroyed (along with any of its children) and a new child item is created of type
* `eInt`; finally the new value is set into this child item. Change notifications are queued for every change that
* occurs. Subscriptions are notified of changes when the calling thread no longer holds any locks on the true root
* of \p item.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param value Integer value that will be set to the supplied item.
*/
void(CARB_ABI* setInt64At)(Item* item, size_t index, int64_t value);
/**
* Attempts to read an array item as a 32-bit integer.
*
* Attempts to get the child item as integer, either directly or via a cast, considering the item at path
* to be an array, and using the supplied index to access its child.
* @warning The value is truncated by casting to 32-bits. In debug builds, an assert occurs if the value read from
* the item would be truncated.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @return Integer value, either value directly or cast from the real item type. Zero is returned on conversion
* failure.
*/
int32_t getAsIntAt(const Item* item, size_t index);
/**
* Set an integer value in an array item.
*
* Sets the integer value for the supplied item. If \p item is not an `eDictionary`, \p item is changed to an
* `eDictionary` and notifications are queued. \p index is used as the name of the child item.
* If the child item already existed and matches type `eInt` it is updated with the new value, otherwise any
* previous child item is destroyed (along with any of its children) and a new child item is created of type
* `eInt`; finally the new value is set into this child item. Change notifications are queued for every change that
* occurs. Subscriptions are notified of changes when the calling thread no longer holds any locks on the true root
* of \p item.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param value Integer value that will be set to the supplied item.
*/
void setIntAt(Item* item, size_t index, int32_t value);
/**
* Fills the given buffer with the array values from the given path.
*
* If the provided @p item is not an array item, @p arrayOut is not modified. If the array item contains more items
* than @p arrayBufferLength, a warning message is logged and items past the end are ignored.
* @warning Dictionary items that are not arrays will only have child keys which are convertible to array indices
* written to @p arrayOut. For example, if @p item is a dictionary item with keys '5' and '10',
* only those items will be written into @p arrayOut; the other indices will not be written. It is highly
* recommended to take a \ref ScopedRead lock around calling this function, and only if \ref isAccessibleAsArray
* or \ref isAccessibleAsArrayOf return \c true.
*
* @param item \ref dictionary::Item
* @param arrayOut Array buffer to fill with integer values.
* @param arrayBufferLength Size of the supplied array buffer.
*/
void(CARB_ABI* getAsInt64Array)(const Item* item, int64_t* arrayOut, size_t arrayBufferLength);
/**
* Creates or updates an item to be an integer array.
*
 * \p item is changed to `eDictionary`, or if it already was, has all child items cleared (change notifications
 * will be queued for any child items that are destroyed). New child items will be created for all elements of the
* given @p array.
*
* @param item \ref dictionary::Item
* @param array Integer array to copy values from.
* @param arrayLength Array length.
*/
void(CARB_ABI* setInt64Array)(Item* item, const int64_t* array, size_t arrayLength);
/**
* Fills the given buffer with the array values from the given path.
*
* If the provided @p item is not a dictionary item, @p arrayOut is not modified. If the array item contains more
* items than @p arrayBufferLength, a warning message is logged and items past the end are ignored.
* @warning Dictionary items that are not arrays will only have child keys which are convertible to array indices
* written to @p arrayOut. For example, if @p item is a dictionary item with keys '5' and '10',
* only those items will be written into @p arrayOut; the other indices will not be written. It is highly
* recommended to take a \ref ScopedRead lock around calling this function, and call only if
* \ref isAccessibleAsArray or \ref isAccessibleAsArrayOf return \c true.
* @warning Any integer element that does not fit within \c int32_t is truncated by casting.
*
* @param item \ref dictionary::Item
* @param arrayOut Array buffer to fill with integer values.
* @param arrayBufferLength Size of the supplied array buffer.
*/
void(CARB_ABI* getAsIntArray)(const Item* item, int32_t* arrayOut, size_t arrayBufferLength);
/**
* Creates or updates an item to be an integer array.
*
 * \p item is changed to `eDictionary`, or if it already was, has all child items cleared (change notifications
 * will be queued for any child items that are destroyed). New child items will be created for all elements of the
* given @p array.
*
* @param item \ref dictionary::Item
* @param array Integer array to copy values from.
* @param arrayLength Array length.
*/
void(CARB_ABI* setIntArray)(Item* item, const int32_t* array, size_t arrayLength);
/**
* Attempts to read an array item as a `double`.
*
* Attempts to read the path and array index as a `double`, either directly or via a cast, considering the
* item at path to be an array, and using the supplied index to access its child.
* Default value (`0.0`) is returned if \p item is `nullptr`, index doesn't exist, or there is a conversion
* failure.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @return `double` value, either value directly or cast from the real item type.
*/
double(CARB_ABI* getAsFloat64At)(const Item* item, size_t index);
/**
* Set a `double` value in an array item.
*
* Sets the `double` value for the supplied item. If \p item is not an `eDictionary`, \p item is changed to an
* `eDictionary` and notifications are queued. \p index is used as the name of the child item.
* If the child item already existed and matches type `eFloat` it is updated with the new value, otherwise any
* previous child item is destroyed (along with any of its children) and a new child item is created of type
* `eFloat`; finally the new value is set into this child item. Change notifications are queued for every change
* that occurs. Subscriptions are notified of changes when the calling thread no longer holds any locks on the true
* root of \p item.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param value `double` value that will be set to the supplied item.
*/
void(CARB_ABI* setFloat64At)(Item* item, size_t index, double value);
/**
* Attempts to read an array item as a `float`.
*
* Attempts to read the path and array index as a `float`, either directly or via a cast, considering the
* item at path to be an array, and using the supplied index to access its child.
* Default value (`0.0f`) is returned if \p item is `nullptr`, index doesn't exist, or there is a conversion
* failure.
* @warning The value is truncated by casting to 32-bits.
*
* @param item An \ref Item
* @param index Index of the element in array
* @return `float` value, either value directly or cast from the real item type.
*/
float getAsFloatAt(const Item* item, size_t index);
/**
* Set a `float` value in an array item.
*
* Sets the `float` value for the supplied item. If \p item is not an `eDictionary`, \p item is changed to an
* `eDictionary` and notifications are queued. \p index is used as the name of the child item.
* If the child item already existed and matches type `eFloat` it is updated with the new value, otherwise any
* previous child item is destroyed (along with any of its children) and a new child item is created of type
* `eFloat`; finally the new value is set into this child item. Change notifications are queued for every change
* that occurs. Subscriptions are notified of changes when the calling thread no longer holds any locks on the true
* root of \p item.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param value Floating point value that will be set to the supplied item.
*/
void setFloatAt(Item* item, size_t index, float value);
/**
* Fills the given buffer with the array values from the given path.
*
* If the provided @p item is not a dictionary item, @p arrayOut is not modified. If the array item contains more
* items than @p arrayBufferLength, a warning message is logged and items past the end are ignored.
* @warning Dictionary items that are not arrays will only have child keys which are convertible to array indices
* written to @p arrayOut. For example, if @p item is a dictionary item with keys '5' and '10',
* only those items will be written into @p arrayOut; the other indices will not be written. It is highly
* recommended to take a \ref ScopedRead lock around calling this function, and call only if
* \ref isAccessibleAsArray or \ref isAccessibleAsArrayOf return \c true.
*
* @param item \ref dictionary::Item
* @param arrayOut Array buffer to fill with floating point values.
* @param arrayBufferLength Size of the supplied array buffer.
*/
void(CARB_ABI* getAsFloat64Array)(const Item* item, double* arrayOut, size_t arrayBufferLength);
/**
* Creates or updates an item to be a `double` array.
*
     * \p item is changed to `eDictionary`, or if it already was, has all child items cleared (change notifications
     * will be queued for any child items that are destroyed). New child items will be created for all elements of the
* given @p array.
*
* @param item \ref dictionary::Item
* @param array Floating point array to copy values from.
* @param arrayLength Array length.
*/
void(CARB_ABI* setFloat64Array)(Item* item, const double* array, size_t arrayLength);
/**
* Fills the given buffer with the array values from the given path.
*
* If the provided @p item is not a dictionary item, @p arrayOut is not modified. If the array item contains more
* items than @p arrayBufferLength, a warning message is logged and items past the end are ignored.
* @warning Dictionary items that are not arrays will only have child keys which are convertible to array indices
* written to @p arrayOut. For example, if @p item is a dictionary item with keys '5' and '10',
* only those items will be written into @p arrayOut; the other indices will not be written. It is highly
* recommended to take a \ref ScopedRead lock around calling this function, and call only if
* \ref isAccessibleAsArray or \ref isAccessibleAsArrayOf return \c true.
* @warning Any element that does not fit within \c float is truncated by casting.
*
* @param item \ref dictionary::Item
* @param arrayOut Array buffer to fill with floating point values.
* @param arrayBufferLength Size of the supplied array buffer.
*/
void(CARB_ABI* getAsFloatArray)(const Item* item, float* arrayOut, size_t arrayBufferLength);
/**
* Creates or updates an item to be a `float` array.
*
     * \p item is changed to `eDictionary`, or if it already was, has all child items cleared (change notifications
     * will be queued for any child items that are destroyed). New child items will be created for all elements of the
* given @p array.
*
* @param item \ref dictionary::Item
* @param array Floating point array to copy values from.
* @param arrayLength Array length.
*/
void(CARB_ABI* setFloatArray)(Item* item, const float* array, size_t arrayLength);
/**
* Attempts to read an array item as a `bool`.
*
* Attempts to read the path and array index as a `bool`, either directly or via a cast, considering the
* item at path to be an array, and using the supplied index to access its child.
* Default value (`false`) is returned if \p item is `nullptr`, index doesn't exist, or there is a conversion
* failure.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @return Boolean value, either value directly or cast from the real item type.
*/
bool(CARB_ABI* getAsBoolAt)(const Item* item, size_t index);
/**
* Set a `bool` value in an array item.
*
* Sets the `bool` value for the supplied item. If \p item is not an `eDictionary`, \p item is changed to an
* `eDictionary` and notifications are queued. \p index is used as the name of the child item.
* If the child item already existed and matches type `eBool` it is updated with the new value, otherwise any
* previous child item is destroyed (along with any of its children) and a new child item is created of type
* `eBool`; finally the new value is set into this child item. Change notifications are queued for every change
* that occurs. Subscriptions are notified of changes when the calling thread no longer holds any locks on the true
* root of \p item.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param value Boolean value that will be set to the supplied item.
*/
void(CARB_ABI* setBoolAt)(Item* item, size_t index, bool value);
/**
* Fills the given buffer with the array values from the given path.
*
* If the provided @p item is not a dictionary item, @p arrayOut is not modified. If the array item contains more
* items than @p arrayBufferLength, a warning message is logged and items past the end are ignored.
* @warning Dictionary items that are not arrays will only have child keys which are convertible to array indices
* written to @p arrayOut. For example, if @p item is a dictionary item with keys '5' and '10',
* only those items will be written into @p arrayOut; the other indices will not be written. It is highly
* recommended to take a \ref ScopedRead lock around calling this function, and call only if
* \ref isAccessibleAsArray or \ref isAccessibleAsArrayOf return \c true.
*
* @param item \ref dictionary::Item
* @param arrayOut Array buffer to fill with boolean values.
* @param arrayBufferLength Size of the supplied array buffer.
*/
void(CARB_ABI* getAsBoolArray)(const Item* item, bool* arrayOut, size_t arrayBufferLength);
/**
* Creates or updates an item to be a `bool` array.
*
     * \p item is changed to `eDictionary`, or if it already was, has all child items cleared (change notifications
     * will be queued for any child items that are destroyed). New child items will be created for all elements of the
* given @p array.
*
* @param item \ref dictionary::Item
* @param array Boolean array to copy values from.
* @param arrayLength Array length.
*/
void(CARB_ABI* setBoolArray)(Item* item, const bool* array, size_t arrayLength);
//! @private
const char*(CARB_ABI* internalCreateStringBufferFromItemValueAt)(const Item* item, size_t index, size_t* pStringLen);
/**
* Attempts to create new string buffer with a value, either the real string value or a string resulting from
* converting the item value to a string. Considers the item to be an array, and using the supplied index
* to access its child.
*
* @note Please use destroyStringBuffer to free the created buffer.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param pStringLen (optional) Receives the length of the string. This can be useful if the string contains NUL
* characters (i.e. byte data). Undefined if the function returns nullptr.
* @return Pointer to the created string buffer if applicable, nullptr otherwise.
*/
    const char* createStringBufferFromItemValueAt(const Item* item, size_t index, size_t* pStringLen = nullptr) const
    {
        // Convenience wrapper supplying the default pStringLen; the returned
        // buffer must be released with destroyStringBuffer().
        return internalCreateStringBufferFromItemValueAt(item, index, pStringLen);
    }
//! @private
const char*(CARB_ABI* internalGetStringBufferAt)(const Item* item, size_t index, size_t* pStringLen);
/**
* Returns internal raw data pointer to the string value of an item. Thus, doesn't perform any
* conversions. Considers the item at path to be an array, and using the supplied index to access
* its child.
* Dangerous function which only guarantees safety of the data when dictionary is not changing.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param pStringLen (optional) Receives the length of the string. This can be useful if the string contains NUL
* characters (i.e. byte data). Undefined if the function returns nullptr.
* @return Raw pointer to the internal string value if applicable, nullptr otherwise.
*/
    const char* getStringBufferAt(const Item* item, size_t index, size_t* pStringLen = nullptr) const
    {
        // Convenience wrapper supplying the default pStringLen. The returned
        // pointer is the item's internal buffer (do not free); it is only safe
        // to use while the dictionary is not being modified.
        return internalGetStringBufferAt(item, index, pStringLen);
    }
//! @private
void(CARB_ABI* internalSetStringAt)(Item* item, size_t index, const char* value, size_t stringLen);
/**
* Sets a string value in the supplied array item.
*
* Sets the string value for the supplied item. If \p item is not an `eDictionary`, \p item is changed to an
* `eDictionary` and notifications are queued. \p index is used as the name of the child item.
* If the child item already existed and matches type `eString` it is updated with the new value, otherwise any
* previous child item is destroyed (along with any of its children) and a new child item is created of type
* `eString`; finally the new value is set into this child item. Change notifications are queued for every change
* that occurs. Subscriptions are notified of changes when the calling thread no longer holds any locks on the true
* root of \p item.
*
* @param item \ref dictionary::Item
* @param index Index of the element in array.
* @param value String value that will be set to the supplied item. May be `nullptr` which is interpreted to be an
* empty string.
* @param stringLen (optional) The length of the string at \p value to copy. This can be useful if only a portion
* of the string should be copied, or if \p value contains NUL characters (i.e. byte data). The default value of
* size_t(-1) treats \p value as a NUL-terminated string, or ignored if \p value is `nullptr`.
*/
    void setStringAt(Item* item, size_t index, const char* value, size_t stringLen = size_t(-1)) const
    {
        // Convenience wrapper: the default stringLen of size_t(-1) means that
        // `value` is treated as a NUL-terminated string.
        internalSetStringAt(item, index, value, stringLen);
    }
/**
* Returns internal raw data pointers to the string value of all child items of an array. Thus, doesn't perform any
* conversions.
*
* @warning This function returns the internal string buffer(s) for several items. Another thread may change the
* setting value which can cause the string buffer to be destroyed. It is recommended to take a \ref ScopedRead lock
* around calling this function and use of the filled items in @p arrayOut.
*
* If the provided @p item is not a dictionary item, @p arrayOut is not modified. If the array item contains more
* items than @p arrayBufferLength, a warning message is logged and items past the end are ignored.
* @warning Dictionary items that are not arrays will only have child keys which are convertible to array indices
* written to @p arrayOut. For example, if @p item is a dictionary item with keys '5' and '10',
* only those items will be written into @p arrayOut; the other indices will not be written. It is highly
* recommended to take a \ref ScopedRead lock around calling this function, and call only if
* \ref isAccessibleAsArray or \ref isAccessibleAsArrayOf return \c true.
*
* @param item \ref dictionary::Item
     * @param arrayOut Array buffer to fill with internal string buffer pointers.
* @param arrayBufferLength Size of the supplied array buffer.
*/
void(CARB_ABI* getStringBufferArray)(const Item* item, const char** arrayOut, size_t arrayBufferLength);
/**
* Creates or updates an item to be a string array.
*
     * \p item is changed to `eDictionary`, or if it already was, has all child items cleared (change notifications
     * will be queued for any child items that are destroyed). New child items will be created for all elements of the
* given @p array.
*
* @param item \ref dictionary::Item
* @param array String array to copy values from.
* @param arrayLength Array length.
*/
void(CARB_ABI* setStringArray)(Item* item, const char* const* array, size_t arrayLength);
/**
* Returns opaque pointer to a read-only child item by its index. Mostly for dynamic dictionary processing.
* This function is designed for array view access, if you just want to enumerate all children, use
* getItemChildCount and getItemChildByIndex instead.
*
* @param item \ref dictionary::Item
* @return Opaque const item pointer if the item and index are valid, or nullptr otherwise.
*/
const Item*(CARB_ABI* getItemAt)(const Item* item, size_t index);
/**
* Returns opaque pointer to a mutable item by its index. Mostly for dynamic dictionary processing.
* This function is designed for array view access, if you just want to enumerate all children, use
* getItemChildCount and getItemChildByIndex instead.
*
* @param item \ref dictionary::Item
* @return Opaque item pointer if the item and index are valid, or nullptr otherwise.
*/
Item*(CARB_ABI* getItemAtMutable)(Item* item, size_t index);
/**
* Attempts to securely fill the supplied arrayOut buffer with read-only opaque pointers to
* the items that are array elements.
* arrayBufferLength is used as a buffer overflow detection.
* This function should not be used for simple dynamic processing purposes, use
* getItemChildCount and getItemChildByIndex instead.
*
* @param item The \ref Item.
* @param arrayOut Array buffer to fill with opaque item pointers.
* @param arrayBufferLength Size of the supplied array buffer.
*/
void(CARB_ABI* getItemArray)(const Item* item, const Item** arrayOut, size_t arrayBufferLength);
/**
* Updates an item to be an array of the given type with provided values.
*
* This is a helper function that will call the appropriate API function based on \c ArrayElementType:
* * `bool`: \ref setBoolArray
* * `int32_t`: \ref setIntArray
* * `int64_t`: \ref setInt64Array
* * `float`: \ref setFloatArray
* * `double`: \ref setFloat64Array
* * `const char*`: \ref setStringArray
*
* @note Any existing children in \p item are removed.
* @tparam ArrayElementType The type of elements to fill the array with.
* @param item The \ref Item that will be changed to an array type
* @param array An array that is used to fill \p item. The values are copied.
* @param arrayLength The length of \p array as a count of the items.
*/
template <typename ArrayElementType>
void setArray(Item* item, const ArrayElementType* array, size_t arrayLength);
/**
* Merges the source item into a destination item. Destination path may be non-existing, then
* missing items in the path will be created as new dictionary items.
*
* @param dstBaseItem Destination base item to apply path from.
* @param dstPath Destination Child path, separated with forward slash ('/'), can be nullptr.
* @param srcBaseItem Source base item to apply path from.
* @param srcPath Source Child path, separated with forward slash ('/'), can be nullptr.
* @param onUpdateItemFn Function that will tell whether the update should overwrite the destination item with the
* source item. See \ref carb::dictionary::keepOriginal(), \ref carb::dictionary::overwriteOriginal(), or
* \ref carb::dictionary::overwriteOriginalWithArrayHandling().
* @param userData User data pointer that will be passed into the onUpdateItemFn.
*/
void(CARB_ABI* update)(Item* dstBaseItem,
const char* dstPath,
const Item* srcBaseItem,
const char* srcPath,
OnUpdateItemFn onUpdateItemFn,
void* userData);
/**
* Destroys given item, and all of its children, if any. Change notifications are queued.
*
* @param item \ref dictionary::Item
*/
void(CARB_ABI* destroyItem)(Item* item);
/**
* Frees a string buffer.
*
* The string buffers are created by \ref createStringBufferFromItemValue(), \ref createStringBufferFromItemName or
* \ref createStringBufferFromItemValueAt().
* @param stringBuffer String buffer to destroy. Undefined behavior results if this is not a value returned from one
* of the functions listed above.
*/
void(CARB_ABI* destroyStringBuffer)(const char* stringBuffer);
/**
* Delete all children of a specified item.
*
* @param item \ref dictionary::Item
*/
void deleteChildren(Item* item);
/**
* Retrieves the value of an item flag.
* @param item \ref dictionary::Item
* @param flag ItemFlag.
     * @return The current value of the specified flag for \p item.
*/
bool(CARB_ABI* getItemFlag)(const Item* item, ItemFlag flag);
/**
* Sets the value of an item flag.
* @param item \ref dictionary::Item
* @param flag ItemFlag.
     * @param flagValue The new value to set for the specified flag.
*/
void(CARB_ABI* setItemFlag)(Item* item, ItemFlag flag, bool flagValue);
/**
* Copies all item flags from one Item to another.
* @param dstItem The destination \ref Item that will receive the flags.
* @param srcItem The source \ref Item from which flags are copied.
*/
void copyItemFlags(Item* dstItem, const Item* srcItem);
/**
* Subscribes to change events about a specific item.
*
* When finished with the subscription, call \ref unsubscribeToChangeEvents.
*
* @param baseItem An item
* @param path The subpath from \p baseItem or `nullptr`
* @param onChangeEventFn The function to call when a change event occurs
* @param userData User-specific data that will be provided to \p onChangeEventFn
* @return A subscription handle that can be used with \ref unsubscribeToChangeEvents
*/
SubscriptionId*(CARB_ABI* subscribeToNodeChangeEvents)(Item* baseItem,
const char* path,
OnNodeChangeEventFn onChangeEventFn,
void* userData);
/**
* Subscribes to change events for all items in a subtree.
*
* All items under `<baseItem>/<path>` will trigger change notifications.
*
* When finished with the subscription, call \ref unsubscribeToChangeEvents.
*
* @param baseItem An item
* @param path The subpath from \p baseItem or `nullptr`
* @param onChangeEventFn The function to call when a change event occurs
* @param userData User-specific data that will be provided to \p onChangeEventFn
* @return A subscription handle that can be used with \ref unsubscribeToChangeEvents()
*/
SubscriptionId*(CARB_ABI* subscribeToTreeChangeEvents)(Item* baseItem,
const char* path,
OnTreeChangeEventFn onChangeEventFn,
void* userData);
/**
* Unsubscribes from change events
*
* @param subscriptionId The subscription handle from \ref subscribeToNodeChangeEvents or
* \ref subscribeToTreeChangeEvents
*/
void(CARB_ABI* unsubscribeToChangeEvents)(SubscriptionId* subscriptionId);
/**
* Unsubscribes all node change handles for a specific Item.
*
* @param item An item
*/
void(CARB_ABI* unsubscribeItemFromNodeChangeEvents)(Item* item);
/**
* Unsubscribes all subtree change handles for a specific Item.
*
* @param item An item
*/
void(CARB_ABI* unsubscribeItemFromTreeChangeEvents)(Item* item);
/**
* Locks an Item for reading.
*
* Mutex-locks `item` for reading. This is only necessary if you are doing multiple read operations and require
* thread-safe consistency across the multiple operations. May be called multiple times within a thread, but
* unlock() must be called for each readLock() call, once the read lock is no longer needed.
* @warning Do not use readLock() directly; prefer \ref ScopedRead instead.
* @warning If a read lock is held and the same thread later takes a write lock, all locks will be released for a
* brief period of time, in which case another thread may be able to make changes.
* @see unlock writeLock ScopedRead ScopedWrite
* @param item The item to read-lock. `item`'s entire hierarchy will be read-locked. `nullptr` is ignored, but
* passing `nullptr` may cause synchronization issues. `item` must not be destroyed while the read lock is held.
* The same `item` should be passed to \ref unlock to unlock at a later point.
*/
void(CARB_ABI* readLock)(const Item* item);
/**
* Locks an Item for writing.
*
* Mutex-locks `item` for writing (exclusive access). This is only necessary if you are doing multiple read/write
* operations and require thread-safe consistency across the multiple operations. May be called multiple times
* within a thread, but unlock() must be called for each writeLock() call, once the write lock is no longer
* needed. Calling writeLock() when a readLock() is already held will release the lock and wait until exclusive
* write lock can be gained.
* @warning Do not use writeLock() directly; prefer \ref ScopedWrite instead.
* @see readLock unlock ScopedRead ScopedWrite
* @param item The item to write-lock. `item`'s entire hierarchy will be write-locked. `nullptr` is ignored, but
* passing `nullptr` may cause synchronization issues. `item` must not be destroyed while the write lock is held.
* The same `item` should be passed to \ref unlock to unlock at a later point.
*/
void(CARB_ABI* writeLock)(Item* item);
/**
* Releases a lock from a prior readLock() or writeLock().
*
* Releases a held read- or write-lock. Must be called once for each read- or write-lock that is held. Must be
* called in the same thread that initiated the read- or write-lock.
* @warning Do not use unlock() directly; prefer using \ref ScopedWrite or \ref ScopedRead instead.
* @see readLock writeLock ScopedRead ScopedWrite
* @param item The item to unlock. `item`'s entire hierarchy will be unlocked
*/
void(CARB_ABI* unlock)(const Item* item);
/**
* Returns a 128-bit hash representing the value.
*
* This hash is invariant to the order of keys and values.
*
* We guarantee that the hashing algorithm will not change unless the version number
* of the interface changes.
*
* @param item The item to take the hash of.
* @return The 128-bit hash of the item.
*/
const extras::hash128_t(CARB_ABI* getHash)(const Item* item);
/**
* Duplicates a dictionary
*
* @note The function duplicateItem should be preferred to this internal function.
*
* @param srcItem The item to duplicate
* @param newParent The new parent of the dictionary, if nullptr, makes a new root.
     * @param newKey If a new parent is given, then newKey is the new key in that parent. If the key already exists, it
     * is overwritten.
* @returns The created item.
*/
Item*(CARB_ABI* duplicateItemInternal)(const Item* item, Item* newParent, const char* newKey);
/**
* Lexicographically compares two Items.
*
* The items being compared do not need to be root items. If the items are a key of a parent object, that key is not
* included in the comparison.
*
* The rules match https://en.cppreference.com/w/cpp/algorithm/lexicographical_compare, treating the key and values
* in the dictionary as an ordered set. The "type" is included in the comparison, however the rules of what types
* are ordered differently than others may change on a version change of this interface.
*
* The function will return a negative, zero, or positive number. The magnitude of the number bears no importance,
* and shouldn't be assumed to. A negative number means that itemA < itemB, zero itemA = itemB, and positive itemA >
* itemB.
*
* @param itemA The first item to compare.
* @param itemB The second item to compare.
* @returns The result of a lexicographical compare of itemA and itemB.
*/
int(CARB_ABI* lexicographicalCompare)(const Item* itemA, const Item* itemB);
/**
* Duplicates a given item as a new root.
*
* @param item The item to duplicate.
     * @returns A newly created root item that is a duplicate of \p item.
*/
Item* duplicateItem(const Item* item)
{
return duplicateItemInternal(item, nullptr, nullptr);
}
/**
     * Duplicates an item where another item is the parent.
*
* If the key already exists, the item will be overridden.
*
* @param item The item to duplicate.
* @param newParent The parent item to own the duplicated item.
* @param newKey the key in the parent.
* @returns The newly duplicated item.
*/
Item* duplicateItem(const Item* item, Item* newParent, const char* newKey)
{
return duplicateItemInternal(item, newParent, newKey);
}
};
/**
* A helper class for calling writeLock() and unlock(). Similar to `std::unique_lock`.
*/
class ScopedWrite
{
const IDictionary* m_pDictionary;
Item* m_pItem;
public:
/**
* RAII constructor. Immediately takes a write lock until *this is destroyed.
* @note Dictionary locks are recursive.
* @warning If the current thread already owns a read lock (i.e. via \ref ScopedRead), promotion to a write lock
* necessitates *releasing* all locks and then waiting for a write lock. This can cause state to change. Always
* re-evaluate state if this is the case.
* @param dictionary A reference to \ref IDictionary.
* @param item The item in the subtree to lock. The root of this item is found and the entire subtree from the root
* is locked. **NOTE:** Undefined behavior occurs if `item` is destroyed before `*this` is destroyed.
*/
ScopedWrite(const IDictionary& dictionary, Item* item) : m_pDictionary(std::addressof(dictionary)), m_pItem(item)
{
m_pDictionary->writeLock(m_pItem);
}
//! Destructor. Releases the write lock.
~ScopedWrite()
{
m_pDictionary->unlock(m_pItem);
}
CARB_PREVENT_COPY_AND_MOVE(ScopedWrite);
};
/**
* A helper class for calling readLock() and unlock(). Similar to `std::shared_lock`.
*/
class ScopedRead
{
const IDictionary* m_pDictionary;
const Item* m_pItem;
public:
/**
* RAII constructor. Immediately takes a read lock until *this is destroyed.
*
* @note Dictionary locks are recursive.
* @param dictionary A reference to \ref IDictionary.
* @param item The item in the subtree to lock. The root of this item is found and the entire subtree from the root
* is locked. **NOTE:** Undefined behavior occurs if `item` is destroyed before `*this` is destroyed.
*/
ScopedRead(const IDictionary& dictionary, const Item* item)
: m_pDictionary(std::addressof(dictionary)), m_pItem(item)
{
m_pDictionary->readLock(m_pItem);
}
//! Destructor. Releases the read lock.
~ScopedRead()
{
m_pDictionary->unlock(m_pItem);
}
CARB_PREVENT_COPY_AND_MOVE(ScopedRead);
};
inline int32_t IDictionary::getAsInt(const Item* item)
{
    // Read via the native 64-bit accessor and narrow. Out-of-range values are a
    // programming error (checked in debug builds) and truncate in release.
    const int64_t wide = getAsInt64(item);
    CARB_ASSERT(wide >= INT_MIN && wide <= INT_MAX);
    return static_cast<int32_t>(wide);
}
inline void IDictionary::setInt(Item* item, int32_t value)
{
    // Integer items are stored as 64-bit; widen losslessly and delegate.
    setInt64(item, int64_t{ value });
}
inline Item* IDictionary::makeIntAtPath(Item* baseItem, const char* path, int32_t value)
{
    // Widen to the native 64-bit representation and delegate.
    return makeInt64AtPath(baseItem, path, int64_t{ value });
}
inline float IDictionary::getAsFloat(const Item* item)
{
    // Values are read via the double accessor and narrowed for the caller.
    const double wide = getAsFloat64(item);
    return static_cast<float>(wide);
}
inline void IDictionary::setFloat(Item* item, float value)
{
    // Floating-point items are stored as double; widen losslessly and delegate.
    setFloat64(item, double{ value });
}
inline Item* IDictionary::makeFloatAtPath(Item* baseItem, const char* path, float value)
{
    // Widen to double, the native storage type, and delegate.
    return makeFloat64AtPath(baseItem, path, double{ value });
}
inline int32_t IDictionary::getAsIntAt(const Item* item, size_t index)
{
    // Read the element via the 64-bit accessor and narrow. Out-of-range values
    // are a programming error (checked in debug builds) and truncate in release.
    const int64_t wide = getAsInt64At(item, index);
    CARB_ASSERT(wide >= INT_MIN && wide <= INT_MAX);
    return static_cast<int32_t>(wide);
}
inline void IDictionary::setIntAt(Item* item, size_t index, int32_t value)
{
    // Widen losslessly to the native 64-bit element type and delegate.
    setInt64At(item, index, int64_t{ value });
}
inline float IDictionary::getAsFloatAt(const Item* item, size_t index)
{
    // Read the element as double and narrow for the caller.
    const double wide = getAsFloat64At(item, index);
    return static_cast<float>(wide);
}
inline void IDictionary::setFloatAt(Item* item, size_t index, float value)
{
    // Widen losslessly to double, the native element type, and delegate.
    setFloat64At(item, index, double{ value });
}
inline Item* IDictionary::makeInt64AtPath(Item* parentItem, const char* path, int64_t value)
{
    // Hold the write lock across lookup, (possible) creation, and assignment so
    // the whole operation is atomic with respect to other threads.
    ScopedWrite lock(*this, parentItem);
    Item* target = getItemMutable(parentItem, path);
    if (!target)
        target = createItem(parentItem, path, ItemType::eInt);
    setInt64(target, value);
    return target;
}
inline Item* IDictionary::makeFloat64AtPath(Item* parentItem, const char* path, double value)
{
    // Hold the write lock across lookup, (possible) creation, and assignment.
    ScopedWrite lock(*this, parentItem);
    Item* target = getItemMutable(parentItem, path);
    if (!target)
        target = createItem(parentItem, path, ItemType::eFloat);
    setFloat64(target, value);
    return target;
}
inline Item* IDictionary::makeBoolAtPath(Item* parentItem, const char* path, bool value)
{
    // Hold the write lock across lookup, (possible) creation, and assignment.
    ScopedWrite lock(*this, parentItem);
    Item* target = getItemMutable(parentItem, path);
    if (!target)
        target = createItem(parentItem, path, ItemType::eBool);
    setBool(target, value);
    return target;
}
inline Item* IDictionary::makeStringAtPath(Item* parentItem, const char* path, const char* value, size_t stringLen)
{
    // Hold the write lock across lookup, (possible) creation, and assignment.
    ScopedWrite lock(*this, parentItem);
    Item* target = getItemMutable(parentItem, path);
    if (!target)
        target = createItem(parentItem, path, ItemType::eString);
    setString(target, value, stringLen);
    return target;
}
inline Item* IDictionary::makeDictionaryAtPath(Item* parentItem, const char* path)
{
    // Hold the write lock across lookup, type check, and (re)creation.
    ScopedWrite lock(*this, parentItem);
    Item* existing = getItemMutable(parentItem, path);
    if (existing && getItemType(existing) == ItemType::eDictionary)
        return existing; // Already a dictionary; reuse it as-is.
    if (existing)
        destroyItem(existing); // Wrong type: replace with a fresh dictionary.
    return createItem(parentItem, path, ItemType::eDictionary);
}
#ifndef DOXYGEN_BUILD
// get<T> specializations for scalar types: each forwards to the corresponding
// typed accessor, which performs any needed conversion from the item's real type.
template <>
inline int32_t IDictionary::get<int32_t>(const dictionary::Item* item)
{
    return getAsInt(item);
}
template <>
inline int64_t IDictionary::get<int64_t>(const dictionary::Item* item)
{
    return getAsInt64(item);
}
template <>
inline float IDictionary::get<float>(const dictionary::Item* item)
{
    return getAsFloat(item);
}
template <>
inline double IDictionary::get<double>(const dictionary::Item* item)
{
    return getAsFloat64(item);
}
template <>
inline bool IDictionary::get<bool>(const dictionary::Item* item)
{
    return getAsBool(item);
}
template <>
inline const char* IDictionary::get<const char*>(const dictionary::Item* item)
{
    // Returns the item's internal string buffer (not a copy) — presumably only
    // valid while the dictionary is unchanged; see getStringBufferAt().
    return getStringBuffer(item);
}
// get<T> specializations for fixed-size vector types. Each reads the item's
// children as an array into a local aggregate and returns it.
//
// The locals are value-initialized (zeroed) because the array getters document
// that the output buffer is NOT written when the item is nullptr or not a
// dictionary; without initialization those elements would be returned as
// indeterminate values (undefined behavior).
template <>
inline Int2 IDictionary::get<Int2>(const dictionary::Item* item)
{
    Int2 value{};
    getAsIntArray(item, &value.x, 2);
    return value;
}
template <>
inline Int3 IDictionary::get<Int3>(const dictionary::Item* item)
{
    Int3 value{};
    getAsIntArray(item, &value.x, 3);
    return value;
}
template <>
inline Int4 IDictionary::get<Int4>(const dictionary::Item* item)
{
    Int4 value{};
    getAsIntArray(item, &value.x, 4);
    return value;
}
template <>
inline Uint2 IDictionary::get<Uint2>(const dictionary::Item* item)
{
    // Unsigned variants read through int64 and truncate each element to uint32_t.
    int64_t value[2] = {};
    getAsInt64Array(item, value, 2);
    return { static_cast<uint32_t>(value[0]), static_cast<uint32_t>(value[1]) };
}
template <>
inline Uint3 IDictionary::get<Uint3>(const dictionary::Item* item)
{
    int64_t value[3] = {};
    getAsInt64Array(item, value, 3);
    return { static_cast<uint32_t>(value[0]), static_cast<uint32_t>(value[1]), static_cast<uint32_t>(value[2]) };
}
template <>
inline Uint4 IDictionary::get<Uint4>(const dictionary::Item* item)
{
    int64_t value[4] = {};
    getAsInt64Array(item, value, 4);
    return { static_cast<uint32_t>(value[0]), static_cast<uint32_t>(value[1]), static_cast<uint32_t>(value[2]),
             static_cast<uint32_t>(value[3]) };
}
template <>
inline Float2 IDictionary::get<Float2>(const dictionary::Item* item)
{
    Float2 value{};
    getAsFloatArray(item, &value.x, 2);
    return value;
}
template <>
inline Float3 IDictionary::get<Float3>(const dictionary::Item* item)
{
    Float3 value{};
    getAsFloatArray(item, &value.x, 3);
    return value;
}
template <>
inline Float4 IDictionary::get<Float4>(const dictionary::Item* item)
{
    Float4 value{};
    getAsFloatArray(item, &value.x, 4);
    return value;
}
template <>
inline Double2 IDictionary::get<Double2>(const dictionary::Item* item)
{
    Double2 value{};
    getAsFloat64Array(item, &value.x, 2);
    return value;
}
template <>
inline Double3 IDictionary::get<Double3>(const dictionary::Item* item)
{
    Double3 value{};
    getAsFloat64Array(item, &value.x, 3);
    return value;
}
template <>
inline Double4 IDictionary::get<Double4>(const dictionary::Item* item)
{
    Double4 value{};
    getAsFloat64Array(item, &value.x, 4);
    return value;
}
//! Convenience overload: resolves @p path relative to @p baseItem and reads that item as type T.
template <class T>
inline T IDictionary::get(const dictionary::Item* baseItem, const char* path)
{
    return get<T>(getItem(baseItem, path));
}
// Scalar and string specializations of makeAtPath: each forwards to the matching typed ABI
// setter to store the value at `path` under `baseItem`.
template <>
inline void IDictionary::makeAtPath<int32_t>(dictionary::Item* baseItem, const char* path, int32_t value)
{
    makeIntAtPath(baseItem, path, value);
}
template <>
inline void IDictionary::makeAtPath<int64_t>(dictionary::Item* baseItem, const char* path, int64_t value)
{
    makeInt64AtPath(baseItem, path, value);
}
template <>
inline void IDictionary::makeAtPath<float>(dictionary::Item* baseItem, const char* path, float value)
{
    makeFloatAtPath(baseItem, path, value);
}
template <>
inline void IDictionary::makeAtPath<double>(dictionary::Item* baseItem, const char* path, double value)
{
    makeFloat64AtPath(baseItem, path, value);
}
template <>
inline void IDictionary::makeAtPath<bool>(dictionary::Item* baseItem, const char* path, bool value)
{
    makeBoolAtPath(baseItem, path, value);
}
template <>
inline void IDictionary::makeAtPath<const char*>(dictionary::Item* baseItem, const char* path, const char* value)
{
    // Uses the NUL-terminated overload (no explicit length).
    makeStringAtPath(baseItem, path, value);
}
template <>
inline void IDictionary::makeAtPath<std::string>(dictionary::Item* baseItem, const char* path, std::string value)
{
    // Passes an explicit length rather than relying on NUL termination.
    makeStringAtPath(baseItem, path, value.data(), value.size());
}
template <>
inline void IDictionary::makeAtPath<cpp::string_view>(dictionary::Item* baseItem, const char* path, cpp::string_view value)
{
    makeStringAtPath(baseItem, path, value.data(), value.length());
}
template <>
inline void IDictionary::makeAtPath<omni::string>(dictionary::Item* baseItem, const char* path, omni::string value)
{
    makeStringAtPath(baseItem, path, value.data(), value.size());
}
// setArray specializations: dispatch each element type to the corresponding typed ABI array
// setter, replacing the item's contents with the given array.
template <>
inline void IDictionary::setArray(Item* baseItem, const bool* array, size_t arrayLength)
{
    setBoolArray(baseItem, array, arrayLength);
}
template <>
inline void IDictionary::setArray(Item* baseItem, const int32_t* array, size_t arrayLength)
{
    setIntArray(baseItem, array, arrayLength);
}
template <>
inline void IDictionary::setArray(Item* baseItem, const int64_t* array, size_t arrayLength)
{
    setInt64Array(baseItem, array, arrayLength);
}
template <>
inline void IDictionary::setArray(Item* baseItem, const float* array, size_t arrayLength)
{
    setFloatArray(baseItem, array, arrayLength);
}
template <>
inline void IDictionary::setArray(Item* baseItem, const double* array, size_t arrayLength)
{
    setFloat64Array(baseItem, array, arrayLength);
}
template <>
inline void IDictionary::setArray(Item* baseItem, const char* const* array, size_t arrayLength)
{
    setStringArray(baseItem, array, arrayLength);
}
// Vector specializations of makeAtPath: resolve the destination item (baseItem itself when
// `path` is null or empty, otherwise a dictionary item created at `path`), then store the
// components as a typed array.
template <>
inline void IDictionary::makeAtPath<Int2>(dictionary::Item* baseItem, const char* path, Int2 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<int32_t>(target, &value.x, 2);
}
template <>
inline void IDictionary::makeAtPath<Int3>(dictionary::Item* baseItem, const char* path, Int3 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<int32_t>(target, &value.x, 3);
}
template <>
inline void IDictionary::makeAtPath<Int4>(dictionary::Item* baseItem, const char* path, Int4 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<int32_t>(target, &value.x, 4);
}
template <>
inline void IDictionary::makeAtPath<Uint2>(dictionary::Item* baseItem, const char* path, Uint2 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    // Widen to int64 so unsigned 32-bit values round-trip without becoming negative.
    int64_t wide[] = { value.x, value.y };
    setArray<int64_t>(target, wide, 2);
}
template <>
inline void IDictionary::makeAtPath<Uint3>(dictionary::Item* baseItem, const char* path, Uint3 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    int64_t wide[] = { value.x, value.y, value.z };
    setArray<int64_t>(target, wide, 3);
}
template <>
inline void IDictionary::makeAtPath<Uint4>(dictionary::Item* baseItem, const char* path, Uint4 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    int64_t wide[] = { value.x, value.y, value.z, value.w };
    setArray<int64_t>(target, wide, 4);
}
template <>
inline void IDictionary::makeAtPath<Float2>(dictionary::Item* baseItem, const char* path, Float2 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<float>(target, &value.x, 2);
}
template <>
inline void IDictionary::makeAtPath<Float3>(dictionary::Item* baseItem, const char* path, Float3 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<float>(target, &value.x, 3);
}
template <>
inline void IDictionary::makeAtPath<Float4>(dictionary::Item* baseItem, const char* path, Float4 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<float>(target, &value.x, 4);
}
template <>
inline void IDictionary::makeAtPath<Double2>(dictionary::Item* baseItem, const char* path, Double2 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<double>(target, &value.x, 2);
}
template <>
inline void IDictionary::makeAtPath<Double3>(dictionary::Item* baseItem, const char* path, Double3 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<double>(target, &value.x, 3);
}
template <>
inline void IDictionary::makeAtPath<Double4>(dictionary::Item* baseItem, const char* path, Double4 value)
{
    dictionary::Item* target = (path && path[0] != '\0') ? makeDictionaryAtPath(baseItem, path) : baseItem;
    setArray<double>(target, &value.x, 4);
}
#endif
//! Destroys every immediate child of @p item while holding a scoped write lock on it.
inline void IDictionary::deleteChildren(Item* item)
{
    ScopedWrite g(*this, item);
    // Remove from the back so the indices of the remaining children stay valid.
    for (size_t remaining = getItemChildCount(item); remaining != 0;)
        destroyItem(getItemChildByIndexMutable(item, --remaining));
}
// Propagates the eUnitSubtree flag from srcItem to dstItem (the only flag this helper copies).
inline void IDictionary::copyItemFlags(Item* dstItem, const Item* srcItem)
{
    setItemFlag(dstItem, ItemFlag::eUnitSubtree, getItemFlag(srcItem, ItemFlag::eUnitSubtree));
}
} // namespace dictionary
} // namespace carb
|
omniverse-code/kit/include/carb/variant/VariantUtils.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Utilities for *carb.variant.plugin*
#pragma once
#include "VariantTypes.h"
namespace carb
{
namespace variant
{
/**
* A helper function to translate a typed value into a \ref VariantData via a \ref Translator. A compile error will
* occur if no \ref Translator could be found for the decayed type.
* @param type The value of type \c Type.
* @returns A VariantData constructed from \p type. \ref traits::destruct() must be called on the VariantData when
* finished with it.
*/
template <class Type>
VariantData translate(Type&& type) noexcept;
//! A structure containing functions for performing the prescribed actions on a \ref VariantData. The functions handle
//! the default behavior if the v-table or v-table function are \c nullptr.
struct traits
{
    /**
     * Swaps two \ref VariantData members.
     * @note \ref VariantData is treated as a trivial type and no v-table functions are required to perform this.
     * @param lhs The left-hand VariantData.
     * @param rhs The right-hand VariantData.
     */
    static void swap(VariantData& lhs, VariantData& rhs) noexcept;
    /**
     * Destructs a \ref VariantData.
     * @note The default behavior (if `!self.vtable->Destructor`) treats \p self as trivially destructible.
     * @param self The VariantData to destruct. It is reset to the empty state on return.
     */
    static void destruct(VariantData& self) noexcept;
    /**
     * Copies a \ref VariantData.
     * @note The default behavior (if `!self.vtable->Copy`) treats \p self as trivially copyable.
     * @param self The VariantData to copy.
     * @returns A VariantData that represents a copy of \p self. The v-table of the return value must be the same as
     * `self.vtable`. When finished with the return value, it must be destroyed via destruct().
     */
    static VariantData copy(const VariantData& self) noexcept;
    /**
     * Tests two \ref VariantData instances for equality.
     * @note The default behavior (if `!self.vtable->Equals`) treats \p self and \p other as trivially comparable (i.e.
     * bitwise compare via \c std::memcmp).
     * @param self The VariantData to compare. This parameter provides the v-table for the comparison.
     * @param other The other VariantData to compare.
     * @returns \c true if \p self and \p other are equal; \c false otherwise.
     */
    static bool equals(const VariantData& self, const VariantData& other) noexcept;
    /**
     * Renders a \ref VariantData as a string for debugging purposes.
     * @note The default behavior (if `!self.vtable->ToString`) produces `"<vtable>:<data>"`.
     * @param self The VariantData to render as a string.
     * @returns A string representing \p self for debugging purposes.
     */
    static omni::string toString(const VariantData& self) noexcept;
    /**
     * Attempts to convert a \ref VariantData to a different type. If \p newType is the same as `self.vtable`, then
     * \ref traits::copy() is invoked instead.
     * @note The default behavior (if `!self.vtable->ConvertTo`) merely returns \c false.
     * @param self The VariantData to convert. This parameter provides the v-table for the comparison.
     * @param newType The v-table of the desired type; if \c nullptr the conversion fails.
     * @param out Receives the converted value on success; release it with \ref destruct() when finished.
     * @returns \c true if the conversion succeeded and \p out was written; \c false otherwise.
     */
    static bool convertTo(const VariantData& self, const VTable* newType, VariantData& out) noexcept;
    /**
     * Computes a hash of a \ref VariantData.
     * @note The default behavior (if `!self.vtable->Hash`) produces `size_t(self.data)`.
     * @param self The VariantData to hash.
     * @returns A hash value representing \p self.
     */
    static size_t hash(const VariantData& self) noexcept;
};
//! A wrapper class for managing the lifetime of \ref VariantData and converting the contained value to C++ types.
class Variant final : protected VariantData
{
public:
    /**
     * Default constructor. Produces an empty Variant, that is, \ref hasValue() will return \c false. Any attempt to
     * \ref getValue() will fail and \ref convertTo() will produce an empty Variant. Empty Variants are only equal to
     * other empty Variants.
     */
    Variant() noexcept;
    /**
     * Construct based on given type.
     *
     * To allow copy/move constructors to work properly, this constructor participates in overload resolution only if
     * \c T is not \ref Variant.
     *
     * @warning This function will fail to compile if a \ref Translator cannot be found for \c T.
     * @param val The value to store in the variant.
     */
#ifndef DOXYGEN_BUILD
    template <class T, typename std::enable_if_t<!std::is_same<std::decay_t<T>, Variant>::value, bool> = true>
#else
    template <class T>
#endif
    explicit Variant(T&& val) noexcept
    {
        // This function cannot be externally inlined due to MSVC not understanding the enable_if in the inlined
        // function.
        data() = translate(std::forward<T>(val));
    }
    /**
     * Destructor.
     */
    ~Variant() noexcept;
    /**
     * Copy constructor.
     * @param other The Variant to copy.
     */
    Variant(const Variant& other) noexcept;
    /**
     * Copy-assign operator.
     * @param other The Variant to copy.
     * @returns \c *this
     */
    Variant& operator=(const Variant& other) noexcept;
    /**
     * Move constructor.
     * @param other The Variant to move from. \p other is left in an empty state.
     */
    Variant(Variant&& other) noexcept;
    /**
     * Move-assign operator.
     * @note The two Variants exchange state; \p other receives the previous value of \c *this.
     * @param other The Variant to move from.
     * @returns \c *this
     */
    Variant& operator=(Variant&& other) noexcept;
    /**
     * Tests for equality between two variants.
     * @param other The other Variant.
     * @returns \c true if the Variants are equal; \c false otherwise.
     */
    bool operator==(const Variant& other) const noexcept;
    /**
     * Tests for inequality between two variants.
     * @param other The other Variant.
     * @returns \c true if the Variants are not equal; \c false otherwise.
     */
    bool operator!=(const Variant& other) const noexcept;
    /**
     * Tests whether the Variant contains a value.
     * @returns \c true if the Variant contains a value; \c false if it is empty.
     */
    bool hasValue() const noexcept;
    /**
     * Renders the Variant as a string for debugging purposes.
     * @returns The string value of the variant.
     */
    omni::string toString() const noexcept;
    /**
     * Obtains the hash value of the variant.
     * @returns The hash value of the variant.
     */
    size_t getHash() const noexcept;
    /**
     * Attempts to convert the Variant to the given type.
     * @returns A \c optional containing the requested value if conversion succeeds; an empty \c optional otherwise.
     */
    template <class T>
    cpp::optional<T> getValue() const noexcept;
    /**
     * Attempts to convert the Variant to the given type with a fallback value if conversion fails.
     * @param fallback The default value to return if conversion fails.
     * @returns The contained value if conversion succeeds, or \p fallback if conversion fails.
     */
    template <class T>
    T getValueOr(T&& fallback) const noexcept;
    /**
     * Attempts to convert to a Variant of a different type.
     * @returns A Variant representing a different C++ type if conversion succeeds, otherwise returns an empty Variant.
     */
    template <class T>
    Variant convertTo() const noexcept;
    /**
     * Access the underlying \ref VariantData.
     * @returns The underlying \ref VariantData.
     */
    const VariantData& data() const noexcept
    {
        return *this;
    }

private:
    VariantData& data() noexcept
    {
        return *this;
    }
};
// The Variant class is intended only to add functions on top of VariantData. Therefore, the size must match.
static_assert(sizeof(Variant) == sizeof(VariantData), "");
// This is an ABI-stable representation of std::pair<Variant, Variant>, used only by Translator and internally by
// carb.variant.plugin.
//! @private
struct VariantPair
{
    Variant first; //!< The first variant of the pair.
    Variant second; //!< The second variant of the pair.
};
static_assert(std::is_standard_layout<VariantPair>::value, ""); // not interop-safe because not trivially copyable
/**
 * A representation of a key value pair, similar to `std::pair<const Variant, Variant>`.
 * ABI-stable representation to transact with *carb.variant.plugin*.
 */
struct KeyValuePair
{
    const Variant first; //!< The first item in the pair; the key.
    Variant second; //!< The second item in the pair; the value.
};
static_assert(std::is_standard_layout<KeyValuePair>::value, ""); // not interop-safe because not trivially copyable
//! Lifetime management wrapper for @ref IVariant::registerType().
class Registrar
{
public:
    /**
     * Default constructor. Constructs an empty registrar.
     */
    constexpr Registrar() noexcept;
    /**
     * Constructor. Registers the type.
     * @note If registration fails, isEmpty() will return `true`.
     * @note The v-table must remain valid until the type is unregistered (see IVariant::registerType()).
     * @see IVariant::registerType()
     * @param vtable The v-table pointer to register.
     */
    Registrar(const VTable* vtable) noexcept;
    /**
     * Destructor. Unregisters the type. No-op if isEmpty() returns `true`.
     */
    ~Registrar() noexcept;
    /**
     * Move-construct. Moves the registered type to \c *this and leaves \p other empty.
     * @param other The other Registrar to move from.
     */
    Registrar(Registrar&& other) noexcept;
    /**
     * Move-assign. Swaps state with \p other.
     * @param other The other Registrar to swap state with.
     * @returns \c *this
     */
    Registrar& operator=(Registrar&& other) noexcept;
    /**
     * Checks whether \c *this is empty.
     * @returns \c true if \c *this does not contain a valid type; \c false otherwise.
     */
    bool isEmpty() const noexcept;
    /**
     * Retrieves the registered type.
     * @returns The managed type, or an empty \c RString if \c *this is empty and no type is managed.
     */
    RString getType() const noexcept;
    /**
     * Resets \c *this to an empty state, unregistering any registered type.
     */
    void reset() noexcept;

private:
    RString m_type;
};
} // namespace variant
//! Namespace for *carb.variant* literal helpers.
namespace variant_literals
{
/** Literal cast operator for an unsigned long long variant value.
 *
 * @param[in] val The value to be contained in the variant object.
 * @returns The variant object containing the requested value.
 */
CARB_NODISCARD inline variant::Variant operator"" _v(unsigned long long val) noexcept
{
    return variant::Variant{ val };
}
/** Literal cast operator for a long double variant value.
 *
 * @param[in] val The value to be contained in the variant object, narrowed to `double`.
 * @returns The variant object containing the requested value.
 */
CARB_NODISCARD inline variant::Variant operator"" _v(long double val) noexcept
{
    // Use an explicit named cast (rather than a C-style cast) for the potentially-narrowing
    // long double -> double conversion; variants store floating-point values as float/double.
    return variant::Variant{ static_cast<double>(val) };
}
/** Literal cast operator for a string variant value.
 *
 * @param[in] str The string to be contained in the variant object.
 * @param[in] length The length of the string to be contained in the variant object.
 * @returns The variant object containing the requested value.
 */
CARB_NODISCARD inline variant::Variant operator"" _v(const char* str, size_t length) noexcept
{
    CARB_UNUSED(length);
    return variant::Variant{ str };
}
} // namespace variant_literals
} // namespace carb
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace std
{
// Allows carb::variant::Variant to be used as a key in std::unordered_map/set by forwarding to
// the variant's own hash (which dispatches through the registered v-table).
template <>
struct hash<carb::variant::Variant>
{
    size_t operator()(const carb::variant::Variant& v) const noexcept
    {
        return v.getHash();
    }
};
// std::to_string-style helper: converts the variant's omni::string debug representation into a
// std::string.
inline std::string to_string(const carb::variant::Variant& v)
{
    auto str = v.toString();
    return std::string(str.begin(), str.end());
}
} // namespace std
#endif
|
omniverse-code/kit/include/carb/variant/IVariant.inl | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#include "VariantUtils.h"
#include "Translator.inl"
#include "VariantUtils.inl"
#include "VariantTypes.inl"
namespace carb
{
namespace variant
{
// Wraps the raw ABI call in a smart pointer so the returned array's reference is managed (RAII).
inline VariantArrayPtr IVariant::createArray(const Variant* p, size_t count)
{
    return VariantArrayPtr{ internalCreateArray(p, count) };
}
// Wraps the raw ABI call in a smart pointer so the returned map's reference is managed (RAII).
inline VariantMapPtr IVariant::createMap()
{
    return VariantMapPtr{ internalCreateMap() };
}
} // namespace variant
} // namespace carb
|
omniverse-code/kit/include/carb/variant/IVariant.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Interface definition for *carb.variant.plugin*
#pragma once
#include "../Interface.h"
#include "VariantTypes.h"
namespace carb
{
//! Namespace for *carb.variant.plugin* and related utilities.
namespace variant
{
//! Interface for *carb.variant.plugin*
struct IVariant
{
    CARB_PLUGIN_INTERFACE("carb::variant::IVariant", 0, 1)
    /**
     * Retrieves a v-table by variant type. Typically not used; see \ref Translator instead.
     * @param type The type to translate.
     * @returns A v-table for the given type, or \c nullptr in case of an unknown type.
     */
    const VTable*(CARB_ABI* getVTable)(RString type) noexcept;
    /**
     * Registers a user variant type.
     *
     * @note `vtable->typeName` must be a unique name within the running process and may not match any of the built-in
     * type names.
     * @param vtable Must not be \c nullptr. This pointer is retained by *carb.variant.plugin* so the caller must
     * guarantee its lifetime until it is unregistered with \ref unregisterType(). A program that still references the
     * v-table in a \ref VariantData (or \ref Variant) after it is unregistered is malformed.
     * @returns \c true if `vtable->typeName` was available and the type was successfully registered; \c false
     * otherwise.
     */
    bool(CARB_ABI* registerType)(const VTable* vtable) noexcept;
    /**
     * Unregisters a user variant type.
     *
     * @param type The name of the type previously registered with \ref registerType().
     * @returns \c true if the type was unregistered; \c false otherwise (i.e. the type was not previously registered).
     */
    bool(CARB_ABI* unregisterType)(RString type) noexcept;
    //! @private
    VariantArray*(CARB_ABI* internalCreateArray)(const Variant* p, size_t count);
    /**
     * Creates a \ref VariantArray object from the given parameters.
     *
     * @param p The raw array to copy into the new \ref VariantArray; may be \c nullptr to produce an empty array.
     * @param count The count of items in @p p.
     * @returns A newly created \ref VariantArray object.
     */
    CARB_NODISCARD VariantArrayPtr createArray(const Variant* p = nullptr, size_t count = 0);
    //! @private
    VariantMap*(CARB_ABI* internalCreateMap)();
    /**
     * Creates a \ref VariantMap object.
     * @returns A newly created \ref VariantMap object.
     */
    CARB_NODISCARD VariantMapPtr createMap();
};
} // namespace variant
} // namespace carb
#include "IVariant.inl"
|
omniverse-code/kit/include/carb/variant/Translator.inl | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Translator definitions for *carb.variant.plugin*
#pragma once
#include "IVariant.h"
#include "../InterfaceUtils.h"
#include "../Strong.h"
namespace carb
{
namespace dictionary
{
struct Item;
}
namespace variant
{
// Translator for null variants: both the stored payload and the extracted value are nullptr.
template <>
struct Translator<std::nullptr_t, void>
{
    RString type() const noexcept
    {
        return eNull;
    }
    void* data(std::nullptr_t) const noexcept
    {
        return nullptr;
    }
    std::nullptr_t value(void*) const noexcept
    {
        return nullptr;
    }
};
// Translator for integral types other than bool that fit inside a void*. The value's bytes are
// stored directly in the pointer payload via memcpy (well-defined, unlike pointer casts).
template <class T>
struct Translator<T,
                  typename std::enable_if_t<std::is_integral<T>::value && !std::is_same<bool, T>::value &&
                                            (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8) &&
                                            sizeof(T) <= sizeof(void*)>>
{
    constexpr RString type() const noexcept
    {
        constexpr bool s = std::is_signed<T>::value;
        // Map size + signedness onto the matching registered type name.
        switch (sizeof(T))
        {
            case 1:
                return s ? eInt8 : eUInt8;
            case 2:
                return s ? eInt16 : eUInt16;
            case 4:
                return s ? eInt32 : eUInt32;
            case 8:
                return s ? eInt64 : eUInt64;
        }
        CARB_ASSERT(false); // Should never get here
        return {};
    }
    void* data(T val) const noexcept
    {
        void* p{}; // zero-init so any payload bytes beyond sizeof(T) are deterministic
        memcpy(&p, &val, sizeof(T));
        return p;
    }
    T value(void* val) const noexcept
    {
        T t;
        // Reads the bytes back out of the pointer variable's own storage (mirrors data()).
        memcpy(&t, &val, sizeof(T));
        return t;
    }
};
// Translator for float/double; same byte-packing scheme as the integral translator above.
template <class T>
struct Translator<T,
                  typename std::enable_if_t<std::is_floating_point<T>::value && (sizeof(T) == 4 || sizeof(T) == 8) &&
                                            sizeof(T) <= sizeof(void*)>>
{
    RString type() const noexcept
    {
        return sizeof(T) == 4 ? eFloat : eDouble;
    }
    void* data(T val) const noexcept
    {
        void* p{};
        memcpy(&p, &val, sizeof(T));
        return p;
    }
    T value(void* val) const noexcept
    {
        T t;
        memcpy(&t, &val, sizeof(T));
        return t;
    }
};
// Translator for omni::string: the payload holds a heap-allocated string constructed with
// carb::allocate + placement new.
// NOTE(review): release of this allocation is presumably handled by the eString v-table inside
// carb.variant.plugin -- confirm before changing the allocation scheme.
template <>
struct Translator<omni::string, void>
{
    RString type() const noexcept
    {
        return eString;
    }
    void* data(omni::string str) const noexcept
    {
        return new (carb::allocate(sizeof(omni::string))) omni::string(std::move(str));
    }
    const omni::string& value(void* val) const noexcept
    {
        return *static_cast<const omni::string*>(val);
    }
};
// Translator for bool; a single byte memcpy'd into a zero-initialized pointer payload.
template <>
struct Translator<bool, void>
{
    RString type() const noexcept
    {
        return eBool;
    }
    void* data(bool b) const noexcept
    {
        void* p{};
        memcpy(&p, &b, sizeof(b));
        return p;
    }
    bool value(void* val) const noexcept
    {
        bool b;
        memcpy(&b, &val, sizeof(b));
        return b;
    }
};
// Helper that is true_type if pointer-type T is pointer-type U excluding const/volatile
// Used to enable types for both const/volatile and non-const/volatile
template <class T, class U>
struct IsCVOrNot
    : std::integral_constant<bool,
                             std::is_pointer<T>::value &&
                                 std::is_same<std::remove_cv_t<std::remove_pointer_t<T>>, std::remove_pointer_t<U>>::value>
{
};
// [const] char*
// The payload stores the pointer itself; the character data is NOT copied here.
// NOTE(review): the lifetime relationship between the pointed-to string and the variant is
// governed by the eCharPtr v-table in carb.variant.plugin -- confirm before relying on it.
template <class T>
struct Translator<T, typename std::enable_if_t<IsCVOrNot<T, char*>::value>>
{
    RString type() const noexcept
    {
        return eCharPtr;
    }
    void* data(T p) const noexcept
    {
        return (void*)p; // also drops any const/volatile qualification
    }
    const char* value(void* val) const noexcept
    {
        return static_cast<const char*>(val);
    }
};
// [const] dictionary::Item*
// Stores the item pointer verbatim; no reference/copy of the item is taken here.
template <class T>
struct Translator<T, typename std::enable_if_t<IsCVOrNot<T, dictionary::Item*>::value>>
{
    RString type() const noexcept
    {
        return eDictionary;
    }
    void* data(T p) const noexcept
    {
        return (void*)p;
    }
    const dictionary::Item* value(void* p) const noexcept
    {
        return static_cast<const dictionary::Item*>(p);
    }
};
// Translator for carb::Strong typedefs: reuses the underlying type's translator, unwrapping the
// strong value on the way in and rewrapping it on the way out.
template <class T, class U>
struct Translator<Strong<T, U>, void> : public Translator<T>
{
    using Parent = Translator<T>;
    void* data(Strong<T, U> val) const noexcept
    {
        return Parent::data(val.get());
    }
    const Strong<T, U> value(void* p) const noexcept
    {
        return Strong<T, U>{ Parent::value(p) };
    }
};
// Translator for the smart-pointer form: ownership of one reference moves into the payload.
template <>
struct Translator<VariantArrayPtr, void>
{
    RString type() const noexcept
    {
        return eVariantArray;
    }
    void* data(VariantArrayPtr p) const noexcept
    {
        // Keep the ref-count that `p` held by detaching the smart pointer.
        return p.detach();
    }
    VariantArrayPtr value(void* p) const noexcept
    {
        return VariantArrayPtr(static_cast<VariantArray*>(p));
    }
};
// [const] VariantArray*
// Raw-pointer form: the payload takes its own reference via addRef().
template <class T>
struct Translator<T, typename std::enable_if_t<IsCVOrNot<T, VariantArray*>::value>>
{
    RString type() const noexcept
    {
        return eVariantArray;
    }
    void* data(VariantArray* p) const noexcept // IObject requires non-const
    {
        if (p)
            p->addRef();
        return p;
    }
    T value(void* p) const noexcept
    {
        return static_cast<VariantArray*>(p);
    }
};
// Translator for a pair of variants: the payload is an ABI-stable VariantPair allocated with
// carb::allocate + placement new; value() copies the two variants out (the payload keeps its own).
template <>
struct Translator<std::pair<Variant, Variant>, void>
{
    RString type() const noexcept
    {
        return eVariantPair;
    }
    void* data(std::pair<Variant, Variant> pair) const noexcept
    {
        void* mem = carb::allocate(sizeof(VariantPair));
        return new (mem) VariantPair{ std::move(pair.first), std::move(pair.second) };
    }
    std::pair<Variant, Variant> value(void* p) const noexcept
    {
        auto pair = static_cast<VariantPair*>(p);
        return std::make_pair(pair->first, pair->second);
    }
};
// Translator for registered-string handles: the handle is bit-cast into the pointer payload via
// a union; the static_assert guarantees it fits.
template <bool Uncased, class Base>
struct Translator<carb::detail::RStringTraits<Uncased, Base>, void>
{
    constexpr static bool IsKey = std::is_same<Base, carb::detail::RStringKeyBase>::value;
    using RStringType = typename std::conditional_t<Uncased,
                                                    std::conditional_t<IsKey, RStringUKey, RStringU>,
                                                    std::conditional_t<IsKey, RStringKey, RString>>;
    static_assert(sizeof(RStringType) <= sizeof(void*), "RStringType must fit within a void*");
    RString type() const noexcept
    {
        // Selects among the four registered names based on casing and key-ness.
        if (IsKey)
            return Uncased ? eRStringUKey : eRStringKey;
        return Uncased ? eRStringU : eRString;
    }
    void* data(const RStringType& rstring) const noexcept
    {
        union
        {
            RStringType in;
            void* out;
        } u{ rstring };
        return u.out;
    }
    RStringType value(void* p) const noexcept
    {
        union
        {
            void* in;
            RStringType out;
        } u{ p };
        return u.out;
    }
};
// Dispatcher: maps the four public RString types onto the RStringTraits-based translator above.
template <class T>
struct Translator<T,
                  std::enable_if_t<cpp::disjunction<typename std::is_same<RString, T>,
                                                    typename std::is_same<RStringU, T>,
                                                    typename std::is_same<RStringKey, T>,
                                                    typename std::is_same<RStringUKey, T>>::value>>
    : public Translator<carb::detail::RStringTraits<
          T::IsUncased,
          typename std::conditional_t<
              cpp::disjunction<typename std::is_same<RStringKey, T>, typename std::is_same<RStringUKey, T>>::value,
              carb::detail::RStringKeyBase,
              carb::detail::RStringBase>>>
{
};
// Translator for the smart-pointer form: ownership of one reference moves into the payload.
template <>
struct Translator<VariantMapPtr, void>
{
    RString type() const noexcept
    {
        return eVariantMap;
    }
    void* data(VariantMapPtr p) const noexcept
    {
        // Keep the ref-count that `p` held by detaching the smart pointer.
        return p.detach();
    }
    VariantMapPtr value(void* p) const noexcept
    {
        return VariantMapPtr(static_cast<VariantMap*>(p));
    }
};
// [const] VariantMap*
// Raw-pointer form: the payload takes its own reference via addRef().
template <class T>
struct Translator<T, typename std::enable_if_t<IsCVOrNot<T, VariantMap*>::value>>
{
    RString type() const noexcept
    {
        return eVariantMap;
    }
    void* data(VariantMap* p) const noexcept // IObject requires non-const
    {
        if (p)
            p->addRef();
        return p;
    }
    T value(void* p) const noexcept
    {
        return static_cast<VariantMap*>(p);
    }
};
// NOTE: If you add a new Translator here, make sure that it is documented in the comment block for Translator in
// VariantTypes.h
} // namespace variant
} // namespace carb
|
omniverse-code/kit/include/carb/variant/VariantUtils.inl | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
namespace carb
{
namespace variant
{
// Builds a VariantData from a typed value: looks up the v-table registered for the translator's
// type name and lets the translator pack the value into the void* payload.
template <class Type, class TranslatorType>
VariantData internalTranslate(Type&& type, TranslatorType&& t)
{
    auto iface = getCachedInterface<IVariant>();
    CARB_ASSERT(iface, "Missing required interface: IVariant");
    return { iface->getVTable(t.type()), t.data(std::forward<Type>(type)) };
}
// Public entry point: selects the Translator specialization for the decayed type.
template <class Type>
VariantData translate(Type&& type) noexcept
{
    // NOTE: If you get an error on this line about "use of undefined type<...>" it means that there is no
    // Translator<> for the type you're trying to convert to. See the comment block for Translator in VariantTypes.h.
    // The Translator structs themselves are in Translator.inl.
    return internalTranslate(std::forward<Type>(type), Translator<std::decay_t<Type>>{});
}
// VariantData is a trivial {vtable, data} pair, so swapping the raw structs is sufficient.
inline void traits::swap(VariantData& lhs, VariantData& rhs) noexcept
{
    std::swap(lhs, rhs);
}
inline void traits::destruct(VariantData& self) noexcept
{
    if (self.vtable && self.vtable->Destructor)
        self.vtable->Destructor(&self);
    self = {}; // reset to the empty state so a repeated destruct() is harmless
}
inline VariantData traits::copy(const VariantData& self) noexcept
{
    if (self.vtable && self.vtable->Copy)
    {
        VariantData vd = self.vtable->Copy(&self);
        CARB_ASSERT(vd.vtable == self.vtable, "v-table %s does not match expected type %s!",
                    vd.vtable->typeName.c_str(), self.vtable->typeName.c_str());
        return vd;
    }
    // No Copy function: treated as trivially copyable, so the payload is duplicated bitwise.
    return self;
}
inline bool traits::equals(const VariantData& self, const VariantData& other) noexcept
{
    if (self.vtable && self.vtable->Equals)
        return self.vtable->Equals(&self, &other);
    // Fallback: bitwise comparison of the {vtable, data} pair itself.
    return std::memcmp(&self, &other, sizeof(VariantData)) == 0;
}
inline omni::string traits::toString(const VariantData& self) noexcept
{
    if (self.vtable)
    {
        if (self.vtable->ToString)
            return self.vtable->ToString(&self);
        else
            return omni::string(omni::formatted, "%p:%p", self.vtable, self.data); // "<vtable>:<data>"
    }
    return {}; // empty variant -> empty string
}
inline bool traits::convertTo(const VariantData& self, const VTable* newType, VariantData& out) noexcept
{
    if (self.vtable && newType)
    {
        // Same-type requests short-circuit to a plain copy.
        if (self.vtable == newType)
            return (out = traits::copy(self), true);
        if (self.vtable->ConvertTo)
        {
            bool b = self.vtable->ConvertTo(&self, newType, &out);
            CARB_ASSERT(!b || out.vtable == newType, "v-table %s doesn't match requested type %s!",
                        out.vtable->typeName.c_str(), newType->typeName.c_str());
            return b;
        }
    }
    // Empty source, null target type, or no ConvertTo function: conversion fails.
    return false;
}
inline size_t traits::hash(const VariantData& self) noexcept
{
    // Default hash is simply the payload's pointer value.
    return self.vtable && self.vtable->Hash ? self.vtable->Hash(&self) : size_t(self.data);
}
// Default-constructs the empty state (null vtable, null data).
inline Variant::Variant() noexcept : VariantData{}
{
}
inline Variant::~Variant() noexcept
{
    traits::destruct(data());
}
inline Variant::Variant(const Variant& other) noexcept
{
    data() = traits::copy(other);
}
inline Variant& Variant::operator=(const Variant& other) noexcept
{
    // Self-assignment guard: destructing first would otherwise invalidate `other`.
    if (this != &other)
    {
        traits::destruct(data());
        data() = traits::copy(other);
    }
    return *this;
}
// Move: start empty, then swap -- leaves `other` empty as documented.
inline Variant::Variant(Variant&& other) noexcept : VariantData{}
{
    traits::swap(data(), other.data());
}
// Move-assign by pure swap: `other` receives this variant's previous value, which is then
// released whenever `other` is destroyed. Safe for self-assignment.
inline Variant& Variant::operator=(Variant&& other) noexcept
{
    traits::swap(data(), other.data());
    return *this;
}
inline bool Variant::operator==(const Variant& other) const noexcept
{
    return traits::equals(data(), other.data());
}
inline bool Variant::operator!=(const Variant& other) const noexcept
{
    return !(*this == other);
}
// A variant holds a value exactly when it carries a v-table.
inline bool Variant::hasValue() const noexcept
{
    return data().vtable != nullptr;
}
inline omni::string Variant::toString() const noexcept
{
    return traits::toString(data());
}
inline size_t Variant::getHash() const noexcept
{
    return traits::hash(data());
}
//! Attempts to extract the held value as type @p T.
//!
//! Returns an engaged optional when the variant's registered type matches T exactly, or when the
//! value can be converted to T's registered type; returns an empty optional otherwise.
template <class T>
cpp::optional<T> Variant::getValue() const noexcept
{
    Translator<std::decay_t<T>> t{};
    // If the type matches exactly, we can interpret the data.
    if (data().vtable && t.type() == data().vtable->typeName)
        return t.value(data().data);
    // Otherwise attempt a conversion to T's registered type.
    VariantData temp;
    auto iface = getCachedInterface<IVariant>();
    CARB_ASSERT(iface, "Failed to acquire interface IVariant");
    if (traits::convertTo(data(), iface->getVTable(t.type()), temp))
    {
        // convertTo() succeeded, so we own `temp` and must destruct it once the value has been
        // extracted from it.
        CARB_SCOPE_EXIT
        {
            traits::destruct(temp);
        };
        return t.value(temp.data);
    }
    return {};
}
//! Extracts the held value as type @p T, or returns @p fallback if the value is absent or cannot
//! be converted.
template <class T>
T Variant::getValueOr(T&& fallback) const noexcept
{
    return getValue<T>().value_or(std::forward<T>(fallback));
}
//! Converts the held value to a Variant of type @p T.
//!
//! @returns a Variant holding the converted value, or an empty Variant when this variant is empty
//! or cannot be converted to T's registered type.
template <class T>
Variant Variant::convertTo() const noexcept
{
    Translator<std::decay_t<T>> t{};
    VariantData temp;
    auto iface = getCachedInterface<IVariant>();
    CARB_ASSERT(iface, "Failed to acquire interface IVariant");
    Variant result;
    if (traits::convertTo(data(), iface->getVTable(t.type()), temp))
    {
        // Adopt `temp` directly: `result` starts empty, so swapping transfers ownership of the
        // converted value and `result`'s destructor will eventually release it. (The previous
        // implementation copied `temp` into the return value via the Variant copy constructor and
        // never destructed `temp` itself, leaking one conversion result per successful call; cf.
        // getValue(), which destructs its `temp` with CARB_SCOPE_EXIT.)
        traits::swap(result.data(), temp);
    }
    return result;
}
//! Default constructor: an empty registrar that owns no type registration.
constexpr Registrar::Registrar() noexcept : m_type{}
{
}
//! Registers @p vtable with IVariant; on success this object owns the registration and will
//! unregister the type when destroyed or reset().
inline Registrar::Registrar(const VTable* vtable) noexcept
{
    CARB_ASSERT(vtable); // Null v-table not allowed.
    auto iface = getCachedInterface<IVariant>();
    CARB_ASSERT(iface, "Failed to acquire interface IVariant");
    // Only remember the type name if registration succeeded; an empty m_type means there is
    // nothing to unregister later.
    if (iface->registerType(vtable))
        m_type = vtable->typeName;
}
//! Destructor: unregisters the owned type, if any.
inline Registrar::~Registrar() noexcept
{
    reset();
}
//! Move constructor: takes over @p other's registration, leaving @p other empty.
inline Registrar::Registrar(Registrar&& other) noexcept : m_type(std::exchange(other.m_type, {}))
{
}
//! Move assignment: swaps registrations; the previously-owned type is unregistered when @p other
//! is destroyed. Swap-based, so self-assignment is safe.
inline Registrar& Registrar::operator=(Registrar&& other) noexcept
{
    std::swap(m_type, other.m_type);
    return *this;
}
//! Checks whether this registrar owns a registration.
inline bool Registrar::isEmpty() const noexcept
{
    return m_type.isEmpty();
}
//! Retrieves the registered type name, or an empty RString when isEmpty().
inline RString Registrar::getType() const noexcept
{
    return m_type;
}
//! Unregisters the owned type (if any) and leaves this registrar empty.
inline void Registrar::reset() noexcept
{
    // Clear m_type before unregistering, so this registrar is already empty while the call runs.
    auto type = std::exchange(m_type, {});
    if (!type.isEmpty())
    {
        // Unlike the constructor, a missing interface is tolerated here: during shutdown the
        // plugin may already be unloaded, in which case there is nothing left to unregister.
        auto iface = getCachedInterface<IVariant>();
        if (iface)
        {
            iface->unregisterType(type);
        }
    }
}
} // namespace variant
} // namespace carb
|
omniverse-code/kit/include/carb/variant/VariantBindingsPython.h | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "IVariant.h"
#include "VariantUtils.h"
#include "../extras/EnvironmentVariable.h"
namespace carb
{
namespace variant
{
//! V-table implementation backing `py::object` values stored in a carb.variant Variant.
//!
//! The `py::object` (a single PyObject*) is stored in-place inside VariantData::data via placement
//! new (see the Translator<py::object> specialization below). Every operation that touches the
//! interpreter acquires the GIL, and all pybind11 exceptions are swallowed because the v-table
//! entries are `noexcept`.
struct PyObjectVTable
{
    // The py::object must fit exactly in the pointer-sized VariantData::data slot.
    static_assert(sizeof(py::object) == sizeof(void*), "Bad assumption");
    //! Releases the stored py::object (decrements its refcount under the GIL).
    static void Destructor(VariantData* self) noexcept
    {
        try
        {
            py::object* p = reinterpret_cast<py::object*>(&self->data);
            py::gil_scoped_acquire gil;
            p->~object();
        }
        catch (...)
        {
        }
    }
    //! Copies the variant: the new VariantData references the same PyObject with an added
    //! refcount. On failure the copy carries the v-table but a null data slot.
    static VariantData Copy(const VariantData* self) noexcept
    {
        const py::object* p = reinterpret_cast<const py::object*>(&self->data);
        VariantData d{ self->vtable, nullptr };
        try
        {
            py::gil_scoped_acquire gil;
            new (&d.data) py::object(*p);
        }
        catch (...)
        {
        }
        return d;
    }
    //! Compares two variants. For two python variants this is *identity* (python `is`), not `==`
    //! equality. When `other` is not a python variant, `self` is converted to `other`'s type
    //! first and the converted values are compared.
    static bool Equals(const VariantData* self, const VariantData* other) noexcept
    {
        if (self->vtable == other->vtable)
        {
            CARB_ASSERT(self->vtable == get());
            const py::object* pself = reinterpret_cast<const py::object*>(&self->data);
            const py::object* pother = reinterpret_cast<const py::object*>(&other->data);
            try
            {
                py::gil_scoped_acquire gil;
                return pself->is(*pother);
            }
            catch (...)
            {
                return false;
            }
        }
        // Try to convert us into the other type since it's not a python type
        VariantData temp;
        if (traits::convertTo(*self, other->vtable, temp))
        {
            bool b = traits::equals(temp, *other);
            traits::destruct(temp);
            return b;
        }
        return false;
    }
    //! Renders the python object via `str()`; falls back to a pointer rendering when the cast
    //! fails (e.g. the object is not string-convertible).
    static omni::string ToString(const VariantData* self) noexcept
    {
        const py::object* pself = reinterpret_cast<const py::object*>(&self->data);
        try
        {
            py::gil_scoped_acquire gil;
            auto str = pself->cast<std::string>();
            return omni::string(str.begin(), str.end());
        }
        catch (...)
        {
            return omni::string(omni::formatted, "py::object:%p", pself->ptr());
        }
    }
    //! Helper: converts the python object to C++ type @p T and boxes it through the matching
    //! Translator, reporting success.
    template <class T>
    static bool Convert(const py::object& val, void*& out) noexcept
    {
        Translator<T> t{};
        try
        {
            // cast() may invoke arbitrary python conversion code, so the GIL must be held (the
            // eString converter below already did this; the template path previously did not).
            py::gil_scoped_acquire gil;
            out = t.data(val.cast<T>());
            return true;
        }
        catch (...)
        {
        }
        return false;
    }
    //! Attempts to convert the python object to one of the built-in variant types (bool, the
    //! integer widths, float, double, or omni::string) when @p newtype names one of them.
    static bool ConvertTo(const VariantData* self, const VTable* newtype, VariantData* target) noexcept
    {
        const py::object* pself = reinterpret_cast<const py::object*>(&self->data);
        try
        {
            // Table of supported target types -> conversion helpers.
            static std::unordered_map<RString, bool (*)(const py::object&, void*&)> converters{
                { eBool, Convert<bool> },
                { eUInt8, Convert<uint8_t> },
                { eUInt16, Convert<uint16_t> },
                { eUInt32, Convert<uint32_t> },
                { eUInt64, Convert<uint64_t> },
                { eInt8, Convert<int8_t> },
                { eInt16, Convert<int16_t> },
                { eInt32, Convert<int32_t> },
                { eInt64, Convert<int64_t> },
                { eFloat, Convert<float> },
                { eDouble, Convert<double> },
                { eString,
                  [](const py::object& val, void*& out) {
                      Translator<omni::string> t{};
                      try
                      {
                          py::gil_scoped_acquire gil;
                          auto str = val.cast<std::string>();
                          out = t.data(omni::string(str.begin(), str.end()));
                          return true;
                      }
                      catch (...)
                      {
                      }
                      return false;
                  } },
            };
            auto iter = converters.find(newtype->typeName);
            if (iter != converters.end() && iter->second(*pself, target->data))
            {
                auto iface = getCachedInterface<IVariant>();
                CARB_ASSERT(iface, "Failed to acquire interface IVariant");
                target->vtable = iface->getVTable(iter->first);
                return true;
            }
            return false;
        }
        catch (...)
        {
        }
        return false;
    }
    //! Hashes via python's hash() protocol; for unhashable objects falls back to the stored
    //! PyObject pointer value so that copies of the same variant (see Copy) hash identically,
    //! matching the identity semantics of Equals.
    static size_t Hash(const VariantData* self) noexcept
    {
        const py::object* pself = reinterpret_cast<const py::object*>(&self->data);
        try
        {
            py::gil_scoped_acquire gil;
            return py::hash(*pself);
        }
        catch (...)
        {
            // Hash the stored pointer *value*, not the address of the VariantData's data slot:
            // the slot address differs between copies of the same variant and would break hash
            // consistency for equal values.
            return size_t(self->data);
        }
    }
    //! Returns the process-wide v-table singleton for py::object variants.
    static const VTable* get() noexcept
    {
        static const VTable v{
            sizeof(VTable), RString("py::object"), Destructor, Copy, Equals, ToString, ConvertTo, Hash
        };
        return &v;
    }
};
//! Translator specialization that boxes/unboxes `py::object` for the variant system.
//!
//! The py::object (one PyObject*) is stored directly inside the pointer-sized data slot rather
//! than heap-allocated; the static_assert guards that size assumption.
template <>
struct Translator<py::object, void>
{
    static_assert(sizeof(py::object) == sizeof(void*), "Bad assumption");
    //! The registered type name used for python objects.
    RString type() const noexcept
    {
        static const RString t("py::object");
        return t;
    }
    //! Boxes a py::object: moves it into the pointer-sized slot via placement new (the move
    //! transfers the existing reference, so no refcount change occurs here).
    void* data(py::object o) const noexcept
    {
        void* d{};
        py::gil_scoped_acquire gil;
        new (&d) py::object(std::move(o));
        return d;
    }
    //! Unboxes the slot back into a py::object copy (refcount incremented under the GIL).
    py::object value(void* d) const noexcept
    {
        py::object* p = reinterpret_cast<py::object*>(&d);
        py::gil_scoped_acquire gil;
        return *p;
    }
};
//! Defines the carb.variant python bindings on module @p m.
//!
//! Ensures *carb.variant.plugin* is loaded (searching the CARB_APP_PATH directory when the
//! interface is not yet available), registers the py::object v-table so python values can
//! round-trip through variants, and exposes the IVariant interface class to python.
inline void definePythonModule(py::module& m)
{
    // Try to load the variant module
    IVariant* v = getFramework()->tryAcquireInterface<IVariant>();
    if (!v)
    {
        // Not loaded yet: explicitly load carb.variant.plugin from the application directory.
        PluginLoadingDesc desc = PluginLoadingDesc::getDefault();
        extras::EnvironmentVariable ev("CARB_APP_PATH");
        std::string str = ev.getValue().value_or(std::string{});
        const char* p = str.c_str();
        desc.searchPaths = &p;
        desc.searchPathCount = 1;
        const char* loaded[] = { "carb.variant.plugin" };
        desc.loadedFileWildcards = loaded;
        desc.loadedFileWildcardCount = CARB_COUNTOF(loaded);
        getFramework()->loadPlugins(desc);
        v = getFramework()->tryAcquireInterface<IVariant>();
        CARB_ASSERT(v);
    }
    // Make sure our python handler is registered
    if (v)
        v->registerType(PyObjectVTable::get());
    defineInterfaceClass<IVariant>(m, "IVariant", "acquire_variant_interface");
}
} // namespace variant
} // namespace carb
|
omniverse-code/kit/include/carb/variant/VariantTypes.inl | // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Type implementations for *carb.variant.plugin*
#pragma once
namespace carb
{
namespace variant
{
//! Removes all elements by resizing the array to zero.
inline void VariantArray::clear() noexcept
{
    resize(0);
}
//! Checks whether the array holds no elements.
inline bool VariantArray::empty() const noexcept
{
    return size() == 0;
}
//! Bounds-checked element access; throws std::out_of_range for an invalid index (or fatally
//! asserts when exceptions are disabled).
inline Variant& VariantArray::at(size_t index)
{
    if (index >= size())
    {
#if CARB_EXCEPTIONS_ENABLED
        throw std::out_of_range("out-of-range index specified");
#else
        // Without exception support the only remaining conforming behavior is to terminate.
        CARB_FATAL_UNLESS(0, "out-of-range index specified");
#endif
    }
    return *(data() + index);
}
//! @copydoc VariantArray::at()
inline const Variant& VariantArray::at(size_t index) const
{
    if (index >= size())
    {
#if CARB_EXCEPTIONS_ENABLED
        throw std::out_of_range("out-of-range index specified");
#else
        // Without exception support the only remaining conforming behavior is to terminate.
        CARB_FATAL_UNLESS(0, "out-of-range index specified");
#endif
    }
    return *(data() + index);
}
//! Unchecked element access; an @p index outside [0, size()) is undefined behavior.
inline Variant& VariantArray::operator[](size_t index) noexcept
{
    return *(data() + index);
}
//! @copydoc VariantArray::operator[]()
inline const Variant& VariantArray::operator[](size_t index) const noexcept
{
    return *(data() + index);
}
//! First element; undefined behavior when empty().
inline Variant& VariantArray::front() noexcept
{
    return *data();
}
//! @copydoc VariantArray::front()
inline const Variant& VariantArray::front() const noexcept
{
    return *data();
}
//! Last element; undefined behavior when empty().
inline Variant& VariantArray::back() noexcept
{
    return *(data() + size() - 1);
}
//! @copydoc VariantArray::back()
inline const Variant& VariantArray::back() const noexcept
{
    return *(data() + size() - 1);
}
//! Iterator to the first element.
inline auto VariantArray::begin() noexcept -> iterator
{
    return iterator(data());
}
//! Past-the-end iterator.
inline auto VariantArray::end() noexcept -> iterator
{
    return iterator(data() + size());
}
//! @copydoc VariantArray::begin()
inline auto VariantArray::begin() const noexcept -> const_iterator
{
    return const_iterator(data());
}
//! @copydoc VariantArray::end()
inline auto VariantArray::end() const noexcept -> const_iterator
{
    return const_iterator(data() + size());
}
//! Returns a const_iterator to the first entry (equal to cend() when the map is empty).
inline auto VariantMap::cbegin() const noexcept -> const_iterator
{
    return { this, internalBegin() };
}
//! @copydoc VariantMap::cbegin()
inline auto VariantMap::begin() const noexcept -> const_iterator
{
    return cbegin();
}
//! Returns an iterator to the first entry (equal to end() when the map is empty).
inline auto VariantMap::begin() noexcept -> iterator
{
    return { this, internalBegin() };
}
//! Returns the past-the-end const_iterator; represented by a null position.
inline auto VariantMap::cend() const noexcept -> const_iterator
{
    return { this, nullptr };
}
//! @copydoc VariantMap::cend()
inline auto VariantMap::end() const noexcept -> const_iterator
{
    return cend();
}
//! Returns the past-the-end iterator; represented by a null position.
inline auto VariantMap::end() noexcept -> iterator
{
    return { this, nullptr };
}
//! Checks whether the map holds no entries.
inline bool VariantMap::empty() const noexcept
{
    return size() == 0;
}
//! Inserts a key/value pair.
//!
//! internalInsert() either locates the existing entry (reporting `second == false`) or creates a
//! new one with a default-constructed value; only a freshly-created entry receives @p value.
inline auto VariantMap::insert(const Variant& key, Variant value) -> std::pair<iterator, bool>
{
    std::pair<iterator, bool> result;
    result.first = iterator(this, internalInsert(key, result.second));
    // Only assign the value when a new entry was created; an existing mapping is left untouched.
    if (result.second)
        result.first->second = std::move(value);
    return result;
}
//! Erases the entry matching @p key, if present.
//! @returns the number of entries removed (0 or 1, since keys are unique).
inline size_t VariantMap::erase(const Variant& key) noexcept
{
    if (auto entry = internalFind(key))
    {
        internalErase(entry);
        return 1;
    }
    return 0;
}
//! Erases the entry at @p pos and returns an iterator to the next entry in iteration order.
//! @p pos must belong to this map and be dereferenceable.
inline auto VariantMap::erase(const_iterator pos) noexcept -> iterator
{
    CARB_ASSERT(pos.owner == this && pos.where);
    // NOTE(review): the successor is computed *after* erasing here, whereas the find_iterator
    // overload below captures it *before* erasing. This is only safe if internalErase() leaves
    // the erased slot traversable -- confirm against the plugin's implementation.
    internalErase(pos.where);
    return iterator{ this, iterNext(pos.where) };
}
//! Erases the entry at @p pos and returns a find_iterator to the next entry matching the same
//! key (keys are unique, so this is typically the end of the find range).
inline auto VariantMap::erase(const_find_iterator pos) noexcept -> find_iterator
{
    CARB_ASSERT(pos.owner == this && pos.where);
    // Capture the successor before erasing so the walk does not rely on the erased entry.
    auto next = findNext(pos.where);
    internalErase(pos.where);
    return find_iterator{ this, next };
}
//! Looks up @p key, producing an iterator over matching entries (end-of-range when absent).
inline auto VariantMap::find(const Variant& key) noexcept -> find_iterator
{
    return find_iterator{ this, internalFind(key) };
}
//! @copydoc VariantMap::find()
inline auto VariantMap::find(const Variant& key) const noexcept -> const_find_iterator
{
    return const_find_iterator{ this, internalFind(key) };
}
//! Checks whether @p key is present in the map.
inline bool VariantMap::contains(const Variant& key) const noexcept
{
    return internalFind(key) != nullptr;
}
//! Counts entries matching @p key; keys are unique, so the result is 0 or 1.
inline size_t VariantMap::count(const Variant& key) const noexcept
{
    return contains(key) ? 1 : 0;
}
#if CARB_EXCEPTIONS_ENABLED
//! Bounds-checked lookup; throws std::out_of_range when @p key is not present.
inline auto VariantMap::at(const Variant& key) -> mapped_type&
{
    auto vt = internalFind(key);
    if (!vt)
        throw std::out_of_range("key not found");
    return vt->second;
}
//! @copydoc VariantMap::at()
inline auto VariantMap::at(const Variant& key) const -> const mapped_type&
{
    auto vt = internalFind(key);
    if (!vt)
        throw std::out_of_range("key not found");
    return vt->second;
}
#endif
//! Finds or default-inserts the entry for @p key and returns a reference to its mapped value.
inline auto VariantMap::operator[](const Variant& key) -> mapped_type&
{
    // Whether the entry already existed does not matter here: a freshly-created entry starts
    // with a default-constructed (empty) Variant value, matching std::map::operator[] semantics.
    bool success;
    auto vt = internalInsert(key, success);
    return vt->second;
}
} // namespace variant
} // namespace carb
|
omniverse-code/kit/include/carb/variant/VariantTypes.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Type definitions for *carb.variant.plugin*
#pragma once
#include "../RString.h"
#include "../IObject.h"
#include "../../omni/String.h"
#include "../../omni/detail/PointerIterator.h"
namespace carb
{
namespace variant
{
//! @defgroup Types that are known (by default) to *carb.variant.plugin*.
//! @{
constexpr RString eNull{ carb::eRString::RS_null }; //!< Represents \c nullptr
constexpr RString eBool{ carb::eRString::RS_bool }; //!< Represents \c bool
constexpr RString eUInt8{ carb::eRString::RS_uint8 }; //!< Represents \c uint8_t or `unsigned char`
constexpr RString eUInt16{ carb::eRString::RS_uint16 }; //!< Represents \c uint16_t or `unsigned short`
constexpr RString eUInt32{ carb::eRString::RS_uint32 }; //!< Represents \c uint32_t or `unsigned int`
constexpr RString eUInt64{ carb::eRString::RS_uint64 }; //!< Represents \c uint64_t or `unsigned long long`
constexpr RString eInt8{ carb::eRString::RS_int8 }; //!< Represents \c int8_t or `signed char`
constexpr RString eInt16{ carb::eRString::RS_int16 }; //!< Represents \c int16_t or `short`
constexpr RString eInt32{ carb::eRString::RS_int32 }; //!< Represents \c int32_t or `int`
constexpr RString eInt64{ carb::eRString::RS_int64 }; //!< Represents \c int64_t or `long long`
constexpr RString eFloat{ carb::eRString::RS_float }; //!< Represents \c float
constexpr RString eDouble{ carb::eRString::RS_double }; //!< Represents \c double
constexpr RString eString{ carb::eRString::RS_string }; //!< Represents \c omni::string
constexpr RString eCharPtr{ carb::eRString::RS_charptr }; //!< Represents `char*` or `const char*`
constexpr RString eDictionary{ carb::eRString::RS_dictionary }; //!< Represents `dictionary::Item`.
constexpr RString eVariantPair{ carb::eRString::RS_variant_pair }; //!< Represents `std::pair<Variant, Variant>`
constexpr RString eVariantArray{ carb::eRString::RS_variant_array }; //!< Represents `VariantArray*`.
constexpr RString eVariantMap{ carb::eRString::RS_variant_map }; //!< Represents `VariantMap*`.
constexpr RString eRString{ carb::eRString::RS_RString }; //!< Represents \c RString
constexpr RString eRStringU{ carb::eRString::RS_RStringU }; //!< Represents \c RStringU
constexpr RString eRStringKey{ carb::eRString::RS_RStringKey }; //!< Represents \c RStringKey
constexpr RString eRStringUKey{ carb::eRString::RS_RStringUKey }; //!< Represents \c RStringUKey
//! @}
struct VTable;
class Variant;
/**
* A standard-layout @rstref{ABI-safe <abi-compatibility>} struct for communicating variant data. This struct is filled
* out by \ref Translator specializations.
*
* This class should generally not be used directly except by \ref Translator specializations. Instead use the
* \ref Variant wrapper-class.
*
* @see \ref Translator, \ref Variant
*/
struct VariantData
{
//! The v-table for this variant. Only empty variants are allowed a \c nullptr v-table. The v-table is used to
//! provide functions for variant behavior and can be used as a type-identifier of sorts.
const VTable* vtable;
//! A generic pointer whose interpretation is based on the v-table and the \ref Translator specialization that
//! created it.
void* data;
};
CARB_ASSERT_INTEROP_SAFE(VariantData);
//! A v-table definition for a variant type. Each registered type has a unique v-table pointer that is retrievable via
//! IVariant::getVTable(). Each entry in the v-table is a function with a default behavior if \c nullptr.
//! @note This class is applicable only to users of *carb.variant.plugin* that author a custom \ref Translator class.
//! @warning Functions in the v-table should not be called directly; the Variant wrapper class calls them through
//! various \ref traits functions.
//! @warning All functions require that `self->vtable->[function]` is equal to the function called.
struct VTable
{
/**
* A member used as version control. This member should be set to `sizeof(VTable)` for the version of the
* v-table that a module is built against.
*/
uint32_t sizeOf;
/**
* Indicates the type name of the v-table. Once registered with \ref IVariant::registerType(), this name can be used
* to look up the type with \ref IVariant::getVTable().
*
* @warning This must be a unique name within the running process and may not match any of the built-in type names.
*/
RString typeName;
/**
* Used to destroy the VariantData::data member. A \c nullptr destructor function indicates that no destruction
* needs to take place.
*
* @param self The VariantData to destroy. Can assume that `self->vtable->Destructor` is the same as the function
* called.
*/
void (*Destructor)(VariantData* self) noexcept;
/**
* Called to create a functional copy of the given VariantData. A \c nullptr function indicates that VariantData can
* be trivially copied.
* @note The resulting VariantData need not have the same v-table as \p self.
* @param self The VariantData to copy. Can assume that `self->vtable->Copy` is the same as the function called.
* @returns A VariantData that is a functional copy of \p self. \ref traits::equals() should be \c true for \c *self
* and the returned VariantData.
*/
VariantData (*Copy)(const VariantData* self) noexcept;
/**
* Called to test equality of \c *self with (possibly different type) \c *other. A \c nullptr function indicates
* that a trivial comparison of the \ref VariantData is performed (i.e. \c memcmp).
* @warning Generally speaking, order should not matter: assuming that \c lhs and \c rhs are both
* `const VariantData*` with non-null \c Equals, it should hold that `lhs->vtable->Equals(lhs, rhs)` should always
* equal `rhs->vtable->Equals(rhs, lhs)` regardless of their respective v-tables.
* @param self The VariantData performing the compare. Can assume that `self->vtable->Equals` is the same as the
* function called.
* @param other The same or a different VariantData to compare with \p self. May have a different \c vtable than
* \p self.
* @returns \c true if the types are equal; \c false otherwise.
*/
bool (*Equals)(const VariantData* self, const VariantData* other) noexcept;
/**
* Called to render the \ref VariantData as a string. A \c nullptr function indicates that a string is produced that
* contains "<vtable pointer>:<data pointer>".
* @param self The VariantData to render. Can assume that `self->vtable->ToString` is the same as the function
* called.
* @returns A type-dependent printable string representing the type, useful for debugging.
*/
omni::string (*ToString)(const VariantData* self) noexcept;
/**
* Called to attempt to convert \p self to a different type. A \c nullptr function is the same as returning false.
* @warning If \c false is returned, \p out is in an undefined state. If and only if \c true is returned,
* \ref traits::destruct() must be called at some later point on \c *out.
* @note Generally speaking, \ref Equals() and \ref ConvertTo() should understand the same types.
* @param self The VariantData performing the conversion. Can assume that `self->vtable->ConvertTo` is this same as
* the function called.
* @param newtype A v-table representing the type to attempt to convert to. If the function recognizes the given
* v-table (which should not be the same as `self->vtable`) and can convert to that type then the function should
* write to \p out and return \c true. If the v-table is not recognized, the function must return \c false.
* @param out If \c true is returned from the function, this must be a valid \ref VariantData and must be later
* destroyed with \ref traits::destruct(). If \c false is returned then the state of \p out is undefined. There is
* no requirement that `out->vtable` matches \p newtype if \c true is returned; it must merely be valid.
* @returns \c true if and only if \p out contains a valid converted representation of \p self; \c false otherwise.
*/
bool (*ConvertTo)(const VariantData* self, const VTable* newtype, VariantData* out) noexcept;
/**
* Computes a hash of \p self. A \c nullptr function casts `self->data` to a `size_t` for use as a hash.
* @param self The VariantData to hash. Can assume that `self->vtable->Hash` is the same as the function called.
* @returns A value to use as a hash identifying \c *self.
*/
size_t (*Hash)(const VariantData* self) noexcept;
// Note to maintainers: adding new functions here does not necessarily require a version change for IVariant. Add a
// `struct traits` function that performs a default behavior if the function is `nullptr` or if the `sizeOf` is less
// than the offset of your new member. All calls to the v-table function should happen in the new `traits` function.
};
CARB_ASSERT_INTEROP_SAFE(VTable);
/**
* An array-of-variants type that can itself be contained in a \ref Variant.
*
* Similar in many respects to `std::vector`, but reference-counted and implemented within *carb.variant.plugin*.
*
* Created via \ref IVariant::createArray().
*/
class VariantArray : public IObject
{
public:
//! A type conforming to RandomAccessIterator.
using iterator = omni::detail::PointerIterator<Variant*, VariantArray>;
//! A type conforming to RandomAccessIterator.
using const_iterator = omni::detail::PointerIterator<const Variant*, VariantArray>;
/**
* Provides direct access to the underlying array.
* @returns the beginning of the underlying array.
*/
virtual Variant* data() noexcept = 0;
//! @copydoc data()
virtual const Variant* data() const noexcept = 0;
/**
* Returns the number of variants contained.
* @returns the number of variants contained.
*/
virtual size_t size() const noexcept = 0;
/**
* Adds a variant to the end of the array.
* @param v The \ref Variant to add to the array.
*/
virtual void push_back(Variant v) noexcept = 0;
/**
* Attempts to insert a variant at the given offset.
*
* The given @p offset must be in `[0, size()]`, otherwise \c false is returned.
*
* @warning This is an O(n) operation.
*
* @param offset The 0-based offset indicating where to insert. The elements at that position (and all subsequent
* elements) will be pushed back to make room for @p v.
* @param v The \ref Variant to insert.
* @returns \c true if the given variant was inserted; \c false otherwise.
*/
virtual bool insert(size_t offset, Variant v) noexcept = 0;
/**
* Attempts to erase the variant at the given offset.
*
* The given @p offset must be in `[0, size())`, otherwise \c false is returned.
*
* @warning This is an O(n) operation.
*
* @param offset The 0-based offset indicating which element to erase. The elements following that position will be
* moved forward to fill in the gap removed at @p offset.
* @returns \c true if the variant was erased; \c false otherwise.
*/
virtual bool erase(size_t offset) noexcept = 0;
/**
* Pops the last element from the array.
*
* @returns \c true if the element was popped; \c false if the array is empty.
*/
virtual bool pop_back() noexcept = 0;
/**
* Clears the existing array elements and assigns new elements.
*
* @param p The beginning of the new array elements.
* @param count The number of array elements in the raw array @p p.
*/
virtual void assign(const Variant* p, size_t count) noexcept = 0;
/**
* Reserves space for elements.
*
* @param count The number of elements to reserve space for, exactly. If this amount is less than the current space,
* the request is ignored.
*/
virtual void reserve(size_t count) noexcept = 0;
/**
* Changes the number of elements stored.
*
* @param count The number of elements to store in the array. Elements at the end of the array are added (as via
* \ref Variant default construction) or removed so that following this call \ref size() matches @p count. Note that
* resizing heuristics may be applied, so \ref capacity() following this call may be greater than @p count.
*/
virtual void resize(size_t count) noexcept = 0;
/**
* Returns the number of elements that can be stored with the current allocated space.
* @returns The number of elements that can be stored with the current allocated space.
*/
virtual size_t capacity() const noexcept = 0;
/**
* Erases all elements from the array and leaves the array empty.
*/
void clear() noexcept;
/**
* Checks whether the array is empty.
*
* @returns \c true if the array is empty (contains no elements); \c false otherwise.
*/
bool empty() const noexcept;
/**
* Accesses an element with bounds checking.
* @throws std::out_of_range if @p index is outside of `[0, size())`.
* @param index The index of the array to access.
* @returns a reference to the element at the requested @p index.
*/
Variant& at(size_t index);
//! @copydoc at()
const Variant& at(size_t index) const;
/**
* Accesses an element without bounds checking.
* @warning Providing an @p index value outside of `[0, size())` is undefined behavior.
* @param index The index of the array to access.
* @returns a reference to the element at the requested @p index.
*/
Variant& operator[](size_t index) noexcept;
//! @copydoc operator[]()
const Variant& operator[](size_t index) const noexcept;
/**
* Accesses the element at the front of the array.
* @warning Undefined behavior if \ref empty().
* @returns a reference to the element at the front of the array.
*/
Variant& front() noexcept;
//! @copydoc front()
const Variant& front() const noexcept;
/**
* Accesses the element at the back of the array.
* @warning Undefined behavior if \ref empty().
* @returns a reference to the element at the back of the array.
*/
Variant& back() noexcept;
//! @copydoc back()
const Variant& back() const noexcept;
/**
* Provides iteration and ranged-for support; returns an iterator to the first element.
*
* @warning Iterators follow invalidation rules for `std::vector`.
* @returns An iterator to the first element.
*/
iterator begin() noexcept;
/**
* Provides iteration and ranged-for support; returns an iterator representing the iteration end.
*
* @warning Iterators follow invalidation rules for `std::vector`.
* @returns An iterator representing the iteration end.
*/
iterator end() noexcept;
//! @copydoc begin()
const_iterator begin() const noexcept;
//! @copydoc end()
const_iterator end() const noexcept;
};
//! Helper definition.
using VariantArrayPtr = ObjectPtr<VariantArray>;
struct KeyValuePair;
/**
* An associative array (i.e. "map") of key/value Variant pairs that can itself be contained in a \ref Variant.
*
* Similar in many respects to `std::unordered_map`, but reference-counted and implemented within *carb.variant.plugin*.
*
* @note This is an *unordered* container, meaning that iterating over all values may not be in the same order as they
* were inserted. This is a *unique* container, meaning that inserting a key that already exists in the container will
* replace the previous key/value pair.
*
* Created via \ref IVariant::createMap().
*/
class VariantMap : public IObject
{
public:
//! The key type
using key_type = Variant;
//! The mapped value type
using mapped_type = Variant;
//! The value type
using value_type = KeyValuePair;
//! Unsigned integer type
using size_type = size_t;
//! Signed integer type
using difference_type = ptrdiff_t;
//! Reference type
using reference = value_type&;
//! Const reference type
using const_reference = const value_type&;
//! Pointer type
using pointer = value_type*;
//! Const pointer type
using const_pointer = const value_type*;
// clang-format off
#ifndef DOXYGEN_SHOULD_SKIP_THIS
private:
class iter_base
{
public:
constexpr iter_base() noexcept = default;
bool operator == (const iter_base& other) const noexcept { CARB_ASSERT(owner == other.owner); return where == other.where; }
bool operator != (const iter_base& other) const noexcept { CARB_ASSERT(owner == other.owner); return where != other.where; }
protected:
constexpr iter_base(const VariantMap* owner_, pointer where_) noexcept : owner(owner_), where(where_) {}
const VariantMap* owner{ nullptr };
pointer where{ nullptr };
};
public:
class const_find_iterator : public iter_base
{
using Base = iter_base;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = VariantMap::value_type;
using difference_type = VariantMap::difference_type;
using pointer = VariantMap::const_pointer;
using reference = VariantMap::const_reference;
constexpr const_find_iterator() noexcept = default;
reference operator * () const noexcept { CARB_ASSERT(this->where); return *this->where; }
pointer operator -> () const noexcept { CARB_ASSERT(this->where); return this->where; }
const_find_iterator& operator ++ () noexcept { incr(); return *this; }
const_find_iterator operator ++ (int) noexcept { const_find_iterator i{ *this }; incr(); return i; }
protected:
friend class VariantMap;
constexpr const_find_iterator(const VariantMap* owner_, value_type* where_) noexcept : Base(owner_, where_) {}
void incr() { CARB_ASSERT(this->owner && this->where); this->where = this->owner->findNext(this->where); }
};
class find_iterator : public const_find_iterator
{
using Base = const_find_iterator;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = VariantMap::value_type;
using difference_type = VariantMap::difference_type;
using pointer = VariantMap::pointer;
using reference = VariantMap::reference;
constexpr find_iterator() noexcept = default;
reference operator * () const noexcept { CARB_ASSERT(this->where); return *this->where; }
pointer operator -> () const noexcept { CARB_ASSERT(this->where); return this->where; }
find_iterator& operator ++ () noexcept { this->incr(); return *this; }
find_iterator operator ++ (int) noexcept { find_iterator i{ *this }; this->incr(); return i; }
protected:
friend class VariantMap;
constexpr find_iterator(const VariantMap* owner_, value_type* where_) noexcept : Base(owner_, where_) {}
};
class const_iterator : public iter_base
{
using Base = iter_base;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = VariantMap::value_type;
using difference_type = VariantMap::difference_type;
using pointer = VariantMap::const_pointer;
using reference = VariantMap::const_reference;
constexpr const_iterator() noexcept = default;
reference operator * () const noexcept { CARB_ASSERT(this->where); return *this->where; }
pointer operator -> () const noexcept { CARB_ASSERT(this->where); return this->where; }
const_iterator& operator ++ () noexcept { incr(); return *this;}
const_iterator operator ++ (int) noexcept { const_iterator i{ *this }; incr(); return i; }
protected:
friend class VariantMap;
constexpr const_iterator(const VariantMap* owner_, value_type* where_) noexcept : Base{ owner_, where_ } {}
void incr() { CARB_ASSERT(this->owner && this->where); this->where = this->owner->iterNext(this->where); }
};
class iterator : public const_iterator
{
using Base = const_iterator;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = VariantMap::value_type;
using difference_type = VariantMap::difference_type;
using pointer = VariantMap::pointer;
using reference = VariantMap::reference;
constexpr iterator() noexcept = default;
reference operator * () const noexcept { CARB_ASSERT(this->where); return *this->where; }
pointer operator -> () const noexcept { CARB_ASSERT(this->where); return this->where; }
iterator& operator ++ () noexcept { this->incr(); return *this; }
iterator operator ++ (int) noexcept { iterator i{ *this }; this->incr(); return i; }
protected:
friend class VariantMap;
constexpr iterator(const VariantMap* owner_, value_type* where_) noexcept : Base(owner_, where_) {}
};
#endif
// clang-format on
/**
 * Creates an iterator to the first element in the container.
 *
 * @returns a `const_iterator` to the first element in the container. If the container is empty() the iterator
 * will be equal to \ref cend().
 */
const_iterator cbegin() const noexcept;
/**
 * Creates an iterator to the first element in the container.
 *
 * @returns a `const_iterator` to the first element in the container. If the container is empty() the iterator
 * will be equal to \ref end().
 */
const_iterator begin() const noexcept;
/**
 * Creates an iterator to the first element in the container.
 *
 * @returns an `iterator` to the first element in the container. If the container is empty() the iterator
 * will be equal to \ref end().
 */
iterator begin() noexcept;
/**
 * Creates an iterator to the past-the-end element in the container.
 *
 * @returns a `const_iterator` to the past-the-end element in the container. This iterator is a placeholder;
 * attempting to access it results in undefined behavior.
 */
const_iterator cend() const noexcept;
/**
 * Creates an iterator to the past-the-end element in the container.
 *
 * @returns a `const_iterator` to the past-the-end element in the container. This iterator is a placeholder;
 * attempting to access it results in undefined behavior.
 */
const_iterator end() const noexcept;
/**
 * Creates an iterator to the past-the-end element in the container.
 *
 * @returns an `iterator` to the past-the-end element in the container. This iterator is a placeholder;
 * attempting to access it results in undefined behavior.
 */
iterator end() noexcept;
/**
 * Checks if the container is empty.
 * @returns `true` if the container is empty; `false` otherwise.
 */
bool empty() const noexcept;
/**
 * Returns the number of keys contained.
 * @returns the number of keys contained.
 */
virtual size_t size() const noexcept = 0;
/**
 * Attempts to insert a new element into the container.
 *
 * If insertion is successful, all iterators, references and pointers are invalidated.
 *
 * \warning Variant comparison rules are taken into account. For instance, since Variant(bool) is considered equal
 * with Variant(int) for false/0 and true/1, these values would conflict.
 *
 * @param key The key to insert into the map.
 * @param value The value to associate with \p key. If the key already exists in the container, this value is not
 * used.
 * @returns A `pair` consisting of an `iterator` to the inserted element (or the existing element that prevented the
 * insertion) and a `bool` that will be `true` if insertion took place or `false` if insertion did *not* take place.
 */
std::pair<iterator, bool> insert(const Variant& key, Variant value);
/**
 * Erases a key from the map.
 * @param key The key value to erase.
 * @returns The number of entries removed from the map. This will be `0` if the key was not found or `1` if the key
 * was found and removed.
 */
size_t erase(const Variant& key) noexcept;
/**
 * Removes the given element.
 *
 * References, pointers and iterators to the erased element are invalidated. All other iterators, pointers and
 * references remain valid.
 *
 * @param pos The `const_iterator` to the element to remove. This iterator must be valid and dereferenceable.
 * @returns the iterator immediately following \p pos.
 */
iterator erase(const_iterator pos) noexcept;
/**
 * Removes the given element.
 *
 * References, pointers and iterators to the erased element are invalidated. All other iterators, pointers and
 * references remain valid.
 *
 * @param pos The `const_find_iterator` to the element to remove. This iterator must be valid and dereferenceable.
 * @returns the iterator immediately following \p pos.
 */
find_iterator erase(const_find_iterator pos) noexcept;
/**
 * Finds the first element with the specified key.
 *
 * \note `find_iterator` objects returned from this function will only iterate through elements with the same key;
 * they cannot be used to iterate through the entire container.
 *
 * @param key The key to search for.
 * @returns a `find_iterator` to the first element matching \p key, or \ref end() if no element was found matching
 * \p key.
 */
find_iterator find(const Variant& key) noexcept;
/**
 * Finds the first element with the specified key.
 *
 * \note `const_find_iterator` objects returned from this function will only iterate through elements with the same
 * key; they cannot be used to iterate through the entire container.
 *
 * @param key The key to search for.
 * @returns a `const_find_iterator` to the first element matching \p key, or \ref end() if no element was found
 * matching \p key.
 */
const_find_iterator find(const Variant& key) const noexcept;
/**
 * Checks whether the container has an element matching a given key.
 *
 * @param key The key of the element to search for.
 * @returns `true` if at least one element matching \p key exists in the container; \c false otherwise.
 */
bool contains(const Variant& key) const noexcept;
/**
 * Counts the number of elements matching a given key.
 *
 * \note as this is a unique container, this will always be either 0 or 1.
 * @param key The key to count.
 * @returns the number of elements matching \p key.
 */
size_t count(const Variant& key) const noexcept;
#if CARB_EXCEPTIONS_ENABLED || defined(DOXYGEN_BUILD)
/**
 * Access specified element with bounds checking.
 *
 * This function is only available if exceptions are enabled.
 *
 * @param key The key of the element to find.
 * @returns a reference to the mapped value of the element with key equivalent to \p key.
 * @throws std::out_of_range if no such element exists.
 */
mapped_type& at(const Variant& key);
//! @copydoc at()
const mapped_type& at(const Variant& key) const;
#endif
/**
 * Returns a reference to a value that is mapped to the given key, performing an insertion if such key does not
 * already exist.
 *
 * If \p key does not exist, the returned type will be a default-constructed \ref Variant.
 *
 * @param key the key of the element to find or insert
 * @returns a reference to the \ref mapped_type mapped to \p key.
 */
mapped_type& operator[](const Variant& key);
/**
 * Clears the contents. O(n) over \ref capacity().
 *
 * Erases all elements from the container. After this call \ref size() returns zero. Invalidates all iterators,
 * pointers and references to contained elements.
 *
 * @note This does not free the memory used by the container. To free the hash table memory, use `rehash(0)` after
 * this call.
 */
virtual void clear() noexcept = 0;
/**
 * Returns the number of elements that can be stored with the current memory usage.
 * @see reserve()
 * @returns the number of elements that can be stored with the current memory usage.
 */
virtual size_t capacity() const noexcept = 0;
/**
 * Reserves space for at least the specified number of elements and regenerates the hash table.
 *
 * Sets \ref capacity() of \c *this to a value greater-than-or-equal-to \p n. If \ref capacity() already exceeds
 * \p n, nothing happens.
 *
 * If a rehash occurs, all iterators, pointers and references to existing elements are invalidated.
 *
 * @param n The desired minimum capacity of `*this`.
 */
virtual void reserve(size_t n) noexcept = 0;
/**
 * Sets the capacity of the container to the lowest valid value greater-than-or-equal-to the given value, and
 * rehashes the container.
 *
 * If \p n is less-than \ref size(), \ref size() is used instead.
 *
 * If the container is empty and \p n is zero, the memory for the container is freed.
 *
 * After this function is called, all iterators, pointers and references to existing elements are invalidated.
 *
 * @param n The minimum capacity for the container. The actual size of the container may be larger than this.
 */
virtual void rehash(size_t n) noexcept = 0;
private:
// ABI implementation hooks; the public inline members above are implemented in terms of these.
// Exact semantics live in the implementing plugin -- TODO(review): confirm against the implementation.
virtual KeyValuePair* internalInsert(const Variant& key, bool& success) noexcept = 0;
virtual void internalErase(const KeyValuePair*) noexcept = 0;
virtual KeyValuePair* internalFind(const Variant& key) const noexcept = 0;
virtual KeyValuePair* internalBegin() const noexcept = 0;
virtual KeyValuePair* iterNext(KeyValuePair*) const noexcept = 0;
virtual KeyValuePair* findNext(KeyValuePair*) const noexcept = 0;
};
//! Helper definition: reference-counting smart pointer to a \ref VariantMap.
using VariantMapPtr = ObjectPtr<VariantMap>;
/**
 * Default implementation of a Translator type.
 *
 * Translator structs provide a \ref VTable and instruct the Variant system in how the \ref VariantData::data
 * member is to be interpreted for conversion to-and-from C++ types.
 *
 * All Translator specializations must provide three functions:
 * - `RString type() const noexcept` - Retrieves the registered name of the type known to \ref IVariant via
 * \ref IVariant::registerType(). The v-table will be looked up via \ref translate().
 * - `void* data(T&&) const noexcept` - This function must convert the given value to a \c void* representation that
 * is stored in the \ref VariantData struct. If this function allocates memory it should be from
 * \ref carb::allocate() or originate within the plugin that contains the \ref VTable::Destructor() function
 * that will be freeing the memory.
 * - `T value(void*) const noexcept` - This function is the opposite of the \c data() function--it converts the
 * \c void* value from \ref VariantData::data and converts it back to type \c T.
 *
 * Translator specializations are present for the following built-in types:
 * - \c std::nullptr_t.
 * * Does not convert to any other type.
 * * Is only equal with other \c std::nullptr_t types.
 * - \c bool
 * * Can be converted to any integral type (will produce 0 or 1).
 * * Will be equal with integer values of 0 or 1.
 * - Integral types (8-, 16-, 32- and 64-bit; signed and unsigned).
 * * Will convert to any other integral type as long as the value is representable in that type. For instance, a
 * `Variant(-1)` would fail to convert to `unsigned`, and `Variant(999)` would fail to convert to `uint8_t`, but
 * `Variant(uint64_t(-1))` would convert just fine to `int8_t`.
 * * Equality checks follow the same rules as conversion.
 * * Not convertible to floating point due to potential data loss.
 * * Convertible to \c bool only if the value is 0 or 1.
 * - \c float and \c double
 * * Will convert to each other, but will not convert to integral types due to potential data loss.
 * * Equality checks follow conversion rules, but will compare as the larger type.
 * - \c omni::string
 * * Convertible to `const char*`, but this value must only be used transiently--it is equivalent to `c_str()` and
 * follows the same rules for lifetime of the pointer from `c_str()`.
 * * Equality compares via `operator ==` for \c omni::string, and comparable with `[const] char*`.
 * - `const char*`
 * * Stores the pointer, so memory lifetime must be longer than the Variant.
 * * Accepts a `char*` but stored as a `const char*`; any attempts to convert to `char*` will fail.
 * * Attempts to copy a Variant containing a `const char*` just copy the same pointer, so the lifetime guarantee
 * must include these copies as well.
 * * Comparable with \c omni::string.
 * - `dictionary::Item*`
 * * Stores the pointer, so memory lifetime must be longer than the Variant.
 * * Copying the variant will trivially copy the pointer.
 * * Comparison will trivially compare the pointer.
 * - `carb::Strong` (Carbonite strong types)
 * * Auto-converts to and from the underlying numeric type (i.e. `int`, `size_t`, etc.), so it will lose the type
 * safety of the strong type.
 * * Comparable with similar numeric types.
 * - \ref VariantArray* / \ref VariantArrayPtr
 * * Comparable only with other VariantArray types, by pointer value.
 * * Hashes based on the pointer value, not the contained values.
 * * Variants containing this type always hold a reference.
 * - \ref VariantMap* / \ref VariantMapPtr
 * * Comparable only with other VariantMap types, by pointer value.
 * * Hashes based on the pointer value, not the contained values.
 * * Variants containing this type always hold a reference.
 * - \ref RString / \ref RStringU / \ref RStringKey / \ref RStringUKey
 * * Types are comparable with other instances of the same type.
 * * Key types are only comparable with Key types; RString and RStringKey will compare with RStringU and RStringKeyU
 * respectively, as uncased comparisons.
 * * Hashing is as by the `getHash()` function for each of the RString types.
 * * RString and RStringU can be converted to `const char*` or `omni::string` as if by `c_str()`.
 * * RStringKey and RStringUKey can be converted to `omni::string` as if by `toString()`.
 *
 * @warning The default template does not provide the above functions, which allows compilation to fail for
 * unrecognized types. Translations are available through specializations only.
 *
 * @tparam T The type handled by the specialization.
 * @tparam Enable A template parameter that can be used for SFINAE.
 */
template <class T, class Enable = void>
struct Translator;
} // namespace variant
} // namespace carb
|
omniverse-code/kit/include/carb/events/IEvents.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief carb.events interface definition file.
#pragma once
#include "../IObject.h"
#include "../Interface.h"
#include "../InterfaceUtils.h"
#include "../dictionary/IDictionary.h"
#include <utility>
namespace carb
{
/**
* Namespace for the *carb.events* plugin.
*/
namespace events
{
/**
 * Event type is a 64-bit number. To use strings as event types, a hash function can be used.
 * @see CARB_EVENTS_TYPE_FROM_STR
 */
using EventType = uint64_t;
/**
 * Event sender identifier. It is useful in some cases to differentiate who sent an event. E.g. to know that it was you.
 */
using SenderId = uint32_t;
/**
 * Default sender id to use if you don't want it to be unique.
 */
constexpr SenderId kGlobalSenderId = 0;
/**
 * Event notification order.
 */
using Order = int32_t;
/**
 * Default order. If 2 subscriptions are done with the same Order value, their callback order is undefined.
 */
constexpr Order kDefaultOrder = 0;
/**
 * Event object which is sent and received. Lifetime is managed by carb::IObject refcounting.
 */
class IEvent : public IObject
{
public:
EventType type; //!< Event type.
SenderId sender; //!< The sender of the event, or carb::events::kGlobalSenderId.
dictionary::Item* payload; //!< Event payload is a dictionary Item. Any data can be put into it.
/**
 * Helper function to build a *carb.dictionary* of values.
 * @param param The key/value pair to include in the *carb.dictionary*.
 */
template <typename ValueT>
void setValues(const std::pair<const char*, ValueT>& param);
/**
 * Helper function to build a *carb.dictionary* of values.
 * @param param The key/value pair to include in the *carb.dictionary*.
 * @param params Additional key/value pairs.
 */
template <typename ValueT, typename... ValuesT>
void setValues(const std::pair<const char*, ValueT>& param, ValuesT&&... params);
/**
 * Stops propagation of an event during dispatch.
 */
virtual void consume() = 0;
/**
 * Attaches a \ref carb::IObject to this event by name and holds a reference to the object.
 *
 * The object can be retrieved with \ref retrieveObject(). If an object is already stored under the given name, the
 * stored object is released and \p ptr is stored instead. References to stored objects are released when `*this` is
 * destroyed. To clear the current reference for a given \p name, pass \p ptr as `nullptr`.
 * @thread_safety This function is thread safe: all `attachObject()` calls are serialized with each other and all
 * calls with \ref retrieveObject().
 * @param name The name of the object to attach. Must not be `nullptr`. The same name must be passed to
 * \ref retrieveObject() in order to retrieve the pointer. This parameter is case-sensitive.
 * @param ptr The pointer to attach to \c *this with the given \p name.
 */
virtual void attachObject(const char* name, carb::IObject* ptr) = 0;
/**
 * Retrieves a previously-stored \ref carb::IObject from this event by name.
 *
 * The object was previously set with \ref attachObject(). The object remains attached to `*this` until `*this` is
 * destroyed or it is overwritten with a different object (or `nullptr`) by calling \ref attachObject() again.
 * @thread_safety This function is thread safe: `retrieveObject()` can be called concurrently and will serialize
 * calls to \ref attachObject().
 * @param name The name of the object to retrieve. Must not be `nullptr`. This parameter is case-sensitive.
 * @returns The object previously passed to \ref attachObject(). May be `nullptr`.
 */
carb::ObjectPtr<carb::IObject> retrieveObject(const char* name)
{
// cannot pass ObjectPtr by value across ABI boundaries
carb::ObjectPtr<carb::IObject> ptr{};
internalRetrieveObject(name, ptr);
return ptr;
}
//! @private
virtual void internalRetrieveObject(const char* name, carb::ObjectPtr<carb::IObject>& out) = 0;
};
/// Helper definition for IEvent smart pointer.
using IEventPtr = ObjectPtr<IEvent>;
/**
 * Interface to implement for an event listener.
 */
class IEventListener : public IObject
{
public:
/**
 * Function to be implemented that will handle an event.
 * @param e The event to process. To stop further propagation of the event, call IEvent::consume().
 */
virtual void onEvent(IEvent* e) = 0;
};
/// Helper definition for IEventListener smart pointer.
using IEventListenerPtr = ObjectPtr<IEventListener>;
/**
 * Subscription holder created by all event listening subscription functions. The subscription is valid while the
 * holder is alive.
 */
class ISubscription : public IObject
{
public:
/**
 * Unsubscribes the associated IEventListener.
 */
virtual void unsubscribe() = 0;
/**
 * Returns the name of the subscription.
 *
 * @note If a name was not given when the @ref IEventStream subscription function was called, a name is generated
 * based on the address of the @ref IEventListener::onEvent() function.
 *
 * @returns The name of the subscription.
 */
virtual const char* getName() const noexcept = 0;
};
/// Helper definition for ISubscription smart pointer.
using ISubscriptionPtr = ObjectPtr<ISubscription>;
/**
 * Compile-time conversion of a string to carb::events::EventType.
 * @param STR The string to convert.
 * @returns The EventType corresponding to the given string.
 * @sa typeFromString() for the run-time equivalent.
 */
#define CARB_EVENTS_TYPE_FROM_STR(STR) CARB_HASH_STRING(STR)
/**
 * Run-time conversion of a string to carb::events::EventType.
 * @param str The string to convert.
 * @returns The EventType corresponding to the given string.
 */
inline EventType typeFromString(const char* str)
{
    // Same hash used by the compile-time CARB_EVENTS_TYPE_FROM_STR macro.
    const EventType hashed = carb::hashString(str);
    return hashed;
}
/**
 * Event stream is a fundamental primitive used to send, receive and listen for events. Different event system models
 * can be designed using it. It is thread-safe and can also be used as a sync primitive. Similar to Go Language
 * Channels.
 *
 * Basically a stream is just a queue with listeners:
 *
 * @code{.txt}
 * +------------------+
 * push() / pushBlocked() | | tryPop() / pop()
 * +--------------------------->+ IEventStream +------------------------>
 * ^ | | ^
 * | +------------------+ |
 * subscribeToPush() | | subscribeToPop()
 * | |
 * | |
 * +---------+--------+ +--------+---------+
 * | IEventListener | | IEventListener |
 * +------------------+ +------------------+
 * @endcode
 *
 * 1. You can push to the stream and pop events (acts like a queue).
 * 2. Given the stream you can listen for pushed and/or popped events. That is basically immediate callbacks vs deferred
 * callbacks.
 * 3. Blocking methods (pushBlocked() and pop()) will block the thread until the event is popped (for the push method)
 * or any event is pushed (for the pop method). That can be used as thread sync.
 * 4. Pumping of the stream is just popping all events until it is empty.
 * 5. EventType allows you to filter which events to listen for. IEventStream may contain only one type, conceptually
 * more like channels, or a lot of events - like an event bus.
 * 6. Listeners are subscribed specifying the order in which they want to receive events. When processing, they may
 * consume an event which stops other listeners from receiving it (only when being dispatched).
 */
class IEventStream : public IObject
{
public:
/**
 * Create new event of certain type.
 * @param eventType The type of event to create.
 * @param sender The sender of the event, or carb::events::kGlobalSenderId.
 * @returns A pointer to a newly created event.
 */
virtual IEvent* createEventPtr(EventType eventType, SenderId sender) = 0;
/**
 * @copydoc createEventPtr(EventType,SenderId)
 * @param values Key/value pairs as passed to IEvent::setValues().
 */
template <typename... ValuesT>
ObjectPtr<IEvent> createEvent(EventType eventType, SenderId sender, ValuesT&&... values);
/**
 * Dispatch event immediately without putting it into stream.
 * @param e The event to dispatch.
 */
virtual void dispatch(IEvent* e) = 0;
/**
 * Push event into the stream.
 * @param e The event to push.
 */
virtual void push(IEvent* e) = 0;
/**
 * Push event into the stream and block until it is dispatched by some other thread.
 * @param e The event to push.
 */
virtual void pushBlocked(IEvent* e) = 0;
/**
 * Get approximate number of events waiting for dispatch in the stream.
 * @thread_safety While safe to call from multiple threads, since there is no lock involved this value should be
 * considered approximate as another thread could modify the number before the return value is read. This call
 * is strongly ordered with respect to other calls.
 * @returns The approximate number of events waiting for dispatch in the stream.
 */
virtual size_t getCount() = 0;
/**
 * Pop and dispatches a single event from the stream, blocking until one becomes available.
 * @warning This function blocks until an event is available.
 * @returns The event that was popped and dispatched.
 */
ObjectPtr<IEvent> pop();
/// @copydoc pop()
virtual IEvent* popPtr() = 0;
/**
 * Pops and dispatches a single event from the stream, if available.
 * @note Unlike pop(), this function does not wait for an event to become available.
 * @returns The event that was popped and dispatched, or `nullptr` if no event was available.
 */
ObjectPtr<IEvent> tryPop();
/// @copydoc tryPop()
virtual IEvent* tryPopPtr() = 0;
/**
 * Dispatches and pops all available events from the stream.
 */
void pump();
/**
 * Subscribe to receive notification when event stream is popped.
 * @sa pump() pop() tryPop()
 * @note Adding or removing a subscription from \ref IEventListener::onEvent() is perfectly valid. The newly added
 * subscription will not be called until the next event.
 * @warning Recursively pushing and/or popping events from within \ref IEventListener::onEvent() is not recommended
 * and can lead to undefined behavior.
 * @param listener The listener to subscribe. IEventListener::onEvent() will be called for each popped event. If an
 * event listener calls IEvent::consume() on the given event, propagation of the event will stop and no more event
 * listeners will receive the event.
 * @param order An optional value used to specify the order tier. Lower order tiers will be notified first.
 * Multiple IEventListener objects at the same order tier are notified in an undefined order.
 * @param subscriptionName An optional name for the subscription for debugging purposes. Names do not need to be
 * unique. If `nullptr`, an internal name will be determined.
 * @returns A ISubscription pointer. The subscription is valid as long as the ISubscription pointer is referenced.
 * Alternately, the IEventListener can be unsubscribed by calling ISubscription::unsubscribe().
 */
ObjectPtr<ISubscription> createSubscriptionToPop(IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr);
/// @copydoc createSubscriptionToPop()
/// @param eventType A specific event to listen for.
ObjectPtr<ISubscription> createSubscriptionToPopByType(EventType eventType,
IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr);
/// @copydoc createSubscriptionToPop()
virtual ISubscription* createSubscriptionToPopPtr(IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr) = 0;
/// @copydoc createSubscriptionToPop()
/// @param eventType A specific event to listen for.
virtual ISubscription* createSubscriptionToPopByTypePtr(EventType eventType,
IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr) = 0;
/**
 * Subscribe to receive notification when an event is pushed into the event stream.
 * @sa push() pushBlocked()
 * @note Adding or removing a subscription from \ref IEventListener::onEvent() is perfectly valid. The newly added
 * subscription will not be called until the next event.
 * @warning Recursively pushing and/or popping events from within \ref IEventListener::onEvent() is not recommended
 * and can lead to undefined behavior.
 * @param listener The listener to subscribe. IEventListener::onEvent() will be called for each pushed event. The
 * IEvent::consume() function has no effect for notifications of push events.
 * @param order An optional value used to specify the order tier. Lower order tiers will be notified first.
 * Multiple IEventListener objects at the same order tier are notified in an undefined order.
 * @param subscriptionName An optional name for the subscription for debugging purposes. Names do not need to be
 * unique. If `nullptr`, an internal name will be determined.
 * @returns A ISubscription pointer. The subscription is valid as long as the ISubscription pointer is referenced.
 * Alternately, the IEventListener can be unsubscribed by calling ISubscription::unsubscribe().
 */
ObjectPtr<ISubscription> createSubscriptionToPush(IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr);
/// @copydoc createSubscriptionToPush()
/// @param eventType A specific event to listen for.
ObjectPtr<ISubscription> createSubscriptionToPushByType(EventType eventType,
IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr);
/// @copydoc createSubscriptionToPush()
virtual ISubscription* createSubscriptionToPushPtr(IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr) = 0;
/// @copydoc createSubscriptionToPush()
/// @param eventType A specific event to listen for.
virtual ISubscription* createSubscriptionToPushByTypePtr(EventType eventType,
IEventListener* listener,
Order order = kDefaultOrder,
const char* subscriptionName = nullptr) = 0;
/**
 * Sets the notification order for named subscriptions.
 * @note If multiple subscriptions exist with the same name, all are updated.
 * @param subscriptionName the name previously assigned when the subscription was created.
 * @param order The new order tier. Lower order tiers will be notified first. Multiple IEventListener objects at the
 * same order tier are notified in an undefined order.
 * @returns `true` if the subscription was found and updated; `false` otherwise.
 */
virtual bool setSubscriptionToPopOrder(const char* subscriptionName, Order order) = 0;
/// @copydoc setSubscriptionToPopOrder
virtual bool setSubscriptionToPushOrder(const char* subscriptionName, Order order) = 0;
/**
 * Retrieves the notification order for a named subscription.
 * @param subscriptionName the name previously assigned when the subscription was created.
 * @param order Must be a valid pointer that will receive the current order tier.
 * @returns `true` if the subscription was found; `false` if the subscription was not found or @p order is
 * `nullptr`.
 */
virtual bool getSubscriptionToPopOrder(const char* subscriptionName, Order* order) = 0;
/// @copydoc setSubscriptionToPopOrder
virtual bool getSubscriptionToPushOrder(const char* subscriptionName, Order* order) = 0;
/**
 * Returns the name of the IEventStream.
 *
 * @note If a name was not given when @ref IEvents::createEventStream() was called, a name is generated based on the
 * function that called @ref IEvents::createEventStream().
 *
 * @returns The name of the IEventStream.
 */
virtual const char* getName() const noexcept = 0;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Helpers //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * @defgroup helpers Helper functions
 * @sa createEvent() push() pushBlocked() dispatch()
 * @{
 */
/**
 * @copydoc createEvent
 */
template <typename... ValuesT>
IEvent* createEventPtr(EventType eventType, SenderId sender, ValuesT&&... values);
/**
 * Helper function that combines createEvent() with push().
 * @param eventType The type of event to create.
 * @param sender The sender of the event, or carb::events::kGlobalSenderId.
 */
void pushWithSender(EventType eventType, SenderId sender);
/// @copydoc pushWithSender
/// @param values Key/value pairs as passed to IEvent::setValues().
template <typename... ValuesT>
void pushWithSender(EventType eventType, SenderId sender, ValuesT&&... values);
/**
 * Helper function that combines createEvent() with push().
 * @param eventType The type of event to create.
 * @param values Key/value pairs as passed to IEvent::setValues().
 */
template <typename... ValuesT>
void push(EventType eventType, ValuesT&&... values);
/**
 * Helper function that combines createEvent() with pushBlocked().
 * @param eventType The type of event to create.
 * @param sender The sender of the event, or carb::events::kGlobalSenderId.
 */
void pushBlockedWithSender(EventType eventType, SenderId sender);
/// @copydoc pushBlockedWithSender
/// @param values Key/value pairs as passed to IEvent::setValues().
template <typename... ValuesT>
void pushBlockedWithSender(EventType eventType, SenderId sender, ValuesT&&... values);
/**
 * Helper function that combines createEvent() with pushBlocked().
 * @param eventType The type of event to create.
 * @param values Key/value pairs as passed to IEvent::setValues().
 */
template <typename... ValuesT>
void pushBlocked(EventType eventType, ValuesT&&... values);
/**
 * Helper function that combines createEvent() with dispatch().
 * @param eventType The type of event to create.
 * @param sender The sender of the event, or carb::events::kGlobalSenderId.
 * @param values Key/value pairs as passed to IEvent::setValues().
 */
template <typename... ValuesT>
void dispatch(EventType eventType, SenderId sender, ValuesT&&... values);
/**
 * Helper function that combines createEvent() with dispatch().
 * @param eventType The type of event to create.
 * @param sender The sender of the event, or carb::events::kGlobalSenderId.
 */
void dispatch(EventType eventType, SenderId sender);
///@}
};
/// Helper definition for IEventStream smart pointer.
using IEventStreamPtr = ObjectPtr<IEventStream>;
/**
 * Interface definition for *carb.events*.
 */
struct IEvents
{
// 1.0 - Initial release
// 1.1 - added internalCreateEventStream
// 1.2 - added attachObject() and retrieveObject() to IEvent
CARB_PLUGIN_INTERFACE("carb::events::IEvents", 1, 2)
/**
 * Create new event stream.
 * @param name An optional name to give the event stream for logging and profiling. Names do not need to be unique
 * but are recommended to be unique. If `nullptr`, an internal name will be determined.
 * @returns A pointer to the new event stream.
 */
IEventStreamPtr createEventStream(const char* name = nullptr) noexcept;
/**
 * Create new event stream.
 * @rst
 * .. deprecated:: Use ``carb::events::IEvents::internalCreateEventStream`` instead.
 * @endrst
 * @returns A pointer to the new event stream.
 */
CARB_DEPRECATED("Use internalCreateEventStream instead") virtual IEventStream* createEventStreamPtr() = 0;
/**
 * Get a unique sender identifier.
 * @note The sender identifier may be a previously released sender identifier.
 * @returns A unique sender identifier. When finished with the sender identifier, it should be returned with
 * releaseUniqueSenderId().
 */
virtual SenderId acquireUniqueSenderId() = 0;
/**
 * Releases a unique sender identifier previously acquired with acquireUniqueSenderId().
 * @param senderId The previously acquired senderId.
 */
virtual void releaseUniqueSenderId(SenderId senderId) = 0;
//! @private
virtual IEventStream* internalCreateEventStream(const char* name) noexcept = 0;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Inline Functions //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////// IEvent //////////////
template <typename ValueT>
inline void IEvent::setValues(const std::pair<const char*, ValueT>& param)
{
carb::getCachedInterface<dictionary::IDictionary>()->makeAtPath<ValueT>(this->payload, param.first, param.second);
}
template <typename ValueT, typename... ValuesT>
inline void IEvent::setValues(const std::pair<const char*, ValueT>& param, ValuesT&&... params)
{
this->setValues<ValueT>(param);
this->setValues(std::forward<ValuesT>(params)...);
}
////////////// IEventStream //////////////
//! Creates an event carrying the given payload key/value pairs.
//! @returns A raw event pointer; the caller owns one reference and must release() it.
template <typename... ValuesT>
inline IEvent* IEventStream::createEventPtr(EventType type, SenderId sender, ValuesT&&... values)
{
    // Delegate to the non-variadic overload, then populate the payload.
    IEvent* event = createEventPtr(type, sender);
    event->setValues(std::forward<ValuesT>(values)...);
    return event;
}
//! Subscribes to event popping; wraps the raw-pointer ABI call in an owning ObjectPtr.
inline ObjectPtr<ISubscription> IEventStream::createSubscriptionToPop(IEventListener* listener,
                                                                      Order order,
                                                                      const char* name)
{
    ISubscription* subscription = this->createSubscriptionToPopPtr(listener, order, name);
    return stealObject(subscription);
}

//! Subscribes to popping of a specific event type; wraps the raw-pointer ABI call in an owning ObjectPtr.
inline ObjectPtr<ISubscription> IEventStream::createSubscriptionToPopByType(EventType eventType,
                                                                            IEventListener* listener,
                                                                            Order order,
                                                                            const char* name)
{
    ISubscription* subscription = this->createSubscriptionToPopByTypePtr(eventType, listener, order, name);
    return stealObject(subscription);
}

//! Subscribes to event pushing; wraps the raw-pointer ABI call in an owning ObjectPtr.
inline ObjectPtr<ISubscription> IEventStream::createSubscriptionToPush(IEventListener* listener,
                                                                       Order order,
                                                                       const char* name)
{
    ISubscription* subscription = this->createSubscriptionToPushPtr(listener, order, name);
    return stealObject(subscription);
}

//! Subscribes to pushing of a specific event type; wraps the raw-pointer ABI call in an owning ObjectPtr.
inline ObjectPtr<ISubscription> IEventStream::createSubscriptionToPushByType(EventType eventType,
                                                                             IEventListener* listener,
                                                                             Order order,
                                                                             const char* name)
{
    ISubscription* subscription = this->createSubscriptionToPushByTypePtr(eventType, listener, order, name);
    return stealObject(subscription);
}
//! Creates an event with the given payload, pushes it onto the stream, then drops the local reference.
template <typename... ValuesT>
inline void IEventStream::pushWithSender(EventType type, SenderId sender, ValuesT&&... values)
{
    // stealObject releases the created event automatically when `event` goes out of scope.
    auto event = carb::stealObject(createEventPtr(type, sender, std::forward<ValuesT>(values)...));
    push(event.get());
}

//! Creates a payload-less event, pushes it onto the stream, then drops the local reference.
inline void IEventStream::pushWithSender(EventType type, SenderId sender)
{
    auto event = carb::stealObject(createEventPtr(type, sender));
    push(event.get());
}
//! Creates an event with the given payload and returns it as an owning ObjectPtr.
template <typename... ValuesT>
inline ObjectPtr<IEvent> IEventStream::createEvent(EventType eventType, SenderId sender, ValuesT&&... values)
{
    IEvent* raw = this->createEventPtr(eventType, sender, std::forward<ValuesT>(values)...);
    return carb::stealObject(raw);
}

//! Pushes an event with the global sender id and the given payload.
template <typename... ValuesT>
inline void IEventStream::push(EventType type, ValuesT&&... values)
{
    pushWithSender(type, kGlobalSenderId, std::forward<ValuesT>(values)...);
}
//! Creates an event with the given payload and pushes it, blocking until it is consumed.
template <typename... ValuesT>
inline void IEventStream::pushBlockedWithSender(EventType type, SenderId sender, ValuesT&&... values)
{
    // stealObject releases the created event automatically when `event` goes out of scope.
    auto event = carb::stealObject(createEventPtr(type, sender, std::forward<ValuesT>(values)...));
    pushBlocked(event.get());
}

//! Creates a payload-less event and pushes it, blocking until it is consumed.
inline void IEventStream::pushBlockedWithSender(EventType type, SenderId sender)
{
    auto event = carb::stealObject(createEventPtr(type, sender));
    pushBlocked(event.get());
}

//! Blocking push with the global sender id and the given payload.
template <typename... ValuesT>
inline void IEventStream::pushBlocked(EventType type, ValuesT&&... values)
{
    pushBlockedWithSender(type, kGlobalSenderId, std::forward<ValuesT>(values)...);
}
//! Creates an event with the given payload and dispatches it immediately (bypassing the queue).
template <typename... ValuesT>
inline void IEventStream::dispatch(EventType type, SenderId sender, ValuesT&&... values)
{
    // stealObject releases the created event automatically when `event` goes out of scope.
    auto event = carb::stealObject(createEventPtr(type, sender, std::forward<ValuesT>(values)...));
    dispatch(event.get());
}

//! Creates a payload-less event and dispatches it immediately (bypassing the queue).
inline void IEventStream::dispatch(EventType type, SenderId sender)
{
    auto event = carb::stealObject(createEventPtr(type, sender));
    dispatch(event.get());
}
inline void IEventStream::pump()
{
const size_t eventCount = this->getCount();
for (size_t i = 0; i < eventCount; i++)
{
IEvent* e = this->tryPopPtr();
if (!e)
break;
e->release();
}
}
//! Blocking pop; wraps the raw-pointer ABI call in an owning ObjectPtr.
inline ObjectPtr<IEvent> IEventStream::pop()
{
    IEvent* event = this->popPtr();
    return carb::stealObject(event);
}

//! Non-blocking pop; the returned ObjectPtr is empty if the stream has no events.
inline ObjectPtr<IEvent> IEventStream::tryPop()
{
    IEvent* event = this->tryPopPtr();
    return carb::stealObject(event);
}

//! Creates a new event stream, wrapping the raw-pointer ABI call in an owning ObjectPtr.
inline ObjectPtr<IEventStream> IEvents::createEventStream(const char* name) noexcept
{
    IEventStream* stream = this->internalCreateEventStream(name);
    return carb::stealObject(stream);
}
} // namespace events
} // namespace carb
|
omniverse-code/kit/include/carb/events/EventsBindingsPython.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../BindingsPythonUtils.h"
#include "../Framework.h"
#include "../dictionary/DictionaryBindingsPython.h"
#include "../cpp/Optional.h"
#include "EventsUtils.h"
#include "IEvents.h"
#include <memory>
#include <string>
#include <vector>
DISABLE_PYBIND11_DYNAMIC_CAST(carb::events::IEvents)
DISABLE_PYBIND11_DYNAMIC_CAST(carb::events::ISubscription)
DISABLE_PYBIND11_DYNAMIC_CAST(carb::events::IEvent)
DISABLE_PYBIND11_DYNAMIC_CAST(carb::events::IEventStream)
namespace carb
{
namespace events
{
//! Caches the IEvents interface pointer and clears it automatically (via a framework
//! release hook) if the carb.events plugin is unloaded, so stale pointers are never handed out.
struct IEventsHolder
{
    //! Cached interface pointer; nullptr when the plugin is not (or no longer) loaded.
    IEvents* events{ nullptr };

    IEventsHolder()
    {
        update();
    }
    ~IEventsHolder()
    {
        // Unregister the release hook so the framework does not call back into a dead object.
        if (events)
        {
            g_carbFramework->removeReleaseHook(events, sReleaseHook, this);
        }
    }
    //! True if the interface is currently available.
    explicit operator bool() const noexcept
    {
        return events != nullptr;
    }
    //! Returns the cached interface; if @p canUpdate is true, attempts to re-acquire it first.
    IEvents* get(bool canUpdate = false)
    {
        if (canUpdate)
        {
            update();
        }
        return events;
    }

private:
    //! Attempts to acquire IEvents if not already held, registering a release hook on success.
    void update()
    {
        if (events)
            return;
        events = g_carbFramework->tryAcquireInterface<IEvents>();
        if (events)
        {
            g_carbFramework->addReleaseHook(events, sReleaseHook, this);
        }
    }
    CARB_PREVENT_COPY_AND_MOVE(IEventsHolder);
    //! Framework callback invoked when the interface is released; clears the cached pointer.
    static void sReleaseHook(void* iface, void* user)
    {
        static_cast<IEventsHolder*>(user)->events = nullptr;
        g_carbFramework->removeReleaseHook(iface, sReleaseHook, user);
    }
};
//! Returns the cached IEvents interface pointer (nullptr if unavailable).
//! @param update If true, attempt to (re-)acquire the interface before returning it.
inline IEvents* getIEvents(bool update = false)
{
    // Function-local static: resolved lazily on first use, auto-cleared by a release hook.
    static IEventsHolder s_holder;
    return s_holder.get(update);
}
// Wrap ObjectPtr because the underlying carb.events module could be unloaded before things destruct. In this case, we
// want to prevent destroying them to avoid crashing.
template <class T>
class ObjectPtrWrapper : public carb::ObjectPtr<T>
{
    using Base = ObjectPtr<T>;

public:
    ObjectPtrWrapper() = default;
    ObjectPtrWrapper(ObjectPtrWrapper&& other) : Base(std::move(other))
    {
    }
    //! Adopts @p o. Also resolves IEvents eagerly so the destructor can test plugin liveness.
    explicit ObjectPtrWrapper(T* o) : Base(o)
    {
        getIEvents(); // Ensure that we've resolved IEvents
    }
    ~ObjectPtrWrapper()
    {
        // If IEvents no longer exists, detach the pointer without releasing it.
        // Releasing would call into unloaded plugin code and crash.
        if (!getIEvents())
            this->detach();
    }
    ObjectPtrWrapper& operator=(ObjectPtrWrapper&& other)
    {
        Base::operator=(std::move(other));
        return *this;
    }
    using Base::operator=;
};
} // namespace events
} // namespace carb
PYBIND11_DECLARE_HOLDER_TYPE(T, carb::events::ObjectPtrWrapper<T>, true);
namespace carb
{
namespace events
{
namespace
{
//! IEventListener adapter that forwards each event to a Python callable.
class PythonEventListener : public IEventListener
{
public:
    //! @param fn The Python-side callback to invoke for each event.
    PythonEventListener(std::function<void(IEvent*)> fn) : m_fn(std::move(fn))
    {
    }

    void onEvent(IEvent* e) override
    {
        // Acquires the GIL and swallows/reports Python exceptions rather than propagating them.
        carb::callPythonCodeSafe(m_fn, e);
    }

private:
    std::function<void(IEvent*)> m_fn;

    CARB_IOBJECT_IMPL
};
// GIL must be held
//! Returns the first line of the caller's traceback entry (file, line, function) for use
//! as a default subscription/stream name; returns an empty string on any failure.
std::string getPythonCaller() noexcept
{
    // Equivalent python:
    // def get_python_caller():
    //     try:
    //         from traceback import extract_stack
    //         tb = extract_stack(limit=1)
    //         l = tb.format()
    //         if len(l) >= 1:
    //             s = l[0]
    //             return s[0:s.find('\n')]
    //     except:
    //         pass
    //     return ""
    // Determine a name based on the caller
    try
    {
        // This cannot be static because the GIL won't be held for cleanup at shutdown
        auto extract_stack = py::module::import("traceback").attr("extract_stack");
        auto tb = extract_stack(py::arg("limit") = 1);
        py::list list = tb.attr("format")();
        if (list.size() >= 1)
        {
            std::string entry = py::str(list[0]);
            // sample entry:
            // ```File "C:\src\carbonite\source\tests\python\test_events.py", line 28, in main
            //   stream = events.create_event_stream()
            // ```
            // Return just the first line
            auto index = entry.find('\n');
            return entry.substr(0, index);
        }
    }
    catch (...)
    {
        // Best-effort only: any Python error here simply results in an empty name.
    }
    return {};
}
//! Registers the carb.events Python bindings (free functions plus the ISubscription,
//! IEvent, IEventStream and IEvents classes) on module @p m. GIL must be held.
inline void definePythonModule(py::module& m)
{
using namespace carb::events;
// Module-level free functions.
m.def("acquire_events_interface", []() { return getIEvents(true); }, py::return_value_policy::reference,
py::call_guard<py::gil_scoped_release>());
m.def("type_from_string", [](const char* s) { return carb::events::typeFromString(s); },
py::call_guard<py::gil_scoped_release>());
// ISubscription: RAII handle returned by the create_subscription_* methods below.
py::class_<ISubscription, ObjectPtrWrapper<ISubscription>>(m, "ISubscription", R"(
Subscription holder.
)")
.def("unsubscribe", &ISubscription::unsubscribe, R"(
Unsubscribes this subscription.
)",
py::call_guard<py::gil_scoped_release>())
.def_property_readonly("name", &ISubscription::getName, R"(
Returns the name of this subscription.
Returns:
The name of this subscription.
)",
py::call_guard<py::gil_scoped_release>());
// IEvent: read-only view of type/sender/payload plus consume().
py::class_<IEvent, ObjectPtrWrapper<IEvent>>(m, "IEvent", R"(
Event.
Event has an Event type, a sender id and a payload. Payload is a dictionary like item with arbitrary data.
)")
.def_readonly("type", &IEvent::type)
.def_readonly("sender", &IEvent::sender)
.def_readonly("payload", &IEvent::payload)
.def("consume", &IEvent::consume, "Consume event to stop it propagating to other listeners down the line.",
py::call_guard<py::gil_scoped_release>());
// IEventStream: subscription creation, push/dispatch, pop/pump.
// Each create_subscription_* lambda resolves a default name from the Python caller's
// traceback (while the GIL is still held) before releasing the GIL for the actual call.
py::class_<IEventStream, ObjectPtrWrapper<IEventStream>>(m, "IEventStream")
.def("create_subscription_to_pop",
[](IEventStream* stream, std::function<void(IEvent*)> onEventFn, Order order, const char* name) {
std::string sname;
if (!name || name[0] == '\0')
{
sname = getPythonCaller();
name = sname.empty() ? nullptr : sname.c_str();
}
py::gil_scoped_release gil;
return stream->createSubscriptionToPop(
carb::stealObject(new PythonEventListener(std::move(onEventFn))).get(), order, name);
},
R"(
Subscribes to event dispatching on the stream.
See :class:`.Subscription` for more information on subscribing mechanism.
Args:
fn: The callback to be called on event dispatch.
order: An integer order specifier. Lower numbers are called first. Negative numbers are allowed. Default is 0.
name: The name of the subscription for profiling. If no name is provided one is generated from the traceback of the calling function.
Returns:
The subscription holder.)",
py::arg("fn"), py::arg("order") = kDefaultOrder, py::arg("name") = nullptr)
.def("create_subscription_to_pop_by_type",
[](IEventStream* stream, EventType eventType, const std::function<void(IEvent*)>& onEventFn, Order order,
const char* name) {
std::string sname;
if (!name || name[0] == '\0')
{
sname = getPythonCaller();
name = sname.empty() ? nullptr : sname.c_str();
}
py::gil_scoped_release gil;
return stream->createSubscriptionToPopByType(
eventType, carb::stealObject(new PythonEventListener(onEventFn)).get(), order, name);
},
R"(
Subscribes to event dispatching on the stream.
See :class:`.Subscription` for more information on subscribing mechanism.
Args:
event_type: Event type to listen to.
fn: The callback to be called on event dispatch.
order: An integer order specifier. Lower numbers are called first. Negative numbers are allowed. Default is 0.
name: The name of the subscription for profiling. If no name is provided one is generated from the traceback of the calling function.
Returns:
The subscription holder.)",
py::arg("event_type"), py::arg("fn"), py::arg("order") = kDefaultOrder, py::arg("name") = nullptr)
.def("create_subscription_to_push",
[](IEventStream* stream, const std::function<void(IEvent*)>& onEventFn, Order order, const char* name) {
std::string sname;
if (!name || name[0] == '\0')
{
sname = getPythonCaller();
name = sname.empty() ? nullptr : sname.c_str();
}
py::gil_scoped_release gil;
return stream->createSubscriptionToPush(
carb::stealObject(new PythonEventListener(onEventFn)).get(), order, name);
},
R"(
Subscribes to pushing events into stream.
See :class:`.Subscription` for more information on subscribing mechanism.
Args:
fn: The callback to be called on event push.
order: An integer order specifier. Lower numbers are called first. Negative numbers are allowed. Default is 0.
name: The name of the subscription for profiling. If no name is provided one is generated from the traceback of the calling function.
Returns:
The subscription holder.)",
py::arg("fn"), py::arg("order") = kDefaultOrder, py::arg("name") = nullptr)
.def("create_subscription_to_push_by_type",
[](IEventStream* stream, EventType eventType, const std::function<void(IEvent*)>& onEventFn, Order order,
const char* name) {
std::string sname;
if (!name || name[0] == '\0')
{
sname = getPythonCaller();
name = sname.empty() ? nullptr : sname.c_str();
}
py::gil_scoped_release gil;
return stream->createSubscriptionToPushByType(
eventType, carb::stealObject(new PythonEventListener(onEventFn)).get(), order, name);
},
R"(
Subscribes to pushing events into stream.
See :class:`.Subscription` for more information on subscribing mechanism.
Args:
event_type: Event type to listen to.
fn: The callback to be called on event push.
order: An integer order specifier. Lower numbers are called first. Negative numbers are allowed. Default is 0.
name: The name of the subscription for profiling. If no name is provided one is generated from the traceback of the calling function.
Returns:
The subscription holder.)",
py::arg("event_type"), py::arg("fn"), py::arg("order") = kDefaultOrder, py::arg("name") = nullptr)
.def_property_readonly("event_count", &IEventStream::getCount, py::call_guard<py::gil_scoped_release>())
.def("set_subscription_to_pop_order", &IEventStream::setSubscriptionToPopOrder,
R"(
Set subscription to pop order by name of subscription.
)",
py::arg("name"), py::arg("order"), py::call_guard<py::gil_scoped_release>())
.def("set_subscription_to_push_order", &IEventStream::setSubscriptionToPushOrder,
R"(
Set subscription to push order by name of subscription.
)",
py::arg("name"), py::arg("order"), py::call_guard<py::gil_scoped_release>())
.def("get_subscription_to_pop_order",
[](IEventStream* self, const char* subscriptionName) -> py::object {
Order order;
bool b;
{
py::gil_scoped_release nogil;
b = self->getSubscriptionToPopOrder(subscriptionName, &order);
}
if (b)
return py::int_(order);
return py::none();
},
R"(
Get subscription to pop order by name of subscription. Return None if subscription was not found.
)",
py::arg("name"))
.def("get_subscription_to_push_order",
[](IEventStream* self, const char* subscriptionName) -> py::object {
Order order;
bool b;
{
py::gil_scoped_release nogil;
b = self->getSubscriptionToPushOrder(subscriptionName, &order);
}
if (b)
return py::int_(order);
return py::none();
},
R"(
Get subscription to push order by name of subscription. Return None if subscription was not found.
)",
py::arg("name"))
.def("pop", &IEventStream::pop,
R"(
Pop event.
This function blocks execution until there is an event to pop.
Returns:
(:class:`.Event`) object. You own this object, it can be stored.
)",
py::call_guard<py::gil_scoped_release>())
.def("try_pop", &IEventStream::tryPop,
R"(
Try pop event.
Returns:
Pops (:class:`.Event`) if stream is not empty or return `None`.
)",
py::call_guard<py::gil_scoped_release>()
)
.def("pump", &IEventStream::pump,
R"(
Pump event stream.
Dispatches all events in a stream.
)",
py::call_guard<py::gil_scoped_release>()
)
// push/dispatch: the payload dict must be converted while holding the GIL, so the
// GIL is released only around the ABI calls on either side of the conversion.
.def("push",
[](IEventStream* self, EventType eventType, SenderId sender, py::dict dict) {
ObjectPtrWrapper<IEvent> e;
{
py::gil_scoped_release nogil;
e = self->createEvent(eventType, sender);
}
carb::dictionary::setPyObject(carb::dictionary::getDictionary(), e->payload, nullptr, dict);
{
py::gil_scoped_release nogil;
self->push(e.get());
}
},
R"(
Push :class:`.Event` into stream.
Args:
event_type (int): :class:`.Event` type.
sender (int): Sender id. Unique can be acquired using :func:`.acquire_unique_sender_id`.
dict (typing.Dict): :class:`.Event` payload.
)",
py::arg("event_type") = 0, py::arg("sender") = 0, py::arg("payload") = py::dict())
.def("dispatch",
[](IEventStream* self, EventType eventType, SenderId sender, py::dict dict) {
ObjectPtrWrapper<IEvent> e;
{
py::gil_scoped_release nogil;
e = self->createEvent(eventType, sender);
}
carb::dictionary::setPyObject(carb::dictionary::getDictionary(), e->payload, nullptr, dict);
{
py::gil_scoped_release nogil;
self->dispatch(e.get());
}
},
R"(
Dispatch :class:`.Event` immediately without putting it into stream.
Args:
event_type (int): :class:`.Event` type.
sender (int): Sender id. Unique can be acquired using :func:`.acquire_unique_sender_id`.
dict (typing.Dict): :class:`.Event` payload.
)",
py::arg("event_type") = 0, py::arg("sender") = 0, py::arg("payload") = py::dict())
.def_property_readonly("name", &IEventStream::getName, R"(
Gets the name of the :class:`.EventStream`.
Returns:
The name of the event stream.
)",
py::call_guard<py::gil_scoped_release>());
// IEvents: stream factory and sender-id management.
CARB_IGNOREWARNING_MSC_WITH_PUSH(5205)
py::class_<IEvents>(m, "IEvents")
.def("create_event_stream",
[](IEvents* events, const char* name) {
std::string sname;
if (!name)
{
sname = getPythonCaller();
name = sname.empty() ? nullptr : sname.c_str();
}
py::gil_scoped_release gil;
return events->createEventStream(name);
},
R"(
Create new `.EventStream`.
Args:
name: The name of the .EventStream for profiling. If no name is provided one is generated from the traceback of the calling function.
)",
py::arg("name") = (const char*)nullptr)
.def("acquire_unique_sender_id", &IEvents::acquireUniqueSenderId,
R"(
Acquire unique sender id.
Call :func:`.release_unique_sender_id` when it is not needed anymore. It can be reused then.
)",
py::call_guard<py::gil_scoped_release>())
.def("release_unique_sender_id", &IEvents::releaseUniqueSenderId, py::call_guard<py::gil_scoped_release>());
CARB_IGNOREWARNING_MSC_POP
}
} // namespace
} // namespace events
} // namespace carb
|
omniverse-code/kit/include/carb/events/EventsUtils.h | // Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Helper utilities for carb.events.
#pragma once
#include "../Framework.h"
#include "../InterfaceUtils.h"
#include "../ObjectUtils.h"
#include "../dictionary/DictionaryUtils.h"
#include "IEvents.h"
#include <functional>
#include <utility>
namespace carb
{
namespace events
{
/**
 * Helper for carb::getCachedInterface<IEvents>().
 * @returns The cached carb::events::IEvents interface.
 */
inline IEvents* getCachedEventsInterface()
{
    // Thin convenience wrapper around the generic cached-interface accessor.
    IEvents* events = getCachedInterface<IEvents>();
    return events;
}
/**
 * A helper to use a `std::function` as an carb::events::IEventListener.
 */
class LambdaEventListener : public IEventListener
{
public:
    /**
     * Constructor.
     * @param fn The `std::function` to call when onEvent() is called.
     */
    LambdaEventListener(std::function<void(IEvent*)> fn) : m_fn(std::move(fn))
    {
    }

    /**
     * Passes the event to the `std::function` given to the constructor.
     * @param e The carb::events::IEvent to process.
     */
    void onEvent(IEvent* e) override
    {
        // A default-constructed (empty) function is tolerated and simply ignored.
        if (m_fn)
            m_fn(e);
    }

private:
    std::function<void(IEvent*)> m_fn;

    CARB_IOBJECT_IMPL
};
/**
 * A helper for IEvents::createSubscriptionToPop() that creates a @ref LambdaEventListener.
 * @param stream The @ref IEventStream to use.
 * @param onEventFn A handler that will be invoked for each carb::events::IEvent that is popped.
 * @param order An optional value used to specify the order tier. Lower order tiers will be notified first.
 *   Multiple IEventListener objects at the same order tier are notified in an undefined order.
 * @param subscriptionName An optional name for the subscription. Names do not need to be unique.
 * @returns A ISubscription pointer. The subscription is valid as long as the ISubscription pointer is referenced.
 *   Alternately, the IEventListener can be unsubscribed by calling ISubscription::unsubscribe().
 */
inline ObjectPtr<ISubscription> createSubscriptionToPop(IEventStream* stream,
                                                        std::function<void(IEvent*)> onEventFn,
                                                        Order order = kDefaultOrder,
                                                        const char* subscriptionName = nullptr)
{
    // The stream retains its own reference to the listener; ours is dropped on return.
    auto listener = carb::stealObject(new LambdaEventListener(std::move(onEventFn)));
    return stream->createSubscriptionToPop(listener.get(), order, subscriptionName);
}

/// @copydoc createSubscriptionToPop
/// @param eventType A specific event to listen for.
inline ObjectPtr<ISubscription> createSubscriptionToPopByType(IEventStream* stream,
                                                              EventType eventType,
                                                              std::function<void(IEvent*)> onEventFn,
                                                              Order order = kDefaultOrder,
                                                              const char* subscriptionName = nullptr)
{
    auto listener = carb::stealObject(new LambdaEventListener(std::move(onEventFn)));
    return stream->createSubscriptionToPopByType(eventType, listener.get(), order, subscriptionName);
}

/**
 * A helper for IEvents::createSubscriptionToPush() that creates a @ref LambdaEventListener.
 * @param stream The @ref IEventStream to use.
 * @param onEventFn A handler that will be invoked for each carb::events::IEvent that is pushed.
 * @param order An optional value used to specify the order tier. Lower order tiers will be notified first.
 *   Multiple IEventListener objects at the same order tier are notified in an undefined order.
 * @param subscriptionName An optional name for the subscription. Names do not need to be unique.
 * @returns A ISubscription pointer. The subscription is valid as long as the ISubscription pointer is referenced.
 *   Alternately, the IEventListener can be unsubscribed by calling ISubscription::unsubscribe().
 */
inline ObjectPtr<ISubscription> createSubscriptionToPush(IEventStream* stream,
                                                         std::function<void(IEvent*)> onEventFn,
                                                         Order order = kDefaultOrder,
                                                         const char* subscriptionName = nullptr)
{
    auto listener = carb::stealObject(new LambdaEventListener(std::move(onEventFn)));
    return stream->createSubscriptionToPush(listener.get(), order, subscriptionName);
}

/// @copydoc createSubscriptionToPush
/// @param eventType A specific event to listen for.
inline ObjectPtr<ISubscription> createSubscriptionToPushByType(IEventStream* stream,
                                                               EventType eventType,
                                                               std::function<void(IEvent*)> onEventFn,
                                                               Order order = kDefaultOrder,
                                                               const char* subscriptionName = nullptr)
{
    auto listener = carb::stealObject(new LambdaEventListener(std::move(onEventFn)));
    return stream->createSubscriptionToPushByType(eventType, listener.get(), order, subscriptionName);
}
} // namespace events
} // namespace carb
|
omniverse-code/kit/include/carb/assets/AssetsUtils.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Utilities for *carb.assets.plugin*
#pragma once
#include "IAssets.h"
namespace carb
{
namespace assets
{
/**
 * A RAII-style helper class to manage the result of \ref IAssets::acquireSnapshot().
 * `operator bool()` can be used to test if the asset successfully acquired.
 * `getReason()` can be used to check why an asset failed to load.
 * If the asset successfully loaded, it can be obtained with `get()`.
 */
template <class Type>
class ScopedSnapshot
{
public:
    /**
     * Default Constructor; produces an empty object
     */
    ScopedSnapshot() = default;

    /**
     * \c nullptr Constructor; produces an empty object
     */
    ScopedSnapshot(std::nullptr_t)
    {
    }

    /**
     * Constructs a \c ScopedSnapshot for the given asset ID.
     *
     * If snapshot acquisition fails, `*this` will be \c false; use \ref getReason() to determine why.
     * @param assets The IAssets interface
     * @param assetId The asset ID to acquire a snapshot for.
     */
    ScopedSnapshot(IAssets* assets, Id assetId) : m_assets(assets)
    {
        m_snapshot = assets->acquireSnapshot(assetId, getAssetType<Type>(), m_reason);
        m_value = reinterpret_cast<Type*>(assets->getDataFromSnapshot(m_snapshot));
    }

    //! Destructor
    ~ScopedSnapshot()
    {
        release();
    }

    //! ScopedSnapshot is move-constructible.
    //! @note \c noexcept so containers and algorithms may move rather than copy.
    //! @param other The other \c ScopedSnapshot to move from; \c other will be empty.
    ScopedSnapshot(ScopedSnapshot&& other) noexcept
    {
        m_value = other.m_value;
        m_assets = other.m_assets;
        m_snapshot = other.m_snapshot;
        m_reason = other.m_reason;
        other.m_assets = nullptr;
        other.m_value = nullptr;
        other.m_snapshot = kInvalidSnapshot;
        other.m_reason = Reason::eFailed;
    }

    /**
     * Move-assignment operator.
     * @note \c noexcept; self-move-assignment is a safe no-op (previously it was only
     * diagnosed by an assert, which compiles out in release builds).
     * @param other The other \c ScopedSnapshot to move from; \c other will be empty.
     * @returns \c *this
     */
    ScopedSnapshot& operator=(ScopedSnapshot&& other) noexcept
    {
        // This assert should never happen, but it is possible to accidentally write this
        // code, though one has to contort themselves to do it. It is considered
        // invalid nonetheless.
        CARB_ASSERT(this != &other);
        if (this != &other)
        {
            release();
            m_value = other.m_value;
            m_assets = other.m_assets;
            m_snapshot = other.m_snapshot;
            m_reason = other.m_reason;
            other.m_assets = nullptr;
            other.m_value = nullptr;
            other.m_snapshot = kInvalidSnapshot;
            other.m_reason = Reason::eFailed;
        }
        return *this;
    }

    CARB_PREVENT_COPY(ScopedSnapshot);

    /**
     * Obtain the asset data from the snapshot.
     * @returns The loaded asset if the asset load was successful; \c nullptr otherwise.
     */
    Type* get()
    {
        return m_value;
    }

    //! @copydoc get()
    const Type* get() const
    {
        return m_value;
    }

    /**
     * Dereference-access operator.
     * @returns The loaded asset; malformed if `*this == false`.
     */
    Type* operator->()
    {
        return get();
    }

    //! @copydoc operator->()
    const Type* operator->() const
    {
        return get();
    }

    /**
     * Dereference operator.
     * @returns A reference to the loaded asset; malformed if `*this == false`.
     */
    Type& operator*()
    {
        return *get();
    }

    //! @copydoc operator*()
    const Type& operator*() const
    {
        return *get();
    }

    /**
     * Test if the asset snapshot successfully loaded.
     * @returns \c true if the asset snapshot successfully loaded and its value can be retrieved via \ref get();
     * \c false otherwise.
     */
    constexpr explicit operator bool() const noexcept
    {
        return m_value != nullptr;
    }

    /**
     * Obtain the current asset status.
     * @returns the \ref Reason status code based on acquiring the snapshot. An empty \c ScopedSnapshot will return
     * \ref Reason::eFailed.
     */
    Reason getReason() const
    {
        return m_reason;
    }

private:
    //! Releases the held snapshot (if any) and resets all members to the empty state.
    void release()
    {
        if (m_assets && m_snapshot)
        {
            m_assets->releaseSnapshot(m_snapshot);
        }
        m_value = nullptr;
        m_assets = nullptr;
        m_snapshot = kInvalidSnapshot;
        m_reason = Reason::eFailed;
    }

    // Note this member is first to help in debugging.
    Type* m_value = nullptr;
    carb::assets::IAssets* m_assets = nullptr;
    Snapshot m_snapshot = kInvalidSnapshot;
    Reason m_reason = Reason::eFailed;
};
//! \c ScopedSnapshot equality operator
//! @param a A \c ScopedSnapshot to compare
//! @param b A \c ScopedSnapshot to compare
//! @returns \c true if \c a and \c b refer to the same asset data; \c false otherwise.
template <class Type>
bool operator==(const carb::assets::ScopedSnapshot<Type>& a, const carb::assets::ScopedSnapshot<Type>& b)
{
    return a.get() == b.get();
}

//! \c ScopedSnapshot inequality operator
//! @param a A \c ScopedSnapshot to compare
//! @param b A \c ScopedSnapshot to compare
//! @returns \c true if \c a and \c b are unequal; \c false otherwise.
template <class Type>
bool operator!=(const carb::assets::ScopedSnapshot<Type>& a, const carb::assets::ScopedSnapshot<Type>& b)
{
    // Defined in terms of operator== so the two can never disagree.
    return !(a == b);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// nullptr comparison overloads (both operand orders) so `snapshot == nullptr` style
// checks work; intentionally hidden from the generated documentation.
template <class Type>
bool operator==(const carb::assets::ScopedSnapshot<Type>& a, std::nullptr_t)
{
    return a.get() == nullptr;
}

template <class Type>
bool operator==(std::nullptr_t, const carb::assets::ScopedSnapshot<Type>& a)
{
    return a.get() == nullptr;
}

template <class Type>
bool operator!=(const carb::assets::ScopedSnapshot<Type>& a, std::nullptr_t)
{
    return a.get() != nullptr;
}

template <class Type>
bool operator!=(std::nullptr_t, const carb::assets::ScopedSnapshot<Type>& a)
{
    return a.get() != nullptr;
}
#endif
} // namespace assets
} // namespace carb
|
omniverse-code/kit/include/carb/assets/IAssetsBlob.h | // Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief "Blob" (Binary Large OBject) asset type definition
#pragma once
#include "AssetsTypes.h"
namespace carb
{
namespace assets
{
//! An opaque type representing a binary large object. Use \ref IAssetsBlob to access the data.
//! Can be loaded with `IAssets::loadAsset<Blob>(...)`.
struct Blob;
/**
 * Defines an interface for managing assets that are loaded asynchronously.
 *
 * @note Members are ABI function pointers (CARB_ABI), not virtual functions; the layout
 * is part of the plugin's binary interface.
 */
struct IAssetsBlob
{
    CARB_PLUGIN_INTERFACE("carb::assets::IAssetsBlob", 1, 0)

    /**
     * Gets the data from a blob.
     *
     * @param blob The blob to use.
     * @return The blob byte data.
     */
    const uint8_t*(CARB_ABI* getBlobData)(Blob* blob);

    /**
     * Gets the size of the blob in bytes.
     *
     * @param blob The blob to use.
     * @return The size of the blob in bytes.
     */
    size_t(CARB_ABI* getBlobSize)(Blob* blob);
};
} // namespace assets
} // namespace carb
CARB_ASSET(carb::assets::Blob, 1, 0)
|
omniverse-code/kit/include/carb/assets/IAssets.h | // Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Interface definition for *carb.assets.plugin*
#pragma once
#include "../Interface.h"
#include "../Types.h"
#include "../datasource/IDataSource.h"
#include "../tasking/TaskingHelpers.h"
#include "AssetsTypes.h"
namespace carb
{
//! Namespace for *carb.assets.plugin* and related utilities.
namespace assets
{
/**
* Defines an interface for managing assets that are loaded asynchronously.
*/
struct IAssets
{
CARB_PLUGIN_INTERFACE("carb::assets::IAssets", 2, 0)
/**
 * Creates an asset pool for managing and caching assets together.
 *
 * @param name The name of the pool.
 * @return The asset pool handle.
 */
Pool(CARB_ABI* createPool)(const char* name);
/**
 * Destroys an asset pool previously created with \ref createPool().
 *
 * @param pool The pool to destroy.
 */
void(CARB_ABI* destroyPool)(Pool pool);
/**
 * Gets basic statistics about a pool
 *
 * @note The resulting values from this function are transient and the values may have changed by other threads
 * before the results can be read. They should be used for debugging purposes only.
 *
 * @param pool The pool to get stats about.
 * @param totalAssets Receives the number of assets in the pool.
 * @param assetsLoading Receives the number of assets currently loading.
 */
void(CARB_ABI* poolStats)(Pool pool, int& totalAssets, int& assetsLoading);
//! @private
// Backing ABI entry point for the loadAsset<>() template helpers below; prefer those wrappers.
Id(CARB_ABI* internalLoadAsset)(carb::datasource::IDataSource* dataSource,
                                carb::datasource::Connection* connection,
                                const char* path,
                                Pool pool,
                                const Type& assetType,
                                const LoadParameters* loadParameters,
                                carb::tasking::Object const* trackers,
                                size_t numTrackers);
//! @private
// Deprecated inline wrapper kept for source compatibility; forwards directly to internalLoadAsset().
CARB_DEPRECATED("Use loadAsset<> instead.")
Id loadAssetEx(carb::datasource::IDataSource* dataSource,
               carb::datasource::Connection* connection,
               const char* path,
               Pool pool,
               const Type& assetType,
               const LoadParameters* loadParameters,
               carb::tasking::Object const* trackers,
               size_t numTrackers)
{
    return internalLoadAsset(dataSource, connection, path, pool, assetType, loadParameters, trackers, numTrackers);
}
/**
 * Unloads an asset previously loaded with \ref loadAsset().
 *
 * If an asset that is currently loading is unloaded, this will attempt to cancel the load.
 *
 * @param assetId The asset to unload.
 */
void(CARB_ABI* unloadAsset)(Id assetId);
/**
 * Unloads all assets from the specified asset pool.
 *
 * If any assets in the pool are currently loading, this will attempt to cancel the load.
 *
 * @param pool The pool to clear the assets from.
 */
void(CARB_ABI* unloadAssets)(Pool pool);
/**
 * Pauses the current thread or task until the requested asset has finished loading.
 *
 * @param assetId The assetId to wait for.
 */
void(CARB_ABI* yieldForAsset)(Id assetId);
/**
 * Pauses the current thread or task until all assets in the given pool have finished loading.
 *
 * @param pool The pool containing the assets to wait for.
 */
void(CARB_ABI* yieldForAssets)(Pool pool);
/**
 * Registers a callback that will be notified when an asset changes.
 *
 * The callback occurs after the asset has changed. At the point of the callback, \ref acquireSnapshot() will return
 * the updated data.
 *
 * @note Only one callback can be registered for a given \p assetId. If the given \p assetId already has a callback
 * registered, it is revoked in favor of this new callback.
 *
 * @param assetId The asset to monitor for changes.
 * @param onChangeEvent The callback function to be called once the changes are made.
 * @param userData The user data associated with the callback.
 */
void(CARB_ABI* subscribeToChangeEvent)(Id assetId, OnChangeEventFn onChangeEvent, void* userData);
/**
 * Unsubscribes any asset change callbacks for a given asset previously registered with
 * \ref subscribeToChangeEvent().
 *
 * When this function returns, it is guaranteed that the previously registered \c onChangeEvent function is not
 * currently being called from another thread, and will not be called again.
 *
 * @param assetId The assetId to remove subscriptions for.
 */
void(CARB_ABI* unsubscribeToChangeEvent)(Id assetId);
/**
 * Acquires a \ref Snapshot of the asset of the given type.
 *
 * @note It is the caller's responsibility to release the snapshot via \ref releaseSnapshot() when finished with it.
 *
 * If an asset changes (i.e. a change event was issued for the given \p assetId), existing snapshots are not
 * updated; you will have to release existing snapshots and acquire a new snapshot to get the updated data.
 *
 * @param assetId The asset to take a snapshot of.
 * @param assetType The asset type being requested.
 * @param reason The reason the snapshot could, or couldn't be taken.
 * While the snapshot is loading, it will return \ref Reason::eLoading;
 * any other value returned means the snapshot has finished loading.
 * It is recommended to use \ref carb::tasking::Trackers with \ref loadAsset() instead of polling this
 * function until \ref Reason::eLoading is no longer returned.
 * @returns The snapshot handle for the asset at the present time.
 */
Snapshot(CARB_ABI* acquireSnapshot)(Id assetId, const Type& assetType, Reason& reason);
/**
 * Releases a snapshot of an asset previously returned by \ref acquireSnapshot().
 *
 * @param snapshot The snapshot to release.
 */
void(CARB_ABI* releaseSnapshot)(Snapshot snapshot);
/**
 * Gets the underlying data for the asset based on a snapshot.
 *
 * @param snapshot The snapshot of the asset to get the data from.
 * @returns The raw data of the asset at the snapshot. If the asset's \c Type has been unregistered then `nullptr`
 * is returned.
 */
void*(CARB_ABI* getDataFromSnapshot)(Snapshot snapshot);
/**
 * Forces all dirty (that is, assets with changed data) assets of a given \c Type to reload.
 *
 * This function is only necessary for registered types where \ref AssetTypeParams::autoReload is set to \c false.
 *
 * @param assetType The \c Type of asset to request a reload for.
 */
void(CARB_ABI* reloadAnyDirty)(Type assetType);
/**
 * Used to register a loader for a specific asset \c Type.
 *
 * @warning Typically \ref registerAssetType() should be used instead of this lower-level function.
 *
 * @param assetType The asset type to register.
 * @param desc The loader descriptor.
 * @param params The \ref AssetTypeParams settings for this assetType
 */
void(CARB_ABI* registerAssetTypeEx)(const Type& assetType, const LoaderDesc& desc, const AssetTypeParams& params);
/**
 * Unregisters a specific asset loader.
 *
 * @note Typically \ref unregisterAssetType() should be used instead of this lower-level function.
 *
 * @warning All data from any \ref Snapshot objects (i.e. from \ref acquireSnapshot()) is invalid after calling
 * this function. The \ref Snapshot handle remains valid but any attempts to retrieve data from the \ref Snapshot
 * (with \ref getDataFromSnapshot()) will return \c nullptr. Using a \ref ScopedSnapshot of the given \c Type after
 * calling this function produces undefined behavior.
 *
 * @note This function will attempt to cancel all loading tasks for this \p assetType and will wait for all loading
 * tasks to complete.
 *
 * @param assetType The asset type to unregister.
 */
void(CARB_ABI* unregisterAssetTypeEx)(const Type& assetType);
/**
 * Loads an asset of the given type. This overload uses default \ref LoadParameters.
 *
 * Events:
 * - `Asset.BeginLoading` - Sent in the calling thread if asset load starts. Also sent by \ref reloadAnyDirty() or
 * by a background thread if underlying data changes. Parameters:
 * - `Path` (`const char*`) - the path to the asset.
 * - `AssetId` (\ref Id) - the asset ID that is loading.
 * - `Asset.EndLoading` - Sent from a background thread whenever asset load finishes, only for assets that have
 * previously sent a `Asset.BeginLoading` event.
 * - `Path` (`const char*`) - the path to the asset.
 * - `AssetId` (\ref Id) - the asset ID that finished loading.
 * - `Success` (bool) - whether the load was successful or not. If \c true, \ref acquireSnapshot() will acquire
 * the new data for the asset.
 *
 * @tparam Type The type of the asset. A compile error will occur if \ref getAssetType() for the given type does not
 * resolve to a function. @see CARB_ASSET.
 * @param dataSource The data source to load from.
 * @param connection The connection (from the given data source) to load from.
 * @param path The asset path to load.
 * @param pool The pool to load the asset into.
 * @param trackers (Optional) Trackers that can be queried to see when the asset is done loading.
 * @returns A unique \ref Id that identifies this asset. The asset system internally de-duplicates based on path,
 * datasource, connection and \ref LoadParameters, so several different asset \ref Id results may reference the same
 * underlying asset.
 */
template <typename Type>
Id loadAsset(carb::datasource::IDataSource* dataSource,
             carb::datasource::Connection* connection,
             const char* path,
             Pool pool,
             carb::tasking::Trackers trackers = carb::tasking::Trackers{});
/**
 * Loads an asset of the given type, with the given \ref LoadParameters.
 *
 * Events:
 * - `Asset.BeginLoading` - Sent in the calling thread if asset load starts. Also sent by \ref reloadAnyDirty() or
 * by a background thread if underlying data changes. Parameters:
 * - `Path` (`const char*`) - the path to the asset.
 * - `AssetId` (\ref Id) - the asset ID that is loading.
 * - `Asset.EndLoading` - Sent from a background thread whenever asset load finishes, only for assets that have
 * previously sent a `Asset.BeginLoading` event.
 * - `Path` (`const char*`) - the path to the asset.
 * - `AssetId` (\ref Id) - the asset ID that finished loading.
 * - `Success` (bool) - whether the load was successful or not. If \c true, \ref acquireSnapshot() will acquire
 * the new data for the asset.
 *
 * @tparam Type The type of the asset. A compile error will occur if \ref getAssetType() for the given type does not
 * resolve to a function. @see CARB_ASSET.
 * @param dataSource The data source to load from.
 * @param connection The connection (from the given data source) to load from.
 * @param path The asset path to load.
 * @param pool The pool to load the asset into.
 * @param loadParameters The \ref LoadParameters derived class containing information about how to load the asset.
 * @param trackers (Optional) Trackers that can be queried to see when the asset is done loading.
 * @returns A unique \ref Id that identifies this asset. The asset system internally de-duplicates based on path,
 * datasource, connection and \ref LoadParameters, so several different asset \ref Id results may reference the same
 * underlying asset.
 */
template <typename Type>
Id loadAsset(carb::datasource::IDataSource* dataSource,
             carb::datasource::Connection* connection,
             const char* path,
             Pool pool,
             const LoadParameters& loadParameters,
             carb::tasking::Trackers trackers = carb::tasking::Trackers{});
/**
 * Takes a snapshot of the asset in a RAII-style object.
 *
 * @tparam Type The asset type.
 * @param assetId The assetId to take a snapshot of.
 * @returns A RAII-style object that manages the snapshot of data for the object.
 */
template <typename Type>
ScopedSnapshot<Type> takeSnapshot(Id assetId);
/**
 * Used to register a loader for a specific asset \c Type.
 *
 * @tparam Type The asset type to register.
 * @param loaderDesc The loader descriptor.
 * @param params The \ref AssetTypeParams settings for this assetType
 */
template <typename Type>
void registerAssetType(const LoaderDesc& loaderDesc, const AssetTypeParams& params = AssetTypeParams::getDefault());
/**
 * Unregisters a specific asset loader.
 *
 * @note Typically \ref unregisterAssetType() should be used instead of this lower-level function.
 *
 * @warning All data from any \ref Snapshot objects (i.e. from \ref acquireSnapshot()) is invalid after calling
 * this function. The \ref Snapshot handle remains valid but any attempts to retrieve data from the \ref Snapshot
 * (with \ref getDataFromSnapshot()) will return \c nullptr. Using a \ref ScopedSnapshot of the given \c Type after
 * calling this function produces undefined behavior.
 *
 * @note This function will attempt to cancel all loading tasks for this \p assetType and will wait for all loading
 * tasks to complete.
 *
 * @tparam Type The asset type to unregister.
 */
template <typename Type>
void unregisterAssetType();
};
} // namespace assets
} // namespace carb
#include "IAssets.inl"
|
omniverse-code/kit/include/carb/assets/IAssets.inl | // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
namespace carb
{
namespace assets
{
template <typename Type>
inline ScopedSnapshot<Type> IAssets::takeSnapshot(Id assetId)
{
    // RAII: the ScopedSnapshot acquires the snapshot on construction and releases it
    // automatically when it goes out of scope (see the doc comment in IAssets.h).
    return ScopedSnapshot<Type>(this, assetId);
}
// Loads an asset of type \c Type with default LoadParameters; see the declaration in
// IAssets.h for the full contract (events, de-duplication, trackers).
template <typename Type>
inline Id IAssets::loadAsset(carb::datasource::IDataSource* dataSource,
                             carb::datasource::Connection* connection,
                             const char* path,
                             Pool pool,
                             carb::tasking::Trackers trackers)
{
// Unpack the Trackers helper into the raw pointer + count pair required by the ABI call.
carb::tasking::Tracker const* trackerArray{ nullptr };
size_t trackerCount{ 0 };
trackers.output(trackerArray, trackerCount);
// nullptr load parameters select the loader's defaults for this overload.
return internalLoadAsset(
    dataSource, connection, path, pool, getAssetType<Type>(), nullptr, trackerArray, trackerCount);
}
// Loads an asset of type \c Type using the caller-supplied LoadParameters; see the
// declaration in IAssets.h for the full contract (events, de-duplication, trackers).
template <typename Type>
inline Id IAssets::loadAsset(carb::datasource::IDataSource* dataSource,
                             carb::datasource::Connection* connection,
                             const char* path,
                             Pool pool,
                             const LoadParameters& loadParameters,
                             carb::tasking::Trackers trackers)
{
// Unpack the Trackers helper into the raw pointer + count pair required by the ABI call.
carb::tasking::Tracker const* trackerArray{ nullptr };
size_t trackerCount{ 0 };
trackers.output(trackerArray, trackerCount);
return internalLoadAsset(
    dataSource, connection, path, pool, getAssetType<Type>(), &loadParameters, trackerArray, trackerCount);
}
template <typename Type>
inline void IAssets::registerAssetType(const LoaderDesc& loaderDesc, const AssetTypeParams& params)
{
    // Forward to the ABI entry point, resolving the compile-time asset type tag via
    // getAssetType<Type>() (specialized by the CARB_ASSET macro).
    registerAssetTypeEx(getAssetType<Type>(), loaderDesc, params);
}
template <typename Type>
inline void IAssets::unregisterAssetType()
{
    // Forward to the ABI entry point, resolving the compile-time asset type tag via
    // getAssetType<Type>() (specialized by the CARB_ASSET macro).
    unregisterAssetTypeEx(getAssetType<Type>());
}
} // namespace assets
} // namespace carb
|
omniverse-code/kit/include/carb/assets/AssetsTypes.h | // Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//!
//! @brief Type definitions for *carb.assets.plugin*
#pragma once
#include "../Defines.h"
#include "../extras/Hash.h"
#include "../Strong.h"
namespace carb
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace datasource
{
struct IDataSource;
struct Connection;
} // namespace datasource
#endif
namespace assets
{
/**
 * The reason a Snapshot was taken or not.
 *
 * Returned through the @c reason out-parameter of \ref IAssets::acquireSnapshot().
 */
enum class Reason
{
eSuccess, //!< The asset was loaded, and the snapshot is valid.
eInvalidHandle, //!< The asset handle was invalid, this may mean the asset was canceled.
eInvalidType, //!< Although the asset may or may not have loaded, the snapshot type did not match the type the asset
              //!< was loaded from. This should be construed as a programmer error.
eFailed, //!< The asset was not loaded, because load asset failed. Notably loadAsset returned nullptr.
eLoading, //!< The asset is still in the process of loading.
};
// The following are handles to detect incorrect usage patterns such as using a handle
// after it has been destroyed.
//! An Asset ID, used to identify a particular asset.
CARB_STRONGTYPE(Id, size_t);
//! An Asset Pool, used to group assets together.
CARB_STRONGTYPE(Pool, size_t);
//! A snapshot, representing asset data at a given point in time.
CARB_STRONGTYPE(Snapshot, size_t);
//! C++ Type hash used to identify a C++ type. Typically a hash of a type name string.
using HashedType = uint64_t;
//! Used to identify an invalid asset id.
constexpr Id kInvalidAssetId{};
//! Used to identify an invalid pool.
constexpr Pool kInvalidPool{};
//! Used to identify an invalid snapshot.
constexpr Snapshot kInvalidSnapshot{};
//! A load context that exists for the duration of the load phase. A loader may create a subclass of this to maintain
//! context data about a type. Passed through the loader callbacks (see \ref CreateContextFn).
struct LoadContext
{
};
//! Parameters that can be passed into \ref IAssets::loadAsset(). Asset types can create a subclass of this to pass
//! asset-type-specific load parameters through the asset system.
struct LoadParameters
{
};
/**
 * Wrapper for an asset type that can be passed to various functions, identifies the asset as a hashed string
 * plus the version number, which can impact the LoadParameters structure.
 */
struct Type
{
//! Constructor
Type(HashedType hashedType, uint32_t majorVersion, uint32_t minorVersion)
    : hashedType(hashedType), majorVersion(majorVersion), minorVersion(minorVersion)
{
}
uint64_t hashedType; //!< The hashed string name of the Type.
uint32_t majorVersion; //!< The major version
uint32_t minorVersion; //!< The minor version
};
//! A scoped snapshot, this automatically releases the snapshot when it goes out of scope.
template <class Type>
class ScopedSnapshot;
/**
 * The hash must be a strong (not necessarily cryptographically secure) 128-bit hash.
 * 128-bit hashes are chosen because if the hash is well distributed, then the probability of a collision even given
 * a large set size is low. See: https://en.wikipedia.org/wiki/Birthday_problem#Probability_table
 *
 * For this reason when using hashing we do not do a deep compare on the objects.
 */
using AssetHash = extras::hash128_t;
/**
 * Defines a template for users to specialize their asset type as unique identifiers.
 *
 * The \ref CARB_ASSET macro specializes this function for registered asset types.
 *
 * @see CARB_ASSET
 *
 * Example:
 * ```cpp
 * CARB_ASSET(carb::imaging::Image, 1, 0)
 * ```
 */
template <typename T>
Type getAssetType();
/**
 * Determines if a currently processed load has been canceled.
 *
 * @note This callback is only valid within the scope of the \ref LoadAssetFn function
 *
 * This function is used by the \ref LoadAssetFn to determine if it can abort processing early.
 * Calling this function is optional. For longer or multi-stage loading processes, calling this function can be an
 * indicator as to whether any assets still exist which are interested in the data that \ref LoadAssetFn is processing.
 *
 * @param userData Must be the `isLoadCanceledUserData` provided to \ref LoadAssetFn.
 * @returns \c true if the load was canceled and should be aborted; \c false otherwise.
 */
using IsLoadCanceledFn = bool (*)(void* userData);
/**
 * Loader function (member of \ref LoaderDesc) used to construct an asset from raw data.
 *
 * Though the raw data for \p path has already been loaded from \p dataSource and \p connection, they are all provided
 * to this function in case additional data must be loaded.
 *
 * @note This function is called in Task Context (from carb::tasking::ITasking), therefore the called function is free
 * to await any fiber-aware I/O functions (i.e. sleep in a fiber safe manner) without bottlenecking the system.
 *
 * @param dataSource The datasource the asset is being loaded from.
 * @param connection The connection the asset is being loaded from.
 * @param path The path the file was loaded from.
 * @param data The data to be loaded.
 * @param size The size of the data (in bytes) to be loaded.
 * @param loadParameters Optional parameters passed from \ref IAssets::loadAsset() (Asset Type specific).
 * @param loadContext A context generated by \ref CreateContextFn, if one was provided.
 * @param isLoadCanceled A function that can be called periodically to determine if load should be canceled. This
 * function need only be called if the load process has multiple steps or lots of processing.
 * @param isLoadCanceledUserData User data that must be provided to \p isLoadCanceled when (if) it is called.
 * @return The loaded Asset, or \c nullptr if loading is aborted or an error occurred.
 */
using LoadAssetFn = void* (*)(carb::datasource::IDataSource* dataSource,
                              carb::datasource::Connection* connection,
                              const char* path,
                              const uint8_t* data,
                              size_t size,
                              const LoadParameters* loadParameters,
                              LoadContext* loadContext,
                              IsLoadCanceledFn isLoadCanceled,
                              void* isLoadCanceledUserData);
/**
 * Loader function (member of \ref LoaderDesc) used to unload an asset.
 *
 * @param asset The asset to be unloaded. This data was previously returned from \ref LoadAssetFn.
 */
using UnloadAssetFn = void (*)(void* asset);
/**
 * Loader function (member of \ref LoaderDesc) that informs the asset type that loading a file has started and creates
 * any load-specific context data necessary.
 *
 * @note This callback is optional, if it isn't provided, in future callbacks \c loadContext will be \c nullptr. If this
 * function is provided, \ref DestroyContextFn should also be provided to destroy the context.
 *
 * @note This function is called in Task Context (from carb::tasking::ITasking), therefore the called function is free
 * to await any fiber-aware I/O functions without bottlenecking the system.
 *
 * This function gives the programmer the option to do initial work that may be repetitive in the function calls
 * that load the file.
 *
 * The created context only exists during the loading of the asset, after the asset is loaded this context is destroyed
 * with \ref DestroyContextFn.
 *
 * @param dataSource The datasource the asset is being loaded from.
 * @param connection The connection the asset is being loaded from.
 * @param path The path of the file that is being loaded.
 * @param data The data read at the asset's URI path.
 * @param size The size of the data read in bytes.
 * @param loadParameters The load parameters passed to \ref IAssets::loadAsset(), or \c nullptr.
 * @returns A derivative of \ref LoadContext that is passed to \ref LoadAssetFn and \ref CreateDependenciesFn.
 */
using CreateContextFn = LoadContext* (*)(carb::datasource::IDataSource* dataSource,
                                         carb::datasource::Connection* connection,
                                         const char* path,
                                         const uint8_t* data,
                                         size_t size,
                                         const LoadParameters* loadParameters);
/**
 * Loader function (member of \ref LoaderDesc) that destroys the data created by \ref CreateContextFn.
 *
 * This function is optional, but always called if present, even if \p context is \c nullptr.
 *
 * @param context The context to destroy, previously created by \ref CreateContextFn.
 */
using DestroyContextFn = void (*)(LoadContext* context);
/**
 * Loader function (member of \ref LoaderDesc) that returns a string of the asset dependencies, that is, other files to
 * watch for changes.
 *
 * This function is optional, if it isn't provided, then only \p path (passed to \ref IAssets::loadAsset()) will be
 * monitored for changes.
 *
 * Many asset types, such as shaders, include additional files to generate their content. In this case it isn't just
 * the original file changing that requires the asset to be reloaded, if any dependent file changes, then the asset
 * has to be reloaded as well.
 *
 * Multiple dependencies must be separated by the `|` character.
 *
 * @param dataSource The datasource the asset is being loaded from.
 * @param connection The connection the asset is being loaded from.
 * @param path The path of the file that is being loaded.
 * @param data The loaded data of the requested assets file.
 * @param size The size of the requested asset file.
 * @param loadParameters The load parameters provided to \ref IAssets::loadAsset().
 * @param context The context if any generated by \ref CreateContextFn.
 * @return A string containing dependencies to watch, delimited by `|`; \c nullptr may be returned to indicate no
 * dependencies. The returned pointer will be passed to \ref DestroyDependenciesFn to clean up the returned memory.
 */
using CreateDependenciesFn = const char* (*)(carb::datasource::IDataSource* dataSource,
                                             carb::datasource::Connection* connection,
                                             const char* path,
                                             const uint8_t* data,
                                             size_t size,
                                             const LoadParameters* loadParameters,
                                             LoadContext* context);
/**
 * Loader function (member of \ref LoaderDesc) that cleans up the previously returned value from
 * \ref CreateDependenciesFn.
 *
 * @note This function is required and called if and only if \ref CreateDependenciesFn is provided.
 *
 * @param dependencies The string generated by a previous call to \ref CreateDependenciesFn.
 * @param context The context if any generated by \ref CreateContextFn.
 */
using DestroyDependenciesFn = void (*)(const char* dependencies, LoadContext* context);
/**
 * Loader function (member of \ref LoaderDesc) that is called when a dependency changes.
 *
 * @param dataSource The datasource of the dependency that changed.
 * @param connection The connection of the dependency that changed.
 * @param path The path of the dependency that changed.
 */
using OnDependencyChangedFn = void (*)(carb::datasource::IDataSource* dataSource,
                                       carb::datasource::Connection* connection,
                                       const char* path);
/**
 * Loader function (member of \ref LoaderDesc) that hashes an asset's data; this is used to de-duplicate assets
 * that produce the same hash in the asset system.
 *
 * This function is optional; if not provided, the path of the loaded file is hashed.
 *
 * If two different files return the same hash, then the system will return a copy of the first
 * asset load. An example of where this is useful is programmatically generated shaders. In this
 * context this system ensures that only one unique shader is created from many sources that generate
 * the same shader.
 *
 * @param dataSource The datasource the asset is being loaded from.
 * @param connection The connection the asset is being loaded from.
 * @param path The path of the file that is being loaded.
 * @param data The data to be loaded.
 * @param size The size of the data (in bytes) to be loaded.
 * @param loadParameters The load parameters passed to \ref IAssets::loadAsset().
 * @param context A context generated by \ref CreateContextFn, if one was provided.
 * @return The hash of the asset.
 */
using HashAssetFn = AssetHash (*)(carb::datasource::IDataSource* dataSource,
                                  carb::datasource::Connection* connection,
                                  const char* path,
                                  const uint8_t* data,
                                  size_t size,
                                  const LoadParameters* loadParameters,
                                  LoadContext* context);
/**
 * Loader function (member of \ref LoaderDesc) that copies a \ref LoadParameters structure.
 *
 * @note This function is required for any types where a \ref LoadParameters derivative may be passed to
 * \ref IAssets::loadAsset().
 *
 * @param loadParameters The load parameters to copy.
 * @return The copied load parameters.
 */
using CreateLoadParametersFn = LoadParameters* (*)(const LoadParameters* loadParameters);
/**
 * Loader function (member of \ref LoaderDesc) that destroys a copied \ref LoadParameters structure.
 *
 * @note This function is required for any types where a \ref LoadParameters derivative may be passed to
 * \ref IAssets::loadAsset().
 *
 * @param loadParameters The load parameters to destroy.
 */
using DestroyLoadParametersFn = void (*)(LoadParameters* loadParameters);
/**
 * Loader function (member of \ref LoaderDesc) that hashes a LoadParameters structure.
 *
 * @note This function is required for any types where a \ref LoadParameters derivative may be passed to
 * \ref IAssets::loadAsset().
 *
 * @param loadParameters The load parameters to hash.
 * @returns The hashed value of the load parameters structure.
 *
 * @note Be aware of struct padding when hashing the load parameter data.
 * Passing an entire parameter struct into a hash function may result in
 * padding being hashed, which will cause undefined behavior.
 */
using HashLoadParametersFn = uint64_t (*)(const LoadParameters* loadParameters);
/**
 * Loader function (member of \ref LoaderDesc) that determines if two \ref LoadParameters derivatives are equal.
 *
 * @note This function is required for any types where a \ref LoadParameters derivative may be passed to
 * \ref IAssets::loadAsset().
 *
 * @param loadParametersA A \ref LoadParameters to compare.
 * @param loadParametersB A \ref LoadParameters to compare.
 * @return \c true if loadParametersA == loadParametersB; \c false otherwise.
 *
 * @note Avoid using \c memcmp() to compare parameters structs as padding within
 * the struct could cause the comparison to unexpectedly fail.
 */
using LoadParametersEqualsFn = bool (*)(const LoadParameters* loadParametersA, const LoadParameters* loadParametersB);
/**
 * Defines the loader functions for an asset type.
 *
 * The following is the basic call order for loader functions (for clarity, parameters have been simplified).
 *
 * When an asset is being loaded for the first time, or reloaded:
 * ```cpp
 * context = createContext ? createContext() : nullptr;
 *
 * dependencies = createDependencies ? createDependencies() : nullptr;
 * if (dependencies)
 * {
 * // dependencies are processed
 * destroyDependencies(dependencies);
 * }
 *
 * hash = hashAsset();
 * // If the hash is already loaded then return that already loaded asset, otherwise:
 *
 * asset = loadAsset();
 * if (context)
 * destroyContext(context);
 * ```
 *
 * When the asset is destroyed:
 * ```cpp
 * unloadAsset(asset)
 * ```
 *
 */
struct LoaderDesc
{
LoadAssetFn loadAsset; //!< @see LoadAssetFn
UnloadAssetFn unloadAsset; //!< @see UnloadAssetFn
CreateLoadParametersFn createLoadParameters; //!< @see CreateLoadParametersFn
DestroyLoadParametersFn destroyLoadParameters; //!< @see DestroyLoadParametersFn
HashLoadParametersFn hashLoadParameters; //!< @see HashLoadParametersFn
LoadParametersEqualsFn loadParametersEquals; //!< @see LoadParametersEqualsFn
HashAssetFn hashAsset; //!< @see HashAssetFn
CreateContextFn createContext; //!< @see CreateContextFn
DestroyContextFn destroyContext; //!< @see DestroyContextFn
CreateDependenciesFn createDependencies; //!< @see CreateDependenciesFn
DestroyDependenciesFn destroyDependencies; //!< @see DestroyDependenciesFn
OnDependencyChangedFn onDependencyChanged; //!< @see OnDependencyChangedFn
};
//! Parameters that describe an asset type's characteristics.
struct AssetTypeParams
{
//! Must be initialized to `sizeof(AssetTypeParams)`. This is used as a version for future expansion of this
//! `struct`.
size_t sizeofThis;
//! The maximum number of outstanding concurrent loads of this asset type. A value of \c 0 indicates "unlimited." A
//! value of \c 1 indicates that loading is not thread-safe and only one asset may be loading at a given time. Any
//! other value limits the number of loading assets for the given type. (default=`0`)
uint32_t maxConcurrency;
//! Specifies that the assets should automatically reload when the file or one of its dependencies changes. If this
//! is false, you must call \ref IAssets::reloadAnyDirty() in order to manually trigger a reload. (default=`true`)
bool autoReload;
//! The amount of time in milliseconds to delay when automatically reloading an asset. This is because a reload is
//! triggered immediately when a change is detected from the datasource. A filesystem for example can trigger
//! multiple change notifications for a write to a file. This value gives a sliding window of changes so that all
//! changes that happen within the window get condensed into a single reload. (default=`100` ms)
uint32_t reloadDelayMs;
/**
 * Returns the default values for \c AssetTypeParams.
 * @returns Default values.
 */
constexpr static AssetTypeParams getDefault()
{
    // Order matches member declaration order: sizeofThis, maxConcurrency, autoReload, reloadDelayMs.
    return AssetTypeParams{ sizeof(AssetTypeParams), 0, true, 100 };
}
};
/**
 * Function to provide as a callback on asset changes.
 *
 * @see IAssets::subscribeToChangeEvent()
 *
 * @param assetId The asset ID of the asset that was modified.
 * @param userData The user data given when the subscription was created.
 */
using OnChangeEventFn = void (*)(Id assetId, void* userData);
} // namespace assets
} // namespace carb
#ifdef DOXYGEN_BUILD
/**
 * Registers an asset type.
 *
 * The version is used to protect the LoadParameters class as its definition could change thereby causing undefined
 * behavior if the asset loader version doesn't match the users of that loader. Therefore, the version should be updated
 * any time the LoadParameters is updated to avoid runtime issues.
 *
 * Note the additional two parameters which specify the version of the Asset, in this case
 * the imaging asset is version 1.0. This value should only need to be updated if the LoadParameters structure is
 * updated.
 *
 * @param t The type of the asset to register
 * @param majorVersion The major version of the asset type.
 * @param minorVersion The minor version of the asset type.
 */
#    define CARB_ASSET(t, majorVersion, minorVersion)
#else
// Specializes carb::assets::getAssetType<t>() so the templated IAssets helpers can
// resolve the hashed type tag + version for type `t` at compile time.
#    define CARB_ASSET(t, majorVersion, minorVersion)                                                                  \
        namespace carb                                                                                                 \
        {                                                                                                              \
        namespace assets                                                                                               \
        {                                                                                                              \
        template <>                                                                                                    \
        inline Type getAssetType<t>()                                                                                  \
        {                                                                                                              \
            return Type(CARB_HASH_TYPE(t), majorVersion, minorVersion);                                                \
        }                                                                                                              \
        }                                                                                                              \
        }
#endif
|
omniverse-code/kit/include/carb/logging/LoggingSettingsUtils.h | // Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
#pragma once
#include "../Framework.h"
#include "../settings/ISettings.h"
#include "ILogging.h"
#include "Log.h"
#include "StandardLogger.h"
#include <vector>
namespace carb
{
namespace logging
{
/**
* Converts a string to its equivalent OutputStream value.
* @param[in] name The case-insensitive name of an output stream value.
*
 * @returns This returns OutputStream::eStderr if name is "stderr".
 * @returns This returns OutputStream::eDefault for any other name.
*/
inline OutputStream stringToOutputStream(const char* name)
{
    // Table of recognized stream names; any name not listed maps to OutputStream::eDefault.
    static constexpr struct
    {
        const char* text;
        OutputStream stream;
    } kEntries[] = { { "stderr", OutputStream::eStderr } };
    for (auto const& entry : kEntries)
    {
        // Case-insensitive comparison; the helper function differs by platform.
#if CARB_PLATFORM_WINDOWS
        const bool matches = _stricmp(entry.text, name) == 0;
#else
        const bool matches = strcasecmp(entry.text, name) == 0;
#endif
        if (matches)
            return entry.stream;
    }
    return OutputStream::eDefault;
}
/**
 * Configures the global logging plugin with values from the settings plugin. Global logging
 * configuration specifies behavior for any loggers registered later; it dictates neither how
 * any specific logger operates nor how its output is formatted.
 *
 * Supported config fields:
 * - "level": string log level value, available options: "verbose"|"info"|"warning"|"error"|"fatal"
 * - "enabled": boolean value, enable or disable logging
 *
 * These values can be specified either per-source, in the source collection ("/log/sources/") —
 * for example, a <source>'s level is specified as "/log/sources/<source>/level" — or globally,
 * as "/log/level". The same pattern applies to the "enabled" property.
 *
 * @param settings The settings interface to read from. If `nullptr`, this function does nothing.
 */
inline void configureLogging(settings::ISettings* settings)
{
    Framework* f = getFramework();
    logging::ILogging* logging = f->acquireInterface<logging::ILogging>();
    if (settings == nullptr)
        return;
    if (logging)
    {
        // Keys for the global logging settings.
        const char* kLogLevel = "/log/level";
        const char* kLogEnabled = "/log/enabled";
        const char* kLogAsync = "/log/async";
        // Establish defaults so the getters below always return a usable value.
        settings->setDefaultString(kLogLevel, "Warning");
        settings->setDefaultBool(kLogEnabled, true);
        settings->setDefaultBool(kLogAsync, false);
        // The first order of business is to set logging according to config (this can be from file or command line):
        const int32_t logLevel = logging::stringToLevel(settings->getStringBuffer(kLogLevel));
        logging->setLevelThreshold(logLevel);
        const bool logEnabled = settings->getAsBool(kLogEnabled);
        logging->setLogEnabled(logEnabled);
        logging->setLogAsync(settings->getAsBool(kLogAsync));
        // Read config for source-specific setting overrides.
        // First, read the sources collection.
        const char* kLogSourcesKey = "/log/sources";
        const carb::dictionary::Item* logSources = settings->getSettingsDictionary(kLogSourcesKey);
        if (logSources != nullptr)
        {
            auto* dictInterface = f->acquireInterface<carb::dictionary::IDictionary>();
            // Traverse the sources collection to set per-source overrides.
            for (size_t i = 0, totalChildren = dictInterface->getItemChildCount(logSources); i < totalChildren; ++i)
            {
                const carb::dictionary::Item* curSource = dictInterface->getItemChildByIndex(logSources, i);
                if (curSource == nullptr)
                {
                    CARB_LOG_ERROR("Null log source present in the configuration.");
                    continue;
                }
                const char* curSourceName = dictInterface->getItemName(curSource);
                if (curSourceName == nullptr)
                {
                    CARB_LOG_ERROR("Log source with no name present in the configuration.");
                    continue;
                }
                // Read the source's "level" setting and apply it as an override for that source only.
                const carb::dictionary::Item* curLogLevel = dictInterface->getItem(curSource, "level");
                if (curLogLevel != nullptr)
                {
                    logging->setLevelThresholdForSource(
                        curSourceName, logging::LogSettingBehavior::eOverride,
                        logging::stringToLevel(dictInterface->getStringBuffer(curLogLevel)));
                }
                // Read the source's "enabled" setting; if it is present but not a bool, fall
                // back to the global enabled state read above.
                const carb::dictionary::Item* curLogEnabled = dictInterface->getItem(curSource, "enabled");
                if (curLogEnabled != nullptr)
                {
                    const bool isCurLogEnabled =
                        dictInterface->isAccessibleAs(dictionary::ItemType::eBool, curLogEnabled) ?
                            dictInterface->getAsBool(curLogEnabled) :
                            logEnabled;
                    logging->setLogEnabledForSource(
                        curSourceName, logging::LogSettingBehavior::eOverride, isCurLogEnabled);
                }
            }
        }
    }
}
// example-begin Configure StandardLogger
/**
 * Configures the default logger with values from the settings plugin. Default logger configuration
 * specifies where the log stream is written and how the output is formatted.
 *
 * Further instructions on the meaning of the fields are available in StandardLogger.h.
 *
 * @param settings The settings interface to read from. If `nullptr`, this function does nothing.
 */
inline void configureDefaultLogger(settings::ISettings* settings)
{
    Framework* f = getFramework();
    logging::ILogging* logging = f->acquireInterface<logging::ILogging>();
    if (settings == nullptr)
        return;
    if (logging)
    {
        // Config settings for the default logger.
        auto* logger = logging->getDefaultLogger();
        // Setting keys (all under the "/log" branch).
        const char* kFilePath = "/log/file";
        const char* kFileFlushLevelPath = "/log/fileFlushLevel";
        const char* kFlushStandardStreamOutputPath = "/log/flushStandardStreamOutput";
        const char* kEnableStandardStreamOutputPath = "/log/enableStandardStreamOutput";
        const char* kEnableDebugConsoleOutputPath = "/log/enableDebugConsoleOutput";
        const char* kEnableColorOutputPath = "/log/enableColorOutput";
        const char* kProcessGroupIdPath = "/log/processGroupId";
        const char* kIncludeSourcePath = "/log/includeSource";
        const char* kIncludeChannelPath = "/log/includeChannel";
        const char* kIncludeFilenamePath = "/log/includeFilename";
        const char* kIncludeLineNumberPath = "/log/includeLineNumber";
        const char* kIncludeFunctionNamePath = "/log/includeFunctionName";
        const char* kIncludeTimeStampPath = "/log/includeTimeStamp";
        const char* kIncludeThreadIdPath = "/log/includeThreadId";
        const char* kSetElapsedTimeUnitsPath = "/log/setElapsedTimeUnits";
        const char* kIncludeProcessIdPath = "/log/includeProcessId";
        const char* kLogOutputStream = "/log/outputStream";
        const char* kOutputStreamLevelThreshold = "/log/outputStreamLevel";
        const char* kDebugConsoleLevelThreshold = "/log/debugConsoleLevel";
        const char* kFileOutputLevelThreshold = "/log/fileLogLevel";
        const char* kDetailLogPath = "/log/detail";
        const char* kFullDetailLogPath = "/log/fullDetail";
        const char* kFileAppend = "/log/fileAppend";
        const char* kForceAnsiColor = "/log/forceAnsiColor";
        // Establish defaults so the getters below always return a usable value.
        // NOTE(review): no default is set for kFilePath (no file logging unless configured) or
        // kSetElapsedTimeUnitsPath (getStringBuffer() presumably returns nullptr when unset,
        // which setElapsedTimeUnits() documents as "disabled") — confirm against ISettings.
        settings->setDefaultString(kFileFlushLevelPath, "verbose");
        settings->setDefaultBool(kFlushStandardStreamOutputPath, false);
        settings->setDefaultBool(kEnableStandardStreamOutputPath, true);
        settings->setDefaultBool(kEnableDebugConsoleOutputPath, true);
        settings->setDefaultBool(kEnableColorOutputPath, true);
        settings->setDefaultInt(kProcessGroupIdPath, 0);
        settings->setDefaultBool(kIncludeSourcePath, true);
        settings->setDefaultBool(kIncludeChannelPath, true);
        settings->setDefaultBool(kIncludeFilenamePath, false);
        settings->setDefaultBool(kIncludeLineNumberPath, false);
        settings->setDefaultBool(kIncludeFunctionNamePath, false);
        settings->setDefaultBool(kIncludeTimeStampPath, false);
        settings->setDefaultBool(kIncludeThreadIdPath, false);
        settings->setDefaultBool(kIncludeProcessIdPath, false);
        settings->setDefaultBool(kDetailLogPath, false);
        settings->setDefaultBool(kFullDetailLogPath, false);
        settings->setDefaultBool(kForceAnsiColor, false);
        settings->setDefaultString(kOutputStreamLevelThreshold, "verbose");
        settings->setDefaultString(kDebugConsoleLevelThreshold, "verbose");
        settings->setDefaultString(kFileOutputLevelThreshold, "verbose");
        // Apply the (possibly defaulted) settings values to the logger.
        logger->setStandardStreamOutput(settings->getAsBool(kEnableStandardStreamOutputPath));
        logger->setDebugConsoleOutput(settings->getAsBool(kEnableDebugConsoleOutputPath));
        LogFileConfiguration config{};
        settings->setDefaultBool(kFileAppend, config.append);
        config.append = settings->getAsBool(kFileAppend);
        logger->setFileConfiguration(settings->getStringBuffer(kFilePath), &config);
        logger->setFileOuputFlushLevel(logging::stringToLevel(settings->getStringBuffer(kFileFlushLevelPath)));
        logger->setFlushStandardStreamOutput(settings->getAsBool(kFlushStandardStreamOutputPath));
        logger->setForceAnsiColor(settings->getAsBool(kForceAnsiColor));
        logger->setColorOutputIncluded(settings->getAsBool(kEnableColorOutputPath));
        logger->setMultiProcessGroupId(settings->getAsInt(kProcessGroupIdPath));
        // Source (channel) output requires both the legacy and new keys to be enabled.
        bool channel = settings->getAsBool(kIncludeSourcePath) && settings->getAsBool(kIncludeChannelPath);
        // If this is set, it enables everything.
        bool fullDetail = settings->getAsBool(kFullDetailLogPath);
        // If this is set, it enables everything except file name and PID.
        bool detail = fullDetail || settings->getAsBool(kDetailLogPath);
        logger->setSourceIncluded(detail || channel);
        logger->setFilenameIncluded(fullDetail || settings->getAsBool(kIncludeFilenamePath));
        logger->setLineNumberIncluded(detail || settings->getAsBool(kIncludeLineNumberPath));
        logger->setFunctionNameIncluded(detail || settings->getAsBool(kIncludeFunctionNamePath));
        logger->setTimestampIncluded(detail || settings->getAsBool(kIncludeTimeStampPath));
        logger->setThreadIdIncluded(detail || settings->getAsBool(kIncludeThreadIdPath));
        logger->setElapsedTimeUnits(settings->getStringBuffer(kSetElapsedTimeUnitsPath));
        logger->setProcessIdIncluded(fullDetail || settings->getAsBool(kIncludeProcessIdPath));
        // The output stream override is only applied when explicitly present in the settings.
        if (auto buffer = settings->getStringBuffer(kLogOutputStream))
        {
            logger->setOutputStream(stringToOutputStream(buffer));
        }
        logger->setStandardStreamOutputLevelThreshold(
            logging::stringToLevel(settings->getStringBuffer(kOutputStreamLevelThreshold)));
        logger->setDebugConsoleOutputLevelThreshold(
            logging::stringToLevel(settings->getStringBuffer(kDebugConsoleLevelThreshold)));
        logger->setFileOutputLevelThreshold(logging::stringToLevel(settings->getStringBuffer(kFileOutputLevelThreshold)));
    }
}
// example-end
} // namespace logging
} // namespace carb
|
omniverse-code/kit/include/carb/logging/StandardLogger2.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief carb.logging StandardLogger2 definitions
#pragma once
#include "LoggingTypes.h"
namespace carb
{
namespace logging
{
struct Logger;
struct StandardLogger;
/**
 * A sub-interface of \ref ILogging for \ref StandardLogger instances.
 *
 * Instances are reference-counted via addRef()/release().
 */
class StandardLogger2
{
    // This class's ABI is versioned by the ILogging version.
public:
    /**
     * Adds a reference to *this.
     */
    virtual void addRef() = 0;
    /**
     * Releases a reference to *this. When the last reference is released, *this is destroyed.
     */
    virtual void release() = 0;
    /**
     * Retrieves access to the underlying \ref Logger for *this.
     * @see ILogging::addLogger() ILogging::removeLogger()
     * @returns the underlying \ref Logger.
     */
    virtual Logger* getLogger() = 0;
    /**
     * Includes or excludes the filename of where the log message came from. A new instance
     * will by default exclude this information.
     *
     * @param included Whether the filename information should be included in the log message
     */
    virtual void setFilenameIncluded(bool included) = 0;
    /**
     * Includes or excludes the line number of where the log message came from. A new instance
     * will by default exclude this information.
     *
     * @param included Whether the line number information should be included in the log message
     */
    virtual void setLineNumberIncluded(bool included) = 0;
    /**
     * Includes or excludes the function name of where the log message came from. A new instance
     * will by default exclude this information.
     *
     * @param included Whether the function name information should be included in the log message
     */
    virtual void setFunctionNameIncluded(bool included) = 0;
    /**
     * Includes or excludes the timestamp of when the log message was issued. A new instance
     * will by default exclude this information. The time is in UTC format.
     *
     * @param included Whether the timestamp information should be included in the log message
     */
    virtual void setTimestampIncluded(bool included) = 0;
    /**
     * Includes or excludes the id of a thread from which the log message was issued. A new instance
     * will by default exclude this information.
     *
     * @param included Whether the thread id should be included in the log message
     */
    virtual void setThreadIdIncluded(bool included) = 0;
    /**
     * Includes or excludes the source (module) of where the log message came from. A new instance
     * will by default include this information.
     *
     * @param included Whether the source (module) information should be included in the log message
     */
    virtual void setSourceIncluded(bool included) = 0;
    /**
     * Enables (or disables) standard stream output (stdout and stderr) for the logger. Error messages are written
     * to stderr, all other messages to stdout. A new FrameworkLogger will have this output enabled.
     *
     * @param enabled Whether log output should go to standard streams (stdout and stderr)
     */
    virtual void setStandardStreamOutput(bool enabled) = 0;
    /**
     * (Windows only) Enables (or disables) debug console output for the logger via `OutputDebugStringW()`. By default,
     * debug output is only supplied if a debugger is attached (via `IsDebuggerPresent()`). Calling this with @p enabled
     * as `true` will always produce debug output which is useful for non-debugger tools such as SysInternals DebugView.
     *
     * @param enabled Whether log output should be sent to the debug console.
     */
    virtual void setDebugConsoleOutput(bool enabled) = 0;
    /** sets the path to the log file to open.
     *
     * @param[in] filePath the local file path to write the log file to. This may be a
     *                     relative or absolute path. Relative paths will be resolved
     *                     relative to the process's current working directory at the time
     *                     of the call. This may be nullptr to not write to a log file at
     *                     all or to close the current log file. If a log file was
     *                     previously open during this call, it will be closed first. If
     *                     nullptr is passed in here, logging to a file will effectively
     *                     be disabled. This path must be UTF-8 encoded. See the remarks
     *                     below for more information on formatting of the log file path.
     * @returns no return value.
     *
     * @remarks This sets the path to the log file to write to for the given instance of a
     *          standard logger object. The log file name may contain the string "${pid}" to
     *          have the process ID inserted in its place. By default, a new standard logger
     *          will disable logging to a file.
     *
     * @note Setting the log file name with this function will preserve the previous log file
     *       configuration. If the configuration needs to change as well (ie: change the
     *       'append' state of the log file), setFileConfiguration() should be used instead.
     */
    virtual void setFileOutput(const char* filePath) = 0;
    /**
     * Enables flushing on every log message to file of the specified severity or higher.
     * A new instance will have this set to flush starting from kLevelVerbose, so that file logging will be
     * reliable out of the box. The idea is that file logging will be used for debugging purposes by default, with a
     * price of significant performance penalty.
     *
     * @param level The starting log level to flush file log output at.
     *
     * @note The "Ouput" spelling in this function's name is part of the locked ABI and cannot
     *       be corrected without an ILogging major version increase.
     */
    virtual void setFileOuputFlushLevel(int32_t level) = 0;
    /**
     * Enables flushing of stdout after each message is printed to it.
     * By default, this option will be disabled. The default behavior will be to only flush stdout just before
     * writing a message to stderr.
     *
     * @param enabled Set to true to cause stdout to be flushed after each message is written. Set to false to
     *                use the default behavior of only flushing stdout before writing to stderr.
     */
    virtual void setFlushStandardStreamOutput(bool enabled) = 0;
    /**
     * Enables a high resolution time index to be printed with each message.
     * By default, this option is disabled (ie: no time index printed). When enabled, the current time index
     * (since the first message was printed) will be printed with each message. The time index may be in
     * milliseconds, microseconds, or nanoseconds depending on the string @p units. The printing of the time
     * index may be enabled at the same time as the timestamp.
     *
     * @param[in] units the units that the time index should be printed in. This can be one of the following
     *                  supported unit names:
     *                  * nullptr, "", or "none": the time index printing is disabled (default state).
     *                  * "ms", "milli", or "milliseconds": print the time index in milliseconds.
     *                  * "us", "µs", "micro", or "microseconds": print the time index in microseconds.
     *                  * "ns", "nano", or "nanoseconds": print the time index in nanoseconds.
     */
    virtual void setElapsedTimeUnits(const char* units) = 0;
    /**
     * Includes or excludes the id of the process from which the log message was issued. A new instance
     * will by default exclude this information.
     *
     * @param enabled Whether the process id should be included in the log message
     */
    virtual void setProcessIdIncluded(bool enabled) = 0;
    /**
     * sets the process group ID for the logger. If a non-zero identifier is given, inter-process
     * locking will be enabled on both the log file and the stdout/stderr streams. This will prevent
     * simultaneous messages from multiple processes in the logs from becoming interleaved within
     * each other. If a zero identifier is given, inter-process locking will be disabled.
     *
     * @param[in] id an arbitrary process group identifier to set.
     */
    virtual void setMultiProcessGroupId(int32_t id) = 0;
    /**
     * Enables (or disables) color codes output for the logger. A new instance will have this output enabled
     * unless the output is piped to a file, in which case this will be disabled.
     *
     * @param enabled Whether log output should include color codes
     */
    virtual void setColorOutputIncluded(bool enabled) = 0;
    /**
     * Specify the output stream that logging should go to.
     * By default, messages are sent to stdout and errors are sent to stderr.
     *
     * @param[in] outputStream The output stream setting to use.
     *                         If this is OutputStream::eStderr, all logging
     *                         output will be sent to stderr.
     *                         If this is OutputStream::eDefault, the default
     *                         logging behavior will be used.
     */
    virtual void setOutputStream(OutputStream outputStream) = 0;
    /**
     * Sets the log level threshold for the messages going to the standard stream. Messages below this threshold will be
     * dropped.
     *
     * @param level The log level to set.
     */
    virtual void setStandardStreamOutputLevelThreshold(int32_t level) = 0;
    /**
     * Sets the log level threshold for the messages going to the debug console output. Messages below this threshold
     * will be dropped.
     *
     * @param level The log level to set.
     */
    virtual void setDebugConsoleOutputLevelThreshold(int32_t level) = 0;
    /**
     * Sets the log level threshold for the messages going to the file output. Messages below this threshold
     * will be dropped.
     *
     * @param level The log level to set.
     */
    virtual void setFileOutputLevelThreshold(int32_t level) = 0;
    /**
     * Sets the file path and configuration for file logging. If nullptr is provided the file logging is disabled. A new
     * instance will by default disable file output.
     *
     * @param filePath The local file path to write to or nullptr, if you want to disable logging to file.
     * Parameter is encoded as UTF8 character string with forward slashes as path separator. The path
     * should include the extension .log but this is not a requirement. If a relative path is provided
     * it is interpreted to be relative to the current working directory for the application. Can be kKeepSameFile to
     * keep logging to the same file but set a new LogFileConfiguration.
     * @param config The LogFileConfiguration structure with parameters to use for the file configuration. Required.
     */
    virtual void setFileConfiguration(const char* filePath, const LogFileConfiguration* config) = 0;
    /**
     * Returns the file path (in buffer) and configuration for file logging.
     *
     * @param buffer The buffer that will receive the UTF-8 file name that is being logged to. May be nullptr.
     * @param bufferSize The maximum number of bytes available in \p buffer.
     * @param config The LogFileConfiguration to receive the current configuration. May be nullptr.
     * @returns If successful, the number of non-NUL bytes written to \p buffer. If not successful, contains the
     * required size of a buffer to receive the filename (not including the NUL terminator).
     */
    virtual size_t getFileConfiguration(char* buffer, size_t bufferSize, LogFileConfiguration* config) = 0;
    /**
     * Pauses file logging (and closes the file) until resumeFileLogging() is called.
     *
     * @note This is a counted call. Each call to pauseFileLogging() must have a matching call to resumeFileLogging()
     *
     */
    virtual void pauseFileLogging() = 0;
    /**
     * Resumes file logging (potentially reopening the file)
     *
     * @note This is a counted call. Each call to pauseFileLogging() must have a matching call to resumeFileLogging()
     *
     */
    virtual void resumeFileLogging() = 0;
    /**
     * Forces the logger to use ANSI escape code's to annotate the log with color.
     *
     * By default, on Windows ANSI escape codes will never be used, rather the Console API will be used to place
     * colors in a console. Linux uses the isatty() to determine if the terminal supports ANSI escape codes. However,
     * the isatty check doesn't work in all cases. One notable case where this doesn't work is running a process in a
     * CI/CD that returns false from isatty() yet still supports ANSI escape codes.
     *
     * See: https://en.wikipedia.org/wiki/ANSI_escape_code for more information about ANSI escape codes.
     *
     * @param forceAnsiColor if true forces terminal to use ANSI escape codes for color
     */
    virtual void setForceAnsiColor(bool forceAnsiColor) = 0;
    /**
     * Overrides the current log level, for the given stream, only for the calling thread.
     *
     * Call \ref clearLevelThresholdThreadOverride() to clear the override.
     * @param type The \ref OutputType to override.
     * @param level The log level to use for the override.
     */
    virtual void setLevelThresholdThreadOverride(OutputType type, int32_t level) = 0;
    /**
     * Clears any override for the given stream, only for the calling thread.
     *
     * The override was previously set with \ref setLevelThresholdThreadOverride().
     * @param type The \ref OutputType to override.
     */
    virtual void clearLevelThresholdThreadOverride(OutputType type) = 0;

    // Functions may be added to this class as long as it is not inherited from (as an interface) and ILogging's minor
    // version is incremented.
};
} // namespace logging
} // namespace carb
|
omniverse-code/kit/include/carb/logging/StandardLogger.h | // Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief carb.logging StandardLogger definitions
#pragma once
#include "Logger.h"
#include "StandardLogger2.h"
namespace carb
{
namespace logging
{
/**
 * The default logger provided by the Framework. It is quite flexible and you can use multiple
 * instances if you want different configurations for different output destinations. It can
 * also be safely called from multiple threads.
 *
 * @note This struct is an ABI-locked function-pointer table; see StandardLogger2 for the
 *       current interface.
 *
 * @see ILogging::getDefaultLogger
 * @see ILogging::createStandardLogger
 * @see ILogging::destroyStandardLogger
 */
#if CARB_PLATFORM_WINDOWS && !defined(DOXYGEN_BUILD)
struct CARB_DEPRECATED("Use StandardLogger2 instead") StandardLogger : public Logger
#else // Linux is very warning-heavy about [[deprecated]]
struct StandardLogger : public Logger
#endif
{
    //! \copydoc StandardLogger2::setFilenameIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setFilenameIncluded)(StandardLogger* instance, bool included);
    //! \copydoc StandardLogger2::setLineNumberIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setLineNumberIncluded)(StandardLogger* instance, bool included);
    //! \copydoc StandardLogger2::setFunctionNameIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setFunctionNameIncluded)(StandardLogger* instance, bool included);
    //! \copydoc StandardLogger2::setTimestampIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setTimestampIncluded)(StandardLogger* instance, bool included);
    //! \copydoc StandardLogger2::setThreadIdIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setThreadIdIncluded)(StandardLogger* instance, bool included);
    //! \copydoc StandardLogger2::setSourceIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setSourceIncluded)(StandardLogger* instance, bool included);
    //! \copydoc StandardLogger2::setStandardStreamOutput
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setStandardStreamOutput)(StandardLogger* instance, bool enabled);
    //! \copydoc StandardLogger2::setDebugConsoleOutput
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setDebugConsoleOutput)(StandardLogger* instance, bool enabled);
    //! \copydoc StandardLogger2::setFileOutput
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setFileOutput)(StandardLogger* instance, const char* filePath);
    //! \copydoc StandardLogger2::setFileOuputFlushLevel
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setFileOuputFlushLevel)(StandardLogger* instance, int32_t level);
    //! \copydoc StandardLogger2::setFlushStandardStreamOutput
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setFlushStandardStreamOutput)(StandardLogger* instance, bool enabled);
    //! \copydoc StandardLogger2::setElapsedTimeUnits
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setElapsedTimeUnits)(StandardLogger* instance, const char* units);
    //! \copydoc StandardLogger2::setProcessIdIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setProcessIdIncluded)(StandardLogger* instance, bool enabled);
    //! \copydoc StandardLogger2::setMultiProcessGroupId
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setMultiProcessGroupId)(StandardLogger* instance, int32_t id);
    //! \copydoc StandardLogger2::setColorOutputIncluded
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setColorOutputIncluded)(StandardLogger* instance, bool enabled);
    //! \copydoc StandardLogger2::setOutputStream
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setOutputStream)(StandardLogger* instance, OutputStream outputStream);
    //! \copydoc StandardLogger2::setStandardStreamOutputLevelThreshold
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setStandardStreamOutputLevelThreshold)(StandardLogger* instance, int32_t level);
    //! \copydoc StandardLogger2::setDebugConsoleOutputLevelThreshold
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setDebugConsoleOutputLevelThreshold)(StandardLogger* instance, int32_t level);
    //! \copydoc StandardLogger2::setFileOutputLevelThreshold
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setFileOutputLevelThreshold)(StandardLogger* instance, int32_t level);
    //! \copydoc StandardLogger2::setFileConfiguration
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setFileConfiguration)(StandardLogger* instance,
                                         const char* filePath,
                                         const LogFileConfiguration* config);
    //! \copydoc StandardLogger2::getFileConfiguration
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    size_t(CARB_ABI* getFileConfiguration)(StandardLogger* instance,
                                           char* buffer,
                                           size_t bufferSize,
                                           LogFileConfiguration* config);
    //! \copydoc StandardLogger2::pauseFileLogging
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* pauseFileLogging)(StandardLogger* instance);
    //! \copydoc StandardLogger2::resumeFileLogging
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* resumeFileLogging)(StandardLogger* instance);
    //! \copydoc StandardLogger2::setForceAnsiColor
    //! \param instance The instance of the StandardLogger interface being used. May not be `nullptr`.
    void(CARB_ABI* setForceAnsiColor)(StandardLogger* instance, bool forceAnsiColor);

    // NOTE: This interface, because it is inherited from, is CLOSED and may not have any additional functions added
    // without an ILogging major version increase. See StandardLogger2 for adding additional functionality.
};
} // namespace logging
} // namespace carb
|
omniverse-code/kit/include/carb/logging/LoggingTypes.h | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//
//! @file
//! @brief Type definitions for carb.logging
#pragma once
#include "../Defines.h"
namespace carb
{
namespace logging
{
//! Enumeration of output stream routing modes (see StandardLogger2::setOutputStream()).
enum class OutputStream
{
    eDefault, //!< Default selection: stdout for \ref kLevelWarn and below, stderr for \ref kLevelError and above.
    eStderr, //!< Force all output to stderr.
};
//! Sentinel file-path value accepted by setFileConfiguration(): keeps logging to the current
//! file while applying a new LogFileConfiguration.
//! The value is an intentionally invalid pointer used only for identity comparison and never
//! dereferenced; a named cast replaces the previous C-style cast without changing the value.
const char* const kKeepSameFile = reinterpret_cast<const char*>(size_t(-1));
/**
 * Describes the configuration for logging to a file for setFileConfiguration.
 *
 * @note Do not rearrange below members as it disrupts @rstref{ABI compatibility <abi-compatibility>}. Add members at
 * the bottom (the @ref size member lets implementations detect which members are present).
 */
struct LogFileConfiguration
{
    /// Size of the struct used for versioning. Adding members to this struct will change the size and therefore act as
    /// a version for the struct.
    size_t size{ sizeof(LogFileConfiguration) };

    /// Indicates whether opening the file should append to it. If false, file is overwritten.
    ///
    /// @note Setting (boolean): "/log/fileAppend"
    /// @note Default = false
    bool append{ false };
};
//! StandardLogger2 output destinations, used by the per-thread level-threshold overrides
//! (see StandardLogger2::setLevelThresholdThreadOverride()).
enum class OutputType
{
    eFile, //!< Output to the log file (see StandardLogger2::setFileConfiguration()).
    eDebugConsole, //!< Output to the debug console (Windows `OutputDebugStringW()`-style output).
    eStandardStream, //!< Output to standard stream (i.e. stdout/stderr).
    eCount //!< Number of OutputTypes; not a valid destination.
};
} // namespace logging
} // namespace carb
|
Subsets and Splits