// File: NVIDIA-Omniverse/PhysX/flow/shared/NvFlowStringHash.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowTypes.h"
#include "NvFlowArray.h"
#include <string.h> // strcmp() is used by NvFlowStringHashTable below
NV_FLOW_INLINE NvFlowUint NvFlowStringHashFNV(const char* a)
{
// FNV-1a
NvFlowUint hash = 2166136261u;
NvFlowUint idx = 0u;
if (a)
{
while (a[idx])
{
hash = 16777619u * (hash ^ (NvFlowUint)(a[idx]));
idx++;
}
}
return hash;
}
template<class T, NvFlowUint64 staticCapacity = 0u>
struct NvFlowStringHashTable
{
NvFlowArray<NvFlowUint, staticCapacity> hashs;
NvFlowArray<NvFlowArray<char>, staticCapacity> keys;
NvFlowArray<T, staticCapacity> values;
NvFlowUint64 keyCount = 0llu;
NvFlowUint64 find(const char* path, NvFlowUint hash)
{
path = path ? path : "";
NvFlowUint64 beginIdx = hash & (hashs.size - 1u);
for (NvFlowUint64 iterIdx = 0u; iterIdx < hashs.size; iterIdx++)
{
NvFlowUint64 idx = (iterIdx + beginIdx) & (hashs.size - 1u);
if (hashs[idx] == hash &&
keys[idx].size > 0u &&
strcmp(keys[idx].data, path) == 0)
{
return idx;
}
}
return ~0llu;
}
NvFlowUint64 insertNoResize(const char* path, NvFlowUint hash, const T& value, NvFlowBool32* pSuccess = nullptr)
{
path = path ? path : "";
if (pSuccess)
{
*pSuccess = NV_FLOW_FALSE;
}
NvFlowUint64 beginIdx = hash & (hashs.size - 1u);
for (NvFlowUint64 iterIdx = 0u; iterIdx < hashs.size; iterIdx++)
{
NvFlowUint64 idx = (iterIdx + beginIdx) & (hashs.size - 1u);
if (keys[idx].size == 0u)
{
keyCount++;
hashs[idx] = hash;
for (NvFlowUint64 strIdx = 0u; path[strIdx]; strIdx++)
{
keys[idx].pushBack(path[strIdx]);
}
keys[idx].pushBack('\0');
values[idx] = value;
if (pSuccess)
{
*pSuccess = NV_FLOW_TRUE;
}
return idx;
}
else if (hashs[idx] == hash &&
keys[idx].size > 0u &&
strcmp(keys[idx].data, path) == 0)
{
return idx;
}
}
return ~0llu;
}
NvFlowUint64 insert(const char* path, NvFlowUint hash, const T& value, NvFlowBool32* pSuccess = nullptr)
{
// resize if adding this key would make the table at least 50% full
if (2u * (keyCount + 1u) >= hashs.size)
{
NvFlowArray<NvFlowUint, staticCapacity> hashs_old(std::move(hashs));
NvFlowArray<NvFlowArray<char>, staticCapacity> keys_old(std::move(keys));
NvFlowArray<T, staticCapacity> values_old(std::move(values));
NvFlowUint64 newSize = 1u;
while (newSize <= hashs_old.size)
{
newSize *= 2u;
}
hashs.reserve(newSize);
keys.reserve(newSize);
values.reserve(newSize);
hashs.size = newSize;
keys.size = newSize;
values.size = newSize;
keyCount = 0u; // reset key count; insertNoResize() below recounts it
for (NvFlowUint64 idx = 0u; idx < hashs_old.size; idx++)
{
if (keys_old[idx].size > 0u)
{
insertNoResize(keys_old[idx].data, hashs_old[idx], values_old[idx], nullptr);
}
}
}
return insertNoResize(path, hash, value, pSuccess);
}
NvFlowBool32 erase(const char* path, NvFlowUint hash)
{
NvFlowUint64 findIdx = find(path, hash);
if (findIdx != ~0llu)
{
keyCount--;
hashs[findIdx] = 0u;
keys[findIdx].size = 0u;
values[findIdx] = T();
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
};
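// Usage sketch (not part of the original header; the key string and int
// payload are illustrative). The caller computes the FNV-1a hash once and
// passes it to each operation; insert() reports via pSuccess whether the
// key was newly added.
NV_FLOW_INLINE void NvFlowStringHashTable_exampleUsage()
{
NvFlowStringHashTable<int> table;
const char* key = "shaders/advect.hlsl";
NvFlowUint hash = NvFlowStringHashFNV(key);
NvFlowBool32 inserted = NV_FLOW_FALSE;
NvFlowUint64 idx = table.insert(key, hash, 1, &inserted);
if (!inserted && idx != ~0llu)
{
table.values[idx]++; // key already present; update payload in place
}
NvFlowUint64 findIdx = table.find(key, hash);
if (findIdx != ~0llu)
{
int payload = table.values[findIdx]; // payload stored for this key
(void)payload;
}
table.erase(key, hash);
}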

// File: NVIDIA-Omniverse/PhysX/flow/shared/NvFlowString.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "NvFlowString.h"
#include "NvFlowArray.h"
#include <stdio.h>
struct NvFlowStringPool
{
NvFlowArray<NvFlowArray<char>, 16u> heaps;
};
NvFlowStringPool* NvFlowStringPoolCreate()
{
return new NvFlowStringPool();
}
void NvFlowStringPoolAllocate_newHeap(NvFlowStringPool* ptr, NvFlowUint64 allocSize)
{
auto& currentHeap = ptr->heaps[ptr->heaps.allocateBack()];
NvFlowUint64 heapSize = 4096u; // default heap size
while (heapSize < allocSize)
{
heapSize *= 2u;
}
currentHeap.reserve(heapSize);
}
NvFlowUint64 NvFlowStringPool_alignment(NvFlowUint64 size)
{
return 8u * ((size + 7u) / 8u);
}
char* NvFlowStringPoolAllocate_internal(NvFlowStringPool* ptr, NvFlowUint64 size)
{
NvFlowUint64 allocSize = NvFlowStringPool_alignment(size);
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
if (currentHeap.size + allocSize <= currentHeap.capacity)
{
char* ret = currentHeap.data + currentHeap.size;
ret[size - 1] = 0;
currentHeap.size += allocSize;
return ret;
}
}
NvFlowStringPoolAllocate_newHeap(ptr, allocSize);
return NvFlowStringPoolAllocate_internal(ptr, size);
}
char* NvFlowStringPoolAllocate(NvFlowStringPool* ptr, NvFlowUint64 size)
{
return NvFlowStringPoolAllocate_internal(ptr, size + 1);
}
void NvFlowStringPoolTempAllocate(NvFlowStringPool* ptr, char** p_str_data, NvFlowUint64* p_str_size)
{
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
char* str_data = currentHeap.data + currentHeap.size;
NvFlowUint64 str_size = currentHeap.capacity - currentHeap.size;
if (str_size > 0)
{
str_data[str_size - 1] = 0;
str_size--;
*p_str_size = str_size;
*p_str_data = str_data;
return;
}
}
NvFlowStringPoolAllocate_newHeap(ptr, 8u);
NvFlowStringPoolTempAllocate(ptr, p_str_data, p_str_size);
}
void NvFlowStringPoolTempAllocateCommit(NvFlowStringPool* ptr, char* str_data, NvFlowUint64 str_size)
{
// to reverse the str_size-- in NvFlowStringPoolTempAllocate()
str_size++;
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
char* compStr_data = currentHeap.data + currentHeap.size;
NvFlowUint64 compStr_size = currentHeap.capacity - currentHeap.size;
if (str_data == compStr_data && str_size <= compStr_size)
{
NvFlowUint64 allocSize = NvFlowStringPool_alignment(str_size);
currentHeap.size += allocSize;
}
}
}
void NvFlowStringPoolDestroy(NvFlowStringPool* ptr)
{
delete ptr;
}
void NvFlowStringPoolReset(NvFlowStringPool* ptr)
{
for (NvFlowUint64 heapIdx = 0u; heapIdx < ptr->heaps.size; heapIdx++)
{
ptr->heaps[heapIdx].size = 0u;
}
ptr->heaps.size = 0u;
}
char* NvFlowStringPrintV(NvFlowStringPool* pool, const char* format, va_list args)
{
va_list argsCopy;
va_copy(argsCopy, args);
NvFlowUint64 str_size = ~0llu;
char* str_data = nullptr;
NvFlowStringPoolTempAllocate(pool, &str_data, &str_size);
NvFlowUint64 count = (NvFlowUint64)vsnprintf(str_data, str_size + 1, format, args);
if (count <= str_size)
{
str_size = count;
NvFlowStringPoolTempAllocateCommit(pool, str_data, str_size);
}
else
{
str_data = NvFlowStringPoolAllocate(pool, count);
str_size = count;
count = vsnprintf(str_data, str_size + 1, format, argsCopy);
}
va_end(argsCopy);
return str_data;
}
char* NvFlowStringPrint(NvFlowStringPool* pool, const char* format, ...)
{
va_list args;
va_start(args, format);
char* str = NvFlowStringPrintV(pool, format, args);
va_end(args);
return str;
}
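// Usage sketch (illustrative, not part of the original file): pool-backed
// printf-style formatting. Strings stay valid until the pool is reset or
// destroyed, so there is no per-string free.
static void NvFlowStringPool_exampleUsage()
{
NvFlowStringPool* pool = NvFlowStringPoolCreate();
char* label = NvFlowStringPrint(pool, "layer_%d_%s", 3, "velocity");
(void)label; // valid until the reset/destroy below
NvFlowStringPoolReset(pool);   // invalidates pooled strings, keeps heap memory for reuse
NvFlowStringPoolDestroy(pool);
}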
/// ************************** File Utils *********************************************
const char* NvFlowTextFileLoad(NvFlowStringPool* pool, const char* filename)
{
FILE* file = nullptr;
#if defined(_WIN32)
fopen_s(&file, filename, "r");
#else
file = fopen(filename, "r");
#endif
if (file == nullptr)
{
return nullptr;
}
NvFlowUint64 chunkSize = 4096u;
NvFlowArray<const char*, 8u> chunks;
size_t readBytes = 0u;
do
{
chunkSize *= 2u;
char* chunkStr = NvFlowStringPoolAllocate(pool, chunkSize);
chunkStr[0] = '\0';
readBytes = fread(chunkStr, 1u, chunkSize, file);
chunkStr[readBytes] = '\0';
chunks.pushBack(chunkStr);
} while(readBytes == chunkSize);
fclose(file);
const char* text_data = (chunks.size == 1u) ? chunks[0u] : NvFlowStringConcatN(pool, chunks.data, chunks.size);
//NvFlowUint64 strLength = NvFlowStringLength(text_data);
//printf("NvFlowTextureFileLoad(%s) %llu bytes in %llu chunks\n", filename, strLength, chunks.size);
return text_data;
}
void NvFlowTextFileStore(const char* text_data, const char* filename)
{
FILE* file = nullptr;
#if defined(_WIN32)
fopen_s(&file, filename, "w");
#else
file = fopen(filename, "w");
#endif
if (file == nullptr)
{
return;
}
NvFlowUint64 text_size = NvFlowStringLength(text_data);
fwrite(text_data, 1u, text_size, file);
fclose(file);
}
NvFlowBool32 NvFlowTextFileTestOpen(const char* filename)
{
FILE* file = nullptr;
#if defined(_WIN32)
fopen_s(&file, filename, "r");
#else
file = fopen(filename, "r");
#endif
if (file)
{
fclose(file);
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
void NvFlowTextFileRemove(const char* name)
{
remove(name);
}
void NvFlowTextFileRename(const char* oldName, const char* newName)
{
rename(oldName, newName);
}
NvFlowBool32 NvFlowTextFileDiffAndWriteIfModified(const char* filenameDst, const char* filenameTmp)
{
FILE* fileTmp = nullptr;
FILE* fileDst = nullptr;
bool match = true;
#if defined(_WIN32)
fopen_s(&fileDst, filenameDst, "r");
#else
fileDst = fopen(filenameDst, "r");
#endif
if (fileDst)
{
#if defined(_WIN32)
fopen_s(&fileTmp, filenameTmp, "r");
#else
fileTmp = fopen(filenameTmp, "r");
#endif
if (fileTmp)
{
while (1)
{
int a = fgetc(fileTmp);
int b = fgetc(fileDst);
if (a == EOF && b == EOF)
{
break;
}
else if (a != b)
{
match = false;
break;
}
}
fclose(fileTmp);
}
else
{
match = false;
}
fclose(fileDst);
}
else
{
match = false;
}
if (!match)
{
remove(filenameDst);
rename(filenameTmp, filenameDst);
}
// always cleanup temp file
remove(filenameTmp);
return !match;
}
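// Usage sketch (illustrative file names): the generate-then-commit pattern
// these utilities support. Write new output to a temporary file, then promote
// it only if it differs from the destination, so unchanged generated files
// keep their timestamps.
static void NvFlowTextFile_exampleUsage()
{
NvFlowStringPool* pool = NvFlowStringPoolCreate();
const char* generated = NvFlowStringPrint(pool, "#define VERSION %d\n", 7);
NvFlowTextFileStore(generated, "Config.h.tmp");
NvFlowBool32 written = NvFlowTextFileDiffAndWriteIfModified("Config.h", "Config.h.tmp");
(void)written; // NV_FLOW_TRUE if Config.h was created or replaced; the temp file is always removed
NvFlowStringPoolDestroy(pool);
}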

// File: NVIDIA-Omniverse/PhysX/flow/shared/NvFlowArrayBuffer.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowUploadBuffer.h"
#include <string.h>
struct NvFlowArrayBufferData
{
const void* data;
NvFlowUint64 elementCount;
NvFlowUint64 version;
};
struct NvFlowArrayBufferState
{
NvFlowBool32 isDirty;
NvFlowUint64 elementCount;
NvFlowUint64 version;
NvFlowUint64 firstElement;
};
struct NvFlowArrayBuffer
{
NvFlowUploadBuffer uploadBuffer = {};
NvFlowArray<NvFlowArrayBufferState> state;
NvFlowArray<NvFlowUploadBufferCopyRange> copyRanges;
NvFlowUint64 totalSizeInBytes = 0llu;
};
NV_FLOW_INLINE void NvFlowArrayBuffer_init_custom(
NvFlowContextInterface* contextInterface,
NvFlowContext* context,
NvFlowArrayBuffer* ptr,
NvFlowBufferUsageFlags flags,
NvFlowFormat format,
NvFlowUint structureStride,
NvFlowBuffer*(NV_FLOW_ABI* createBuffer)(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata),
void(NV_FLOW_ABI* addPassCopyBuffer)(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata),
void* userdata
)
{
NvFlowUploadBuffer_init_custom(contextInterface, context, &ptr->uploadBuffer, flags, format, structureStride, createBuffer, addPassCopyBuffer, userdata);
}
NV_FLOW_INLINE NvFlowBuffer* NvFlowArrayBuffer_createBuffer(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata)
{
NvFlowArrayBuffer* ptr = (NvFlowArrayBuffer*)userdata;
return ptr->uploadBuffer.contextInterface->createBuffer(context, memoryType, desc);
}
NV_FLOW_INLINE void NvFlowArrayBuffer_addPassCopyBuffer(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata)
{
NvFlowArrayBuffer* ptr = (NvFlowArrayBuffer*)userdata;
ptr->uploadBuffer.contextInterface->addPassCopyBuffer(context, params);
}
NV_FLOW_INLINE void NvFlowArrayBuffer_init(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowArrayBuffer* ptr, NvFlowBufferUsageFlags flags, NvFlowFormat format, NvFlowUint structureStride)
{
NvFlowArrayBuffer_init_custom(contextInterface, context, ptr, flags, format, structureStride, NvFlowArrayBuffer_createBuffer, NvFlowArrayBuffer_addPassCopyBuffer, ptr);
}
NV_FLOW_INLINE void NvFlowArrayBuffer_destroy(NvFlowContext* context, NvFlowArrayBuffer* ptr)
{
NvFlowUploadBuffer_destroy(context, &ptr->uploadBuffer);
ptr->state.size = 0u;
ptr->copyRanges.size = 0u;
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowArrayBuffer_update(
NvFlowContext* context,
NvFlowArrayBuffer* ptr,
const NvFlowArrayBufferData* arrayDatas,
NvFlowUint64* outFirstElements,
NvFlowUint64 arrayCount,
NvFlowUint64* outTotalSizeInBytes,
const char* debugName
)
{
// if arrayCount changes, reset all state
bool shouldResetState = false;
if (ptr->state.size != arrayCount)
{
shouldResetState = true;
}
// if any array size changes, reset all state, since buffer resize might occur
if (!shouldResetState)
{
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (ptr->state[arrayIdx].elementCount != arrayDatas[arrayIdx].elementCount)
{
shouldResetState = true;
}
}
}
if (shouldResetState)
{
ptr->state.reserve(arrayCount);
ptr->state.size = arrayCount;
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
ptr->state[arrayIdx].isDirty = NV_FLOW_TRUE;
ptr->state[arrayIdx].elementCount = 0llu;
ptr->state[arrayIdx].version = 0llu;
ptr->state[arrayIdx].firstElement = 0llu;
}
}
// mark any array dirty if version changes
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (arrayDatas[arrayIdx].elementCount != 0u || ptr->state[arrayIdx].elementCount != 0u)
{
if (arrayDatas[arrayIdx].version == 0llu || arrayDatas[arrayIdx].version != ptr->state[arrayIdx].version)
{
ptr->state[arrayIdx].isDirty = NV_FLOW_TRUE;
}
}
}
NvFlowBool32 anyDirty = NV_FLOW_FALSE;
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (ptr->state[arrayIdx].isDirty)
{
anyDirty = NV_FLOW_TRUE;
}
}
// compute total size
NvFlowUint64 totalSizeInBytes = 0llu;
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
totalSizeInBytes += ptr->uploadBuffer.structureStride * arrayDatas[arrayIdx].elementCount;
}
NvFlowUint8* mapped = nullptr;
if (anyDirty)
{
mapped = (NvFlowUint8*)NvFlowUploadBuffer_map(context, &ptr->uploadBuffer, totalSizeInBytes);
}
// update state
NvFlowUint64 globalFirstElement = 0llu;
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
ptr->state[arrayIdx].elementCount = arrayDatas[arrayIdx].elementCount;
ptr->state[arrayIdx].version = arrayDatas[arrayIdx].version;
ptr->state[arrayIdx].firstElement = globalFirstElement;
globalFirstElement += ptr->state[arrayIdx].elementCount;
}
ptr->copyRanges.size = 0u;
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (ptr->state[arrayIdx].isDirty)
{
NvFlowUint64 offsetInBytes = ptr->uploadBuffer.structureStride * ptr->state[arrayIdx].firstElement;
NvFlowUint64 sizeInBytes = ptr->uploadBuffer.structureStride * ptr->state[arrayIdx].elementCount;
// copy to host memory
memcpy(mapped + offsetInBytes, arrayDatas[arrayIdx].data, sizeInBytes);
// add copy range
NvFlowUploadBufferCopyRange copyRange = { offsetInBytes, sizeInBytes };
ptr->copyRanges.pushBack(copyRange);
}
}
NvFlowBufferTransient* bufferTransient = nullptr;
if (anyDirty)
{
bufferTransient = NvFlowUploadBuffer_unmapDeviceN(context, &ptr->uploadBuffer, ptr->copyRanges.data, ptr->copyRanges.size, debugName);
}
else
{
bufferTransient = NvFlowUploadBuffer_getDevice(context, &ptr->uploadBuffer, totalSizeInBytes);
}
// mark all arrays as clean
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
ptr->state[arrayIdx].isDirty = NV_FLOW_FALSE;
}
if (outFirstElements)
{
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
outFirstElements[arrayIdx] = ptr->state[arrayIdx].firstElement;
}
}
ptr->totalSizeInBytes = totalSizeInBytes;
if (outTotalSizeInBytes)
{
*outTotalSizeInBytes = totalSizeInBytes;
}
return bufferTransient;
}
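// Usage sketch (illustrative data; assumes a valid NvFlowContext* and an
// NvFlowArrayBuffer already initialized via NvFlowArrayBuffer_init() with a
// 4-byte structureStride): two CPU arrays packed into one shared device buffer.
NV_FLOW_INLINE void NvFlowArrayBuffer_exampleUsage(NvFlowContext* context, NvFlowArrayBuffer* arrayBuffer)
{
static const float positions[6] = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f };
static const float colors[3] = { 0.25f, 0.5f, 0.75f };
NvFlowArrayBufferData arrayDatas[2] = {
{ positions, 6u, 1llu }, // bump version whenever the contents change
{ colors, 3u, 1llu },
};
NvFlowUint64 firstElements[2] = {};
NvFlowUint64 totalSizeInBytes = 0llu;
NvFlowBufferTransient* bufferTransient = NvFlowArrayBuffer_update(
context, arrayBuffer, arrayDatas, firstElements, 2u, &totalSizeInBytes, "exampleArrays");
(void)bufferTransient;
// firstElements[i] is each array's base element offset in the shared buffer;
// arrays whose version is unchanged skip the host-to-device copy.
}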

// File: NVIDIA-Omniverse/PhysX/flow/shared/NvFlowArray.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#define NV_FLOW_ARRAY_CACHE_ENABLED 1
#include "NvFlowTypes.h" // NvFlowUint64 and NV_FLOW_INLINE are used below
#include <new>
#include <utility>
template<class T, NvFlowUint64 staticCapacity = 0u, void(prerelease)(void* data, NvFlowUint64 size) = nullptr>
struct NvFlowArray
{
#if NV_FLOW_ARRAY_CACHE_ENABLED
static const NvFlowUint64 s_staticCapacity = staticCapacity;
#else
static const NvFlowUint64 s_staticCapacity = 0u;
#endif
T* data = nullptr;
NvFlowUint64 capacity = 0u;
NvFlowUint64 size = 0u;
unsigned char cache[s_staticCapacity * sizeof(T) + 8u];
void release()
{
for (NvFlowUint64 i = 0; i < capacity; i++)
{
data[i].~T();
}
if (data != nullptr && (T*)cache != data)
{
operator delete[](data);
}
data = nullptr;
capacity = 0u;
size = 0u;
}
void move(NvFlowArray& rhs)
{
data = rhs.data;
capacity = rhs.capacity;
size = rhs.size;
if (rhs.data == (T*)rhs.cache)
{
data = (T*)cache;
for (NvFlowUint64 idx = 0u; idx < capacity; idx++)
{
new(data + idx) T(std::move(rhs.data[idx]));
}
}
// to match destructed state
rhs.data = nullptr;
rhs.capacity = 0u;
rhs.size = 0u;
}
void reserve(NvFlowUint64 requestedCapacity)
{
if (requestedCapacity <= capacity)
{
return;
}
NvFlowUint64 newSize = size;
NvFlowUint64 newCapacity = capacity;
if (newCapacity < s_staticCapacity)
{
newCapacity = s_staticCapacity;
}
if (newCapacity == 0u)
{
newCapacity = 1u;
}
while (newCapacity < requestedCapacity)
{
newCapacity *= 2u;
}
T* newData = (T*)(newCapacity <= s_staticCapacity ? (void*)cache : operator new[](newCapacity * sizeof(T)));
// copy to new
for (NvFlowUint64 i = 0; i < newSize; i++)
{
new(newData + i) T(std::move(data[i]));
}
for (NvFlowUint64 i = newSize; i < newCapacity; i++)
{
new(newData + i) T();
}
if (prerelease)
{
prerelease(data + size, capacity - size);
}
// cleanup old
release();
// commit new
data = newData;
capacity = newCapacity;
size = newSize;
}
NvFlowArray()
{
reserve(s_staticCapacity);
}
NvFlowArray(NvFlowArray&& rhs)
{
move(rhs);
}
~NvFlowArray()
{
if (prerelease)
{
prerelease(data, capacity);
}
release();
}
T& operator[](NvFlowUint64 idx)
{
return data[idx];
}
const T& operator[](NvFlowUint64 idx) const
{
return data[idx];
}
NvFlowUint64 allocateBack()
{
reserve(size + 1);
size++;
return size - 1;
}
void pushBack(const T& v)
{
operator[](allocateBack()) = v;
}
T& back()
{
return operator[](size - 1);
}
void popBack()
{
size--;
}
};
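// Usage sketch (element type and inline capacity illustrative): the first
// `staticCapacity` elements live in the inline cache with no heap allocation;
// growth past that doubles capacity on the heap.
NV_FLOW_INLINE void NvFlowArray_exampleUsage()
{
NvFlowArray<float, 8u> values;
for (NvFlowUint64 i = 0u; i < 20u; i++)
{
values.pushBack(float(i)); // moves to heap storage once size exceeds 8
}
float last = values.back(); // 19.f
(void)last;
values.popBack();  // size is now 19; elements stay constructed
values.size = 0u;  // the reset idiom used throughout this codebase
}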
/// Copy utility
template <class T, NvFlowUint64 staticCapacity = 0u, void(prerelease)(void* data, NvFlowUint64 size) = nullptr>
NV_FLOW_INLINE void NvFlowArray_copy(NvFlowArray<T, staticCapacity, prerelease>& dst, const NvFlowArray<T, staticCapacity, prerelease>& src)
{
dst.size = 0u;
dst.reserve(src.size);
dst.size = src.size;
for (NvFlowUint64 idx = 0u; idx < dst.size; idx++)
{
dst[idx] = src[idx];
}
}
template<class T>
NV_FLOW_INLINE void NvFlowArrayPointer_prerelease(void* dataIn, NvFlowUint64 size)
{
T* data = (T*)dataIn;
for (NvFlowUint64 idx = 0u; idx < size; idx++)
{
if (data[idx])
{
delete data[idx];
data[idx] = nullptr;
}
}
}
template<class T>
NV_FLOW_INLINE void NvFlowArrayPointer_allocate(T*& ptr)
{
ptr = new T();
}
template<class T, NvFlowUint64 staticCapacity = 0u>
struct NvFlowArrayPointer : public NvFlowArray<T, staticCapacity, NvFlowArrayPointer_prerelease<T>>
{
NvFlowArrayPointer() : NvFlowArray<T, staticCapacity, NvFlowArrayPointer_prerelease<T>>()
{
}
NvFlowArrayPointer(NvFlowArrayPointer&& rhs) : NvFlowArray<T, staticCapacity, NvFlowArrayPointer_prerelease<T>>(std::move(rhs))
{
}
~NvFlowArrayPointer()
{
}
T allocateBackPointer()
{
NvFlowUint64 allocIdx = this->allocateBack();
if (!(*this)[allocIdx])
{
NvFlowArrayPointer_allocate((*this)[allocIdx]);
}
return (*this)[allocIdx];
}
void pushBackPointer(const T& v)
{
NvFlowUint64 allocIdx = this->allocateBack();
deletePointerAtIndex(allocIdx);
(*this)[allocIdx] = v;
}
void swapPointers(NvFlowUint64 idxA, NvFlowUint64 idxB)
{
T temp = (*this)[idxA];
(*this)[idxA] = (*this)[idxB];
(*this)[idxB] = temp;
}
void removeSwapPointerAtIndex(NvFlowUint64 idx)
{
swapPointers(idx, this->size - 1u);
this->size--;
}
void removeSwapPointer(T ptr)
{
for (NvFlowUint64 idx = 0u; idx < this->size; idx++)
{
if ((*this)[idx] == ptr)
{
removeSwapPointerAtIndex(idx);
break;
}
}
}
void deletePointerAtIndex(NvFlowUint64 idx)
{
if ((*this)[idx])
{
delete (*this)[idx];
(*this)[idx] = nullptr;
}
}
void deletePointers()
{
this->size = this->capacity;
for (NvFlowUint64 idx = 0u; idx < this->size; idx++)
{
deletePointerAtIndex(idx);
}
this->size = 0u;
}
};
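// Usage sketch (the Job type is illustrative): NvFlowArrayPointer owns its
// heap objects. Slots past `size` keep their allocations so that
// allocateBackPointer() can recycle them instead of calling new each time.
struct NvFlowArrayPointer_exampleJob { int id = 0; };
NV_FLOW_INLINE void NvFlowArrayPointer_exampleUsage()
{
NvFlowArrayPointer<NvFlowArrayPointer_exampleJob*> jobs;
NvFlowArrayPointer_exampleJob* job = jobs.allocateBackPointer(); // new or recycled
job->id = 7;
jobs.removeSwapPointer(job); // O(1) removal; the object is retained for reuse
jobs.deletePointers();       // frees every owned object
}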
template<class T, NvFlowUint64 staticCapacity = 0u>
struct NvFlowRingBufferPointer
{
NvFlowArrayPointer<T, staticCapacity> arr;
NvFlowUint64 freeIdx = 0u;
NvFlowUint64 frontIdx = 0u;
NvFlowUint64 backIdx = 0u;
NvFlowRingBufferPointer() : arr()
{
}
NvFlowRingBufferPointer(NvFlowRingBufferPointer&& rhs) :
arr(std::move(rhs.arr)),
freeIdx(rhs.freeIdx),
frontIdx(rhs.frontIdx),
backIdx(rhs.backIdx)
{
}
~NvFlowRingBufferPointer()
{
}
T& front()
{
return arr[frontIdx];
}
T& back()
{
return arr[(backIdx - 1u) & (arr.size - 1)];
}
NvFlowUint64 activeCount()
{
return (backIdx - frontIdx) & (arr.size - 1);
}
NvFlowUint64 freeCount()
{
return (frontIdx - freeIdx) & (arr.size - 1);
}
void popFront()
{
frontIdx = (frontIdx + 1u) & (arr.size - 1);
}
void popFree()
{
freeIdx = (freeIdx + 1u) & (arr.size - 1);
}
T& operator[](NvFlowUint64 idx)
{
return arr[(frontIdx + idx) & (arr.size - 1)];
}
const T& operator[](NvFlowUint64 idx) const
{
return arr[(frontIdx + idx) & (arr.size - 1)];
}
NvFlowUint64 allocateBack()
{
if (arr.size == 0u)
{
arr.allocateBack();
}
if (freeCount() > 0u)
{
auto tmp = arr[freeIdx];
arr[freeIdx] = arr[backIdx];
arr[backIdx] = tmp;
popFree();
}
else if ((activeCount() + 1u) > (arr.size - 1))
{
NvFlowUint64 oldSize = arr.size;
arr.reserve(2u * oldSize);
arr.size = 2u * oldSize;
if (backIdx < frontIdx)
{
for (NvFlowUint64 idx = 0u; idx < backIdx; idx++)
{
auto tmp = arr[idx + oldSize];
arr[idx + oldSize] = arr[idx];
arr[idx] = tmp;
}
backIdx += oldSize;
}
}
NvFlowUint64 allocIdx = backIdx;
backIdx = (backIdx + 1u) & (arr.size - 1);
return allocIdx;
}
void pushBack(const T& v)
{
NvFlowUint64 allocIdx = allocateBack();
arr.deletePointerAtIndex(allocIdx);
arr[allocIdx] = v;
}
T allocateBackPointer()
{
NvFlowUint64 allocIdx = allocateBack();
if (!arr[allocIdx])
{
NvFlowArrayPointer_allocate(arr[allocIdx]);
}
return arr[allocIdx];
}
void deletePointers()
{
arr.deletePointers();
}
};
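// Usage sketch (the Fence type is illustrative), e.g. for tracking in-flight
// GPU work: entries become active via allocateBackPointer(), retire with
// popFront(), and retired slots are recycled by later allocations.
struct NvFlowRingBufferPointer_exampleFence { NvFlowUint64 value = 0llu; };
NV_FLOW_INLINE void NvFlowRingBufferPointer_exampleUsage()
{
NvFlowRingBufferPointer<NvFlowRingBufferPointer_exampleFence*> inflight;
NvFlowRingBufferPointer_exampleFence* fence = inflight.allocateBackPointer();
fence->value = 123llu;
// ... once the associated work completes:
inflight.popFront();        // front entry becomes free and eligible for reuse
inflight.deletePointers();  // final cleanup of all owned objects
}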

// File: NVIDIA-Omniverse/PhysX/flow/shared/NvFlowResourceCPU.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
// Workaround to scope includes per shader
#ifdef NV_FLOW_CPU_SHADER
#undef NV_FLOW_SHADER_TYPES_H
#undef NV_FLOW_SHADER_HLSLI
#undef NV_FLOW_RAY_MARCH_PARAMS_H
#undef NV_FLOW_RAY_MARCH_HLSLI
#undef NV_FLOW_RAY_MARCH_COMMON_HLSLI
#endif
// Disabled by default, to save build time
#define NV_FLOW_CPU_SHADER_DISABLE
#ifndef NV_FLOW_RESOURCE_CPU_H
#define NV_FLOW_RESOURCE_CPU_H
#include "NvFlowContext.h"
#include <math.h>
#include <atomic>
#include <string.h>
typedef NvFlowUint NvFlowCPU_Uint;
struct NvFlowCPU_Float2;
struct NvFlowCPU_Float3;
struct NvFlowCPU_Float4;
struct NvFlowCPU_Float4x4;
struct NvFlowCPU_Int2;
struct NvFlowCPU_Int3;
struct NvFlowCPU_Int4;
struct NvFlowCPU_Uint2;
struct NvFlowCPU_Uint3;
struct NvFlowCPU_Uint4;
NV_FLOW_INLINE int NvFlowCPU_max(int a, int b)
{
return a > b ? a : b;
}
NV_FLOW_INLINE int NvFlowCPU_min(int a, int b)
{
return a < b ? a : b;
}
NV_FLOW_INLINE float NvFlowCPU_round(float v)
{
return roundf(v);
}
NV_FLOW_INLINE float NvFlowCPU_abs(float v)
{
return fabsf(v);
}
NV_FLOW_INLINE float NvFlowCPU_floor(float v)
{
return floorf(v);
}
NV_FLOW_INLINE int NvFlowCPU_abs(int v)
{
return v < 0 ? -v : v;
}
NV_FLOW_INLINE float NvFlowCPU_sqrt(float v)
{
return sqrtf(v);
}
NV_FLOW_INLINE float NvFlowCPU_exp(float v)
{
return expf(v);
}
NV_FLOW_INLINE float NvFlowCPU_pow(float a, float b)
{
return powf(a, b);
}
NV_FLOW_INLINE float NvFlowCPU_log2(float v)
{
return log2f(v);
}
NV_FLOW_INLINE float NvFlowCPU_min(float a, float b)
{
//return fminf(a, b);
return a < b ? a : b;
}
NV_FLOW_INLINE float NvFlowCPU_max(float a, float b)
{
//return fmaxf(a, b);
return a > b ? a : b;
}
NV_FLOW_INLINE float NvFlowCPU_clamp(float v, float min, float max)
{
return NvFlowCPU_max(min, NvFlowCPU_min(v, max));
}
struct NvFlowCPU_Float2
{
float x, y;
NvFlowCPU_Float2() {}
NvFlowCPU_Float2(float x, float y) : x(x), y(y) {}
NV_FLOW_INLINE NvFlowCPU_Float2(const NvFlowCPU_Int2& rhs);
NvFlowCPU_Float2 operator+(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x + rhs.x, y + rhs.y); }
NvFlowCPU_Float2 operator-(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x - rhs.x, y - rhs.y); }
NvFlowCPU_Float2 operator*(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x * rhs.x, y * rhs.y); }
NvFlowCPU_Float2 operator/(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x / rhs.x, y / rhs.y); }
NvFlowCPU_Float2 operator+(const float& rhs) const { return NvFlowCPU_Float2(x + rhs, y + rhs); }
NvFlowCPU_Float2 operator-(const float& rhs) const { return NvFlowCPU_Float2(x - rhs, y - rhs); }
NvFlowCPU_Float2 operator*(const float& rhs) const { return NvFlowCPU_Float2(x * rhs, y * rhs); }
NvFlowCPU_Float2 operator/(const float& rhs) const { return NvFlowCPU_Float2(x / rhs, y / rhs); }
NvFlowCPU_Float2& operator+=(const NvFlowCPU_Float2& rhs) { x += rhs.x; y += rhs.y; return *this; }
NvFlowCPU_Float2& operator-=(const NvFlowCPU_Float2& rhs) { x -= rhs.x; y -= rhs.y; return *this; }
NvFlowCPU_Float2& operator*=(const NvFlowCPU_Float2& rhs) { x *= rhs.x; y *= rhs.y; return *this; }
NvFlowCPU_Float2& operator/=(const NvFlowCPU_Float2& rhs) { x /= rhs.x; y /= rhs.y; return *this; }
NvFlowCPU_Float2& operator+=(const float& rhs) { x += rhs; y += rhs; return *this; }
NvFlowCPU_Float2& operator-=(const float& rhs) { x -= rhs; y -= rhs; return *this; }
NvFlowCPU_Float2& operator*=(const float& rhs) { x *= rhs; y *= rhs; return *this; }
NvFlowCPU_Float2& operator/=(const float& rhs) { x /= rhs; y /= rhs; return *this; }
NvFlowCPU_Float2 operator+() const { return NvFlowCPU_Float2(+x, +y); }
NvFlowCPU_Float2 operator-() const { return NvFlowCPU_Float2(-x, -y); }
};
NV_FLOW_INLINE NvFlowCPU_Float2 operator+(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs + rhs.x, lhs + rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 operator-(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs - rhs.x, lhs - rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 operator*(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs * rhs.x, lhs * rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 operator/(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs / rhs.x, lhs / rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 NvFlowCPU_floor(NvFlowCPU_Float2 v)
{
return NvFlowCPU_Float2(floorf(v.x), floorf(v.y));
}
struct NvFlowCPU_Float3
{
float x, y, z;
NvFlowCPU_Float3() {}
NvFlowCPU_Float3(float x, float y, float z) : x(x), y(y), z(z) {}
NV_FLOW_INLINE NvFlowCPU_Float3(const NvFlowCPU_Int3& v);
NvFlowCPU_Float3 operator+(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x + rhs.x, y + rhs.y, z + rhs.z); }
NvFlowCPU_Float3 operator-(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x - rhs.x, y - rhs.y, z - rhs.z); }
NvFlowCPU_Float3 operator*(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x * rhs.x, y * rhs.y, z * rhs.z); }
NvFlowCPU_Float3 operator/(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x / rhs.x, y / rhs.y, z / rhs.z); }
NvFlowCPU_Float3 operator+(const float& rhs) const { return NvFlowCPU_Float3(x + rhs, y + rhs, z + rhs); }
NvFlowCPU_Float3 operator-(const float& rhs) const { return NvFlowCPU_Float3(x - rhs, y - rhs, z - rhs); }
NvFlowCPU_Float3 operator*(const float& rhs) const { return NvFlowCPU_Float3(x * rhs, y * rhs, z * rhs); }
NvFlowCPU_Float3 operator/(const float& rhs) const { return NvFlowCPU_Float3(x / rhs, y / rhs, z / rhs); }
NvFlowCPU_Float3& operator+=(const NvFlowCPU_Float3& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; return *this; }
NvFlowCPU_Float3& operator-=(const NvFlowCPU_Float3& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; return *this; }
NvFlowCPU_Float3& operator*=(const NvFlowCPU_Float3& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; return *this; }
NvFlowCPU_Float3& operator/=(const NvFlowCPU_Float3& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; return *this; }
NvFlowCPU_Float3& operator+=(const float& rhs) { x += rhs; y += rhs; z += rhs; return *this; }
NvFlowCPU_Float3& operator-=(const float& rhs) { x -= rhs; y -= rhs; z -= rhs; return *this; }
NvFlowCPU_Float3& operator*=(const float& rhs) { x *= rhs; y *= rhs; z *= rhs; return *this; }
NvFlowCPU_Float3& operator/=(const float& rhs) { x /= rhs; y /= rhs; z /= rhs; return *this; }
NvFlowCPU_Float3 operator+() const { return NvFlowCPU_Float3(+x, +y, +z); }
NvFlowCPU_Float3 operator-() const { return NvFlowCPU_Float3(-x, -y, -z); }
};
NV_FLOW_INLINE NvFlowCPU_Float3 operator*(const float& lhs, const NvFlowCPU_Float3& rhs) { return NvFlowCPU_Float3(lhs * rhs.x, lhs * rhs.y, lhs * rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_abs(NvFlowCPU_Float3 v)
{
return NvFlowCPU_Float3(fabsf(v.x), fabsf(v.y), fabsf(v.z));
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_floor(NvFlowCPU_Float3 v)
{
return NvFlowCPU_Float3(floorf(v.x), floorf(v.y), floorf(v.z));
}
NV_FLOW_INLINE float NvFlowCPU_length(NvFlowCPU_Float3 v)
{
return sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_max(NvFlowCPU_Float3 a, NvFlowCPU_Float3 b)
{
return NvFlowCPU_Float3(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y), NvFlowCPU_max(a.z, b.z));
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_min(NvFlowCPU_Float3 a, NvFlowCPU_Float3 b)
{
return NvFlowCPU_Float3(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y), NvFlowCPU_min(a.z, b.z));
}
NV_FLOW_INLINE float NvFlowCPU_dot(NvFlowCPU_Float3 a, NvFlowCPU_Float3 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_normalize(NvFlowCPU_Float3 v)
{
float length = NvFlowCPU_length(v);
if (length > 0.f)
{
v /= length;
}
return v;
}
struct NvFlowCPU_Float4
{
float x, y, z, w;
NvFlowCPU_Float4() {}
NvFlowCPU_Float4(float x, float y, float z, float w) : x(x), y(y), z(z), w(w) {}
NvFlowCPU_Float4(const NvFlowCPU_Float3& rhs, float w) : x(rhs.x), y(rhs.y), z(rhs.z), w(w) {}
NvFlowCPU_Float3& rgb() { return *((NvFlowCPU_Float3*)this); }
NvFlowCPU_Float2& rg() { return *((NvFlowCPU_Float2*)this); }
float& r() { return *((float*)this); }
NvFlowCPU_Float2& ba() { return *((NvFlowCPU_Float2*)&z); }
const NvFlowCPU_Float3& rgb() const { return *((const NvFlowCPU_Float3*)this); }
const NvFlowCPU_Float2& rg() const { return *((const NvFlowCPU_Float2*)this); }
const float& r() const { return *((const float*)this); }
const NvFlowCPU_Float2& ba() const { return *((const NvFlowCPU_Float2*)&z); }
NvFlowCPU_Float4 operator+(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w); }
NvFlowCPU_Float4 operator-(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x - rhs.x, y - rhs.y, z - rhs.z, w - rhs.w); }
NvFlowCPU_Float4 operator*(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x * rhs.x, y * rhs.y, z * rhs.z, w * rhs.w); }
NvFlowCPU_Float4 operator/(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x / rhs.x, y / rhs.y, z / rhs.z, w / rhs.w); }
NvFlowCPU_Float4 operator+(const float& rhs) const { return NvFlowCPU_Float4(x + rhs, y + rhs, z + rhs, w + rhs); }
NvFlowCPU_Float4 operator-(const float& rhs) const { return NvFlowCPU_Float4(x - rhs, y - rhs, z - rhs, w - rhs); }
NvFlowCPU_Float4 operator*(const float& rhs) const { return NvFlowCPU_Float4(x * rhs, y * rhs, z * rhs, w * rhs); }
NvFlowCPU_Float4 operator/(const float& rhs) const { return NvFlowCPU_Float4(x / rhs, y / rhs, z / rhs, w / rhs); }
NvFlowCPU_Float4& operator+=(const NvFlowCPU_Float4& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; w += rhs.w; return *this; }
NvFlowCPU_Float4& operator-=(const NvFlowCPU_Float4& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; w -= rhs.w; return *this; }
NvFlowCPU_Float4& operator*=(const NvFlowCPU_Float4& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; w *= rhs.w; return *this; }
NvFlowCPU_Float4& operator/=(const NvFlowCPU_Float4& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; w /= rhs.w; return *this; }
NvFlowCPU_Float4& operator*=(const float& rhs) { x *= rhs; y *= rhs; z *= rhs; w *= rhs; return *this; }
};
NV_FLOW_INLINE NvFlowCPU_Float4 operator*(const float& lhs, const NvFlowCPU_Float4& rhs) { return NvFlowCPU_Float4(lhs * rhs.x, lhs * rhs.y, lhs * rhs.z, lhs * rhs.w); }
NV_FLOW_INLINE float NvFlowCPU_dot(NvFlowCPU_Float4 a, NvFlowCPU_Float4 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_max(NvFlowCPU_Float4 a, NvFlowCPU_Float4 b)
{
return NvFlowCPU_Float4(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y), NvFlowCPU_max(a.z, b.z), NvFlowCPU_max(a.w, b.w));
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_min(NvFlowCPU_Float4 a, NvFlowCPU_Float4 b)
{
return NvFlowCPU_Float4(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y), NvFlowCPU_min(a.z, b.z), NvFlowCPU_min(a.w, b.w));
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_sign(NvFlowCPU_Float4 v)
{
return NvFlowCPU_Float4(
v.x == 0.f ? 0.f : (v.x < 0.f ? -1.f : +1.f),
v.y == 0.f ? 0.f : (v.y < 0.f ? -1.f : +1.f),
v.z == 0.f ? 0.f : (v.z < 0.f ? -1.f : +1.f),
v.w == 0.f ? 0.f : (v.w < 0.f ? -1.f : +1.f)
);
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_abs(NvFlowCPU_Float4 v)
{
return NvFlowCPU_Float4(fabsf(v.x), fabsf(v.y), fabsf(v.z), fabsf(v.w));
}
struct NvFlowCPU_Float4x4
{
NvFlowCPU_Float4 x, y, z, w;
NvFlowCPU_Float4x4() {}
NvFlowCPU_Float4x4(const NvFlowCPU_Float4& x, const NvFlowCPU_Float4& y, const NvFlowCPU_Float4& z, const NvFlowCPU_Float4& w) : x(x), y(y), z(z), w(w) {}
};
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_mul(const NvFlowCPU_Float4& x, const NvFlowCPU_Float4x4 A)
{
return NvFlowCPU_Float4(
{ A.x.x * x.x + A.x.y * x.y + A.x.z * x.z + A.x.w * x.w },
{ A.y.x * x.x + A.y.y * x.y + A.y.z * x.z + A.y.w * x.w },
{ A.z.x * x.x + A.z.y * x.y + A.z.z * x.z + A.z.w * x.w },
{ A.w.x * x.x + A.w.y * x.y + A.w.z * x.z + A.w.w * x.w }
);
}
struct NvFlowCPU_Int2
{
int x, y;
NvFlowCPU_Int2() {}
NvFlowCPU_Int2(int x, int y) : x(x), y(y) {}
NvFlowCPU_Int2(const NvFlowCPU_Float2& rhs) : x(int(rhs.x)), y(int(rhs.y)) {}
NV_FLOW_INLINE NvFlowCPU_Int2(const NvFlowCPU_Uint2& rhs);
NvFlowCPU_Int2 operator+(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x + rhs.x, y + rhs.y); }
NvFlowCPU_Int2 operator-(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x - rhs.x, y - rhs.y); }
NvFlowCPU_Int2 operator*(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x * rhs.x, y * rhs.y); }
NvFlowCPU_Int2 operator/(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x / rhs.x, y / rhs.y); }
NvFlowCPU_Int2 operator+(const int& rhs) const { return NvFlowCPU_Int2(x + rhs, y + rhs); }
NvFlowCPU_Int2 operator-(const int& rhs) const { return NvFlowCPU_Int2(x - rhs, y - rhs); }
NvFlowCPU_Int2 operator*(const int& rhs) const { return NvFlowCPU_Int2(x * rhs, y * rhs); }
NvFlowCPU_Int2 operator/(const int& rhs) const { return NvFlowCPU_Int2(x / rhs, y / rhs); }
};
NV_FLOW_INLINE NvFlowCPU_Float2::NvFlowCPU_Float2(const NvFlowCPU_Int2& rhs) : x(float(rhs.x)), y(float(rhs.y)) {}
NV_FLOW_INLINE NvFlowCPU_Int2 NvFlowCPU_max(NvFlowCPU_Int2 a, NvFlowCPU_Int2 b)
{
return NvFlowCPU_Int2(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y));
}
NV_FLOW_INLINE NvFlowCPU_Int2 NvFlowCPU_min(NvFlowCPU_Int2 a, NvFlowCPU_Int2 b)
{
return NvFlowCPU_Int2(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y));
}
struct NvFlowCPU_Int3
{
int x, y, z;
NvFlowCPU_Int3() {}
NvFlowCPU_Int3(int x, int y, int z) : x(x), y(y), z(z) {}
NV_FLOW_INLINE NvFlowCPU_Int3(const NvFlowCPU_Uint3& v);
NV_FLOW_INLINE NvFlowCPU_Int3(const NvFlowCPU_Float3& v);
NvFlowCPU_Int2& rg() { return *((NvFlowCPU_Int2*)this); }
int& r() { return *((int*)this); }
NvFlowCPU_Int3 operator+(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x + rhs.x, y + rhs.y, z + rhs.z); }
NvFlowCPU_Int3 operator-(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x - rhs.x, y - rhs.y, z - rhs.z); }
NvFlowCPU_Int3 operator*(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x * rhs.x, y * rhs.y, z * rhs.z); }
NvFlowCPU_Int3 operator/(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x / rhs.x, y / rhs.y, z / rhs.z); }
NvFlowCPU_Int3& operator+=(const NvFlowCPU_Int3& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; return *this; }
NvFlowCPU_Int3& operator-=(const NvFlowCPU_Int3& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; return *this; }
NvFlowCPU_Int3& operator*=(const NvFlowCPU_Int3& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; return *this; }
NvFlowCPU_Int3& operator/=(const NvFlowCPU_Int3& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; return *this; }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x << rhs.x, y << rhs.y, z << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator&(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x & rhs.x, y & rhs.y, z & rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator|(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x | rhs.x, y | rhs.y, z | rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const int& rhs) const { return NvFlowCPU_Int3(x >> rhs, y >> rhs, z >> rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const int& rhs) const { return NvFlowCPU_Int3(x << rhs, y << rhs, z << rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const NvFlowCPU_Uint& rhs) const { return NvFlowCPU_Int3(x >> rhs, y >> rhs, z >> rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const NvFlowCPU_Uint& rhs) const { return NvFlowCPU_Int3(x << rhs, y << rhs, z << rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const NvFlowCPU_Uint3& rhs) const;
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const NvFlowCPU_Uint3& rhs) const;
};
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_max(NvFlowCPU_Int3 a, NvFlowCPU_Int3 b)
{
return NvFlowCPU_Int3(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y), NvFlowCPU_max(a.z, b.z));
}
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_min(NvFlowCPU_Int3 a, NvFlowCPU_Int3 b)
{
return NvFlowCPU_Int3(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y), NvFlowCPU_min(a.z, b.z));
}
struct NvFlowCPU_Int4
{
int x, y, z, w;
NvFlowCPU_Int4() {}
NvFlowCPU_Int4(int x, int y, int z, int w) : x(x), y(y), z(z), w(w) {}
NvFlowCPU_Int4(const NvFlowCPU_Int2& a, const NvFlowCPU_Int2& b) : x(a.x), y(a.y), z(b.x), w(b.y) {}
NvFlowCPU_Int4(const NvFlowCPU_Int3& rhs, int w) : x(rhs.x), y(rhs.y), z(rhs.z), w(w) {}
NvFlowCPU_Int4(const NvFlowCPU_Uint4& rhs);
NvFlowCPU_Int3& rgb() { return *((NvFlowCPU_Int3*)this); }
NvFlowCPU_Int2& rg() { return *((NvFlowCPU_Int2*)this); }
int& r() { return *((int*)this); }
NvFlowCPU_Int2& ba() { return *((NvFlowCPU_Int2*)&z); }
const NvFlowCPU_Int3& rgb()const { return *((const NvFlowCPU_Int3*)this); }
const NvFlowCPU_Int2& rg()const { return *((const NvFlowCPU_Int2*)this); }
const int& r()const { return *((const int*)this); }
const NvFlowCPU_Int2& ba()const { return *((const NvFlowCPU_Int2*)&z); }
NvFlowCPU_Int4 operator+(const NvFlowCPU_Int4& rhs) const { return NvFlowCPU_Int4(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w); }
};
struct NvFlowCPU_Uint2
{
NvFlowUint x, y;
NvFlowCPU_Uint2() {}
NvFlowCPU_Uint2(NvFlowUint x, NvFlowUint y) : x(x), y(y) {}
NvFlowCPU_Uint2(const NvFlowCPU_Int2& rhs) : x(rhs.x), y(rhs.y) {}
};
NV_FLOW_INLINE NvFlowCPU_Int2::NvFlowCPU_Int2(const NvFlowCPU_Uint2& rhs) : x(rhs.x), y(rhs.y) {}
struct NvFlowCPU_Uint3
{
NvFlowUint x, y, z;
NvFlowCPU_Uint3() {}
NvFlowCPU_Uint3(NvFlowUint x, NvFlowUint y, NvFlowUint z) : x(x), y(y), z(z) {}
NV_FLOW_INLINE NvFlowCPU_Uint3(const NvFlowCPU_Int3& v);
NvFlowCPU_Uint2& rg() { return *((NvFlowCPU_Uint2*)this); }
NvFlowCPU_Uint& r() { return *((NvFlowCPU_Uint*)this); }
NvFlowCPU_Uint3 operator+(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x + rhs.x, y + rhs.y, z + rhs.z); }
NvFlowCPU_Uint3 operator-(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x - rhs.x, y - rhs.y, z - rhs.z); }
NvFlowCPU_Uint3 operator*(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x * rhs.x, y * rhs.y, z * rhs.z); }
NvFlowCPU_Uint3 operator/(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x / rhs.x, y / rhs.y, z / rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator&(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x & rhs.x, y & rhs.y, z & rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator|(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x | rhs.x, y | rhs.y, z | rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x << rhs.x, y << rhs.y, z << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Uint3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Uint3(x << rhs.x, y << rhs.y, z << rhs.z); }
};
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs >> rhs.x, lhs >> rhs.y, lhs >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x >> rhs, lhs.y >> rhs, lhs.z >> rhs); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs << rhs.x, lhs << rhs.y, lhs << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x << rhs, lhs.y << rhs, lhs.z << rhs); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator+(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs + rhs.x, lhs + rhs.y, lhs + rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator+(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x + rhs, lhs.y + rhs, lhs.z + rhs); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator-(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs - rhs.x, lhs - rhs.y, lhs - rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator-(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x - rhs, lhs.y - rhs, lhs.z - rhs); }
struct NvFlowCPU_Uint4
{
NvFlowUint x, y, z, w;
NvFlowCPU_Uint4() {}
NvFlowCPU_Uint4(NvFlowUint x, NvFlowUint y, NvFlowUint z, NvFlowUint w) : x(x), y(y), z(z), w(w) {}
NvFlowCPU_Uint4 operator+(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w); }
NvFlowCPU_Uint4 operator-(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x - rhs.x, y - rhs.y, z - rhs.z, w - rhs.w); }
NvFlowCPU_Uint4 operator*(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x * rhs.x, y * rhs.y, z * rhs.z, w * rhs.w); }
NvFlowCPU_Uint4 operator/(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x / rhs.x, y / rhs.y, z / rhs.z, w / rhs.w); }
NvFlowCPU_Uint4& operator+=(const NvFlowCPU_Uint4& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; w += rhs.w; return *this; }
NvFlowCPU_Uint4& operator-=(const NvFlowCPU_Uint4& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; w -= rhs.w; return *this; }
NvFlowCPU_Uint4& operator*=(const NvFlowCPU_Uint4& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; w *= rhs.w; return *this; }
NvFlowCPU_Uint4& operator/=(const NvFlowCPU_Uint4& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; w /= rhs.w; return *this; }
};
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_Int3::operator>>(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Int3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_Int3::operator<<(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Int3(x << rhs.x, y << rhs.y, z << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Float3::NvFlowCPU_Float3(const NvFlowCPU_Int3& v) : x(float(v.x)), y(float(v.y)), z(float(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Int3::NvFlowCPU_Int3(const NvFlowCPU_Uint3& v) : x(int(v.x)), y(int(v.y)), z(int(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Int3::NvFlowCPU_Int3(const NvFlowCPU_Float3& v) : x(int(v.x)), y(int(v.y)), z(int(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Uint3::NvFlowCPU_Uint3(const NvFlowCPU_Int3& v) : x(int(v.x)), y(int(v.y)), z(int(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Int4::NvFlowCPU_Int4(const NvFlowCPU_Uint4& rhs) : x(int(rhs.x)), y(int(rhs.y)), z(int(rhs.z)), w(int(rhs.w)) {}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_asfloat(NvFlowCPU_Uint4 v) {return *((NvFlowCPU_Float4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_asfloat(NvFlowCPU_Uint3 v) {return *((NvFlowCPU_Float3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float2 NvFlowCPU_asfloat(NvFlowCPU_Uint2 v) {return *((NvFlowCPU_Float2*)&v);}
NV_FLOW_INLINE float NvFlowCPU_asfloat(NvFlowUint v) {return *((float*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_asfloat(NvFlowCPU_Int4 v) {return *((NvFlowCPU_Float4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_asfloat(NvFlowCPU_Int3 v) {return *((NvFlowCPU_Float3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float2 NvFlowCPU_asfloat(NvFlowCPU_Int2 v) {return *((NvFlowCPU_Float2*)&v);}
NV_FLOW_INLINE float NvFlowCPU_asfloat(int v) {return *((float*)&v);}
NV_FLOW_INLINE NvFlowCPU_Uint4 NvFlowCPU_asuint(NvFlowCPU_Float4 v) {return *((NvFlowCPU_Uint4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Uint3 NvFlowCPU_asuint(NvFlowCPU_Float3 v) {return *((NvFlowCPU_Uint3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Uint2 NvFlowCPU_asuint(NvFlowCPU_Float2 v) {return *((NvFlowCPU_Uint2*)&v);}
NV_FLOW_INLINE NvFlowUint NvFlowCPU_asuint(float v) {return *((NvFlowUint*)&v);}
NV_FLOW_INLINE NvFlowCPU_Int4 NvFlowCPU_asint(NvFlowCPU_Float4 v) {return *((NvFlowCPU_Int4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_asint(NvFlowCPU_Float3 v) {return *((NvFlowCPU_Int3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Int2 NvFlowCPU_asint(NvFlowCPU_Float2 v) {return *((NvFlowCPU_Int2*)&v);}
NV_FLOW_INLINE int NvFlowCPU_asint(float v) {return *((int*)&v);}
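// These mirror HLSL's asfloat/asuint/asint bit reinterpretations for the CPU
// shader emulation. A tiny round-trip check (values illustrative):
NV_FLOW_INLINE void NvFlowCPU_bitcast_example()
{
NvFlowUint bits = NvFlowCPU_asuint(1.0f); // 0x3F800000 on IEEE-754 floats
float back = NvFlowCPU_asfloat(bits);     // exactly 1.0f again
(void)back;
}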
struct NvFlowCPU_Resource
{
void* data;
NvFlowUint64 sizeInBytes;
NvFlowUint elementSizeInBytes;
NvFlowUint elementCount;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NvFlowUint depth;
NvFlowSamplerDesc samplerDesc;
};
template <typename T>
struct NvFlowCPU_ConstantBuffer
{
const T* data;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
}
};
template <typename T>
struct NvFlowCPU_StructuredBuffer
{
const T* data;
NvFlowUint count;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
count = resource->elementCount;
}
const T& operator[](int index) {
if (index < 0 || index >= int(count)) index = 0;
return data[index];
}
};
template <typename T>
struct NvFlowCPU_RWStructuredBuffer
{
T* data;
NvFlowUint count;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
count = resource->elementCount;
}
T& operator[](int index) {
if (index < 0 || index >= int(count)) index = 0;
return data[index];
}
};
struct NvFlowCPU_SamplerState
{
NvFlowSamplerDesc desc;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
desc = resource->samplerDesc;
}
};
template <typename T>
struct NvFlowCPU_Texture1D
{
const T* data;
NvFlowFormat format;
NvFlowUint width;
T out_of_bounds = {};
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
format = resource->format;
width = resource->width;
memset(&out_of_bounds, 0, sizeof(out_of_bounds));
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_Texture1D<T>& tex, int index)
{
if (index < 0 || index >= int(tex.width))
{
return tex.out_of_bounds;
}
return tex.data[index];
}
template <typename T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_textureSampleLevel(NvFlowCPU_Texture1D<T>& tex, NvFlowCPU_SamplerState state, const float pos, float lod)
{
float posf(float(tex.width) * pos);
// clamp sampler
if (posf < 0.5f) posf = 0.5f;
if (posf > float(tex.width) - 0.5f) posf = float(tex.width) - 0.5f;
int pos0 = int(NvFlowCPU_floor(posf - 0.5f));
float f = posf - 0.5f - float(pos0);
float of = 1.f - f;
T sum = of * NvFlowCPU_textureRead(tex, pos0 + 0);
sum += f * NvFlowCPU_textureRead(tex, pos0 + 1);
return sum;
}
template <typename T>
struct NvFlowCPU_RWTexture1D
{
T* data;
NvFlowFormat format;
NvFlowUint width;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
format = resource->format;
width = resource->width;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_RWTexture1D<T>& tex, int index)
{
return tex.data[index];
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(NvFlowCPU_RWTexture1D<T>& tex, int index, const T value)
{
tex.data[index] = value;
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(bool pred, NvFlowCPU_RWTexture1D<T>& tex, int index, const T value)
{
if (pred)
{
NvFlowCPU_textureWrite(tex, index, value);
}
}
template <typename T>
struct NvFlowCPU_Texture2D
{
const T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
T out_of_bounds;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
memset(&out_of_bounds, 0, sizeof(out_of_bounds));
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_Texture2D<T>& tex, NvFlowCPU_Int2 index)
{
if (index.x < 0 || index.x >= int(tex.width) ||
index.y < 0 || index.y >= int(tex.height))
{
return tex.out_of_bounds;
}
return tex.data[index.y * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_textureSampleLevel(NvFlowCPU_Texture2D<T>& tex, NvFlowCPU_SamplerState state, const NvFlowCPU_Float2 pos, float lod)
{
NvFlowCPU_Float2 posf(NvFlowCPU_Float2(float(tex.width), float(tex.height)) * pos);
// clamp sampler
if (posf.x < 0.5f) posf.x = 0.5f;
if (posf.x > float(tex.width) - 0.5f) posf.x = float(tex.width) - 0.5f;
if (posf.y < 0.5f) posf.y = 0.5f;
if (posf.y > float(tex.height) - 0.5f) posf.y = float(tex.height) - 0.5f;
NvFlowCPU_Int2 pos00 = NvFlowCPU_Int2(NvFlowCPU_floor(posf - NvFlowCPU_Float2(0.5f, 0.5f)));
NvFlowCPU_Float2 f = posf - NvFlowCPU_Float2(0.5f, 0.5f) - NvFlowCPU_Float2(pos00);
NvFlowCPU_Float2 of = NvFlowCPU_Float2(1.f, 1.f) - f;
T sum = of.x * of.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(0, 0));
sum += f.x * of.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(1, 0));
sum += of.x * f.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(0, 1));
sum += f.x * f.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(1, 1));
return sum;
}
template <typename T>
struct NvFlowCPU_RWTexture2D
{
T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_RWTexture2D<T>& tex, NvFlowCPU_Int2 index)
{
return tex.data[index.y * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(NvFlowCPU_RWTexture2D<T>& tex, NvFlowCPU_Int2 index, const T value)
{
tex.data[index.y * tex.width + index.x] = value;
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(bool pred, NvFlowCPU_RWTexture2D<T>& tex, NvFlowCPU_Int2 index, const T value)
{
if (pred)
{
NvFlowCPU_textureWrite(tex, index, value);
}
}
template <typename T>
struct NvFlowCPU_Texture3D
{
const T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NvFlowUint depth;
T out_of_bounds;
NvFlowUint wh;
NvFlowUint whd;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
depth = resource->depth;
memset(&out_of_bounds, 0, sizeof(out_of_bounds));
wh = width * height;
whd = wh * depth;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_Texture3D<T>& tex, NvFlowCPU_Int3 index)
{
if (index.x < 0 || index.x >= int(tex.width) ||
index.y < 0 || index.y >= int(tex.height) ||
index.z < 0 || index.z >= int(tex.depth))
{
return tex.out_of_bounds;
}
return tex.data[(index.z * tex.height + index.y) * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_textureSampleLevel(NvFlowCPU_Texture3D<T>& tex, NvFlowCPU_SamplerState state, const NvFlowCPU_Float3 pos, float lod)
{
NvFlowCPU_Float3 posf(NvFlowCPU_Float3(float(tex.width), float(tex.height), float(tex.depth)) * pos);
// clamp sampler
if (posf.x < 0.5f) posf.x = 0.5f;
if (posf.x > float(tex.width) - 0.5f) posf.x = float(tex.width) - 0.5f;
if (posf.y < 0.5f) posf.y = 0.5f;
if (posf.y > float(tex.height) - 0.5f) posf.y = float(tex.height) - 0.5f;
if (posf.z < 0.5f) posf.z = 0.5f;
if (posf.z > float(tex.depth) - 0.5f) posf.z = float(tex.depth) - 0.5f;
NvFlowCPU_Int4 pos000 = NvFlowCPU_Int4(NvFlowCPU_floor(posf - NvFlowCPU_Float3(0.5f, 0.5f, 0.5f)), 0);
NvFlowCPU_Float3 f = posf - NvFlowCPU_Float3(0.5f, 0.5f, 0.5f) - NvFlowCPU_Float3(float(pos000.x), float(pos000.y), float(pos000.z));
NvFlowCPU_Float3 of = NvFlowCPU_Float3(1.f, 1.f, 1.f) - f;
NvFlowCPU_Float4 wl(
of.x * of.y * of.z,
f.x * of.y * of.z,
of.x * f.y * of.z,
f.x * f.y * of.z
);
NvFlowCPU_Float4 wh(
of.x * of.y * f.z,
f.x * of.y * f.z,
of.x * f.y * f.z,
f.x * f.y * f.z
);
T sum;
if (pos000.x >= 0 && pos000.y >= 0 && pos000.z >= 0 &&
pos000.x <= int(tex.width - 2) && pos000.y <= int(tex.height - 2) && pos000.z <= int(tex.depth - 2))
{
NvFlowUint idx000 = pos000.z * tex.wh + pos000.y * tex.width + pos000.x;
sum = wl.x * tex.data[idx000];
sum += wl.y * tex.data[idx000 + 1u];
sum += wl.z * tex.data[idx000 + tex.width];
sum += wl.w * tex.data[idx000 + 1u + tex.width];
sum += wh.x * tex.data[idx000 + tex.wh];
sum += wh.y * tex.data[idx000 + 1u + tex.wh];
sum += wh.z * tex.data[idx000 + tex.width + tex.wh];
sum += wh.w * tex.data[idx000 + 1u + tex.width + tex.wh];
}
else
{
sum = wl.x * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 0, 0));
sum += wl.y * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 0, 0));
sum += wl.z * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 1, 0));
sum += wl.w * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 1, 0));
sum += wh.x * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 0, 1));
sum += wh.y * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 0, 1));
sum += wh.z * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 1, 1));
sum += wh.w * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 1, 1));
}
return sum;
}
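// Fast path note: when the 2x2x2 fetch footprint is fully interior, a single
// flattened base index idx000 is computed once and the seven neighbors are
// reached by adding precomputed strides (+1 in x, +tex.width in y, +tex.wh in
// z), skipping per-texel bounds checks. Footprints touching a border fall
// back to the bounds-checked NvFlowCPU_textureRead() path above.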
template <typename T>
struct NvFlowCPU_RWTexture3D
{
T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NvFlowUint depth;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
depth = resource->depth;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index)
{
return tex.data[(index.z * tex.height + index.y) * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, const T value)
{
tex.data[(index.z * tex.height + index.y) * tex.width + index.x] = value;
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(bool pred, NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, const T value)
{
if (pred)
{
NvFlowCPU_textureWrite(tex, index, value);
}
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedAdd(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
((std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x])->fetch_add(value);
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedMin(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
// std::atomic gains fetch_min only in C++26; emulate it with a CAS loop
std::atomic<T>* atomicValue = (std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x];
T oldValue = atomicValue->load();
while (value < oldValue && !atomicValue->compare_exchange_weak(oldValue, value))
{
}
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedOr(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
((std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x])->fetch_or(value);
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedAnd(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
((std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x])->fetch_and(value);
}
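// The interlocked helpers above reinterpret raw texel storage as
// std::atomic<T> to mirror HLSL interlocked intrinsics. This assumes
// std::atomic<T> is lock-free and layout-compatible with T, which holds in
// practice for the 32-bit integer formats these are used with. Illustrative
// usage (a sketch, assuming a bound NvFlowCPU_RWTexture3D<NvFlowUint> named
// countTex):
//
//   NvFlowCPU_InterlockedAdd(countTex, NvFlowCPU_Int3(x, y, z), 1u);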
template <class T>
struct NvFlowCPU_Groupshared
{
T data;
};
template <class T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_swrite(int _groupshared_pass, int _groupshared_sync_count, NvFlowCPU_Groupshared<T>& g, const T& value)
{
if (_groupshared_pass == _groupshared_sync_count)
{
g.data = value;
}
}
template <class T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_sread(NvFlowCPU_Groupshared<T>& g)
{
return g.data;
}
template <class T, unsigned int arraySize>
struct NvFlowCPU_GroupsharedArray
{
T data[arraySize];
};
template <class T, unsigned int arraySize>
NV_FLOW_FORCE_INLINE void NvFlowCPU_swrite(int _groupshared_pass, int _groupshared_sync_count, NvFlowCPU_GroupsharedArray<T, arraySize>& g, int index, const T& value)
{
if (_groupshared_pass == _groupshared_sync_count)
{
g.data[index] = value;
}
}
template <class T, unsigned int arraySize>
NV_FLOW_FORCE_INLINE T NvFlowCPU_sread(NvFlowCPU_GroupsharedArray<T, arraySize>& g, int index)
{
return g.data[index];
}
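// Groupshared emulation sketch: with no real thread barriers on CPU, a
// translated compute shader body is presumably re-executed once per barrier
// interval ("pass"). NvFlowCPU_swrite() only commits when the current pass
// matches the sync count recorded at that write site, while
// NvFlowCPU_sread() always reads the committed value, approximating
// GroupMemoryBarrierWithGroupSync() ordering. A plausible usage pattern
// (pass/syncCount management is assumed here, not shown in this header):
//
//   NvFlowCPU_swrite(pass, syncCount, sharedData, threadIdx, value);
//   // ... barrier: syncCount increments, body re-executes ...
//   T v = NvFlowCPU_sread(sharedData, threadIdx);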
#endif | 37,820 | C | 37.790769 | 169 | 0.684479 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowDynamicBuffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
#include "NvFlowArray.h"
struct NvFlowDynamicBuffer
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowBufferUsageFlags flags = 0u;
NvFlowFormat format = eNvFlowFormat_unknown;
NvFlowUint structureStride = 0u;
NvFlowBuffer* deviceBuffer = nullptr;
NvFlowUint64 deviceNumBytes = 0llu;
NvFlowBufferTransient* transientBuffer = nullptr;
NvFlowUint64 transientFrame = ~0llu;
};
NV_FLOW_INLINE void NvFlowDynamicBuffer_init(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowDynamicBuffer* ptr, NvFlowBufferUsageFlags flags, NvFlowFormat format, NvFlowUint structureStride)
{
ptr->contextInterface = contextInterface;
ptr->flags = flags;
ptr->format = format;
ptr->structureStride = structureStride;
}
NV_FLOW_INLINE void NvFlowDynamicBuffer_destroy(NvFlowContext* context, NvFlowDynamicBuffer* ptr)
{
if (ptr->deviceBuffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->deviceBuffer);
ptr->deviceBuffer = nullptr;
}
}
NV_FLOW_INLINE void NvFlowDynamicBuffer_resize(NvFlowContext* context, NvFlowDynamicBuffer* ptr, NvFlowUint64 numBytes)
{
if (ptr->deviceBuffer && ptr->deviceNumBytes < numBytes)
{
ptr->contextInterface->destroyBuffer(context, ptr->deviceBuffer);
ptr->deviceBuffer = nullptr;
ptr->deviceNumBytes = 0llu;
ptr->transientFrame = ~0llu;
}
if (!ptr->deviceBuffer)
{
NvFlowBufferDesc bufDesc = {};
bufDesc.format = ptr->format;
bufDesc.usageFlags = ptr->flags;
bufDesc.structureStride = ptr->structureStride;
bufDesc.sizeInBytes = 65536u;
while (bufDesc.sizeInBytes < numBytes)
{
bufDesc.sizeInBytes *= 2u;
}
ptr->deviceNumBytes = bufDesc.sizeInBytes;
ptr->deviceBuffer = ptr->contextInterface->createBuffer(context, eNvFlowMemoryType_device, &bufDesc);
}
}
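// Typical per-frame usage (a sketch): grow the buffer to the required byte
// count, then fetch the transient handle for this frame's passes. The
// transient is cached per frame, so repeated calls within one frame are cheap.
//
//   NvFlowDynamicBuffer_resize(context, &buf, numBytes);
//   NvFlowBufferTransient* transient = NvFlowDynamicBuffer_getTransient(context, &buf);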
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowDynamicBuffer_getTransient(NvFlowContext* context, NvFlowDynamicBuffer* ptr)
{
if (ptr->transientFrame == ptr->contextInterface->getCurrentFrame(context))
{
return ptr->transientBuffer;
}
if (ptr->deviceBuffer)
{
ptr->transientBuffer = ptr->contextInterface->registerBufferAsTransient(context, ptr->deviceBuffer);
ptr->transientFrame = ptr->contextInterface->getCurrentFrame(context);
}
return ptr->transientBuffer;
} | 3,846 | C | 36.715686 | 215 | 0.771711 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowBufferVariable.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
struct NvFlowBufferVariable
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowBufferTransient* transientBuffer = nullptr;
NvFlowUint64 transientFrame = ~0llu;
NvFlowBuffer* buffer = nullptr;
NvFlowArray<NvFlowBufferAcquire*, 4u> acquires;
};
NV_FLOW_INLINE void NvFlowBufferVariable_init(NvFlowContextInterface* contextInterface, NvFlowBufferVariable* ptr)
{
ptr->contextInterface = contextInterface;
}
NV_FLOW_INLINE void NvFlowBufferVariable_flush(NvFlowContext* context, NvFlowBufferVariable* ptr)
{
// process acquire queue
NvFlowUint acquireWriteIdx = 0u;
for (NvFlowUint acquireReadIdx = 0u; acquireReadIdx < ptr->acquires.size; acquireReadIdx++)
{
NvFlowBuffer* acquiredBuffer = nullptr;
if (ptr->contextInterface->getAcquiredBuffer(context, ptr->acquires[acquireReadIdx], &acquiredBuffer))
{
if (ptr->buffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffer);
ptr->buffer = nullptr;
}
ptr->buffer = acquiredBuffer;
}
else
{
ptr->acquires[acquireWriteIdx++] = ptr->acquires[acquireReadIdx];
}
}
ptr->acquires.size = acquireWriteIdx;
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowBufferVariable_get(NvFlowContext* context, NvFlowBufferVariable* ptr)
{
if (ptr->transientFrame == ptr->contextInterface->getCurrentFrame(context))
{
return ptr->transientBuffer;
}
NvFlowBufferVariable_flush(context, ptr);
if (ptr->buffer)
{
ptr->transientBuffer = ptr->contextInterface->registerBufferAsTransient(context, ptr->buffer);
ptr->transientFrame = ptr->contextInterface->getCurrentFrame(context);
}
else
{
ptr->transientBuffer = nullptr;
ptr->transientFrame = ~0llu;
}
return ptr->transientBuffer;
}
NV_FLOW_INLINE void NvFlowBufferVariable_set(NvFlowContext* context, NvFlowBufferVariable* ptr, NvFlowBufferTransient* transientBuffer)
{
NvFlowBufferVariable_flush(context, ptr);
if (ptr->buffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffer);
ptr->buffer = nullptr;
}
ptr->transientBuffer = nullptr;
ptr->transientFrame = ~0llu;
if (transientBuffer)
{
ptr->transientBuffer = transientBuffer;
ptr->transientFrame = ptr->contextInterface->getCurrentFrame(context);
// push acquire
ptr->acquires.pushBack(ptr->contextInterface->enqueueAcquireBuffer(context, transientBuffer));
}
}
NV_FLOW_INLINE void NvFlowBufferVariable_destroy(NvFlowContext* context, NvFlowBufferVariable* ptr)
{
NvFlowBufferVariable_set(context, ptr, nullptr);
} | 4,068 | C | 35.008849 | 135 | 0.76647 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowTimer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowTypes.h"
#if defined(_WIN32)
#include <Windows.h>
#else
#include <time.h>
#endif
#include "NvFlowArray.h"
NV_FLOW_INLINE void NvFlowTimeStamp_capture(NvFlowUint64* ptr)
{
#if defined(_WIN32)
LARGE_INTEGER tmpCpuTime = {};
QueryPerformanceCounter(&tmpCpuTime);
(*ptr) = tmpCpuTime.QuadPart;
#else
timespec timeValue = {};
clock_gettime(CLOCK_MONOTONIC, &timeValue);
(*ptr) = 1000000000llu * NvFlowUint64(timeValue.tv_sec) + NvFlowUint64(timeValue.tv_nsec); // integer math avoids double rounding at large uptimes
#endif
}
NV_FLOW_INLINE NvFlowUint64 NvFlowTimeStamp_frequency()
{
#if defined(_WIN32)
LARGE_INTEGER tmpCpuFreq = {};
QueryPerformanceFrequency(&tmpCpuFreq);
return tmpCpuFreq.QuadPart;
#else
return 1000000000llu; // clock_gettime reports nanoseconds
#endif
}
NV_FLOW_INLINE float NvFlowTimeStamp_diff(NvFlowUint64 begin, NvFlowUint64 end, NvFlowUint64 freq)
{
return (float)(((double)(end - begin) / (double)(freq)));
}
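// Example usage (a sketch): timing a block of work in seconds.
//
//   NvFlowUint64 begin = 0llu;
//   NvFlowUint64 end = 0llu;
//   NvFlowTimeStamp_capture(&begin);
//   // ... work to measure ...
//   NvFlowTimeStamp_capture(&end);
//   float seconds = NvFlowTimeStamp_diff(begin, end, NvFlowTimeStamp_frequency());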
#if 1
#define NV_FLOW_PROFILE_BEGIN(profileInterval, profileOffset)
#define NV_FLOW_PROFILE_TIMESTAMP(name)
#define NV_FLOW_PROFILE_FLUSH(name, logPrint)
#else
#define NV_FLOW_PROFILE_BEGIN(profileInterval, profileOffset) \
static int profileCount = profileOffset; \
profileCount++; \
if (profileCount >= profileInterval) \
{ \
profileCount = 0; \
} \
NvFlowArray<NvFlowUint64, 32u> profileTimes; \
NvFlowArray<const char*, 32u> profileNames; \
const NvFlowBool32 profileEnabled = (profileCount == 0);
#define NV_FLOW_PROFILE_TIMESTAMP(name) \
if (profileEnabled) \
{ \
NvFlowTimeStamp_capture(&profileTimes[profileTimes.allocateBack()]); \
profileNames.pushBack(#name); \
}
#define NV_FLOW_PROFILE_FLUSH(name, logPrint) \
if (profileEnabled && logPrint && profileTimes.size >= 2u) \
{ \
NvFlowUint64 freq = NvFlowTimeStamp_frequency(); \
float totalTime = NvFlowTimeStamp_diff(profileTimes[0u], profileTimes[profileTimes.size - 1u], freq); \
for (NvFlowUint64 idx = 1u; idx < profileTimes.size; idx++) \
{ \
float time = NvFlowTimeStamp_diff(profileTimes[idx - 1u], profileTimes[idx], freq); \
if (time >= 0.001f * totalTime) \
{ \
logPrint(eNvFlowLogLevel_warning, "[%s] %f ms", profileNames[idx], 1000.f * time); \
} \
} \
logPrint(eNvFlowLogLevel_warning, "Total [%s] %f ms", #name, 1000.f * totalTime); \
}
#endif | 3,800 | C | 34.85849 | 105 | 0.737368 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowTextureVariable.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
struct NvFlowTextureVariable
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowTextureTransient* transientTexture = nullptr;
NvFlowUint64 transientFrame = ~0llu;
NvFlowFormat transientFormat = eNvFlowFormat_unknown;
NvFlowTexture* texture = nullptr;
NvFlowArray<NvFlowTextureAcquire*, 4u> acquires;
NvFlowTextureDesc hintTexDesc = {};
};
NV_FLOW_INLINE void NvFlowTextureVariable_init(NvFlowContextInterface* contextInterface, NvFlowTextureVariable* ptr)
{
ptr->contextInterface = contextInterface;
}
NV_FLOW_INLINE void NvFlowTextureVariable_flush(NvFlowContext* context, NvFlowTextureVariable* ptr)
{
// process acquire queue
NvFlowUint acquireWriteIdx = 0u;
for (NvFlowUint acquireReadIdx = 0u; acquireReadIdx < ptr->acquires.size; acquireReadIdx++)
{
NvFlowTexture* acquiredTexture = nullptr;
if (ptr->contextInterface->getAcquiredTexture(context, ptr->acquires[acquireReadIdx], &acquiredTexture))
{
if (ptr->texture)
{
ptr->contextInterface->destroyTexture(context, ptr->texture);
ptr->texture = nullptr;
}
ptr->texture = acquiredTexture;
}
else
{
ptr->acquires[acquireWriteIdx++] = ptr->acquires[acquireReadIdx];
}
}
ptr->acquires.size = acquireWriteIdx;
}
NV_FLOW_INLINE NvFlowTextureTransient* NvFlowTextureVariable_get(NvFlowContext* context, NvFlowTextureVariable* ptr, NvFlowFormat* pFormat)
{
if (ptr->transientFrame == ptr->contextInterface->getCurrentFrame(context))
{
if (pFormat)
{
*pFormat = ptr->transientFormat;
}
return ptr->transientTexture;
}
NvFlowTextureVariable_flush(context, ptr);
if (ptr->texture)
{
ptr->transientTexture = ptr->contextInterface->registerTextureAsTransient(context, ptr->texture);
ptr->transientFrame = ptr->contextInterface->getCurrentFrame(context);
}
else
{
ptr->transientTexture = nullptr;
ptr->transientFrame = ~0llu;
ptr->transientFormat = eNvFlowFormat_unknown;
}
if (pFormat)
{
*pFormat = ptr->transientFormat;
}
return ptr->transientTexture;
}
NV_FLOW_INLINE void NvFlowTextureVariable_set(NvFlowContext* context, NvFlowTextureVariable* ptr, NvFlowTextureTransient* transientTexture, NvFlowFormat transientFormat)
{
NvFlowTextureVariable_flush(context, ptr);
if (ptr->texture)
{
ptr->contextInterface->destroyTexture(context, ptr->texture);
ptr->texture = nullptr;
}
ptr->transientTexture = nullptr;
ptr->transientFrame = ~0llu;
ptr->transientFormat = eNvFlowFormat_unknown;
if (transientTexture)
{
ptr->transientTexture = transientTexture;
ptr->transientFrame = ptr->contextInterface->getCurrentFrame(context);
ptr->transientFormat = transientFormat;
// push acquire
ptr->acquires.pushBack(ptr->contextInterface->enqueueAcquireTexture(context, transientTexture));
}
}
NV_FLOW_INLINE void NvFlowTextureVariable_destroy(NvFlowContext* context, NvFlowTextureVariable* ptr)
{
NvFlowTextureVariable_set(context, ptr, nullptr, eNvFlowFormat_unknown);
} | 4,536 | C | 35.007936 | 169 | 0.768298 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowReadbackBuffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
#include "NvFlowArray.h"
struct NvFlowReadbackBufferInstance
{
NvFlowBuffer* buffer = nullptr;
NvFlowUint64 bufferSizeInBytes = 0llu;
NvFlowBool32 isActive = NV_FLOW_FALSE;
NvFlowUint64 completedFrame = ~0llu;
NvFlowUint64 completedGlobalFrame = ~0llu;
NvFlowUint64 version = ~0llu;
NvFlowUint64 validNumBytes = 0llu;
};
struct NvFlowReadbackBuffer
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowUint64 versionCounter = 0llu;
NvFlowArray<NvFlowReadbackBufferInstance, 8u> buffers;
NvFlowArray<NvFlowUint64, 8u> activeBuffers;
};
NV_FLOW_INLINE void NvFlowReadbackBuffer_init(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
ptr->contextInterface = contextInterface;
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_destroy(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
for (NvFlowUint idx = 0u; idx < ptr->buffers.size; idx++)
{
if (ptr->buffers[idx].buffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffers[idx].buffer);
ptr->buffers[idx].buffer = nullptr;
}
}
ptr->buffers.size = 0u;
}
struct NvFlowReadbackBufferCopyRange
{
NvFlowUint64 offset;
NvFlowUint64 numBytes;
};
NV_FLOW_INLINE void NvFlowReadbackBuffer_copyN(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint64 numBytes, NvFlowBufferTransient* src, const NvFlowReadbackBufferCopyRange* copyRanges, NvFlowUint copyRangeCount, NvFlowUint64* pOutVersion)
{
// find inactive buffer, create as needed
NvFlowUint64 bufferIdx = 0u;
for (; bufferIdx < ptr->buffers.size; bufferIdx++)
{
if (!ptr->buffers[bufferIdx].isActive)
{
break;
}
}
if (bufferIdx == ptr->buffers.size)
{
bufferIdx = ptr->buffers.allocateBack();
}
NvFlowReadbackBufferInstance* inst = &ptr->buffers[bufferIdx];
// resize buffer as needed
if (inst->buffer && inst->bufferSizeInBytes < numBytes)
{
ptr->contextInterface->destroyBuffer(context, inst->buffer);
inst->buffer = nullptr;
inst->bufferSizeInBytes = 0llu;
}
if (!inst->buffer)
{
NvFlowBufferDesc bufDesc = {};
bufDesc.usageFlags = eNvFlowBufferUsage_bufferCopyDst;
bufDesc.format = eNvFlowFormat_unknown;
bufDesc.structureStride = 0u;
bufDesc.sizeInBytes = 65536u;
while (bufDesc.sizeInBytes < numBytes)
{
bufDesc.sizeInBytes *= 2u;
}
inst->buffer = ptr->contextInterface->createBuffer(context, eNvFlowMemoryType_readback, &bufDesc);
inst->bufferSizeInBytes = bufDesc.sizeInBytes;
}
// set active state
ptr->versionCounter++;
inst->isActive = NV_FLOW_TRUE;
inst->completedFrame = ptr->contextInterface->getCurrentFrame(context);
inst->completedGlobalFrame = ptr->contextInterface->getCurrentGlobalFrame(context);
inst->version = ptr->versionCounter;
inst->validNumBytes = numBytes;
if (pOutVersion)
{
*pOutVersion = inst->version;
}
// copy
NvFlowBufferTransient* dst = ptr->contextInterface->registerBufferAsTransient(context, inst->buffer);
for (NvFlowUint copyRangeIdx = 0u; copyRangeIdx < copyRangeCount; copyRangeIdx++)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = copyRanges[copyRangeIdx].offset;
copyParams.dstOffset = copyRanges[copyRangeIdx].offset;
copyParams.numBytes = copyRanges[copyRangeIdx].numBytes;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = "ReadbackBufferCopy";
ptr->contextInterface->addPassCopyBuffer(context, &copyParams);
}
if (copyRangeCount == 0u)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = 0llu;
copyParams.dstOffset = 0llu;
copyParams.numBytes = 0llu;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = "ReadbackBufferCopy";
ptr->contextInterface->addPassCopyBuffer(context, &copyParams);
}
// push on active queue
ptr->activeBuffers.pushBack(bufferIdx);
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_copy(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint64 numBytes, NvFlowBufferTransient* src, NvFlowUint64* pOutVersion)
{
NvFlowReadbackBufferCopyRange copyRange = { 0llu, numBytes };
NvFlowReadbackBuffer_copyN(context, ptr, numBytes, src, &copyRange, 1u, pOutVersion);
}
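// Typical readback round trip (a sketch): enqueue a copy this frame, then on
// a later frame map the newest completed copy, consume it, and unmap.
//
//   NvFlowUint64 version = 0llu;
//   NvFlowReadbackBuffer_copy(context, &readback, numBytes, srcTransient, &version);
//   // ... one or more frames later ...
//   NvFlowUint64 mappedVersion = 0llu;
//   NvFlowUint64 mappedNumBytes = 0llu;
//   void* mapped = NvFlowReadbackBuffer_mapLatest(context, &readback, &mappedVersion, &mappedNumBytes);
//   if (mapped)
//   {
//       // consume mappedNumBytes bytes
//       NvFlowReadbackBuffer_unmapLatest(context, &readback);
//   }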
NV_FLOW_INLINE void NvFlowReadbackBuffer_flush(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
// flush queue
NvFlowUint completedCount = 0u;
NvFlowUint64 lastFenceCompleted = ptr->contextInterface->getLastFrameCompleted(context);
for (NvFlowUint activeBufferIdx = 0u; activeBufferIdx < ptr->activeBuffers.size; activeBufferIdx++)
{
if (ptr->buffers[ptr->activeBuffers[activeBufferIdx]].completedFrame > lastFenceCompleted)
{
break;
}
completedCount++;
}
NvFlowUint popCount = completedCount >= 2u ? completedCount - 1u : 0u;
if (popCount > 0u)
{
for (NvFlowUint activeBufferIdx = 0u; activeBufferIdx < popCount; activeBufferIdx++)
{
ptr->buffers[ptr->activeBuffers[activeBufferIdx]].isActive = NV_FLOW_FALSE;
}
// compact
for (NvFlowUint activeBufferIdx = popCount; activeBufferIdx < ptr->activeBuffers.size; activeBufferIdx++)
{
ptr->activeBuffers[activeBufferIdx - popCount] = ptr->activeBuffers[activeBufferIdx];
}
ptr->activeBuffers.size = ptr->activeBuffers.size - popCount;
}
}
NV_FLOW_INLINE NvFlowUint NvFlowReadbackBuffer_getActiveCount(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
return (NvFlowUint)ptr->activeBuffers.size;
}
NV_FLOW_INLINE NvFlowUint64 NvFlowReadbackBuffer_getCompletedGlobalFrame(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint activeIdx)
{
if (activeIdx < ptr->activeBuffers.size)
{
return ptr->buffers[ptr->activeBuffers[activeIdx]].completedGlobalFrame;
}
return ~0llu;
}
NV_FLOW_INLINE void* NvFlowReadbackBuffer_map(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint activeIdx, NvFlowUint64* pOutVersion, NvFlowUint64* pNumBytes)
{
if (activeIdx >= ptr->activeBuffers.size)
{
if (pOutVersion)
{
*pOutVersion = 0llu;
}
if (pNumBytes)
{
*pNumBytes = 0llu;
}
return nullptr;
}
NvFlowReadbackBufferInstance* inst = &ptr->buffers[ptr->activeBuffers[activeIdx]];
if (pOutVersion)
{
*pOutVersion = inst->version;
}
if (pNumBytes)
{
*pNumBytes = inst->validNumBytes;
}
return ptr->contextInterface->mapBuffer(context, inst->buffer);
}
NV_FLOW_INLINE void* NvFlowReadbackBuffer_mapLatest(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint64* pOutVersion, NvFlowUint64* pNumBytes)
{
NvFlowReadbackBuffer_flush(context, ptr);
NvFlowUint64 lastFenceCompleted = ptr->contextInterface->getLastFrameCompleted(context);
bool shouldMap = true;
if (ptr->activeBuffers.size > 0u)
{
if (ptr->buffers[ptr->activeBuffers[0u]].completedFrame > lastFenceCompleted)
{
shouldMap = false;
}
}
else
{
// no active readback buffers exist yet, so there is nothing to map
shouldMap = false;
}
if (!shouldMap)
{
if (pOutVersion)
{
*pOutVersion = 0llu;
}
if (pNumBytes)
{
*pNumBytes = 0llu;
}
return nullptr;
}
return NvFlowReadbackBuffer_map(context, ptr, 0u, pOutVersion, pNumBytes);
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_unmap(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint activeIdx)
{
if (activeIdx < ptr->activeBuffers.size)
{
NvFlowReadbackBufferInstance* inst = &ptr->buffers[ptr->activeBuffers[activeIdx]];
ptr->contextInterface->unmapBuffer(context, inst->buffer);
}
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_unmapLatest(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
NvFlowReadbackBuffer_unmap(context, ptr, 0u);
} | 9,093 | C | 31.248227 | 251 | 0.759155 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowDeepCopy.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowReflect.h"
#include "NvFlowArray.h"
#include <string.h>
struct NvFlowReflectDeepCopyInfo
{
const char* debugname;
NvFlowUint64 size;
};
struct NvFlowReflectDeepCopyCached;
struct NvFlowReflectDeepCopy
{
NvFlowArray<NvFlowArray<NvFlowUint8>, 16u> heaps;
NvFlowArray<NvFlowReflectDeepCopyInfo> infos;
NvFlowArray<const char*> pathStack;
NvFlowArrayPointer<NvFlowReflectDeepCopyCached*> cached;
};
struct NvFlowReflectDeepCopyCached
{
NvFlowUint64 luid = 0llu;
NvFlowArray<const char*> pathStack;
NvFlowUint64 version = 0llu;
NvFlowReflectDeepCopy* deepCopy = nullptr;
NvFlowUint8* deepCopyData;
NvFlowUint64 lastUse = 0llu;
};
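// Overview: NvFlowReflectDeepCopy flattens a reflected object graph into
// append-only heaps, patching every pointer/array member to reference the
// copied storage. NvFlowReflectDeepCopyCached adds a cache keyed by
// (luid, path stack, version) so versioned arrays are only re-copied when
// their version changes; entries unused for several updates are evicted by
// NvFlowReflectDeepCopy_cleanCache() below.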
NV_FLOW_INLINE NvFlowReflectDeepCopy* NvFlowReflectDeepCopy_create()
{
auto ptr = new NvFlowReflectDeepCopy();
return ptr;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopyCached_destroy(NvFlowReflectDeepCopyCached* ptr);
NV_FLOW_INLINE void NvFlowReflectDeepCopy_destroy(NvFlowReflectDeepCopy* ptr)
{
for (NvFlowUint64 cachedIdx = 0u; cachedIdx < ptr->cached.size; cachedIdx++)
{
NvFlowReflectDeepCopyCached_destroy(ptr->cached[cachedIdx]);
ptr->cached[cachedIdx] = nullptr;
}
ptr->cached.size = 0u;
delete ptr;
}
NV_FLOW_INLINE NvFlowReflectDeepCopyCached* NvFlowReflectDeepCopyCached_create(NvFlowUint64 luid, const char** pathStacks, NvFlowUint64 pathStackCount)
{
auto ptr = new NvFlowReflectDeepCopyCached();
ptr->luid = luid;
ptr->pathStack.size = 0u;
for (NvFlowUint64 pathStackIdx = 0u; pathStackIdx < pathStackCount; pathStackIdx++)
{
ptr->pathStack.pushBack(pathStacks[pathStackIdx]);
}
ptr->version = 0llu;
ptr->deepCopy = NvFlowReflectDeepCopy_create();
return ptr;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopyCached_destroy(NvFlowReflectDeepCopyCached* ptr)
{
NvFlowReflectDeepCopy_destroy(ptr->deepCopy);
ptr->deepCopy = nullptr;
delete ptr;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopy_newHeap(NvFlowReflectDeepCopy* ptr, NvFlowUint64 allocSize)
{
auto& currentHeap = ptr->heaps[ptr->heaps.allocateBack()];
NvFlowUint64 heapSize = 4096u; // default heap size
while (heapSize < allocSize)
{
heapSize *= 2u;
}
currentHeap.reserve(heapSize);
}
NV_FLOW_INLINE NvFlowUint64 NvFlowReflectDeepCopy_alignment(NvFlowUint64 size)
{
return 8u * ((size + 7u) / 8u);
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_allocate(NvFlowReflectDeepCopy* ptr, NvFlowUint64 size, const char* debugName)
{
NvFlowUint64 allocSize = NvFlowReflectDeepCopy_alignment(size);
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
if (currentHeap.size + allocSize <= currentHeap.capacity)
{
NvFlowUint8* ret = currentHeap.data + currentHeap.size;
ret[size - 1] = 0;
currentHeap.size += allocSize;
NvFlowReflectDeepCopyInfo info = { debugName, size };
ptr->infos.pushBack(info);
return ret;
}
}
NvFlowReflectDeepCopy_newHeap(ptr, allocSize);
return NvFlowReflectDeepCopy_allocate(ptr, size, debugName);
}
NV_FLOW_INLINE void NvFlowReflectDeepCopy_reset(NvFlowReflectDeepCopy* ptr)
{
for (NvFlowUint64 heapIdx = 0u; heapIdx < ptr->heaps.size; heapIdx++)
{
ptr->heaps[heapIdx].size = 0u;
}
ptr->heaps.size = 0u;
ptr->infos.size = 0u;
ptr->pathStack.size = 0u;
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_recursive(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, const NvFlowUint8* src, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowBool32 isPointerArray);
NV_FLOW_INLINE void NvFlowReflectDeepCopy_cleanCache(NvFlowReflectDeepCopy* ptr)
{
static const NvFlowUint64 cacheFreeThreshold = 8u;
NvFlowUint cachedIdx = 0u;
while (cachedIdx < ptr->cached.size)
{
ptr->cached[cachedIdx]->lastUse++;
if (ptr->cached[cachedIdx]->lastUse > cacheFreeThreshold)
{
NvFlowReflectDeepCopyCached_destroy(ptr->cached[cachedIdx]);
ptr->cached[cachedIdx] = nullptr;
ptr->cached.removeSwapPointerAtIndex(cachedIdx);
}
else
{
cachedIdx++;
}
}
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_cached(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, const NvFlowUint8* src, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowUint64 version, NvFlowBool32 isPointerArray)
{
NvFlowUint64 cachedIdx = 0u;
for (; cachedIdx < ptr->cached.size; cachedIdx++)
{
auto& cached = ptr->cached[cachedIdx];
if (cached->luid == luid && ptr->pathStack.size == cached->pathStack.size)
{
// check path stack
bool pathStackMatches = true;
for (NvFlowUint64 pathStackIdx = 0u; pathStackIdx < ptr->pathStack.size; pathStackIdx++)
{
if (NvFlowReflectStringCompare(ptr->pathStack[pathStackIdx], cached->pathStack[pathStackIdx]) != 0)
{
pathStackMatches = false;
break;
}
}
if (pathStackMatches)
{
break;
}
}
}
if (cachedIdx == ptr->cached.size)
{
cachedIdx = ptr->cached.allocateBack();
ptr->cached[cachedIdx] = NvFlowReflectDeepCopyCached_create(luid, ptr->pathStack.data, ptr->pathStack.size);
}
auto cached = ptr->cached[cachedIdx];
if (ptr->cached[cachedIdx]->version != version)
{
NvFlowReflectDeepCopy_reset(cached->deepCopy);
NvFlowReflectDeepCopy_cleanCache(cached->deepCopy);
cached->deepCopyData = NvFlowReflectDeepCopy_recursive(cached->deepCopy, luid, src, type, elementCount, isPointerArray);
cached->version = version;
}
cached->lastUse = 0u;
return cached->deepCopyData;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopy_structRecursive(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, NvFlowUint8* dst, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowBool32 isPointerArray)
{
if (type->dataType == eNvFlowType_struct)
{
for (NvFlowUint64 elementIdx = 0u; elementIdx < elementCount; elementIdx++)
{
NvFlowUint8* dstArray = dst + type->elementSize * elementIdx;
// attempt to find luid
for (NvFlowUint childIdx = 0u; childIdx < type->childReflectDataCount; childIdx++)
{
const auto& childReflectData = type->childReflectDatas[childIdx];
if (childReflectData.reflectMode == eNvFlowReflectMode_value &&
childReflectData.dataType->dataType == eNvFlowType_uint64)
{
if (NvFlowReflectStringCompare(childReflectData.name, "luid") == 0)
{
luid = *((NvFlowUint64*)(dst + childReflectData.dataOffset));
break;
}
}
}
// traverse all elements, searching for pointers/arrays
for (NvFlowUint64 childIdx = 0u; childIdx < type->childReflectDataCount; childIdx++)
{
const auto& childReflectData = type->childReflectDatas[childIdx];
ptr->pathStack.pushBack(childReflectData.name);
if (childReflectData.dataType->dataType == eNvFlowType_struct &&
(childReflectData.reflectMode == eNvFlowReflectMode_value ||
childReflectData.reflectMode == eNvFlowReflectMode_valueVersioned))
{
NvFlowReflectDeepCopy_structRecursive(ptr, luid, dstArray + childReflectData.dataOffset, childReflectData.dataType, 1u, NV_FLOW_FALSE);
}
if (childReflectData.reflectMode & eNvFlowReflectMode_pointerArray)
{
// get pointer to pointer
NvFlowUint8** childPtr = (NvFlowUint8**)(dstArray + childReflectData.dataOffset);
NvFlowUint64 childElementCount = 1u;
NvFlowUint64 childVersion = 0u;
if ((*childPtr))
{
if (childReflectData.reflectMode & eNvFlowReflectMode_array)
{
childElementCount = *(NvFlowUint64*)(dstArray + childReflectData.arraySizeOffset);
}
if (childReflectData.reflectMode & eNvFlowReflectMode_valueVersioned)
{
childVersion = *(NvFlowUint64*)(dstArray + childReflectData.versionOffset);
}
NvFlowBool32 childIsPointerArray = (childReflectData.reflectMode & eNvFlowReflectMode_pointerArray) == eNvFlowReflectMode_pointerArray;
// conditionally attempt cached array
if (luid > 0u && childElementCount > 0u && childVersion > 0u && childReflectData.dataType->dataType != eNvFlowType_struct)
{
*childPtr = NvFlowReflectDeepCopy_cached(ptr, luid, *childPtr, childReflectData.dataType, childElementCount, childVersion, childIsPointerArray);
}
else
{
// recurse
*childPtr = NvFlowReflectDeepCopy_recursive(ptr, luid, *childPtr, childReflectData.dataType, childElementCount, childIsPointerArray);
}
}
}
ptr->pathStack.size--;
}
}
}
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_recursive(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, const NvFlowUint8* src, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowBool32 isPointerArray)
{
const char* debugName = "root";
if (ptr->pathStack.size > 0u)
{
debugName = ptr->pathStack[ptr->pathStack.size - 1u];
}
if (isPointerArray)
{
NvFlowUint8* dstData = NvFlowReflectDeepCopy_allocate(ptr, sizeof(void*) * elementCount, debugName);
memcpy(dstData, src, sizeof(void*) * elementCount);
// for each non-null pointer, recurse
NvFlowUint8** dstArray = (NvFlowUint8**)dstData;
for (NvFlowUint64 elementIdx = 0u; elementIdx < elementCount; elementIdx++)
{
if (dstArray[elementIdx])
{
dstArray[elementIdx] = NvFlowReflectDeepCopy_recursive(ptr, luid, dstArray[elementIdx], type, 1u, NV_FLOW_FALSE);
}
}
return dstData;
}
NvFlowUint8* dstData = NvFlowReflectDeepCopy_allocate(ptr, type->elementSize * elementCount, debugName);
memcpy(dstData, src, type->elementSize * elementCount);
NvFlowReflectDeepCopy_structRecursive(ptr, luid, dstData, type, elementCount, isPointerArray);
return dstData;
}
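// Entry point below: performs a full deep copy of srcVoid as described by
// type, reusing cached copies for versioned arrays where possible. Since
// each update resets the heaps, the returned pointer should be treated as
// valid only until the next update or destroy on the same object.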
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_update(NvFlowReflectDeepCopy* ptr, const void* srcVoid, const NvFlowReflectDataType* type)
{
const NvFlowUint8* src = (const NvFlowUint8*)srcVoid;
NvFlowReflectDeepCopy_reset(ptr);
NvFlowReflectDeepCopy_cleanCache(ptr);
return NvFlowReflectDeepCopy_recursive(ptr, 0llu, src, type, 1u, NV_FLOW_FALSE);
} | 11,378 | C | 33.586626 | 240 | 0.745737 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowUploadBuffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
#include "NvFlowArray.h"
#define NV_FLOW_DISPATCH_BATCH_SIZE 32768u
//#define NV_FLOW_DISPATCH_BATCH_SIZE 256
struct NvFlowDispatchBatch
{
NvFlowBufferTransient* globalTransient = nullptr;
NvFlowUint blockIdxOffset = 0u;
NvFlowUint blockCount = 0u;
};
typedef NvFlowArray<NvFlowDispatchBatch, 8u> NvFlowDispatchBatches;
NV_FLOW_INLINE void NvFlowDispatchBatches_init_custom(NvFlowDispatchBatches* ptr, NvFlowUint totalBlockCount, NvFlowUint batchSize)
{
ptr->size = 0u;
for (NvFlowUint blockIdxOffset = 0u; blockIdxOffset < totalBlockCount; blockIdxOffset += batchSize)
{
NvFlowDispatchBatch batch = {};
batch.globalTransient = nullptr;
batch.blockIdxOffset = blockIdxOffset;
batch.blockCount = totalBlockCount - blockIdxOffset;
if (batch.blockCount > batchSize)
{
batch.blockCount = batchSize;
}
ptr->pushBack(batch);
}
}
NV_FLOW_INLINE void NvFlowDispatchBatches_init(NvFlowDispatchBatches* ptr, NvFlowUint totalBlockCount)
{
NvFlowDispatchBatches_init_custom(ptr, totalBlockCount, NV_FLOW_DISPATCH_BATCH_SIZE);
}
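// Example usage (a sketch): splitting a grid dispatch into batches of at
// most NV_FLOW_DISPATCH_BATCH_SIZE blocks.
//
//   NvFlowDispatchBatches batches;
//   NvFlowDispatchBatches_init(&batches, totalBlockCount);
//   for (NvFlowUint64 batchIdx = 0u; batchIdx < batches.size; batchIdx++)
//   {
//       // record one dispatch covering batches[batchIdx].blockCount blocks
//       // starting at batches[batchIdx].blockIdxOffset
//   }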
struct NvFlowBufferVersioning
{
NvFlowUint64 mappedIdx = ~0llu;
NvFlowUint64 frontIdx = ~0llu;
NvFlowArray<NvFlowUint64, 16u> recycleFenceValues;
};
NV_FLOW_INLINE NvFlowUint64 NvFlowBufferVersioning_map(NvFlowBufferVersioning* ptr, NvFlowUint64 lastFenceCompleted)
{
NvFlowUint64 index = ptr->frontIdx + 1u;
for (; index < ptr->recycleFenceValues.size; index++)
{
if (ptr->recycleFenceValues[index] <= lastFenceCompleted)
{
break;
}
}
if (index == ptr->recycleFenceValues.size)
{
for (index = 0; index < ptr->frontIdx; index++)
{
if (ptr->recycleFenceValues[index] <= lastFenceCompleted)
{
break;
}
}
}
if (!(index < ptr->recycleFenceValues.size && ptr->recycleFenceValues[index] <= lastFenceCompleted))
{
index = ptr->recycleFenceValues.allocateBack();
}
ptr->recycleFenceValues[index] = ~0llu;
ptr->mappedIdx = index;
return ptr->mappedIdx;
}
NV_FLOW_INLINE void NvFlowBufferVersioning_unmap(NvFlowBufferVersioning* ptr, NvFlowUint64 nextFenceValue)
{
if (ptr->frontIdx < ptr->recycleFenceValues.size)
{
ptr->recycleFenceValues[ptr->frontIdx] = nextFenceValue;
}
ptr->frontIdx = ptr->mappedIdx;
}
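// Versioning note: NvFlowBufferVersioning_map() scans for a slot whose
// recycle fence has completed, starting just past the current front and
// wrapping, and grows the pool only when every slot is still in flight.
// NvFlowBufferVersioning_unmap() stamps the retiring front slot with the
// fence value that must complete before that slot can be reused.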
struct NvFlowUploadBuffer
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowBuffer*(NV_FLOW_ABI* createBuffer)(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata) = nullptr;
void(NV_FLOW_ABI* addPassCopyBuffer)(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata) = nullptr;
void* userdata = nullptr;
NvFlowBufferUsageFlags flags = 0u;
NvFlowFormat format = eNvFlowFormat_unknown;
NvFlowUint structureStride = 0u;
NvFlowBufferVersioning versioning;
NvFlowArray<NvFlowBuffer*, 8u> buffers;
NvFlowArray<NvFlowUint64, 8u> bufferSizes;
NvFlowBuffer* deviceBuffer = nullptr;
NvFlowUint64 deviceNumBytes = 0llu;
};
NV_FLOW_INLINE void NvFlowUploadBuffer_init_custom(
NvFlowContextInterface* contextInterface,
NvFlowContext* context, NvFlowUploadBuffer* ptr,
NvFlowBufferUsageFlags flags, NvFlowFormat format, NvFlowUint structureStride,
NvFlowBuffer*(NV_FLOW_ABI* createBuffer)(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata),
void(NV_FLOW_ABI* addPassCopyBuffer)(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata),
void* userdata
)
{
ptr->contextInterface = contextInterface;
ptr->createBuffer = createBuffer;
ptr->addPassCopyBuffer = addPassCopyBuffer;
ptr->userdata = userdata;
ptr->flags = flags;
ptr->format = format;
ptr->structureStride = structureStride;
}
NV_FLOW_INLINE NvFlowBuffer* NvFlowUploadBuffer_createBuffer(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata)
{
NvFlowUploadBuffer* ptr = (NvFlowUploadBuffer*)userdata;
return ptr->contextInterface->createBuffer(context, memoryType, desc);
}
NV_FLOW_INLINE void NvFlowUploadBuffer_addPassCopyBuffer(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata)
{
NvFlowUploadBuffer* ptr = (NvFlowUploadBuffer*)userdata;
ptr->contextInterface->addPassCopyBuffer(context, params);
}
NV_FLOW_INLINE void NvFlowUploadBuffer_init(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowBufferUsageFlags flags, NvFlowFormat format, NvFlowUint structureStride)
{
NvFlowUploadBuffer_init_custom(contextInterface, context, ptr, flags, format, structureStride, NvFlowUploadBuffer_createBuffer, NvFlowUploadBuffer_addPassCopyBuffer, ptr);
}
NV_FLOW_INLINE void NvFlowUploadBuffer_destroy(NvFlowContext* context, NvFlowUploadBuffer* ptr)
{
for (NvFlowUint64 idx = 0u; idx < ptr->buffers.size; idx++)
{
if (ptr->buffers[idx])
{
ptr->contextInterface->destroyBuffer(context, ptr->buffers[idx]);
ptr->buffers[idx] = nullptr;
}
}
ptr->buffers.size = 0u;
ptr->bufferSizes.size = 0u;
if (ptr->deviceBuffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->deviceBuffer);
ptr->deviceBuffer = nullptr;
}
}
NV_FLOW_INLINE NvFlowUint64 NvFlowUploadBuffer_computeBufferSize(NvFlowUint64 requested)
{
NvFlowUint64 bufferSize = 65536u;
while (bufferSize < requested)
{
bufferSize *= 2u;
}
return bufferSize;
}
NV_FLOW_INLINE void* NvFlowUploadBuffer_map(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUint64 numBytes)
{
NvFlowUint64 instanceIdx = NvFlowBufferVersioning_map(&ptr->versioning, ptr->contextInterface->getLastFrameCompleted(context));
while (instanceIdx >= ptr->buffers.size)
{
ptr->buffers.pushBack(nullptr);
ptr->bufferSizes.pushBack(0llu);
}
if (ptr->buffers[instanceIdx] && ptr->bufferSizes[instanceIdx] < numBytes)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffers[instanceIdx]);
ptr->buffers[instanceIdx] = nullptr;
}
if (!ptr->buffers[instanceIdx])
{
NvFlowBufferDesc bufDesc = {};
bufDesc.format = ptr->format;
bufDesc.usageFlags = ptr->flags;
bufDesc.structureStride = ptr->structureStride;
bufDesc.sizeInBytes = NvFlowUploadBuffer_computeBufferSize(numBytes);
ptr->bufferSizes[instanceIdx] = bufDesc.sizeInBytes;
ptr->buffers[instanceIdx] = ptr->contextInterface->createBuffer(context, eNvFlowMemoryType_upload, &bufDesc);
}
return ptr->contextInterface->mapBuffer(context, ptr->buffers[instanceIdx]);
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_unmap(NvFlowContext* context, NvFlowUploadBuffer* ptr)
{
ptr->contextInterface->unmapBuffer(context, ptr->buffers[ptr->versioning.mappedIdx]);
NvFlowBufferVersioning_unmap(&ptr->versioning, ptr->contextInterface->getCurrentFrame(context));
return ptr->contextInterface->registerBufferAsTransient(context, ptr->buffers[ptr->versioning.frontIdx]);
}
struct NvFlowUploadBufferCopyRange
{
NvFlowUint64 offset;
NvFlowUint64 numBytes;
};
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_getDevice(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUint64 numBytes)
{
NvFlowUint64 srcNumBytes = NvFlowUploadBuffer_computeBufferSize(numBytes);
if (ptr->deviceBuffer && ptr->deviceNumBytes < srcNumBytes)
{
ptr->contextInterface->destroyBuffer(context, ptr->deviceBuffer);
ptr->deviceBuffer = nullptr;
ptr->deviceNumBytes = 0llu;
}
if (!ptr->deviceBuffer)
{
NvFlowBufferDesc bufDesc = {};
bufDesc.format = ptr->format;
bufDesc.usageFlags = ptr->flags | eNvFlowBufferUsage_bufferCopyDst;
bufDesc.structureStride = ptr->structureStride;
bufDesc.sizeInBytes = srcNumBytes;
ptr->deviceBuffer = ptr->createBuffer(context, eNvFlowMemoryType_device, &bufDesc, ptr->userdata);
ptr->deviceNumBytes = srcNumBytes;
}
return ptr->contextInterface->registerBufferAsTransient(context, ptr->deviceBuffer);
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_unmapDeviceN(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUploadBufferCopyRange* copyRanges, NvFlowUint64 copyRangeCount, const char* debugName)
{
NvFlowBufferTransient* src = NvFlowUploadBuffer_unmap(context, ptr);
NvFlowUint64 srcNumBytes = ptr->bufferSizes[ptr->versioning.frontIdx];
NvFlowBufferTransient* dst = NvFlowUploadBuffer_getDevice(context, ptr, srcNumBytes);
NvFlowUint activeCopyCount = 0u;
for (NvFlowUint64 copyRangeIdx = 0u; copyRangeIdx < copyRangeCount; copyRangeIdx++)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = copyRanges[copyRangeIdx].offset;
copyParams.dstOffset = copyRanges[copyRangeIdx].offset;
copyParams.numBytes = copyRanges[copyRangeIdx].numBytes;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = debugName ? debugName : "UploadBufferUnmapDevice";
if (copyParams.numBytes > 0u)
{
ptr->addPassCopyBuffer(context, &copyParams, ptr->userdata);
activeCopyCount++;
}
}
// this ensures proper barriers
if (activeCopyCount == 0u)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = 0llu;
copyParams.dstOffset = 0llu;
copyParams.numBytes = 0llu;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = debugName ? debugName : "UploadBufferUnmapDevice";
ptr->addPassCopyBuffer(context, &copyParams, ptr->userdata);
}
return dst;
}
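// Typical upload pattern (a sketch): map, fill, then unmap to a device-local
// copy usable by shaders this frame. NvFlowUploadBuffer_unmapDevice() below
// wraps the single-range case.
//
//   void* mapped = NvFlowUploadBuffer_map(context, &upload, numBytes);
//   // ... memcpy numBytes of data into mapped ...
//   NvFlowBufferTransient* deviceBuf =
//       NvFlowUploadBuffer_unmapDevice(context, &upload, 0llu, numBytes, "MyUpload");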
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_unmapDevice(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUint64 offset, NvFlowUint64 numBytes, const char* debugName)
{
NvFlowUploadBufferCopyRange copyRange = { offset, numBytes };
return NvFlowUploadBuffer_unmapDeviceN(context, ptr, &copyRange, 1u, debugName);
} | 11,167 | C | 34.680511 | 213 | 0.776305 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowPreprocessor.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowString.h"
struct NvFlowPreprocessor;
struct NvFlowPreprocessorRange
{
NvFlowUint64 begin;
NvFlowUint64 end;
};
enum NvFlowPreprocessorTokenType
{
eNvFlowPreprocessorTokenType_unknown = 0, // unclassified
eNvFlowPreprocessorTokenType_whitespace, //
eNvFlowPreprocessorTokenType_newline, // \n
eNvFlowPreprocessorTokenType_comment, // // comment
eNvFlowPreprocessorTokenType_name, // alpha_1234
eNvFlowPreprocessorTokenType_number, // 1234
eNvFlowPreprocessorTokenType_string, // "string"
eNvFlowPreprocessorTokenType_char, // 's'
eNvFlowPreprocessorTokenType_pound, // #
eNvFlowPreprocessorTokenType_comma, // ,
eNvFlowPreprocessorTokenType_period, // .
eNvFlowPreprocessorTokenType_semicolon, // ;
eNvFlowPreprocessorTokenType_colon, // :
eNvFlowPreprocessorTokenType_equals, // =
eNvFlowPreprocessorTokenType_asterisk, // *
eNvFlowPreprocessorTokenType_leftParenthesis, // (
eNvFlowPreprocessorTokenType_rightParenthesis, // )
eNvFlowPreprocessorTokenType_leftBracket, // [
eNvFlowPreprocessorTokenType_rightBracket, // ]
eNvFlowPreprocessorTokenType_leftCurlyBrace, // {
eNvFlowPreprocessorTokenType_rightCurlyBrace, // }
eNvFlowPreprocessorTokenType_lessThan, // <
eNvFlowPreprocessorTokenType_greaterThan, // >
eNvFlowPreprocessorTokenType_anyWhitespace, // For delimiter usage, aligns with NvFlowPreprocessorTokenIsWhitespace()
eNvFlowPreprocessorTokenType_count,
eNvFlowPreprocessorTokenType_maxEnum = 0x7FFFFFFF
};
struct NvFlowPreprocessorToken
{
NvFlowPreprocessorTokenType type;
const char* str;
};
NV_FLOW_INLINE NvFlowBool32 NvFlowPreprocessorTokenIsWhitespace(const NvFlowPreprocessorToken token)
{
return token.type == eNvFlowPreprocessorTokenType_whitespace ||
token.type == eNvFlowPreprocessorTokenType_newline ||
token.type == eNvFlowPreprocessorTokenType_comment;
}
NV_FLOW_INLINE void NvFlowPreprocessorSkipWhitespaceTokens(NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens)
{
while ((*pTokenIdx) < numTokens && NvFlowPreprocessorTokenIsWhitespace(tokens[(*pTokenIdx)]))
{
(*pTokenIdx)++;
}
}
enum NvFlowPreprocessorType
{
eNvFlowPreprocessorType_constant = 0, // name
eNvFlowPreprocessorType_statement = 1, // name arg0 arg1;
eNvFlowPreprocessorType_function = 2, // name(arg0, arg1, arg2)
eNvFlowPreprocessorType_index = 3,		// name[arg0] or name[arg0] = arg1 arg2 arg3;
eNvFlowPreprocessorType_attribute = 4, // [name(arg0, arg1, arg2)]
eNvFlowPreprocessorType_line = 5, // #name arg0 \n
eNvFlowPreprocessorType_body = 6, // name <arg0, arg1> arg2 arg3(arg4, arg5) { arg6; arg7; }
eNvFlowPreprocessorType_templateInstance = 7, // name<arg0, arg1>
eNvFlowPreprocessorType_statementComma = 8, // "name arg0," or "name arg0)"
eNvFlowPreprocessorType_maxEnum = 0x7FFFFFFF
};
struct NvFlowPreprocessorConstant
{
const char* name;
const char* value;
};
struct NvFlowPreprocessorFunction
{
const char* name;
NvFlowPreprocessorType type;
void* userdata;
char*(*substitute)(NvFlowPreprocessor* ptr, void* userdata, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens);
NvFlowBool32 allowRecursion;
};
enum NvFlowPreprocessorMode
{
eNvFlowPreprocessorMode_default = 0,		// Input string evaluated, then substitution results evaluated; no recursion
eNvFlowPreprocessorMode_singlePass = 1,		// Input string evaluated once, substitution results not re-evaluated
eNvFlowPreprocessorMode_disable_passthrough = 2,		// Do not pass unsubstituted input through to the output
eNvFlowPreprocessorMode_maxEnum = 0x7FFFFFFF
};
NvFlowPreprocessor* NvFlowPreprocessorCreate(NvFlowStringPool* pool);
void NvFlowPreprocessorDestroy(NvFlowPreprocessor* ptr);
void NvFlowPreprocessorReset(NvFlowPreprocessor* ptr);
void NvFlowPreprocessorSetMode(NvFlowPreprocessor* ptr, NvFlowPreprocessorMode mode);
NvFlowPreprocessorMode NvFlowPreprocessorGetMode(NvFlowPreprocessor* ptr);
NvFlowStringPool* NvFlowPreprocessorStringPool(NvFlowPreprocessor* ptr);
void NvFlowPreprocessorAddConstants(NvFlowPreprocessor* ptr, NvFlowUint64 numConstants, const NvFlowPreprocessorConstant* constants);
void NvFlowPreprocessorAddFunctions(NvFlowPreprocessor* ptr, NvFlowUint64 numFunctions, const NvFlowPreprocessorFunction* functions);
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters);
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter);
const char* NvFlowPreprocessorExtractDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter);
const char* NvFlowPreprocessorExtractDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters);
const char* NvFlowPreprocessorExtractDelimitedPreserve(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter);
const char* NvFlowPreprocessorExtractIfType(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType type);
const char* NvFlowPreprocessorConcatTokens(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numTokens);
NvFlowBool32 NvFlowPreprocessorFindKeyInSource(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* keyTokens, NvFlowUint64 keyTokenCount, const NvFlowPreprocessorToken* sourceTokens, NvFlowUint64 sourceTokenCount, NvFlowUint64* pSourceIndex);
char* NvFlowPreprocessorExecute(NvFlowPreprocessor* ptr, const char* input);
void NvFlowPreprocessorTokenize(NvFlowPreprocessor* ptr, const char* input, NvFlowUint64* pTotalTokens, NvFlowPreprocessorToken** pTokens);
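// Usage sketch (illustrative; kSource and BLOCK_DIM are placeholder names):
//
//     NvFlowStringPool* pool = NvFlowStringPoolCreate();
//     NvFlowPreprocessor* preproc = NvFlowPreprocessorCreate(pool);
//     NvFlowPreprocessorConstant constants[1] = { { "BLOCK_DIM", "8" } };
//     NvFlowPreprocessorAddConstants(preproc, 1u, constants);
//     const char* kSource = "dispatch(BLOCK_DIM, BLOCK_DIM, BLOCK_DIM);";
//     char* result = NvFlowPreprocessorExecute(preproc, kSource);
//     // result is allocated from pool and stays valid until the pool is reset or destroyed
//     NvFlowPreprocessorDestroy(preproc);
//     NvFlowStringPoolDestroy(pool);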
enum NvFlowPreprocessorGlobalType
{
eNvFlowPreprocessorGlobalType_unknown = 0, // Unknown global type
eNvFlowPreprocessorGlobalType_statement = 1, // ConstantBuffer<Params> gParams;
eNvFlowPreprocessorGlobalType_function = 2, // returnType functionName(arg1, arg2, arg3) { [functionbody] }
eNvFlowPreprocessorGlobalType_attribute = 3, // [name(arg0, arg1, arg2)]
eNvFlowPreprocessorGlobalType_line = 4, // #define CONSTANT \n
eNvFlowPreprocessorGlobalType_maxEnum = 0x7FFFFFFF
};
char* NvFlowPreprocessorExecuteGlobal(NvFlowPreprocessor* ptr, const char* input, void* userdata, char*(*substitute)(NvFlowPreprocessor* ptr, void* userdata, NvFlowPreprocessorGlobalType globalType, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens)); | 8,509 | C | 45.758242 | 263 | 0.811141 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowDatabase.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowReflect.h"
#include "NvFlowArray.h"
#include "NvFlowDeepCopy.h"
#include <string.h>
struct NvFlowDatabaseContext;
struct NvFlowDatabasePrim;
struct NvFlowDatabaseAttr;
struct NvFlowDatabaseInterface
{
NvFlowDatabasePrim*(NV_FLOW_ABI *createPrim)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowDatabasePrim* parent,
const char* displayTypename,
const char* path,
const char* name);
void(NV_FLOW_ABI* updatePrim)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowUint64 minActiveVersion,
NvFlowDatabasePrim* prim);
void(NV_FLOW_ABI* markDestroyedPrim)(NvFlowDatabaseContext* context, NvFlowDatabasePrim* prim);
void(NV_FLOW_ABI* destroyPrim)(NvFlowDatabaseContext* context, NvFlowDatabasePrim* prim);
NvFlowDatabaseAttr*(NV_FLOW_ABI* createAttr)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowDatabasePrim* prim,
const NvFlowReflectData* reflectData,
NvFlowUint8* mappedData);
void(NV_FLOW_ABI* updateAttr)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowUint64 minActiveVersion,
NvFlowDatabaseAttr* attr,
const NvFlowReflectData* reflectData,
NvFlowUint8* mappedData);
void(NV_FLOW_ABI* markDestroyedAttr)(NvFlowDatabaseContext* context, NvFlowDatabaseAttr* attr);
void(NV_FLOW_ABI* destroyAttr)(NvFlowDatabaseContext* context, NvFlowDatabaseAttr* attr);
};
struct NvFlowDatabaseString
{
NvFlowArray<char> data;
void append(const char* str)
{
if (data.size > 0u)
{
data.size--;
}
if (str)
{
NvFlowUint64 idx = 0u;
while (str[idx])
{
data.pushBack(str[idx]);
idx++;
}
data.pushBack('\0');
}
}
void set(const char* str)
{
data.size = 0u;
append(str);
}
const char* get()
{
return data.data;
}
};
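// Example (illustrative): append() drops the previous null terminator before copying,
// so repeated calls concatenate into one null-terminated string:
//
//     NvFlowDatabaseString s;
//     s.set("/root");
//     s.append("/child");    // s.get() now returns "/root/child"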
struct NvFlowDatabaseInstance
{
struct Prim
{
NvFlowDatabasePrim* prim;
NvFlowArrayPointer<Prim*> childPrims;
NvFlowArray<NvFlowDatabaseAttr*> attrs;
NvFlowDatabaseString path;
};
struct Data
{
NvFlowArray<NvFlowUint8> data;
NvFlowUint64 version;
NvFlowReflectDeepCopy* deepCopy = nullptr;
~Data()
{
if (deepCopy)
{
NvFlowReflectDeepCopy_destroy(deepCopy);
deepCopy = nullptr;
}
}
};
const NvFlowReflectDataType* dataType = nullptr;
NvFlowDatabaseString displayTypename;
NvFlowDatabaseString name;
NvFlowUint64 luid = 0llu;
NvFlowUint64 luidByteOffset = ~0llu;
Prim rootPrim;
NvFlowRingBufferPointer<Data*> datas;
NvFlowBool32 markedForDestroy = NV_FLOW_FALSE;
NvFlowArray<NvFlowUint8> defaultData;
struct StackState
{
NvFlowUint64 childIdx;
const NvFlowReflectDataType* reflectDataType;
NvFlowUint8* data;
Prim* prim;
};
NvFlowUint8* mapDataVersionAndType(NvFlowUint64 version, const NvFlowReflectDataType** pReflectDataType)
{
for (NvFlowUint64 idx = datas.activeCount() - 1u; idx < datas.activeCount(); idx--)
{
if (datas[idx]->version == version)
{
if (pReflectDataType)
{
*pReflectDataType = dataType;
}
return datas[idx]->data.data;
}
}
NvFlowDatabaseInstance::Data* data = datas.allocateBackPointer();
data->version = version;
data->data.reserve(dataType->elementSize);
data->data.size = dataType->elementSize;
if (datas.activeCount() >= 2u)
{
NvFlowDatabaseInstance::Data* oldData = datas[datas.activeCount() - 2u];
memcpy(data->data.data, oldData->data.data, data->data.size);
}
else if (dataType->defaultValue)
{
memcpy(data->data.data, dataType->defaultValue, data->data.size);
}
else
{
memset(data->data.data, 0, data->data.size);
}
// enforce luid
if (luidByteOffset < dataType->elementSize)
{
*((NvFlowUint64*)(data->data.data + luidByteOffset)) = luid;
}
if (pReflectDataType)
{
*pReflectDataType = dataType;
}
return data->data.data;
}
NvFlowUint8* mapDataVersion(NvFlowUint64 version)
{
const NvFlowReflectDataType* reflectDataType = nullptr;
return mapDataVersionAndType(version, &reflectDataType);
}
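// Illustrative behavior of the versioned mapping above: the first map of a new version
// allocates a copy seeded from the most recent version (or the reflect default / zeros),
// while repeated maps of the same version return the same storage:
//
//     instance.mapDataVersion(5u);    // allocates version 5
//     instance.mapDataVersion(5u);    // returns the same allocation
//     instance.mapDataVersion(6u);    // allocates version 6 as a copy of version 5
//
// Older versions are popped in update() once minActiveVersion moves past them.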
void deepCopyDataVersion(NvFlowUint64 version)
{
Data* data = nullptr;
for (NvFlowUint64 idx = datas.activeCount() - 1u; idx < datas.activeCount(); idx--)
{
if (datas[idx]->version == version)
{
data = datas[idx];
}
}
if (data)
{
if (!data->deepCopy)
{
data->deepCopy = NvFlowReflectDeepCopy_create();
}
NvFlowUint8* copyData = NvFlowReflectDeepCopy_update(data->deepCopy, data->data.data, dataType);
// copy the deep-copied root struct back over the mapped data so nested pointers reference stable storage
memcpy(data->data.data, copyData, data->data.size);
}
}
NvFlowUint8* mapDataVersionReadOnly(NvFlowUint64 version)
{
// TODO: Simply update version to avoid copy
return mapDataVersion(version);
}
template<const NvFlowDatabaseInterface* iface>
void init(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 luidIn, const NvFlowReflectDataType* dataTypeIn, const char* displayTypenameIn, const char* pathIn, const char* nameIn)
{
dataType = dataTypeIn;
displayTypename.set(displayTypenameIn);
name.set(nameIn);
luid = luidIn;
luidByteOffset = ~0llu;
// try to find luid offset in root
for (NvFlowUint64 childIdx = 0u; childIdx < dataType->childReflectDataCount; childIdx++)
{
if (strcmp(dataType->childReflectDatas[childIdx].name, "luid") == 0)
{
luidByteOffset = dataType->childReflectDatas[childIdx].dataOffset;
break;
}
}
rootPrim.path.set(pathIn);
rootPrim.prim = nullptr;
if (iface->createPrim)
{
rootPrim.prim = iface->createPrim(
context,
version,
nullptr,
displayTypename.get(),
rootPrim.path.get(),
name.get());
}
NvFlowUint8* mappedData = mapDataVersion(version);
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims.allocateBackPointer();
state.prim->attrs.pushBack(nullptr);
// form path
childPrim->path.set(state.prim->path.get());
childPrim->path.append("/");
childPrim->path.append(childReflectData->name);
childPrim->prim = nullptr;
if (iface->createPrim)
{
childPrim->prim = iface->createPrim(
context,
version,
state.prim->prim,
NvFlowReflectTrimPrefix(childReflectData->dataType->structTypename),
childPrim->path.get(),
childReflectData->name);
}
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
NvFlowDatabaseAttr* attr = nullptr;
if (iface->createAttr)
{
attr = iface->createAttr(context, version, state.prim->prim, childReflectData, state.data);
}
state.prim->attrs.pushBack(attr);
state.prim->childPrims.pushBack(nullptr);
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
state = stateStack.back();
stateStack.popBack();
}
}
}
void process(NvFlowUint64 version, NvFlowReflectProcess_t processReflect, void* userdata)
{
NvFlowUint8* mappedData = mapDataVersion(version);
processReflect(mappedData, dataType, userdata);
}
template<const NvFlowDatabaseInterface* iface>
void update(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
if (!markedForDestroy)
{
NvFlowUint8* mappedData = mapDataVersion(version);
if (rootPrim.prim)
{
iface->updatePrim(context, version, minActiveVersion, rootPrim.prim);
}
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims[state.childIdx];
if (childPrim->prim)
{
iface->updatePrim(context, version, minActiveVersion, childPrim->prim);
}
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto attr = state.prim->attrs[state.childIdx];
if (attr)
{
iface->updateAttr(context, version, minActiveVersion, attr, childReflectData, state.data);
}
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
state = stateStack.back();
stateStack.popBack();
}
}
}
NvFlowUint freeThreshold = markedForDestroy ? 0u : 1u;
while (datas.activeCount() > freeThreshold && datas.front()->version < minActiveVersion)
{
datas.popFront();
}
}
template<const NvFlowDatabaseInterface* iface>
void markForDestroy(NvFlowDatabaseContext* context)
{
NvFlowUint8* mappedData = nullptr;
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims[state.childIdx];
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
auto attr = state.prim->attrs[state.childIdx];
if (attr)
{
iface->markDestroyedAttr(context, attr);
}
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
if (state.prim->prim)
{
iface->markDestroyedPrim(context, state.prim->prim);
}
state = stateStack.back();
stateStack.popBack();
}
}
if (rootPrim.prim)
{
iface->markDestroyedPrim(context, rootPrim.prim);
}
markedForDestroy = NV_FLOW_TRUE;
}
template<const NvFlowDatabaseInterface* iface>
void destroy(NvFlowDatabaseContext* context)
{
NvFlowUint8* mappedData = nullptr;
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims[state.childIdx];
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
auto attr = state.prim->attrs[state.childIdx];
if (attr)
{
iface->destroyAttr(context, attr);
state.prim->attrs[state.childIdx] = nullptr;
}
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
if (state.prim->prim)
{
iface->destroyPrim(context, state.prim->prim);
state.prim->prim = nullptr;
}
state = stateStack.back();
stateStack.popBack();
}
}
if (rootPrim.prim)
{
iface->destroyPrim(context, rootPrim.prim);
rootPrim.prim = nullptr;
}
}
};
struct NvFlowDatabaseType
{
const NvFlowReflectDataType* dataType = nullptr;
NvFlowDatabaseString displayTypeName;
NvFlowArrayPointer<NvFlowDatabaseInstance*> instances;
struct TypeSnapshot
{
NvFlowDatabaseTypeSnapshot snapshot;
NvFlowArray<NvFlowUint8*> instanceDatas;
};
NvFlowRingBufferPointer<TypeSnapshot*> snapshots;
void init(const NvFlowReflectDataType* dataTypeIn, const char* displayTypeNameIn)
{
dataType = dataTypeIn;
displayTypeName.set(displayTypeNameIn);
}
template<const NvFlowDatabaseInterface* iface>
void update(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
instances[instanceIdx]->update<iface>(context, version, minActiveVersion);
}
// release instances
{
NvFlowUint64 keepCount = 0llu;
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
if (instances[instanceIdx]->markedForDestroy && instances[instanceIdx]->datas.activeCount() == 0u)
{
instances[instanceIdx]->destroy<iface>(context);
instances.deletePointerAtIndex(instanceIdx);
}
else
{
instances.swapPointers(keepCount, instanceIdx);
keepCount++;
}
}
instances.size = keepCount;
}
// release snapshots
while (snapshots.activeCount() > 0u && snapshots.front()->snapshot.version < minActiveVersion)
{
snapshots.popFront();
}
}
template<const NvFlowDatabaseInterface* iface>
void destroy(NvFlowDatabaseContext* context)
{
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
instances[instanceIdx]->destroy<iface>(context);
}
instances.deletePointers();
}
void getSnapshot(NvFlowDatabaseTypeSnapshot* snapshot, NvFlowUint64 version)
{
auto ptr = snapshots.allocateBackPointer();
ptr->snapshot.version = version;
ptr->snapshot.dataType = dataType;
ptr->instanceDatas.size = 0u;
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
if (!instances[instanceIdx]->markedForDestroy)
{
NvFlowUint8* data = instances[instanceIdx]->mapDataVersionReadOnly(version);
ptr->instanceDatas.pushBack(data);
}
}
ptr->snapshot.instanceDatas = ptr->instanceDatas.data;
ptr->snapshot.instanceCount = ptr->instanceDatas.size;
if (snapshot)
{
*snapshot = ptr->snapshot;
}
}
};
struct NvFlowDatabase
{
NvFlowArrayPointer<NvFlowDatabaseType*> types;
struct Snapshot
{
NvFlowDatabaseSnapshot snapshot;
NvFlowArray<NvFlowDatabaseTypeSnapshot> typeSnapshots;
};
NvFlowRingBufferPointer<Snapshot*> snapshots;
NvFlowUint64 luidCounter = 0llu;
NvFlowDatabaseType* createType(const NvFlowReflectDataType* dataTypeIn, const char* displayTypeName)
{
auto ptr = types.allocateBackPointer();
ptr->init(dataTypeIn, displayTypeName);
return ptr;
}
template<const NvFlowDatabaseInterface* iface>
NvFlowDatabaseInstance* createInstance(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowDatabaseType* type, const char* pathIn, const char* name)
{
auto ptr = type->instances.allocateBackPointer();
luidCounter++;
ptr->init<iface>(context, version, luidCounter, type->dataType, type->displayTypeName.get(), pathIn, name);
return ptr;
}
template<const NvFlowDatabaseInterface* iface>
void update(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
types[typeIdx]->update<iface>(context, version, minActiveVersion);
}
// release snapshots
while (snapshots.activeCount() > 0u && snapshots.front()->snapshot.version < minActiveVersion)
{
snapshots.popFront();
}
}
template<const NvFlowDatabaseInterface* iface>
void markInstanceForDestroy(NvFlowDatabaseContext* context, NvFlowDatabaseInstance* ptr)
{
ptr->markForDestroy<iface>(context);
}
template<const NvFlowDatabaseInterface* iface>
void markAllInstancesForDestroy(NvFlowDatabaseContext* context)
{
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseType* type = types[typeIdx];
for (NvFlowUint instanceIdx = 0u; instanceIdx < type->instances.size; instanceIdx++)
{
type->instances[instanceIdx]->markForDestroy<iface>(context);
}
}
}
template<const NvFlowDatabaseInterface* iface>
NvFlowBool32 snapshotPending(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
update<iface>(context, version, minActiveVersion);
NvFlowBool32 anySnapshotPending = NV_FLOW_FALSE;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
if (types[typeIdx]->snapshots.activeCount() > 0u)
{
anySnapshotPending = NV_FLOW_TRUE;
}
}
if (snapshots.activeCount() > 0u)
{
anySnapshotPending = NV_FLOW_TRUE;
}
return anySnapshotPending;
}
template<const NvFlowDatabaseInterface* iface>
void destroy(NvFlowDatabaseContext* context)
{
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
types[typeIdx]->destroy<iface>(context);
}
}
void getSnapshot(NvFlowDatabaseSnapshot* snapshot, NvFlowUint64 version)
{
auto ptr = snapshots.allocateBackPointer();
ptr->snapshot.version = version;
ptr->typeSnapshots.size = 0u;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseTypeSnapshot typeSnapshot = {};
types[typeIdx]->getSnapshot(&typeSnapshot, version);
ptr->typeSnapshots.pushBack(typeSnapshot);
}
ptr->snapshot.typeSnapshots = ptr->typeSnapshots.data;
ptr->snapshot.typeSnapshotCount = ptr->typeSnapshots.size;
if (snapshot)
{
*snapshot = ptr->snapshot;
}
}
void enumerateActiveInstances(NvFlowDatabaseInstance** pInstances, NvFlowUint64* pInstanceCount)
{
if (!pInstances && pInstanceCount)
{
NvFlowUint64 activeCount = 0llu;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseType* type = types[typeIdx];
for (NvFlowUint instanceIdx = 0u; instanceIdx < type->instances.size; instanceIdx++)
{
if (!type->instances[instanceIdx]->markedForDestroy)
{
activeCount++;
}
}
}
*pInstanceCount = activeCount;
}
if (pInstances && pInstanceCount)
{
NvFlowUint64 activeCount = 0llu;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseType* type = types[typeIdx];
for (NvFlowUint instanceIdx = 0u; instanceIdx < type->instances.size; instanceIdx++)
{
if (!type->instances[instanceIdx]->markedForDestroy)
{
if (activeCount < (*pInstanceCount))
{
pInstances[activeCount] = type->instances[instanceIdx];
activeCount++;
}
}
}
}
*pInstanceCount = activeCount;
}
}
};
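// Usage sketch (illustrative; MyParams_NvFlowReflectDataType is a hypothetical reflect
// type, and iface must be a statically allocated NvFlowDatabaseInterface implementation):
//
//     static const NvFlowDatabaseInterface iface = { /* create/update/destroy callbacks */ };
//     NvFlowDatabase db;
//     NvFlowDatabaseType* type = db.createType(&MyParams_NvFlowReflectDataType, "MyParams");
//     NvFlowDatabaseInstance* inst = db.createInstance<&iface>(context, version, type, "/World/inst0", "inst0");
//     db.update<&iface>(context, version, minActiveVersion);
//     NvFlowDatabaseSnapshot snapshot = {};
//     db.getSnapshot(&snapshot, version);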
| 21,017 | C | 27.326146 | 197 | 0.7178 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowLocationHashTable.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowTypes.h"
#include "NvFlowArray.h"
struct NvFlowLocationHashTableRange
{
NvFlowUint64 beginIdx;
NvFlowUint64 endIdx;
};
struct NvFlowLocationHashTable
{
NvFlowUint tableDimBits = 0u;
NvFlowUint tableDimLessOne = 0u;
NvFlowUint tableDim3 = 1u;
NvFlowArray<NvFlowLocationHashTableRange> ranges;
NvFlowArray<NvFlowUint64> nextIndices;
NvFlowArray<NvFlowInt4> locations;
NvFlowArray<NvFlowUint> masks;
NvFlowInt4 locationMin = { 0, 0, 0, 0 };
NvFlowInt4 locationMax = { 0, 0, 0, 0 };
NvFlowArray<NvFlowInt4> tmpLocations;
NvFlowArray<NvFlowUint> tmpMasks;
void reset()
{
tableDimBits = 0u;
tableDimLessOne = 0u;
tableDim3 = 1u;
ranges.size = 0u;
nextIndices.size = 0u;
NvFlowLocationHashTableRange nullRange = { ~0llu, ~0llu };
ranges.pushBack(nullRange);
locations.size = 0u;
masks.size = 0u;
}
NvFlowLocationHashTable()
{
reset();
}
void rebuildTable()
{
ranges.size = 0u;
ranges.reserve(tableDim3);
ranges.size = tableDim3;
nextIndices.size = 0u;
nextIndices.reserve(locations.size);
nextIndices.size = locations.size;
// invalidate ranges
NvFlowLocationHashTableRange nullRange = { ~0llu, ~0llu };
for (NvFlowUint64 rangeIdx = 0u; rangeIdx < ranges.size; rangeIdx++)
{
ranges[rangeIdx] = nullRange;
}
for (NvFlowUint64 locationIdx = 0u; locationIdx < locations.size; locationIdx++)
{
NvFlowInt4 location = locations[locationIdx];
NvFlowUint64 baseRangeIdx = (location.x & tableDimLessOne) |
((location.y & tableDimLessOne) << tableDimBits) |
((location.z & tableDimLessOne) << (tableDimBits + tableDimBits));
// reset next for this location
nextIndices[locationIdx] = ~0llu;
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
if (beginIdx >= endIdx)
{
ranges[baseRangeIdx].beginIdx = locationIdx;
ranges[baseRangeIdx].endIdx = locationIdx + 1u;
}
else if (endIdx == locationIdx)
{
ranges[baseRangeIdx].endIdx = locationIdx + 1u;
nextIndices[endIdx - 1u] = locationIdx;
}
else
{
NvFlowUint64 prevIdx = endIdx - 1u;
NvFlowUint64 currentIdx = nextIndices[prevIdx];
while (currentIdx < nextIndices.size)
{
prevIdx = currentIdx;
currentIdx = nextIndices[currentIdx];
}
nextIndices[prevIdx] = locationIdx;
}
}
}
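// Note: collisions within a table cell are chained; entries are visited by scanning
// ranges[cell].beginIdx..endIdx, then following nextIndices from endIdx - 1 until an
// index >= nextIndices.size terminates the chain.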
void compactNonZeroWithLimit(NvFlowUint64 maxLocations)
{
NvFlowUint64 dstIdx = 0u;
for (NvFlowUint64 srcIdx = 0u; srcIdx < locations.size && dstIdx < maxLocations; srcIdx++)
{
if (masks[srcIdx])
{
locations[dstIdx] = locations[srcIdx];
masks[dstIdx] = masks[srcIdx];
dstIdx++;
}
}
locations.size = dstIdx;
masks.size = dstIdx;
// optimize compacted table dim
tableDimBits = 0u;
tableDimLessOne = 0u;
tableDim3 = 1u;
while (locations.size > tableDim3)
{
tableDimBits++;
tableDimLessOne = (1u << tableDimBits) - 1u;
tableDim3 = (1 << (tableDimBits + tableDimBits + tableDimBits));
}
rebuildTable();
}
void sort()
{
NvFlowArray_copy(tmpLocations, locations);
NvFlowArray_copy(tmpMasks, masks);
NvFlowUint64 globalOffset = 0u;
for (NvFlowUint64 baseRangeIdx = 0u; baseRangeIdx < ranges.size; baseRangeIdx++)
{
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
for (NvFlowUint64 currentIdx = beginIdx; currentIdx < endIdx; currentIdx++)
{
locations[globalOffset] = tmpLocations[currentIdx];
masks[globalOffset] = tmpMasks[currentIdx];
globalOffset++;
}
if (beginIdx < endIdx)
{
NvFlowUint64 currentIdx = nextIndices[endIdx - 1u];
while (currentIdx < nextIndices.size)
{
locations[globalOffset] = tmpLocations[currentIdx];
masks[globalOffset] = tmpMasks[currentIdx];
globalOffset++;
currentIdx = nextIndices[currentIdx];
}
}
}
rebuildTable();
}
NvFlowUint64 find(NvFlowInt4 location)
{
NvFlowUint64 baseRangeIdx = (location.x & tableDimLessOne) |
((location.y & tableDimLessOne) << tableDimBits) |
((location.z & tableDimLessOne) << (tableDimBits + tableDimBits));
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
for (NvFlowUint64 currentIdx = beginIdx; currentIdx < endIdx; currentIdx++)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
return currentIdx;
}
}
if (beginIdx < endIdx)
{
NvFlowUint64 currentIdx = nextIndices[endIdx - 1u];
while (currentIdx < nextIndices.size)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
return currentIdx;
}
currentIdx = nextIndices[currentIdx];
}
}
return ~0llu;
}
void pushNoResize(NvFlowInt4 location, NvFlowUint mask)
{
NvFlowUint64 baseRangeIdx = (location.x & tableDimLessOne) |
((location.y & tableDimLessOne) << tableDimBits) |
((location.z & tableDimLessOne) << (tableDimBits + tableDimBits));
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
for (NvFlowUint64 currentIdx = beginIdx; currentIdx < endIdx; currentIdx++)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
masks[currentIdx] |= mask;
return;
}
}
if (beginIdx >= endIdx)
{
locations.pushBack(location);
masks.pushBack(mask);
nextIndices.pushBack(~0llu);
ranges[baseRangeIdx].beginIdx = locations.size - 1u;
ranges[baseRangeIdx].endIdx = locations.size;
}
else if (endIdx == locations.size)
{
locations.pushBack(location);
masks.pushBack(mask);
nextIndices.pushBack(~0llu);
ranges[baseRangeIdx].endIdx = locations.size;
nextIndices[endIdx - 1u] = locations.size - 1u;
}
else
{
NvFlowUint64 prevIdx = endIdx - 1u;
NvFlowUint64 currentIdx = nextIndices[prevIdx];
while (currentIdx < nextIndices.size)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
masks[currentIdx] |= mask;
return;
}
prevIdx = currentIdx;
currentIdx = nextIndices[currentIdx];
}
locations.pushBack(location);
masks.pushBack(mask);
nextIndices.pushBack(~0llu);
nextIndices[prevIdx] = locations.size - 1u;
}
}
void conditionalGrowTable()
{
if (locations.size > tableDim3)
{
tableDimBits++;
tableDimLessOne = (1u << tableDimBits) - 1u;
tableDim3 = (1 << (tableDimBits + tableDimBits + tableDimBits));
rebuildTable();
}
}
void push(NvFlowInt4 location, NvFlowUint mask)
{
pushNoResize(location, mask);
conditionalGrowTable();
}
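// Example (illustrative): push a block location with a mask bit, then look it up.
// The w component is part of the key (typically used as a layer index), and masks
// accumulate with OR when the same location is pushed again:
//
//     NvFlowLocationHashTable table;
//     table.push(NvFlowInt4{ 1, 2, 3, 0 }, 1u);
//     NvFlowUint64 idx = table.find(NvFlowInt4{ 1, 2, 3, 0 });
//     // idx != ~0llu on a hit; masks[idx] holds the OR of all pushed masks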
void computeStats()
{
locationMin = NvFlowInt4{ 0, 0, 0, 0 };
locationMax = NvFlowInt4{ 0, 0, 0, 0 };
if (locations.size > 0)
{
locationMin = locations[0];
locationMax.x = locations[0].x + 1;
locationMax.y = locations[0].y + 1;
locationMax.z = locations[0].z + 1;
locationMax.w = locations[0].w + 1;
}
for (NvFlowUint64 locationIdx = 1u; locationIdx < locations.size; locationIdx++)
{
NvFlowInt4 location = locations[locationIdx];
if (location.x < locationMin.x)
{
locationMin.x = location.x;
}
if (location.y < locationMin.y)
{
locationMin.y = location.y;
}
if (location.z < locationMin.z)
{
locationMin.z = location.z;
}
if (location.w < locationMin.w)
{
locationMin.w = location.w;
}
// plus one, since max is exclusive
if (location.x + 1 > locationMax.x)
{
locationMax.x = location.x + 1;
}
if (location.y + 1 > locationMax.y)
{
locationMax.y = location.y + 1;
}
if (location.z + 1 > locationMax.z)
{
locationMax.z = location.z + 1;
}
if (location.w + 1 > locationMax.w)
{
locationMax.w = location.w + 1;
}
}
}
}; | 9,839 | C | 26.409471 | 92 | 0.686655 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowString.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include <stdarg.h>
#include "NvFlowTypes.h"
/// ************************** String Pool *********************************************
struct NvFlowStringPool;
NvFlowStringPool* NvFlowStringPoolCreate();
char* NvFlowStringPoolAllocate(NvFlowStringPool* pool, NvFlowUint64 size);
void NvFlowStringPoolTempAllocate(NvFlowStringPool* ptr, char** p_str_data, NvFlowUint64* p_str_size);
void NvFlowStringPoolTempAllocateCommit(NvFlowStringPool* ptr, char* str_data, NvFlowUint64 str_size);
void NvFlowStringPoolDestroy(NvFlowStringPool* pool);
void NvFlowStringPoolReset(NvFlowStringPool* pool);
char* NvFlowStringPrint(NvFlowStringPool* pool, const char* format, ...);
char* NvFlowStringPrintV(NvFlowStringPool* pool, const char* format, va_list args);
/// ************************** Macro utils *********************************
#define NV_FLOW_CSTR(X) NvFlowStringView{X, sizeof(X) - 1}
#define NvFlowStringToInteger(input) atoi(input)
#define NvFlowStringMakeView(input) NvFlowStringView{ input, (int)strlen(input) }
/// ************************** Char Utils *********************************************
NV_FLOW_INLINE int NvFlowCharIsWhiteSpace(char c)
{
return c == ' ' || c == '\n' || c == '\r' || c == '\t' || c == '\f' || c == '\v';
}
NV_FLOW_INLINE int NvFlowCharIsWhiteSpaceButNotNewline(char c)
{
return c == ' ' || c == '\r' || c == '\t' || c == '\f' || c == '\v';
}
NV_FLOW_INLINE int NvFlowCharIsAlphaUnderscore(char c)
{
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c == '_');
}
NV_FLOW_INLINE int NvFlowCharIsAlphaNum(char c)
{
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '_');
}
NV_FLOW_INLINE int NvFlowCharIsNum(char c)
{
return (c >= '0' && c <= '9');
}
/// ************************** String Utils *********************************************
NV_FLOW_INLINE NvFlowUint64 NvFlowStringLength(const char* a)
{
if (!a)
{
return 0;
}
NvFlowUint64 idx = 0;
while (a[idx])
{
idx++;
}
return idx;
}
NV_FLOW_INLINE int NvFlowStringCompare(const char* a, const char* b)
{
a = a ? a : "\0";
b = b ? b : "\0";
int idx = 0;
while (a[idx] || b[idx])
{
if (a[idx] != b[idx])
{
return a[idx] < b[idx] ? -1 : +1;
}
idx++;
}
return 0;
}
NV_FLOW_INLINE char* NvFlowStringFromView(NvFlowStringPool* pool, const char* data, NvFlowUint64 size)
{
char* str = NvFlowStringPoolAllocate(pool, size);
for (NvFlowUint64 i = 0; i < size; i++)
{
str[i] = data[i];
}
return str;
}
NV_FLOW_INLINE void NvFlowStringSplitDelimFirst(NvFlowStringPool* pool, char** pFirst, char** pSecond, const char* input_data, char delim)
{
NvFlowUint64 input_size = NvFlowStringLength(input_data);
NvFlowUint64 slashIdx = 0;
while (slashIdx < input_size)
{
if (input_data[slashIdx] == delim)
{
break;
}
slashIdx++;
}
// if the delimiter was not found, return the whole input as first and an empty second,
// avoiding unsigned underflow in the second view's size
if (slashIdx >= input_size)
{
*pFirst = NvFlowStringFromView(pool, input_data, input_size);
*pSecond = NvFlowStringFromView(pool, input_data + input_size, 0);
return;
}
*pFirst = NvFlowStringFromView(pool, input_data, slashIdx + 1);
*pSecond = NvFlowStringFromView(pool, input_data + slashIdx + 1, input_size - slashIdx - 1);
}
NV_FLOW_INLINE void NvFlowStringSplitDelimLast(NvFlowStringPool* pool, char** pFirst, char** pSecond, const char* input_data, char delim)
{
NvFlowUint64 input_size = NvFlowStringLength(input_data);
NvFlowUint64 slashIdx = input_size - 1;
while (slashIdx < input_size)
{
if (input_data[slashIdx] == delim)
{
break;
}
slashIdx--;
}
*pFirst = NvFlowStringFromView(pool, input_data, slashIdx + 1);
*pSecond = NvFlowStringFromView(pool, input_data + slashIdx + 1, input_size - slashIdx - 1);
}
NV_FLOW_INLINE char* NvFlowStringDup(NvFlowStringPool* pool, const char* name)
{
NvFlowUint64 name_size = NvFlowStringLength(name);
return NvFlowStringFromView(pool, name, name_size);
}
NV_FLOW_INLINE char* NvFlowStringConcat(NvFlowStringPool* pool, const char* dir_data, const char* filename_data)
{
NvFlowUint64 dir_size = NvFlowStringLength(dir_data);
NvFlowUint64 filename_size = NvFlowStringLength(filename_data);
char* s_data = NvFlowStringPoolAllocate(pool, dir_size + filename_size);
for (NvFlowUint64 i = 0; i < dir_size; i++)
{
s_data[i] = dir_data[i];
}
for (NvFlowUint64 i = 0; i < filename_size; i++)
{
s_data[i + dir_size] = filename_data[i];
}
return s_data;
}
NV_FLOW_INLINE char* NvFlowStringConcatN(NvFlowStringPool* pool, const char** views, NvFlowUint64 numViews)
{
NvFlowUint64 totalSize = 0;
for (NvFlowUint64 viewIdx = 0; viewIdx < numViews; viewIdx++)
{
totalSize += NvFlowStringLength(views[viewIdx]);
}
char* s_data = NvFlowStringPoolAllocate(pool, totalSize);
NvFlowUint64 dstOffset = 0;
for (NvFlowUint64 viewIdx = 0; viewIdx < numViews; viewIdx++)
{
const char* view_data = views[viewIdx];
NvFlowUint64 view_size = NvFlowStringLength(view_data);
for (NvFlowUint64 i = 0; i < view_size; i++)
{
s_data[i + dstOffset] = view_data[i];
}
dstOffset += view_size;
}
return s_data;
}
NV_FLOW_INLINE char* NvFlowStringConcat3(NvFlowStringPool* pool, const char* a, const char* b, const char* c)
{
const char* list[3u] = { a, b, c };
return NvFlowStringConcatN(pool, list, 3u);
}
NV_FLOW_INLINE char* NvFlowStringConcat4(NvFlowStringPool* pool, const char* a, const char* b, const char* c, const char* d)
{
const char* list[4u] = { a, b, c, d };
return NvFlowStringConcatN(pool, list, 4u);
}
NV_FLOW_INLINE char* NvFlowStringTrimEnd(NvFlowStringPool* pool, const char* a_data, char trimChar)
{
NvFlowUint64 a_size = NvFlowStringLength(a_data);
while (a_size > 0 && a_data[a_size - 1] == trimChar)
{
a_size--;
}
return NvFlowStringFromView(pool, a_data, a_size);
}
NV_FLOW_INLINE char* NvFlowStringTrimBeginAndEnd(NvFlowStringPool* pool, const char* a_data, char trimChar)
{
NvFlowUint64 a_size = NvFlowStringLength(a_data);
while (a_size > 0 && a_data[0] == trimChar)
{
a_data++;
a_size--;
}
while (a_size > 0 && a_data[a_size - 1] == trimChar)
{
a_size--;
}
return NvFlowStringFromView(pool, a_data, a_size);
}
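// Usage sketch (illustrative): all strings are pool-backed, so a single pool destroy
// releases everything allocated below:
//
//     NvFlowStringPool* pool = NvFlowStringPoolCreate();
//     char* path = NvFlowStringConcat3(pool, "shaders", "/", "emitter.hlsl");
//     char* dir = nullptr;
//     char* file = nullptr;
//     NvFlowStringSplitDelimLast(pool, &dir, &file, path, '/');
//     // dir == "shaders/" (keeps the delimiter), file == "emitter.hlsl"
//     NvFlowStringPoolDestroy(pool);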
/// ************************** File Utils *********************************************
const char* NvFlowTextFileLoad(NvFlowStringPool* pool, const char* filename);
void NvFlowTextFileStore(const char* text, const char* filename);
NvFlowBool32 NvFlowTextFileTestOpen(const char* filename);
void NvFlowTextFileRemove(const char* name);
void NvFlowTextFileRename(const char* oldName, const char* newName);
NvFlowBool32 NvFlowTextFileDiffAndWriteIfModified(const char* filenameDst, const char* filenameTmp);
| 7,988 | C | 30.207031 | 138 | 0.672258 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowMath.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowTypes.h"
#include <math.h>
namespace NvFlowMath
{
static const float pi = 3.141592654f;
NV_FLOW_INLINE NvFlowFloat4 operator+(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x + rhs.x;
ret.y = lhs.y + rhs.y;
ret.z = lhs.z + rhs.z;
ret.w = lhs.w + rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator-(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x - rhs.x;
ret.y = lhs.y - rhs.y;
ret.z = lhs.z - rhs.z;
ret.w = lhs.w - rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator*(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x * rhs.x;
ret.y = lhs.y * rhs.y;
ret.z = lhs.z * rhs.z;
ret.w = lhs.w * rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator/(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x / rhs.x;
ret.y = lhs.y / rhs.y;
ret.z = lhs.z / rhs.z;
ret.w = lhs.w / rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator*(float v, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = v * rhs.x;
ret.y = v * rhs.y;
ret.z = v * rhs.z;
ret.w = v * rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator*(const NvFlowFloat4& lhs, float v)
{
NvFlowFloat4 ret;
ret.x = lhs.x * v;
ret.y = lhs.y * v;
ret.z = lhs.z * v;
ret.w = lhs.w * v;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatX(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.x, a.x, a.x, a.x };
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatY(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.y, a.y, a.y, a.y };
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatZ(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.z, a.z, a.z, a.z };
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatW(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.w, a.w, a.w, a.w };
}
NV_FLOW_INLINE NvFlowFloat4 vector3Normalize(const NvFlowFloat4& v)
{
float magn = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
if (magn > 0.f)
{
magn = 1.f / magn;
}
return NvFlowFloat4{ v.x * magn, v.y * magn, v.z * magn, v.w * magn };
}
NV_FLOW_INLINE NvFlowFloat4 vectorPerspectiveDivide(const NvFlowFloat4& v)
{
return v / vectorSplatW(v);
}
NV_FLOW_INLINE NvFlowFloat4 matrixMultiplyRow(const NvFlowFloat4x4& b, const NvFlowFloat4& r)
{
NvFlowFloat4 result;
result.x = b.x.x * r.x + b.y.x * r.y + b.z.x * r.z + b.w.x * r.w;
result.y = b.x.y * r.x + b.y.y * r.y + b.z.y * r.z + b.w.y * r.w;
result.z = b.x.z * r.x + b.y.z * r.y + b.z.z * r.z + b.w.z * r.w;
result.w = b.x.w * r.x + b.y.w * r.y + b.z.w * r.z + b.w.w * r.w;
return result;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixMultiply(const NvFlowFloat4x4& a, const NvFlowFloat4x4& b)
{
NvFlowFloat4x4 result;
result.x = matrixMultiplyRow(b, a.x);
result.y = matrixMultiplyRow(b, a.y);
result.z = matrixMultiplyRow(b, a.z);
result.w = matrixMultiplyRow(b, a.w);
return result;
}
NV_FLOW_INLINE NvFlowFloat4 matrixTransposeRow(const NvFlowFloat4x4& a, unsigned int offset)
{
NvFlowFloat4 result;
result.x = *((&a.x.x) + offset);
result.y = *((&a.y.x) + offset);
result.z = *((&a.z.x) + offset);
result.w = *((&a.w.x) + offset);
return result;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixTranspose(const NvFlowFloat4x4& a)
{
NvFlowFloat4x4 result;
result.x = matrixTransposeRow(a, 0u);
result.y = matrixTransposeRow(a, 1u);
result.z = matrixTransposeRow(a, 2u);
result.w = matrixTransposeRow(a, 3u);
return result;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixInverse(const NvFlowFloat4x4& a)
{
const NvFlowFloat4x4& m = a;
float f = (float(1.0) /
(m.x.x * m.y.y * m.z.z * m.w.w +
m.x.x * m.y.z * m.z.w * m.w.y +
m.x.x * m.y.w * m.z.y * m.w.z +
m.x.y * m.y.x * m.z.w * m.w.z +
m.x.y * m.y.z * m.z.x * m.w.w +
m.x.y * m.y.w * m.z.z * m.w.x +
m.x.z * m.y.x * m.z.y * m.w.w +
m.x.z * m.y.y * m.z.w * m.w.x +
m.x.z * m.y.w * m.z.x * m.w.y +
m.x.w * m.y.x * m.z.z * m.w.y +
m.x.w * m.y.y * m.z.x * m.w.z +
m.x.w * m.y.z * m.z.y * m.w.x +
-m.x.x * m.y.y * m.z.w * m.w.z +
-m.x.x * m.y.z * m.z.y * m.w.w +
-m.x.x * m.y.w * m.z.z * m.w.y +
-m.x.y * m.y.x * m.z.z * m.w.w +
-m.x.y * m.y.z * m.z.w * m.w.x +
-m.x.y * m.y.w * m.z.x * m.w.z +
-m.x.z * m.y.x * m.z.w * m.w.y +
-m.x.z * m.y.y * m.z.x * m.w.w +
-m.x.z * m.y.w * m.z.y * m.w.x +
-m.x.w * m.y.x * m.z.y * m.w.z +
-m.x.w * m.y.y * m.z.z * m.w.x +
-m.x.w * m.y.z * m.z.x * m.w.y));
float a00 = (m.y.y * m.z.z * m.w.w +
m.y.z * m.z.w * m.w.y +
m.y.w * m.z.y * m.w.z +
-m.y.y * m.z.w * m.w.z +
-m.y.z * m.z.y * m.w.w +
-m.y.w * m.z.z * m.w.y);
float a10 = (m.x.y * m.z.w * m.w.z +
m.x.z * m.z.y * m.w.w +
m.x.w * m.z.z * m.w.y +
-m.x.y * m.z.z * m.w.w +
-m.x.z * m.z.w * m.w.y +
-m.x.w * m.z.y * m.w.z);
float a20 = (m.x.y * m.y.z * m.w.w +
m.x.z * m.y.w * m.w.y +
m.x.w * m.y.y * m.w.z +
-m.x.y * m.y.w * m.w.z +
-m.x.z * m.y.y * m.w.w +
-m.x.w * m.y.z * m.w.y);
float a30 = (m.x.y * m.y.w * m.z.z +
m.x.z * m.y.y * m.z.w +
m.x.w * m.y.z * m.z.y +
-m.x.y * m.y.z * m.z.w +
-m.x.z * m.y.w * m.z.y +
-m.x.w * m.y.y * m.z.z);
float a01 = (m.y.x * m.z.w * m.w.z +
m.y.z * m.z.x * m.w.w +
m.y.w * m.z.z * m.w.x +
-m.y.x * m.z.z * m.w.w +
-m.y.z * m.z.w * m.w.x +
-m.y.w * m.z.x * m.w.z);
float a11 = (m.x.x * m.z.z * m.w.w +
m.x.z * m.z.w * m.w.x +
m.x.w * m.z.x * m.w.z +
-m.x.x * m.z.w * m.w.z +
-m.x.z * m.z.x * m.w.w +
-m.x.w * m.z.z * m.w.x);
float a21 = (m.x.x * m.y.w * m.w.z +
m.x.z * m.y.x * m.w.w +
m.x.w * m.y.z * m.w.x +
-m.x.x * m.y.z * m.w.w +
-m.x.z * m.y.w * m.w.x +
-m.x.w * m.y.x * m.w.z);
float a31 = (m.x.x * m.y.z * m.z.w +
m.x.z * m.y.w * m.z.x +
m.x.w * m.y.x * m.z.z +
-m.x.x * m.y.w * m.z.z +
-m.x.z * m.y.x * m.z.w +
-m.x.w * m.y.z * m.z.x);
float a02 = (m.y.x * m.z.y * m.w.w +
m.y.y * m.z.w * m.w.x +
m.y.w * m.z.x * m.w.y +
-m.y.x * m.z.w * m.w.y +
-m.y.y * m.z.x * m.w.w +
-m.y.w * m.z.y * m.w.x);
float a12 = (-m.x.x * m.z.y * m.w.w +
-m.x.y * m.z.w * m.w.x +
-m.x.w * m.z.x * m.w.y +
m.x.x * m.z.w * m.w.y +
m.x.y * m.z.x * m.w.w +
m.x.w * m.z.y * m.w.x);
float a22 = (m.x.x * m.y.y * m.w.w +
m.x.y * m.y.w * m.w.x +
m.x.w * m.y.x * m.w.y +
-m.x.x * m.y.w * m.w.y +
-m.x.y * m.y.x * m.w.w +
-m.x.w * m.y.y * m.w.x);
float a32 = (m.x.x * m.y.w * m.z.y +
m.x.y * m.y.x * m.z.w +
m.x.w * m.y.y * m.z.x +
-m.x.y * m.y.w * m.z.x +
-m.x.w * m.y.x * m.z.y +
-m.x.x * m.y.y * m.z.w);
float a03 = (m.y.x * m.z.z * m.w.y +
m.y.y * m.z.x * m.w.z +
m.y.z * m.z.y * m.w.x +
-m.y.x * m.z.y * m.w.z +
-m.y.y * m.z.z * m.w.x +
-m.y.z * m.z.x * m.w.y);
float a13 = (m.x.x * m.z.y * m.w.z +
m.x.y * m.z.z * m.w.x +
m.x.z * m.z.x * m.w.y +
-m.x.x * m.z.z * m.w.y +
-m.x.y * m.z.x * m.w.z +
-m.x.z * m.z.y * m.w.x);
float a23 = (m.x.x * m.y.z * m.w.y +
m.x.y * m.y.x * m.w.z +
m.x.z * m.y.y * m.w.x +
-m.x.x * m.y.y * m.w.z +
-m.x.y * m.y.z * m.w.x +
-m.x.z * m.y.x * m.w.y);
float a33 = (m.x.x * m.y.y * m.z.z +
m.x.y * m.y.z * m.z.x +
m.x.z * m.y.x * m.z.y +
-m.x.x * m.y.z * m.z.y +
-m.x.y * m.y.x * m.z.z +
-m.x.z * m.y.y * m.z.x);
return NvFlowFloat4x4{
a00*f, a10*f, a20*f, a30*f,
a01*f, a11*f, a21*f, a31*f,
a02*f, a12*f, a22*f, a32*f,
a03*f, a13*f, a23*f, a33*f };
}
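// Quick sanity check (illustrative): a matrix multiplied by its inverse should recover
// the identity, up to floating point error:
//
//     NvFlowFloat4x4 t = matrixTranslation(1.f, 2.f, 3.f);
//     NvFlowFloat4x4 shouldBeIdentity = matrixMultiply(t, matrixInverse(t));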
NV_FLOW_INLINE NvFlowFloat4x4 matrixIdentity()
{
return NvFlowFloat4x4{
{ 1.f, 0.f, 0.f, 0.f },
{ 0.f, 1.f, 0.f, 0.f },
{ 0.f, 0.f, 1.f, 0.f },
{ 0.f, 0.f, 0.f, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixScaling(float x, float y, float z)
{
return NvFlowFloat4x4{
x, 0.f, 0.f, 0.f,
0.f, y, 0.f, 0.f,
0.f, 0.f, z, 0.f,
0.f, 0.f, 0.f, 1.f
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixTranslation(float x, float y, float z)
{
return NvFlowFloat4x4{
1.f, 0.f, 0.f, 0.f,
0.f, 1.f, 0.f, 0.f,
0.f, 0.f, 1.f, 0.f,
x, y, z, 1.f
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixPerspectiveFovRH(float fovAngleY, float aspectRatio, float nearZ, float farZ)
{
float sinfov = sinf(0.5f * fovAngleY);
float cosfov = cosf(0.5f * fovAngleY);
float height = cosfov / sinfov;
float width = height / aspectRatio;
float frange = farZ / (nearZ - farZ);
if (nearZ == INFINITY)
{
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, -1.f },
{ 0.f, 0.f, farZ, 0.f }
};
}
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, -1.f },
{ 0.f, 0.f, frange * nearZ, 0.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixPerspectiveFovLH(float fovAngleY, float aspectRatio, float nearZ, float farZ)
{
float sinfov = sinf(0.5f * fovAngleY);
float cosfov = cosf(0.5f * fovAngleY);
float height = cosfov / sinfov;
float width = height / aspectRatio;
float frange = farZ / (farZ - nearZ);
if (nearZ == INFINITY)
{
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, 1.f },
{ 0.f, 0.f, farZ, 0.f }
};
}
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, 1.f },
{ 0.f, 0.f, -frange * nearZ, 0.f }
};
}
NV_FLOW_INLINE NvFlowBool32 matrixPerspectiveIsRH(const NvFlowFloat4x4& m)
{
return m.z.w < 0.f ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}
NV_FLOW_INLINE NvFlowBool32 matrixPerspectiveIsReverseZ(const NvFlowFloat4x4& m)
{
float nearZ = -m.w.z / m.z.z;
float farZ = (m.w.w - m.w.z) / (m.z.z - m.z.w);
float singZ = -m.w.w / m.z.w;
return fabsf(farZ - singZ) < fabsf(nearZ - singZ) ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}
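// Illustrative note: the detection above recovers the view-space near and far depths
// encoded by the projection and the w' = 0 singularity, then treats the matrix as
// reverse-Z when the far plane sits closer to that singularity than the near plane:
//
//     NvFlowFloat4x4 m = matrixPerspectiveFovRH(pi / 4.f, 1.f, 0.1f, 1000.f);
//     // conventional near < far, so matrixPerspectiveIsReverseZ(m) == NV_FLOW_FALSE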
NV_FLOW_INLINE NvFlowFloat4x4 matrixOrthographicLH(float width, float height, float nearZ, float farZ)
{
float frange = 1.f / (farZ - nearZ);
return NvFlowFloat4x4{
{ 2.f / width, 0.f, 0.f, 0.f },
{ 0.f, 2.f / height, 0.f, 0.f },
{ 0.f, 0.f, frange, 0.f },
{ 0.f, 0.f, -frange * nearZ, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixOrthographicRH(float width, float height, float nearZ, float farZ)
{
float frange = 1.f / (nearZ - farZ);
return NvFlowFloat4x4{
{ 2.f / width, 0.f, 0.f, 0.f },
{ 0.f, 2.f / height, 0.f, 0.f },
{ 0.f, 0.f, frange, 0.f },
{ 0.f, 0.f, frange * nearZ, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationNormal(NvFlowFloat4 normal, float angle)
{
float sinAngle = sinf(angle);
float cosAngle = cosf(angle);
NvFlowFloat4 a = { sinAngle, cosAngle, 1.f - cosAngle, 0.f };
NvFlowFloat4 c2 = vectorSplatZ(a);
NvFlowFloat4 c1 = vectorSplatY(a);
NvFlowFloat4 c0 = vectorSplatX(a);
NvFlowFloat4 n0 = { normal.y, normal.z, normal.x, normal.w };
NvFlowFloat4 n1 = { normal.z, normal.x, normal.y, normal.w };
NvFlowFloat4 v0 = c2 * n0;
v0 = v0 * n1;
NvFlowFloat4 r0 = c2 * normal;
r0 = (r0 * normal) + c1;
NvFlowFloat4 r1 = (c0 * normal) + v0;
NvFlowFloat4 r2 = v0 - (c0 * normal);
v0 = NvFlowFloat4{ r0.x, r0.y, r0.z, a.w };
NvFlowFloat4 v1 = { r1.z, r2.y, r2.z, r1.x };
NvFlowFloat4 v2 = { r1.y, r2.x, r1.y, r2.x };
return NvFlowFloat4x4{
{ v0.x, v1.x, v1.y, v0.w },
{ v1.z, v0.y, v1.w, v0.w },
{ v2.x, v2.y, v0.z, v0.w },
{ 0.f, 0.f, 0.f, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationAxis(NvFlowFloat4 axis, float angle)
{
NvFlowFloat4 normal = vector3Normalize(axis);
return matrixRotationNormal(normal, angle);
}
NV_FLOW_INLINE NvFlowFloat4 quaternionRotationRollPitchYawFromVector(NvFlowFloat4 angles)
{
NvFlowFloat4 sign = { 1.f, -1.f, -1.f, 1.f };
NvFlowFloat4 halfAngles = angles * NvFlowFloat4{ 0.5f, 0.5f, 0.5f, 0.5f };
NvFlowFloat4 sinAngle = NvFlowFloat4{ sinf(halfAngles.x), sinf(halfAngles.y), sinf(halfAngles.z), sinf(halfAngles.w) };
NvFlowFloat4 cosAngle = NvFlowFloat4{ cosf(halfAngles.x), cosf(halfAngles.y), cosf(halfAngles.z), cosf(halfAngles.w) };
NvFlowFloat4 p0 = { sinAngle.x, cosAngle.x, cosAngle.x, cosAngle.x };
NvFlowFloat4 y0 = { cosAngle.y, sinAngle.y, cosAngle.y, cosAngle.y };
NvFlowFloat4 r0 = { cosAngle.z, cosAngle.z, sinAngle.z, cosAngle.z };
NvFlowFloat4 p1 = { cosAngle.x, sinAngle.x, sinAngle.x, sinAngle.x };
NvFlowFloat4 y1 = { sinAngle.y, cosAngle.y, sinAngle.y, sinAngle.y };
NvFlowFloat4 r1 = { sinAngle.z, sinAngle.z, cosAngle.z, sinAngle.z };
NvFlowFloat4 q1 = p1 * sign;
NvFlowFloat4 q0 = p0 * y0;
q1 = q1 * y1;
q0 = q0 * r0;
NvFlowFloat4 q = (q1 * r1) + q0;
return q;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationQuaternion(NvFlowFloat4 quaternion)
{
NvFlowFloat4 constant1110 = { 1.f, 1.f, 1.f, 0.f };
NvFlowFloat4 q0 = quaternion + quaternion;
NvFlowFloat4 q1 = quaternion * q0;
NvFlowFloat4 v0 = { q1.y, q1.x, q1.x, constant1110.w };
NvFlowFloat4 v1 = { q1.z, q1.z, q1.y, constant1110.w };
NvFlowFloat4 r0 = constant1110 - v0;
r0 = r0 - v1;
v0 = NvFlowFloat4{ quaternion.x, quaternion.x, quaternion.y, quaternion.w };
v1 = NvFlowFloat4{ q0.z, q0.y, q0.z, q0.w };
v0 = v0 * v1;
v1 = vectorSplatW(quaternion);
NvFlowFloat4 v2 = { q0.y, q0.z, q0.x, q0.w };
v1 = v1 * v2;
NvFlowFloat4 r1 = v0 + v1;
NvFlowFloat4 r2 = v0 - v1;
v0 = NvFlowFloat4{ r1.y, r2.x, r2.y, r1.z };
v1 = NvFlowFloat4{ r1.x, r2.z, r1.x, r2.z };
return NvFlowFloat4x4{
{ r0.x, v0.x, v0.y, r0.w },
{ v0.z, r0.y, v0.w, r0.w },
{ v1.x, v1.y, r0.z, r0.w },
{ 0.f, 0.f, 0.f, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationRollPitchYaw(float pitch, float yaw, float roll)
{
NvFlowFloat4 angles = { pitch, yaw, roll, 0.f };
NvFlowFloat4 q = quaterionRotationRollPitchYawFromVector(angles);
return matrixRotationQuaternion(q);
}
NV_FLOW_INLINE NvFlowFloat4 vectorLerp(NvFlowFloat4 a, NvFlowFloat4 b, float t)
{
return NvFlowFloat4{
(1.f - t) * a.x + t * b.x,
(1.f - t) * a.y + t * b.y,
(1.f - t) * a.z + t * b.z,
(1.f - t) * a.w + t * b.w
};
}
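// Interpolates only the translation row of the two transforms; the basis
// (rotation/scale) rows snap to whichever endpoint is nearer to t.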
NV_FLOW_INLINE NvFlowFloat4x4 matrixInterpolateTranslation(const NvFlowFloat4x4& a, const NvFlowFloat4x4& b, float t)
{
NvFlowFloat4x4 ret;
if (t < 0.5f)
{
ret = a;
}
else
{
ret = b;
}
ret.w.x = (1.f - t) * a.w.x + t * b.w.x;
ret.w.y = (1.f - t) * a.w.y + t * b.w.y;
ret.w.z = (1.f - t) * a.w.z + t * b.w.z;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 vector4Normalize(const NvFlowFloat4& v)
{
float magn = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z + v.w * v.w);
if (magn > 0.f)
{
magn = 1.f / magn;
}
return NvFlowFloat4{v.x * magn, v.y * magn, v.z * magn, v.w * magn};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixNormalize(const NvFlowFloat4x4& a)
{
NvFlowFloat4x4 temp = a;
temp.x.w = 0.f;
temp.y.w = 0.f;
temp.z.w = 0.f;
temp.w.w = 1.f;
temp.w.x = 0.f;
temp.w.y = 0.f;
temp.w.z = 0.f;
NvFlowFloat4x4 ret = temp;
ret.x = vector4Normalize(ret.x);
ret.y = vector4Normalize(ret.y);
ret.z = vector4Normalize(ret.z);
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 vector4Transform(const NvFlowFloat4& x, const NvFlowFloat4x4& A)
{
return NvFlowFloat4{
A.x.x * x.x + A.y.x * x.y + A.z.x * x.z + A.w.x * x.w,
A.x.y * x.x + A.y.y * x.y + A.z.y * x.z + A.w.y * x.w,
A.x.z * x.x + A.y.z * x.y + A.z.z * x.z + A.w.z * x.w,
A.x.w * x.x + A.y.w * x.y + A.z.w * x.z + A.w.w * x.w
};
}
NV_FLOW_INLINE NvFlowFloat4 vectorMin(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{
a.x < b.x ? a.x : b.x,
a.y < b.y ? a.y : b.y,
a.z < b.z ? a.z : b.z,
a.w < b.w ? a.w : b.w
};
}
NV_FLOW_INLINE NvFlowFloat4 vectorMax(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{
a.x > b.x ? a.x : b.x,
a.y > b.y ? a.y : b.y,
a.z > b.z ? a.z : b.z,
a.w > b.w ? a.w : b.w
};
}
NV_FLOW_INLINE NvFlowFloat4 vectorMultiply(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w};
}
NV_FLOW_INLINE NvFlowFloat4 vectorFloor(const NvFlowFloat4& a)
{
return NvFlowFloat4{ floorf(a.x), floorf(a.y), floorf(a.z), floorf(a.w) };
}
NV_FLOW_INLINE NvFlowFloat4 vectorCeiling(const NvFlowFloat4& a)
{
return NvFlowFloat4{ceilf(a.x), ceilf(a.y), ceilf(a.z), ceilf(a.w)};
}
NV_FLOW_INLINE NvFlowFloat4 vector3Dot(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
float magn = a.x * b.x + a.y * b.y + a.z * b.z;
return NvFlowFloat4{ magn, magn, magn, magn };
}
NV_FLOW_INLINE NvFlowFloat4 vector4Dot(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
float magn = a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
return NvFlowFloat4{ magn, magn, magn, magn };
}
NV_FLOW_INLINE NvFlowFloat4 vector3Cross(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{
a.y * b.z - a.z * b.y,
a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x,
0.f
};
}
NV_FLOW_INLINE NvFlowFloat4 vector3Length(const NvFlowFloat4& a)
{
float magn = sqrtf(a.x * a.x + a.y * a.y + a.z * a.z);
return NvFlowFloat4{ magn, magn, magn, magn };
}
NV_FLOW_INLINE NvFlowFloat4 make_float4(NvFlowFloat3 a, float b)
{
return NvFlowFloat4{ a.x, a.y, a.z, b };
}
NV_FLOW_INLINE NvFlowFloat3 float4_to_float3(NvFlowFloat4 a)
{
return NvFlowFloat3{ a.x, a.y, a.z };
}
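// Returns the smallest i such that (1u << i) >= val, i.e. ceil(log2(val)) for val >= 1.
// Note: returns 0 for val == 0 and for val > 2^31, since no 32-bit shift satisfies the test.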
NV_FLOW_INLINE NvFlowUint log2ui(NvFlowUint val)
{
NvFlowUint ret = 0;
for (NvFlowUint i = 0; i < 32; i++)
{
if ((1u << i) >= val)
{
ret = i;
break;
}
}
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 computeRayOrigin(const NvFlowFloat4x4& viewInv, const NvFlowFloat4x4& projectionInv, NvFlowFloat2 ndc, float nearZ)
{
NvFlowFloat4 viewPos = vector4Transform(NvFlowFloat4{ ndc.x, ndc.y, nearZ, 1.f }, projectionInv);
return vectorPerspectiveDivide(vector4Transform(viewPos, viewInv));
}
NV_FLOW_INLINE NvFlowFloat4 computeRayDir(const NvFlowFloat4x4& viewInv, const NvFlowFloat4x4& projectionInv, NvFlowFloat2 ndc, float nearZ)
{
NvFlowFloat4x4 projectionInvT = matrixTranspose(projectionInv);
NvFlowFloat4 ndc_ext = NvFlowFloat4{ ndc.x, ndc.y, 0.f, 1.f };
NvFlowFloat4 dir = {
-projectionInvT.w.z * vector4Dot(projectionInvT.x, ndc_ext).x,
-projectionInvT.w.z * vector4Dot(projectionInvT.y, ndc_ext).x,
-projectionInvT.w.z * vector4Dot(projectionInvT.z, ndc_ext).x +
projectionInvT.z.z * vector4Dot(projectionInvT.w, ndc_ext).x,
0.f
};
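    // negate the direction when the near plane maps to NDC z = 1 (reverse-Z projections)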
if (nearZ > 0.5f)
{
dir = NvFlowFloat4{ 0.f, 0.f, 0.f, 0.f } - dir;
}
return vector4Transform(dir, viewInv);
}
struct FrustumRays
{
NvFlowFloat4 rayOrigin00;
NvFlowFloat4 rayOrigin10;
NvFlowFloat4 rayOrigin01;
NvFlowFloat4 rayOrigin11;
NvFlowFloat4 rayDir00;
NvFlowFloat4 rayDir10;
NvFlowFloat4 rayDir01;
NvFlowFloat4 rayDir11;
float nearZ;
NvFlowBool32 isReverseZ;
};
NV_FLOW_INLINE void computeFrustumRays(FrustumRays* ptr, const NvFlowFloat4x4& viewInv, const NvFlowFloat4x4& projectionInv)
{
NvFlowFloat4 nearPoint = vector4Transform(NvFlowFloat4{ 0.f, 0.f, 0.f, 1.f }, projectionInv);
NvFlowFloat4 farPoint = vector4Transform(NvFlowFloat4{ 0.f, 0.f, 1.f, 1.f }, projectionInv);
nearPoint = nearPoint / vectorSplatW(nearPoint);
farPoint = farPoint / vectorSplatW(farPoint);
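    // NDC depth of the near plane: 0 for standard projections, 1 when |z_near| >= |z_far| (reverse-Z)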
float nearZ = fabsf(nearPoint.z) < fabsf(farPoint.z) ? 0.f : 1.f;
ptr->rayOrigin00 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ -1.f, +1.f }, nearZ);
ptr->rayOrigin10 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ +1.f, +1.f }, nearZ);
ptr->rayOrigin01 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ -1.f, -1.f }, nearZ);
ptr->rayOrigin11 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ +1.f, -1.f }, nearZ);
ptr->rayDir00 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ -1.f, +1.f }, nearZ);
ptr->rayDir10 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ +1.f, +1.f }, nearZ);
ptr->rayDir01 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ -1.f, -1.f }, nearZ);
ptr->rayDir11 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ +1.f, -1.f }, nearZ);
ptr->nearZ = nearZ;
ptr->isReverseZ = fabsf(nearPoint.z) >= fabsf(farPoint.z);
}
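// Example usage (illustrative sketch, not part of the original API): given viewport
// coordinates u, v in [0, 1] (u left to right, v top to bottom), a per-pixel ray can be
// reconstructed by bilinearly interpolating the frustum corner rays:
//     FrustumRays rays = {};
//     computeFrustumRays(&rays, viewInv, projectionInv);
//     NvFlowFloat4 rayOrigin = vectorLerp(
//         vectorLerp(rays.rayOrigin00, rays.rayOrigin10, u),
//         vectorLerp(rays.rayOrigin01, rays.rayOrigin11, u), v);
//     NvFlowFloat4 rayDir = vectorLerp(
//         vectorLerp(rays.rayDir00, rays.rayDir10, u),
//         vectorLerp(rays.rayDir01, rays.rayDir11, u), v);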
} | 21,822 | C | 27.415365 | 144 | 0.592109 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowPreprocessor.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "NvFlowPreprocessor.h"
#include "NvFlowArray.h"
#include <stdlib.h>
#include <stdio.h>
struct NvFlowPreprocessorItem
{
NvFlowPreprocessorFunction function;
NvFlowPreprocessorToken* tokens_data;
NvFlowUint64 tokens_size;
NvFlowBool32 isEnabled;
};
struct NvFlowPreprocessor
{
NvFlowStringPool* stringPool = nullptr;
int currentLevel = 0;
NvFlowPreprocessorMode mode = eNvFlowPreprocessorMode_default;
NvFlowArray<NvFlowPreprocessorItem, 16u> items;
NvFlowArray<const char*> stringStack;
NvFlowArray<const char*, 8u> tempStrViews;
NvFlowArray<NvFlowPreprocessorToken> tempTokens;
};
NvFlowPreprocessor* NvFlowPreprocessorCreate(NvFlowStringPool* pool)
{
auto ptr = new NvFlowPreprocessor();
ptr->stringPool = pool;
ptr->currentLevel = 0;
return ptr;
}
void NvFlowPreprocessorDestroy(NvFlowPreprocessor* ptr)
{
delete ptr;
}
void NvFlowPreprocessorReset(NvFlowPreprocessor* ptr)
{
ptr->currentLevel = 0;
ptr->mode = eNvFlowPreprocessorMode_default;
ptr->items.size = 0u;
ptr->stringStack.size = 0u;
ptr->tempStrViews.size = 0u;
ptr->tempTokens.size = 0u;
}
void NvFlowPreprocessorSetMode(NvFlowPreprocessor* ptr, NvFlowPreprocessorMode mode)
{
ptr->mode = mode;
}
NvFlowPreprocessorMode NvFlowPreprocessorGetMode(NvFlowPreprocessor* ptr)
{
return ptr->mode;
}
NvFlowStringPool* NvFlowPreprocessorStringPool(NvFlowPreprocessor* ptr)
{
return ptr->stringPool;
}
void NvFlowPreprocessor_addItem(NvFlowPreprocessor* ptr, const NvFlowPreprocessorFunction* pFunction)
{
NvFlowPreprocessorItem item = {};
item.function = *pFunction;
item.isEnabled = NV_FLOW_TRUE;
const char* tokenStr = item.function.name;
if (item.function.type == eNvFlowPreprocessorType_function)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, tokenStr, "(");
}
else if (item.function.type == eNvFlowPreprocessorType_index)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, tokenStr, "[");
}
else if (item.function.type == eNvFlowPreprocessorType_attribute)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, "[", tokenStr);
}
else if (item.function.type == eNvFlowPreprocessorType_line)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, "#", tokenStr);
}
else if (item.function.type == eNvFlowPreprocessorType_templateInstance)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, tokenStr, "<");
}
else
{
tokenStr = NvFlowStringDup(ptr->stringPool, tokenStr);
}
NvFlowPreprocessorTokenize(ptr, tokenStr, &item.tokens_size, &item.tokens_data);
ptr->items.pushBack(item);
}
char* NvFlowPreprocessor_substituteConstant(NvFlowPreprocessor* ptr, void* userdata, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens)
{
const char* value = (const char*)userdata;
return NvFlowStringDup(ptr->stringPool, value);
}
void NvFlowPreprocessorAddConstants(NvFlowPreprocessor* ptr, NvFlowUint64 numConstants, const NvFlowPreprocessorConstant* constants)
{
for (NvFlowUint64 idx = 0u; idx < numConstants; idx++)
{
char* valueStr = NvFlowStringDup(ptr->stringPool, constants[idx].value);
NvFlowPreprocessorFunction function = {};
function.name = constants[idx].name;
function.type = eNvFlowPreprocessorType_constant;
function.userdata = valueStr;
function.substitute = NvFlowPreprocessor_substituteConstant;
NvFlowPreprocessor_addItem(ptr, &function);
}
}
void NvFlowPreprocessorAddFunctions(NvFlowPreprocessor* ptr, NvFlowUint64 numFunctions, const NvFlowPreprocessorFunction* functions)
{
for (NvFlowUint64 idx = 0u; idx < numFunctions; idx++)
{
NvFlowPreprocessor_addItem(ptr, &functions[idx]);
}
}
char NvFlowPreprocessor_peekChar(const char* input, NvFlowUint64 inputIdx, NvFlowUint64 input_size)
{
char ret = '\0';
if (inputIdx < input_size)
{
ret = input[inputIdx];
}
return ret;
}
NvFlowBool32 NvFlowPreprocessor_whitespaceButNotNewline(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (NvFlowCharIsWhiteSpaceButNotNewline(c0))
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (!NvFlowCharIsWhiteSpaceButNotNewline(c0))
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_continuation(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\\' && c1 == '\n')
{
*pOutput_size = 2;
*pOutput = input + inputIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_whitespace(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
if (NvFlowPreprocessor_whitespaceButNotNewline(pOutput, pOutput_size, input, input_size, inputIdx))
{
return NV_FLOW_TRUE;
}
return NvFlowPreprocessor_continuation(pOutput, pOutput_size, input, input_size, inputIdx);
}
NvFlowBool32 NvFlowPreprocessor_newline(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (c0 == '\n')
{
*pOutput_size = 1;
*pOutput = input + inputIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_commentLine(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '/' && c1 == '/')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\n')
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_commentMultiLine(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '/' && c1 == '*')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '*' && c1 == '/')
{
inputIdx += 2;
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_comment(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
if (NvFlowPreprocessor_commentLine(pOutput, pOutput_size, input, input_size, inputIdx))
{
return NV_FLOW_TRUE;
}
return NvFlowPreprocessor_commentMultiLine(pOutput, pOutput_size, input, input_size, inputIdx);
}
NvFlowBool32 NvFlowPreprocessor_name(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (NvFlowCharIsAlphaUnderscore(c0))
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (!NvFlowCharIsAlphaNum(c0))
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_number(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (NvFlowCharIsNum(c0))
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (!(NvFlowCharIsAlphaNum(c0) || (c0 == '.')))
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_string(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\"')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\"')
{
inputIdx++;
break;
}
else if (c0 == '\\' && c1 == '\"')
{
inputIdx++;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_char(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\'')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\'')
{
inputIdx++;
break;
}
else if (c0 == '\\' && c1 == '\'')
{
inputIdx++;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_specialChar(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx, char specialChar)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (c0 == specialChar)
{
*pOutput_size = 1;
*pOutput = input + inputIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
void NvFlowPreprocessorTokenize(NvFlowPreprocessor* ptr, const char* input, NvFlowUint64* pTotalTokens, NvFlowPreprocessorToken** pTokens)
{
NvFlowStringPool* pool = ptr->stringPool;
ptr->tempTokens.size = 0u;
NvFlowUint64 input_size = NvFlowStringLength(input);
NvFlowUint64 inputIdx = 0u;
while (inputIdx < input_size)
{
// default to single char token
NvFlowPreprocessorToken token = { eNvFlowPreprocessorTokenType_unknown, "InvalidToken" };
NvFlowUint64 output_size = 1;
const char* output = input + inputIdx;
if (NvFlowPreprocessor_whitespace(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_whitespace;
}
else if (NvFlowPreprocessor_newline(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_newline;
}
else if (NvFlowPreprocessor_comment(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_comment;
}
else if (NvFlowPreprocessor_name(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_name;
}
else if (NvFlowPreprocessor_number(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_number;
}
else if (NvFlowPreprocessor_string(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_string;
}
else if (NvFlowPreprocessor_char(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_char;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '#'))
{
token.type = eNvFlowPreprocessorTokenType_pound;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ','))
{
token.type = eNvFlowPreprocessorTokenType_comma;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '.'))
{
token.type = eNvFlowPreprocessorTokenType_period;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ';'))
{
token.type = eNvFlowPreprocessorTokenType_semicolon;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ':'))
{
token.type = eNvFlowPreprocessorTokenType_colon;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '='))
{
token.type = eNvFlowPreprocessorTokenType_equals;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '*'))
{
token.type = eNvFlowPreprocessorTokenType_asterisk;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '('))
{
token.type = eNvFlowPreprocessorTokenType_leftParenthesis;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ')'))
{
token.type = eNvFlowPreprocessorTokenType_rightParenthesis;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '['))
{
token.type = eNvFlowPreprocessorTokenType_leftBracket;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ']'))
{
token.type = eNvFlowPreprocessorTokenType_rightBracket;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '{'))
{
token.type = eNvFlowPreprocessorTokenType_leftCurlyBrace;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '}'))
{
token.type = eNvFlowPreprocessorTokenType_rightCurlyBrace;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '<'))
{
token.type = eNvFlowPreprocessorTokenType_lessThan;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '>'))
{
token.type = eNvFlowPreprocessorTokenType_greaterThan;
}
// duplicate output to null terminated string
token.str = NvFlowStringFromView(pool, output, output_size);
ptr->tempTokens.pushBack(token);
// advance past token
inputIdx += output_size;
}
auto tokenData = (NvFlowPreprocessorToken*)NvFlowStringPoolAllocate(pool, ptr->tempTokens.size * sizeof(NvFlowPreprocessorToken));
for (NvFlowUint64 idx = 0u; idx < ptr->tempTokens.size; idx++)
{
tokenData[idx] = ptr->tempTokens[idx];
}
*pTokens = tokenData;
*pTotalTokens = ptr->tempTokens.size;
ptr->tempTokens.size = 0u;
}
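// For reference, tokenizing the string "float x = 1.0;" yields the sequence:
// name("float"), whitespace, name("x"), whitespace, equals, whitespace,
// number("1.0"), semicolon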
NvFlowBool32 NvFlowPreprocessorFindKeyInSource(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* keyTokens, NvFlowUint64 keyTokenCount, const NvFlowPreprocessorToken* sourceTokens, NvFlowUint64 sourceTokenCount, NvFlowUint64* pSourceIndex)
{
NvFlowUint64 keyTokenIdx = 0u;
NvFlowUint64 sourceTokenIdx = 0u;
NvFlowUint64 matches = 0u;
NvFlowUint64 keyTestCount = 0u;
while (keyTokenIdx < keyTokenCount && sourceTokenIdx < sourceTokenCount)
{
NvFlowPreprocessorSkipWhitespaceTokens(&keyTokenIdx, keyTokenCount, keyTokens);
NvFlowPreprocessorSkipWhitespaceTokens(&sourceTokenIdx, sourceTokenCount, sourceTokens);
if (keyTokenIdx < keyTokenCount)
{
keyTestCount++;
}
if (keyTokenIdx < keyTokenCount && sourceTokenIdx < sourceTokenCount)
{
if (keyTokens[keyTokenIdx].type == sourceTokens[sourceTokenIdx].type)
{
if (keyTokens[keyTokenIdx].type == eNvFlowPreprocessorTokenType_name)
{
if (NvFlowStringCompare(keyTokens[keyTokenIdx].str, sourceTokens[sourceTokenIdx].str) == 0)
{
matches++;
}
}
else
{
matches++;
}
}
}
keyTokenIdx++;
sourceTokenIdx++;
}
if (pSourceIndex)
{
*pSourceIndex += sourceTokenIdx;
}
return (matches > 0 && matches == keyTestCount) ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters)
{
NvFlowUint64 beginTokenIdx = (*pTokenIdx);
NvFlowPreprocessorRange range = { beginTokenIdx, beginTokenIdx };
if (numDelimiters > 0u)
{
NvFlowUint64 localTokenIdx = beginTokenIdx;
range = NvFlowPreprocessorExtractTokensDelimited(ptr, &localTokenIdx, numTokens, tokens, delimiters[0u]);
(*pTokenIdx) = localTokenIdx;
}
for (NvFlowUint64 delimiterIdx = 1u; delimiterIdx < numDelimiters; delimiterIdx++)
{
NvFlowUint64 localTokenIdx = beginTokenIdx;
NvFlowPreprocessorRange localRange = NvFlowPreprocessorExtractTokensDelimited(ptr, &localTokenIdx, numTokens, tokens, delimiters[delimiterIdx]);
if (localRange.end < range.end)
{
range = localRange;
(*pTokenIdx) = localTokenIdx;
}
}
return range;
}
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter)
{
NvFlowPreprocessorRange range = { (*pTokenIdx), (*pTokenIdx) };
NvFlowPreprocessorTokenType rightType = eNvFlowPreprocessorTokenType_rightParenthesis;
NvFlowPreprocessorTokenType leftType = eNvFlowPreprocessorTokenType_leftParenthesis;
if (delimiter == eNvFlowPreprocessorTokenType_greaterThan)
{
rightType = eNvFlowPreprocessorTokenType_greaterThan;
leftType = eNvFlowPreprocessorTokenType_lessThan;
}
bool delimiterIsScopeEnd = (
delimiter == eNvFlowPreprocessorTokenType_rightParenthesis ||
delimiter == eNvFlowPreprocessorTokenType_rightBracket ||
delimiter == eNvFlowPreprocessorTokenType_rightCurlyBrace ||
delimiter == eNvFlowPreprocessorTokenType_greaterThan
);
int scopeIdx = delimiterIsScopeEnd ? 1 : 0;
for (; (*pTokenIdx) < numTokens; (*pTokenIdx)++)
{
// scope end is 'before' the end symbol
if (tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_rightParenthesis ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_rightBracket ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_rightCurlyBrace ||
tokens[(*pTokenIdx)].type == rightType)
{
scopeIdx--;
}
if (scopeIdx == 0 && tokens[(*pTokenIdx)].type == delimiter)
{
(*pTokenIdx)++;
break;
}
else if (scopeIdx == 0 && delimiter == eNvFlowPreprocessorTokenType_anyWhitespace && NvFlowPreprocessorTokenIsWhitespace(tokens[(*pTokenIdx)]))
{
(*pTokenIdx)++;
break;
}
else
{
range.end++;
}
// scope begin is 'after' the start symbol
if (tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_leftParenthesis ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_leftBracket ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_leftCurlyBrace ||
tokens[(*pTokenIdx)].type == leftType)
{
scopeIdx++;
}
}
return range;
}
const char* NvFlowPreprocessorExtractDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters)
{
ptr->tempStrViews.size = 0u;
NvFlowPreprocessorSkipWhitespaceTokens(pTokenIdx, numTokens, tokens);
NvFlowPreprocessorRange range = NvFlowPreprocessorExtractTokensDelimitedN(ptr, pTokenIdx, numTokens, tokens, numDelimiters, delimiters);
NvFlowPreprocessorToken prevPushedToken = {};
for (NvFlowUint64 idx = range.begin; idx < range.end; idx++)
{
if (NvFlowPreprocessorTokenIsWhitespace(tokens[idx]))
{
continue;
}
else
{
if (tokens[idx].type == eNvFlowPreprocessorTokenType_name ||
tokens[idx].type == eNvFlowPreprocessorTokenType_number)
{
if (prevPushedToken.type == eNvFlowPreprocessorTokenType_name ||
prevPushedToken.type == eNvFlowPreprocessorTokenType_number)
{
ptr->tempStrViews.pushBack(" ");
}
}
ptr->tempStrViews.pushBack(tokens[idx].str);
prevPushedToken = tokens[idx];
}
}
char* output = NvFlowStringConcatN(ptr->stringPool, ptr->tempStrViews.data, ptr->tempStrViews.size);
ptr->tempStrViews.size = 0u;
return output;
}
const char* NvFlowPreprocessorExtractDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter)
{
return NvFlowPreprocessorExtractDelimitedN(ptr, pTokenIdx, numTokens, tokens, 1u, &delimiter);
}
const char* NvFlowPreprocessorExtractDelimitedPreserve(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter)
{
NvFlowPreprocessorRange range = NvFlowPreprocessorExtractTokensDelimited(ptr, pTokenIdx, numTokens, tokens, delimiter);
return NvFlowPreprocessorConcatTokens(ptr, tokens + range.begin, range.end - range.begin);
}
const char* NvFlowPreprocessorExtractIfType(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType type)
{
const char* ret = nullptr;
NvFlowPreprocessorSkipWhitespaceTokens(pTokenIdx, numTokens, tokens);
if ((*pTokenIdx) < numTokens && tokens[(*pTokenIdx)].type == type)
{
ret = tokens[(*pTokenIdx)].str;
(*pTokenIdx)++;
}
return ret;
}
const char* NvFlowPreprocessorConcatTokens(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numTokens)
{
ptr->tempStrViews.size = 0u;
for (NvFlowUint64 idx = 0u; idx < numTokens; idx++)
{
ptr->tempStrViews.pushBack(tokens[idx].str);
}
char* output = NvFlowStringConcatN(ptr->stringPool, ptr->tempStrViews.data, ptr->tempStrViews.size);
ptr->tempStrViews.size = 0u;
return output;
}
char* NvFlowPreprocessorExecute(NvFlowPreprocessor* ptr, const char* input)
{
// increment level
ptr->currentLevel++;
NvFlowUint64 stringStackBegin = ptr->stringStack.size;
// tokenize
NvFlowPreprocessorToken* tokenStack_data = nullptr;
NvFlowUint64 tokenStack_size = 0u;
NvFlowPreprocessorTokenize(ptr, input, &tokenStack_size, &tokenStack_data);
// process tokens
for (NvFlowUint64 tokenIdx = 0u; tokenIdx < tokenStack_size; tokenIdx++)
{
NvFlowPreprocessorToken firstToken = tokenStack_data[tokenIdx];
if (NvFlowPreprocessorTokenIsWhitespace(firstToken))
{
if (ptr->mode == eNvFlowPreprocessorMode_disable_passthrough)
{
// NOP
}
else
{
ptr->stringStack.pushBack(firstToken.str);
}
}
else
{
NvFlowUint64 itemIdx = 0u;
for (; itemIdx < ptr->items.size; itemIdx++)
{
const NvFlowPreprocessorItem item = ptr->items[itemIdx];
NvFlowUint64 compareSourceIdx = tokenIdx;
if (item.isEnabled && NvFlowPreprocessorFindKeyInSource(ptr,
item.tokens_data, item.tokens_size,
tokenStack_data + tokenIdx, tokenStack_size - tokenIdx,
&compareSourceIdx))
{
NvFlowUint64 childTokenBegin = tokenIdx;
NvFlowUint64 childTokenEnd = tokenIdx;
if (item.function.type == eNvFlowPreprocessorType_constant)
{
childTokenEnd = compareSourceIdx;
}
else if (item.function.type == eNvFlowPreprocessorType_statement)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_semicolon);
}
else if (item.function.type == eNvFlowPreprocessorType_statementComma)
{
NvFlowPreprocessorTokenType delimiters[2u] = { eNvFlowPreprocessorTokenType_comma, eNvFlowPreprocessorTokenType_rightParenthesis };
NvFlowPreprocessorExtractTokensDelimitedN(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, 2u, delimiters);
}
else if (item.function.type == eNvFlowPreprocessorType_function)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftParenthesis);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightParenthesis);
}
else if (item.function.type == eNvFlowPreprocessorType_attribute)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftBracket);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightBracket);
}
else if (item.function.type == eNvFlowPreprocessorType_body)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftCurlyBrace);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightCurlyBrace);
}
else if (item.function.type == eNvFlowPreprocessorType_templateInstance)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_lessThan);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_greaterThan);
}
else if (item.function.type == eNvFlowPreprocessorType_index)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftBracket);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightBracket);
NvFlowUint64 childTokenEndWithEquals = childTokenEnd;
NvFlowPreprocessorSkipWhitespaceTokens(&childTokenEndWithEquals, tokenStack_size, tokenStack_data);
// check for =
if (childTokenEndWithEquals < tokenStack_size)
{
const NvFlowPreprocessorToken token = tokenStack_data[childTokenEndWithEquals];
if (token.type == eNvFlowPreprocessorTokenType_equals)
{
childTokenEndWithEquals++;
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEndWithEquals, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_semicolon);
// commit
childTokenEnd = childTokenEndWithEquals;
}
}
}
else if (item.function.type == eNvFlowPreprocessorType_line)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_newline);
}
if (!ptr->items[itemIdx].function.allowRecursion)
{
ptr->items[itemIdx].isEnabled = NV_FLOW_FALSE; // disable recursion
}
if (item.function.substitute)
{
char* substituteStr = item.function.substitute(
ptr,
item.function.userdata,
childTokenEnd - childTokenBegin,
tokenStack_data + childTokenBegin
);
char* substituteOutput = nullptr;
if (ptr->mode == eNvFlowPreprocessorMode_singlePass)
{
substituteOutput = substituteStr;
}
else // eNvFlowPreprocessorMode_default or eNvFlowPreprocessorMode_disable_passthrough
{
substituteOutput = NvFlowPreprocessorExecute(ptr, substituteStr);
}
ptr->stringStack.pushBack(substituteOutput);
}
if (!ptr->items[itemIdx].function.allowRecursion)
{
ptr->items[itemIdx].isEnabled = NV_FLOW_TRUE;
}
// advance tokenIdx
if (childTokenEnd > childTokenBegin)
{
tokenIdx += childTokenEnd - childTokenBegin - 1u;
}
break;
}
}
// If no match found, pass through token
if (itemIdx == ptr->items.size)
{
if (ptr->mode == eNvFlowPreprocessorMode_disable_passthrough)
{
// NOP
}
else
{
ptr->stringStack.pushBack(firstToken.str);
}
}
}
}
// pop string stack
NvFlowUint64 stringStackEnd = ptr->stringStack.size;
char* ret = NvFlowStringConcatN(ptr->stringPool, ptr->stringStack.data + stringStackBegin, stringStackEnd - stringStackBegin);
ptr->stringStack.size = stringStackBegin;
// decrement level
ptr->currentLevel--;
return ret;
}
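// Example usage (illustrative sketch; assumes a valid NvFlowStringPool* pool):
//     NvFlowPreprocessor* pp = NvFlowPreprocessorCreate(pool);
//     NvFlowPreprocessorConstant constant = {};
//     constant.name = "BLOCK_DIM";
//     constant.value = "256";
//     NvFlowPreprocessorAddConstants(pp, 1u, &constant);
//     char* result = NvFlowPreprocessorExecute(pp, "[numthreads(BLOCK_DIM, 1, 1)]");
//     // result should now read "[numthreads(256, 1, 1)]"
//     NvFlowPreprocessorDestroy(pp);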
char* NvFlowPreprocessorExecuteGlobal(NvFlowPreprocessor* ptr, const char* input, void* userdata, char*(*substitute)(NvFlowPreprocessor* ptr, void* userdata, NvFlowPreprocessorGlobalType globalType, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens))
{
// increment level
ptr->currentLevel++;
NvFlowUint64 stringStackBegin = ptr->stringStack.size;
// tokenize
NvFlowPreprocessorToken* tokenStack_data = nullptr;
NvFlowUint64 tokenStack_size = 0u;
NvFlowPreprocessorTokenize(ptr, input, &tokenStack_size, &tokenStack_data);
// process tokens
NvFlowUint64 tokenIdx = 0u;
while (tokenIdx < tokenStack_size)
{
NvFlowPreprocessorToken firstToken = tokenStack_data[tokenIdx];
// skip whitespace, but include in output stream
if (NvFlowPreprocessorTokenIsWhitespace(firstToken))
{
ptr->stringStack.pushBack(firstToken.str);
tokenIdx++;
continue;
}
NvFlowUint64 childTokenBegin = tokenIdx;
NvFlowUint64 childTokenEnd = tokenIdx;
NvFlowPreprocessorGlobalType globalType = eNvFlowPreprocessorGlobalType_unknown;
// check for # condition
if (firstToken.type == eNvFlowPreprocessorTokenType_pound)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_newline);
globalType = eNvFlowPreprocessorGlobalType_line;
}
// check for [ condition
if (firstToken.type == eNvFlowPreprocessorTokenType_leftBracket)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftBracket);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightBracket);
globalType = eNvFlowPreprocessorGlobalType_attribute;
}
// attempt to detect a function declaration, unless line was detected
if (childTokenBegin == childTokenEnd)
{
// names and whitespace are acceptable up to initial (
while (childTokenEnd < tokenStack_size)
{
const NvFlowPreprocessorToken token = tokenStack_data[childTokenEnd];
if (!(token.type == eNvFlowPreprocessorTokenType_name || NvFlowPreprocessorTokenIsWhitespace(token)))
{
break;
}
childTokenEnd++;
}
if (childTokenBegin != childTokenEnd && tokenStack_data[childTokenEnd].type == eNvFlowPreprocessorTokenType_leftParenthesis)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftParenthesis);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightParenthesis);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftCurlyBrace);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightCurlyBrace);
globalType = eNvFlowPreprocessorGlobalType_function;
}
else
{
// invalidate
childTokenEnd = childTokenBegin;
}
}
// attempt to extract a simple statement
if (childTokenBegin == childTokenEnd)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_semicolon);
globalType = eNvFlowPreprocessorGlobalType_statement;
}
if (childTokenBegin == childTokenEnd)
{
// not identified, force advance
childTokenEnd++;
}
if (globalType != eNvFlowPreprocessorGlobalType_unknown)
{
char* substituteOutput = nullptr;
if (substitute)
{
substituteOutput = substitute(ptr, userdata, globalType, childTokenEnd - childTokenBegin, tokenStack_data + childTokenBegin);
}
if (substituteOutput)
{
ptr->stringStack.pushBack(substituteOutput);
}
}
else
{
for (NvFlowUint64 localTokenIdx = childTokenBegin; localTokenIdx < childTokenEnd; localTokenIdx++)
{
ptr->stringStack.pushBack(tokenStack_data[localTokenIdx].str);
}
}
// advance tokenIdx
tokenIdx = childTokenEnd;
}
// pop string stack
NvFlowUint64 stringStackEnd = ptr->stringStack.size;
char* ret = NvFlowStringConcatN(ptr->stringPool, ptr->stringStack.data + stringStackBegin, stringStackEnd - stringStackBegin);
ptr->stringStack.size = stringStackBegin;
// decrement level
ptr->currentLevel--;
return ret;
}
| 34,949 | C++ | 32.254044 | 262 | 0.741881 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/pyproject.toml | [tool.poetry]
name = "openusd-code-samples"
version = "1.1.0"
description = "Universal Scene Description (OpenUSD) code samples in Python, C++, and USDA for common development features and tasks."
license = "Apache-2.0"
authors = []
readme = "README.md"
packages = [{include = "source"}]
[tool.poetry.dependencies]
python = ">=3.8, <3.11"
numpy = "1.24.1"
usd-core = "23.5"
types-usd = "~23.5.4"
[tool.poetry.group.docs.dependencies]
myst-parser = "0.18.0"
rstcloth = "0.5.4"
Sphinx = "4.5.0"
sphinx-design = "0.2.0"
sphinx-rtd-theme = "1.0.0"
toml = "0.10.2"
# Pinned for security patches
certifi = "2023.7.22"
markdown-it-py = "2.2.0"
pygments = "2.16.1"
requests = "2.31.0"
urllib3 = "1.26.18"
jinja2 = "3.1.3"
[tool.poetry.dev-dependencies]
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 834 | TOML | 21.567567 | 134 | 0.669065 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/CODE-SAMPLE-GUIDELINES.md | # Code Sample Guidelines
## Samples Format
This image shows the file structure that contains two Code Samples for cameras.

Our Code Samples are stored in the source directory, organized by category. Each sample has its files, including the actual sample code, in its own directory.
In this example, we have two camera Code Samples. The paths to these two Code Sample folders are:
`source/cameras/create-orthographic-camera`
`source/cameras/create-perspective-camera`
**Within each Code Sample folder are the following files:**
| File(s) | Purpose |
| -----|----- |
| config.toml | Contains the title and metadata: description and SEO keywords (see the example below) |
| header.md | The overview for this code sample |
| Code Sample "flavor" file(s) | See below |
| Markdown file for each "flavor" | See below |
The header file is an overview for all of the flavors. It can contain markdown formatting, including URLs and markdown directives.
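For reference, a minimal `config.toml` might look like the following. The `[core]` and `[metadata]` sections reflect the fields that `build_docs.py` reads (`core.title`, `metadata.description`, and `metadata.keywords`); treat the values as placeholders:
```
[core]
title = "Create an Orthographic Camera"

[metadata]
description = "Code samples showing how to create an orthographic camera."
keywords = ["OpenUSD", "USD", "camera", "orthographic"]
```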
**Each Code Sample should have at least one "flavor":**
| Flavor Source File Name | Language and USD type |
| -----|----- |
| py_usd.py | Python using Pixar USD API |
| py_omni_usd.py | Python using omni.usd extension |
| py_kit_cmds.py | Python using Kit commands |
| cpp_usd.cpp | C++ using Pixar USD API |
| cpp_omni_usd.cpp | C++ using omni.usd extension |
| cpp_kit_cmds.cpp | C++ using Kit commands |
| usda.usda | USDA (text) file |
Each flavor can have more than one sample (variations). In this case, we append `_var<X>`, where X starts at 1 and increments for as many sample variations as needed.
Example: `py_usd.py`, `py_usd_var1.py`, `py_usd_var2.py`, etc.
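A flavor file is just an ordinary source file containing the sample code. For illustration only (this is not an actual sample from the repository), a minimal `py_usd.py` could look like:
```
from pxr import Usd

# Create an in-memory stage and define a prim to operate on
stage = Usd.Stage.CreateInMemory()
prim = stage.DefinePrim("/World")
```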
**Markdown files:**
Every flavor that has a sample needs exactly one markdown file, no matter how many variations are included. It has the same name as the flavor, but with the `.md` extension.
For example, if you have `py_usd.py` samples, you'll need a `py_usd.md` file. In the markdown file, you'll need to use the `literalinclude` directive.
Example:
```
**Convert to Numpy Array**
To convert a VtArray to a Numpy Array, simply pass the VtArray object to `numpy.array` constructor.
``` {literalinclude} py_usd.py
:language: py
```
**Convert from Numpy Array**
To convert a Numpy Array to a VtArray, you can use `FromNumpy()` from the VtArray class you want to convert to.
``` {literalinclude} py_usd_var1.py
:language: py
```
```
This example includes two samples, with a description for each one.
| Language code | File type |
| -----|----- |
| py | Python |
| c++ | C++/cpp |
| usd | USDA |
## Building the Samples
When all of your files are in place, you should build and verify that your samples are correctly set up by running the build script:
```
>poetry run python build_docs.py
```
If there are no errors, you can then view it by loading the ``index.html`` file, located in the ``sphinx/_build`` folder, in a browser.

There are two ways to do this. The first way:
1) Select the ``index.html`` file
2) Right click and select ``Copy Path``
3) Paste the path into the address bar of your web browser

The second way:
1) Select the ``index.html`` file so it's showing in a VS Code window
2) Press ``Alt-B`` and it will be launched in your default web browser.
## Markdown Cheatsheet
### Links
Create links using typical markdown syntax.
Here's an external link:
[USD Data Types documentation](https://docs.omniverse.nvidia.com/dev-guide/latest/dev_usd/quick-start/usd-types.html)
You can also link to other code samples using relative paths. Here's a link to a code sample in the same category:
[Add a Payload](add-payload)
Use the folder name for the code sample. The folder name will be the final markdown/HTML file name.
Here's a link to a code sample in different category:
[Add a Payload](../prims/check-prim-exists)
### Admonitions
https://myst-parser.readthedocs.io/en/latest/syntax/admonitions.html
```{tip}
https://myst-parser.readthedocs.io/en/latest/syntax/admonitions.html
```
| 4,126 | Markdown | 30.030075 | 179 | 0.710131 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/CONTRIBUTING.md |
## OpenUSD Code Samples OSS Contribution Rules
#### Issue Tracking
* All enhancement, bugfix, or change requests must begin with the creation of an [OpenUSD Code Samples Issue Request](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples/issues).
* The issue request must be reviewed by OpenUSD Code Samples engineers and approved prior to code review.
#### Coding Guidelines
- All source code contributions must strictly adhere to the [OpenUSD Code Samples Guidelines](CODE-SAMPLE-GUIDELINES.md).
- In addition, please follow the existing conventions in the relevant file, submodule, module, and project when you add new code or when you extend/fix existing functionality.
- Avoid introducing unnecessary complexity into existing code so that maintainability and readability are preserved.
- All development should happen against the "main" branch of the repository. Please make sure the base branch of your pull request is set to the "main" branch when filing your pull request.
- Try to keep pull requests (PRs) as concise as possible:
- Avoid committing commented-out code.
- Wherever possible, each PR should address a single concern. If there are several otherwise-unrelated things that should be fixed to reach a desired endpoint, our recommendation is to open several PRs and indicate the dependencies in the description. The more complex the changes are in a single PR, the more time it will take to review those changes.
- Write commit titles using imperative mood and [these rules](https://chris.beams.io/posts/git-commit/), and reference the Issue number corresponding to the PR. Following is the recommended format for commit texts:
```
Issue #<Issue Number> - <Commit Title>
<Commit Body>
```
- Ensure that the Sphinx build log is clean, meaning no warnings or errors should be present.
- Ensure that all code blocks execute correctly prior to submitting your code.
- All OSS components must contain accompanying documentation (READMEs) describing the functionality, dependencies, and known issues.
- See `README.md` for existing samples and plugins for reference.
- All OSS components must have an accompanying test.
- If introducing a new component, such as a plugin, provide a test sample to verify the functionality.
- Make sure that you can contribute your work to open source (no license and/or patent conflict is introduced by your code). You will need to [`sign`](#signing-your-work) your commit.
- Thanks in advance for your patience as we review your contributions; we do appreciate them!
#### Pull Requests
Developer workflow for code contributions is as follows:
1. Developers must first [fork](https://help.github.com/en/articles/fork-a-repo) the [upstream](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples) OpenUSD Code Samples repository.
2. Git clone the forked repository.
```bash
git clone https://github.com/YOUR_USERNAME/YOUR_FORK.git OpenUSD-Code-Samples
```
3. Create a branch off of the "main" branch and commit changes. See [Coding Guidelines](#coding-guidelines) for commit formatting rules.
```bash
# Create a branch off of the "main" branch
git checkout -b <local-branch> <remote-branch>
git add <path-to-files>
# -s flag will "sign-off" on your commit, we require all contributors to sign-off on their commits. See below for more
git commit -s -m "Issue #<Issue Number> - <Commit Title>"
```
4. Push Changes to the personal fork.
```bash
# Push the commits to a branch on the fork (remote).
git push -u origin <local-branch>:<remote-branch>
```
5. Please make sure that your pull requests are clean. Use the rebase and squash git facilities as needed to ensure that the pull request is as clean as possible.
6. Once the code changes are staged on the fork and ready for review, a [Pull Request](https://help.github.com/en/articles/about-pull-requests) (PR) can be [requested](https://help.github.com/en/articles/creating-a-pull-request) to merge the changes from your branch to the upstream "main" branch.
* Exercise caution when selecting the source and target branches for the PR.
* Creation of a PR creation kicks off the code review process.
* At least one OpenUSD Code Samples engineer will be assigned for the review.
* While under review, mark your PRs as work-in-progress by prefixing the PR title with [WIP].
7. Since there is no CI/CD process in place yet, the PR will be accepted and the corresponding issue closed only after adequate testing has been completed, manually, by the developer and/or OpenUSD Code Samples engineer reviewing the code.
#### Signing Your Work
* We require that all contributors "sign-off" on their commits. This certifies that the contribution is your original work, or you have rights to submit it under the same license, or a compatible license.
* Any contribution which contains commits that are not Signed-Off will not be accepted.
* To sign off on a commit you simply use the `--signoff` (or `-s`) option when committing your changes:
```bash
$ git commit -s -m "Add cool feature."
```
This will append the following to your commit message:
```
Signed-off-by: Your Name <[email protected]>
```
* Full text of the DCO:
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
``` | 6,676 | Markdown | 48.095588 | 354 | 0.75 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/build_docs.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import os
from pathlib import Path
import shutil
from rstcloth import RstCloth
import sphinx.cmd.build
import toml
REPO_ROOT = Path(__file__).parent
SOURCE_DIR = REPO_ROOT / "source"
SPHINX_DIR = REPO_ROOT / "sphinx"
SPHINX_CODE_SAMPLES_DIR = SPHINX_DIR / "usd"
# 0 = normal toctree, 1 = :doc: tags
TOCTREE_STYLE = 0
REPLACE_USDA_EXT = True
STRIP_COPYRIGHTS = True
IMAGE_TYPES = {".jpg" , ".gif"}
logger = logging.getLogger(__name__)
def main():
# flush build dir
if os.path.exists(SPHINX_CODE_SAMPLES_DIR):
shutil.rmtree(SPHINX_CODE_SAMPLES_DIR)
SPHINX_CODE_SAMPLES_DIR.mkdir(exist_ok=False)
samples = {}
# each config.toml should be a sample
for config_file in SOURCE_DIR.rglob("config.toml"):
category_name = config_file.parent.parent.name
sample_name = config_file.parent.name
if category_name not in samples:
samples[category_name] = []
logger.info(f"processing: {sample_name}")
sample_source_dir = config_file.parent
sample_output_dir = SPHINX_CODE_SAMPLES_DIR / sample_source_dir.parent.relative_to(SOURCE_DIR) / f"{sample_name}"
# make sure category dir exists
category_output_dir = SPHINX_CODE_SAMPLES_DIR / sample_source_dir.parent.relative_to(SOURCE_DIR)
if not os.path.exists(category_output_dir):
category_output_dir.mkdir(exist_ok=False)
sample_rst_out = category_output_dir / f"{sample_name}.rst"
with open(config_file) as f:
content = f.read()
config = toml.loads(content)
title = config["core"]["title"]
samples[category_name].append([sample_name, title])
sample_output_dir.mkdir(exist_ok=True)
with open(sample_rst_out, "w") as f:
doc = RstCloth(f)
if TOCTREE_STYLE == 1:
doc._add(":orphan:")
doc.newline()
doc.directive("meta",
fields=[
('description', config["metadata"]["description"]),
('keywords', ", ".join(config["metadata"]["keywords"]))
])
doc.newline()
doc.title(config["core"]["title"], overline=False)
doc.newline()
md_file_path = sample_source_dir / "header.md"
new_md_name = sample_name + "_header.md"
out_md = category_output_dir / new_md_name
prepend_include_path(md_file_path, out_md, sample_name)
fields = [("parser" , "myst_parser.sphinx_")]
doc.directive( "include", new_md_name, fields)
doc.newline()
doc.newline()
doc.directive("tab-set")
doc.newline()
code_flavors = {"USD Python" : "py_usd.md",
"Python omni.usd" : "py_omni_usd.md",
"Python Kit Commands" : "py_kit_cmds.md",
"USD C++" : "cpp_usd.md",
"C++ omni.usd" : "cpp_omni_usd.md",
"C++ Kit Commands" : "cpp_kit_cmds.md",
"usdview": "py_usdview.md",
"USDA" : "usda.md",
}
for tab_name in code_flavors:
md_file_name = code_flavors[tab_name]
md_file_path = sample_source_dir / code_flavors[tab_name]
if md_file_path.exists():
doc.directive("tab-item", tab_name, None, None, 3)
doc.newline()
# make sure all md flavor names are unique
new_md_name = sample_name + "_" + md_file_name
out_md = category_output_dir / new_md_name
prepend_include_path(md_file_path, out_md, sample_name)
fields = [("parser" , "myst_parser.sphinx_")]
doc.directive( "include", new_md_name, fields, None, 6)
doc.newline()
# copy all samples
ignore=shutil.ignore_patterns('*.md', 'config.toml')
if REPLACE_USDA_EXT:
ignore=shutil.ignore_patterns('*.md', 'config.toml', '*.usda')
shutil.copytree(sample_source_dir, sample_output_dir, ignore=ignore, dirs_exist_ok=True )
# copy any usda's to .py
if REPLACE_USDA_EXT:
for filename in os.listdir(sample_source_dir):
base_file, ext = os.path.splitext(filename)
if ext == ".usda":
orig = str(sample_source_dir) + "/" + filename
newname = str(sample_output_dir) + "/" + str(base_file) + ".py"
shutil.copy(orig, newname)
# strip out copyright comments in output files
if STRIP_COPYRIGHTS:
for filename in os.listdir(sample_output_dir):
full_path = os.path.join(sample_output_dir, filename)
strip_copyrights(full_path)
doc.newline()
generate_sphinx_index(samples)
sphinx.cmd.build.main([str(SPHINX_DIR), str(SPHINX_DIR / "_build"), "-b", "html"])
def strip_copyrights(filename):
base_file, ext = os.path.splitext(filename)
if ext in IMAGE_TYPES:
print(f"strip_copyrights, skip image :: {filename}")
return
with open(filename) as sample_file:
sample_lines = sample_file.readlines()
    # strip copyright headers (.py files start with "# SPDX-", .cpp files with "// SPDX-");
    # guard against empty files so we never index into an empty list
    while sample_lines and sample_lines[0].startswith(("# SPDX-", "// SPDX-")):
        sample_lines.pop(0)
    # get rid of the empty spacer line left behind, if any
    if sample_lines and len(sample_lines[0].strip()) < 1:
        sample_lines.pop(0)
with open(filename, "w") as sample_file:
for line in sample_lines:
sample_file.write(line)
def prepend_include_path(in_file_path: str, out_file_path: str, dir_path: str):
with open(in_file_path) as mdf:
md_data = mdf.read()
md_lines = md_data.split("\n")
    for lc, line in enumerate(md_lines):
        inc_str = "``` {literalinclude}"
        sp = line.split(inc_str)
        if len(sp) > 1:
            filename = sp[1].strip()
            if REPLACE_USDA_EXT:
                sfn = filename.split(".")
                if len(sfn) > 1 and sfn[1] == "usda":
                    filename = sfn[0] + ".py"
            newl = inc_str + " " + dir_path + "/" + filename
            md_lines[lc] = newl
with open(out_file_path,"w") as nmdf:
for line in md_lines:
nmdf.writelines(line + "\n")
def generate_sphinx_index(samples):
cat_names_path = SOURCE_DIR / "category-display-names.toml"
cat_names = toml.load(cat_names_path)["name_mappings"]
print(f"CAT_NAMES: {cat_names}")
ref_links = {"variant-sets" : "variant_sets_ref"}
index_rst = SPHINX_DIR / "usd.rst"
with open(index_rst, "w") as f:
doc = RstCloth(f)
doc.directive("include", "usd_header.rst")
doc.newline()
#doc.title("OpenUSD Code Samples")
for category, cat_samples in samples.items():
if category in ref_links:
doc.ref_target(ref_links[category])
doc.newline()
human_readable = readable_from_category_dir_name(category)
if category in cat_names.keys():
human_readable = cat_names[category]
doc.h2(human_readable)
fields = [
#("caption", human_readable),
("titlesonly", ""),
]
doc.newline()
if TOCTREE_STYLE == 0:
sample_paths = [f"usd/{category}/{sample[0]}" for sample in cat_samples]
doc.directive("toctree", None, fields, sample_paths)
doc.newline()
elif TOCTREE_STYLE == 1:
#doc.h2(human_readable)
doc.newline()
for sample, title in cat_samples:
                    doc._add(f"- :doc:`{title} <usd/{category}/{sample}>`")
doc.newline()
doc.directive("include", "usd_footer.rst")
doc.newline()
def readable_from_category_dir_name(category):
    """Turn a category directory name into a display name, e.g. "variant-sets" -> "Variant Sets"."""
    sub_strs = category.split("-")
readable = ""
for sub in sub_strs:
readable += sub.capitalize() + " "
return readable.strip()
if __name__ == "__main__":
    # Create an argument parser. No command-line options are currently defined,
    # but argparse still provides --help and rejects unknown arguments.
    parser = argparse.ArgumentParser(description='Build rST documentation from code sample source.')
    args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
main() | 9,545 | Python | 34.225092 | 122 | 0.503929 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/README.md | # OpenUSD Code Samples
[](https://opensource.org/licenses/Apache-2.0) [](https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/usd.html)
This repository contains useful Universal Scene Description (OpenUSD) code samples in Python, C++, and USDA. If you want to browse the code samples to use them, you can see them fully rendered in the [OpenUSD Code Samples documentation](https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/usd.html) page.
## Configuration
This repository uses [Poetry](https://python-poetry.org/docs/) for dependency management. If you're new to Poetry, you don't need to know much more than the commands we use in the [build instructions](#How-to-Build). To make authoring code samples and contributing easier, we recommend installing:
1. Install Python (any version from 3.8 to 3.10).
1. [Install Poetry](https://python-poetry.org/docs/#installation)
## How to Build
1. `poetry install`
1. `poetry run python build_docs.py`
1. In a web browser, open `sphinx/_build/index.html`
## Have an Idea for a New Code Sample?
Ideas for new code samples that could help other developers are always welcome. Please [create a new issue](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples/issues) requesting a new code sample and add the _new request_ label. Someone from the NVIDIA team or OpenUSD community will pick it up. If you can contribute it yourself, even better!
## Find a Typo or an Error?
Please let us know if you find any mistakes or non-working code samples. [File an issue](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples/issues) with a _bug_ label to let us know and so we can address it.
## Contributing
Contributions are welcome! If you would like to contribute, please read our [Contributing Guidelines](./CONTRIBUTING.md) to understand how to contribute. Also, check out the [Code Sample Guidelines](CODE-SAMPLE-GUIDELINES.md) to understand how code sample files and folders are structured in this repository and how to follow our code sample style.
## Disclosures
The goal of this repository is to help developers learn OpenUSD and be more productive. To that end, NVIDIA reserves the right to use the source code and documentation in this repository for the purpose of training and/or benchmarking of an AI code assistant for OpenUSD developers.
| 2,507 | Markdown | 88.571425 | 359 | 0.780614 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_kit_cmds.md | Here you can add any info specific to the code sample flavor and introduce the code sample.
You should include your code sample as a separate source code file like this:
``` {literalinclude} py_kit_cmds.py
:language: py
```
You should use these includes instead of putting code in markdown code blocks. The first source code file should be named the same as the markdown file. If you want to show any variations of the code sample or expand on it, you should then include source code files with the suffix `_var#`, as shown below.
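For example, a variation include might look like this (the `py_kit_cmds_var1.py` filename here is hypothetical):
``` {literalinclude} py_kit_cmds_var1.py
:language: py
```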
Variations are not required and you generally won't need them, but the option is available if you find your code sample could benefit from showing variations. | 663 | Markdown | 65.399993 | 287 | 0.785822 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Add all the imports that you need for your snippets
from pxr import Usd, Sdf, UsdGeom
def descriptive_code_sample_name(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
"""Docstring is optional. Use Google style docstrings if you choose to add them.
    The code sample should be defined as a function. Use a descriptive name for the function.
Use function arguments to:
- Pass in any objects that your code sample expects to exist (e.g. a Stage)
- Pass in Paths rather than hard-coding them.
Use type-hinting to help learners understand what type every variable is. Don't assume they'll know.
Args:
        stage (Usd.Stage): The stage on which to define the camera.
        prim_path (str, optional): The prim path for the new camera. Defaults to "/World/MyPerspCam".
Returns:
        UsdGeom.Camera: The newly defined camera.
"""
camera_path = Sdf.Path(prim_path)
usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
return usd_camera
#############
# Full Usage
#############
# Here you will show your code sample in context. Add any additional imports
# that you may need for your "Full Usage" code
# You can create an in-memory stage and do any stage setup before calling
# your code sample.
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
# Call your code sample function
camera = descriptive_code_sample_name(stage, cam_path)
# print out the result
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Do some basic asserts to show learners how to interact with the results.
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.perspective
| 2,131 | Python | 35.75862 | 105 | 0.725481 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_omni_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Source code for code block in the py_omni_usd flavor. See the py_usd.py for a
full example of writing a code sample.
You should use omni.usd.get_stage() instead of creating an in-memory stage
for the Full Usage part since this is meant to run in Omniverse.
""" | 403 | Python | 39.399996 | 98 | 0.764268 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_usd.md | Here you can add any info specific to the code sample flavor and introduce the code sample.
You should include your code sample as a separate source code file like this:
``` {literalinclude} py_usd.py
:language: py
```
You should use these includes instead of putting code in markdown code blocks. The first source code file should be named the same as the markdown file. If you want to show any variations of the code sample or expand on it, you should then include source code files with the suffix `_var#`.
``` {literalinclude} py_usd_var1.py
:language: py
```
Variations are not required and you generally won't need them, but the option is available if you find your code sample could benefit from showing variations. | 713 | Markdown | 49.999996 | 287 | 0.774194 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Source code for code block in the py_kit_cmds flavor. See the py_usd.py for a
full example of writing a code sample.
You should use omni.usd.get_stage() instead of creating an in-memory stage
for the Full Usage part since this is meant to run in Omniverse.
""" | 403 | Python | 39.399996 | 98 | 0.764268 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/usda.md | Here you can say something before showing the USDA example. You can use the usda string generated by the py_usd flavor.
``` {literalinclude} usda.usda
:language: usd
``` | 170 | Markdown | 41.74999 | 119 | 0.747059 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/config.toml | [core]
# The title for this code sample. Used to name the page.
title = "My Code Example Code Sample"
[metadata]
#A concise description of the code sample for SEO.
description = "Universal Scene Description (OpenUSD) code samples to show how to contribute."
# Put in SEO keywords relevant to this code sample.
keywords = ["OpenUSD", "USD", "code sample", "snippet", "Python", "C++", "example"] | 394 | TOML | 42.888884 | 93 | 0.725888 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_omni_usd.md | Here you can add any info specific to the code sample flavor and introduce the code sample.
You should include your code sample as a separate source code file like this:
``` {literalinclude} py_omni_usd.py
:language: py
```
You should use these includes instead of putting code in markdown code blocks. The first source code file should be named the same as the markdown file. If you want to show any variations of the code sample or expand on it, you should then include source code files with the suffix `_var#`.
Variations are not required and you generally won't need them, but the option is available if you find your code sample could benefit from showing variations. | 663 | Markdown | 65.399993 | 287 | 0.785822 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_usd_var1.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Source code for another code block in the py_usd flavor. See the py_usd.py for a
full example of writing a code sample.
""" | 265 | Python | 36.999995 | 98 | 0.758491 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/category-display-names.toml | [name_mappings]
hierarchy-traversal = "Hierarchy & Traversal"
references-payloads = "References & Payloads"
| 108 | TOML | 26.249993 | 45 | 0.777778 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_kit_cmds.md | The `CreatePrimWithDefaultXform` command in Kit can create a Camera prim and you can optionally set camera attributes values during creation. You must use the attribute token names as the keys for the `attributes` dictionary. In Omniverse applications, you can explore the names by hovering over a property label in the Property Window and reading it from the tooltip.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 423 | Markdown | 83.799983 | 368 | 0.799054 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Sdf, Usd, UsdGeom
def create_orthographic_camera(stage: Usd.Stage, prim_path: str="/World/MyOrthoCam") -> UsdGeom.Camera:
"""Create an orthographic camera
Args:
stage (Usd.Stage): A USD Stage to create the camera on.
prim_path (str, optional): The prim path for where to create the camera. Defaults to "/World/MyOrthoCam".
"""
camera_path = Sdf.Path(prim_path)
usd_camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.orthographic)
return usd_camera
#############
# Full Usage
#############
cam_path = "/World/MyOrthoCam"
stage: Usd.Stage = Usd.Stage.CreateInMemory()
root_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(root_prim.GetPrim())
camera = create_orthographic_camera(stage, cam_path)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check that the camera was created
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.orthographic
| 1,298 | Python | 31.474999 | 113 | 0.718028 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
```
| 50 | Markdown | 9.199998 | 30 | 0.62 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import UsdGeom
def create_orthographic_camera(prim_path: str="/World/MyOrthoCam"):
"""Create an orthographic camera
Args:
prim_path (str, optional): The prim path where the camera should be created. Defaults to "/World/MyOrthoCam".
"""
omni.kit.commands.execute("CreatePrimWithDefaultXform",
prim_type="Camera",
prim_path="/World/MyOrthoCam",
attributes={"projection": UsdGeom.Tokens.orthographic}
)
#############
# Full Usage
#############
import omni.usd
# Create an orthographic camera at /World/MyOrthoCam
path = "/World/MyOrthoCam"
create_orthographic_camera(path)
# Check that the camera was created
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(path)
assert prim.IsValid()
assert prim.GetTypeName() == "Camera"
projection = prim.GetAttribute("projection").Get()
assert projection == UsdGeom.Tokens.orthographic | 1,082 | Python | 28.27027 | 117 | 0.711645 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/usda.md | This is an example USDA result from creating a Camera and setting the `projection` to `orthographic`. All other Properties are using the default values from the `UsdGeomCamera` schema definition.
``` {literalinclude} usda.usda
:language: usd
``` | 246 | Markdown | 60.749985 | 195 | 0.780488 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/config.toml | [core]
title = "Create an Orthographic Camera"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for creating an orthographic camera prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "camera", "UsdGeom", "Orthographic"] | 278 | TOML | 45.499992 | 110 | 0.726619 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/header.md | You can define a new camera on a stage using `UsdGeom.Camera`. The Camera prim has a `projection` attribute that can be set to `orthographic`.
| 143 | Markdown | 70.999965 | 142 | 0.769231 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_kit_cmds.md | The `CreatePrimWithDefaultXform` command in Kit can create a Camera prim and you can optionally set camera attributes values during creation. You must use the attribute token names as the keys for the `attributes` dictionary. In Omniverse applications, you can explore the names by hovering over a property label in the Property Window and reading it from the tooltip.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 423 | Markdown | 83.799983 | 368 | 0.799054 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf, UsdGeom
def create_perspective_camera(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
camera_path = Sdf.Path(prim_path)
usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
return usd_camera
#############
# Full Usage
#############
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create the perspective camera at /World/MyPerspCam
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
camera = create_perspective_camera(stage, cam_path)
# Export the complete Stage as a string and print it.
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check that the camera was created
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.perspective
| 1,288 | Python | 33.837837 | 102 | 0.743789 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd.md | With the USD API, you can use `UsdGeom.Camera.CreateProjectionAttr()` to create the `projection` attribute and then set the value with `Usd.Attribute.Set()`.
``` {literalinclude} py_usd.py
:language: py
```
Here is how you can set some other common attributes on the camera:
``` {literalinclude} py_usd_var1.py
:language: py
``` | 335 | Markdown | 32.599997 | 157 | 0.728358 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import UsdGeom
def create_perspective_camera(prim_path: str="/World/MyPerspCam"):
"""Create a perspective camera
Args:
prim_path (str, optional): The prim path where the camera should be created. Defaults to "/World/MyPerspCam".
"""
omni.kit.commands.execute("CreatePrimWithDefaultXform",
prim_type="Camera",
prim_path=prim_path,
attributes={
"projection": UsdGeom.Tokens.perspective,
"focalLength": 35,
"horizontalAperture": 20.955,
"verticalAperture": 15.2908,
"clippingRange": (0.1, 100000)
}
)
#############
# Full Usage
#############
import omni.usd
# Create a perspective camera at /World/MyPerspCam
path = "/World/MyPerspCam"
create_perspective_camera(path)
# Check that the camera was created
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(path)
assert prim.IsValid()
assert prim.GetTypeName() == "Camera"
projection = prim.GetAttribute("projection").Get()
assert projection == UsdGeom.Tokens.perspective | 1,239 | Python | 27.181818 | 117 | 0.673123 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/usda.md | This is an example USDA result from creating a Camera and setting the `projection` to `perspective`. All other Properties are using the default values from the `UsdGeomCamera` schema definition.
``` {literalinclude} usda.usda
:language: usd
``` | 245 | Markdown | 60.499985 | 194 | 0.779592 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/config.toml | [core]
title = "Create a Perspective Camera"
[metadata]
description = "Universal Scene Description (OpenUSD) code sample to create a perspective camera."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "camera", "perspective"] | 253 | TOML | 41.333326 | 98 | 0.719368 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd_var1.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf, UsdGeom
def create_perspective_35mm_camera(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
camera_path = Sdf.Path(prim_path)
usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
usd_camera.CreateFocalLengthAttr().Set(35)
# Set a few other common attributes too.
usd_camera.CreateHorizontalApertureAttr().Set(20.955)
usd_camera.CreateVerticalApertureAttr().Set(15.2908)
usd_camera.CreateClippingRangeAttr().Set((0.1,100000))
return usd_camera
#############
# Full Usage
#############
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create the perspective camera at path /World/MyPerspCam with 35mm
# set for the focal length.
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
camera = create_perspective_35mm_camera(stage, cam_path)
# Export the complete Stage as a string and print it.
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check the camera attributes
focal_len = camera.GetFocalLengthAttr().Get()
assert focal_len == 35.0
clip_range = camera.GetClippingRangeAttr().Get()
assert clip_range == (0.1,100000)
| 1,533 | Python | 34.674418 | 107 | 0.740378 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/header.md | You can define a new camera on a stage using `UsdGeom.Camera`. The Camera prim has a `projection` attribute that can be set to `perspective`.
| 142 | Markdown | 70.499965 | 141 | 0.767606 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_kit_cmds.md | You can use the `ChangeProperty` command from the `omni.kit.commands` extension to change the attribute of any prim. In Omniverse applications, you can discover the attribute name by hovering over the label in the Property Window and inspecting the tooltip.
You can find more information about the Kit command API at the [omni.kit.commands extension documentation](https://docs.omniverse.nvidia.com/kit/docs/omni.kit.commands/latest/API.html).
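At its core, the command is a single `execute` call. Here is a minimal sketch, using a placeholder prim path and value:
```python
import omni.kit.commands
from pxr import Sdf

# Set the `visibility` attribute of /World/Cube to "invisible".
# Attributes are addressed as "<prim path>.<attribute name>".
omni.kit.commands.execute(
    "ChangeProperty",
    prop_path=Sdf.Path("/World/Cube.visibility"),
    value="invisible",
    prev=None,
)
```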
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 499 | Markdown | 70.428561 | 257 | 0.791583 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Union
from pxr import Sdf, Usd, UsdGeom
def get_visibility_attribute(
stage: Usd.Stage, prim_path: str
) -> Union[Usd.Attribute, None]:
"""Return the visibility attribute of a prim"""
path = Sdf.Path(prim_path)
prim = stage.GetPrimAtPath(path)
if not prim.IsValid():
return None
visibility_attribute = prim.GetAttribute("visibility")
return visibility_attribute
def hide_prim(stage: Usd.Stage, prim_path: str):
"""Hide a prim
Args:
stage (Usd.Stage, required): The USD Stage
prim_path (str, required): The prim path of the prim to hide
"""
visibility_attribute = get_visibility_attribute(stage, prim_path)
if visibility_attribute is None:
return
visibility_attribute.Set("invisible")
def show_prim(stage: Usd.Stage, prim_path: str):
"""Show a prim
Args:
stage (Usd.Stage, required): The USD Stage
prim_path (str, required): The prim path of the prim to show
"""
visibility_attribute = get_visibility_attribute(stage, prim_path)
if visibility_attribute is None:
return
visibility_attribute.Set("inherited")
#############
# Full Usage
#############
# Create a simple in-memory stage with a Cube
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim_path = Sdf.Path("/World")
default_prim = UsdGeom.Xform.Define(stage, default_prim_path)
stage.SetDefaultPrim(default_prim.GetPrim())
cube_path = default_prim_path.AppendPath("Cube")
cube = UsdGeom.Cube.Define(stage, cube_path)
# The prim is initially visible. Assert so and then demonstrate how to toggle
# it off and on
assert get_visibility_attribute(stage, cube_path).Get() == "inherited"
hide_prim(stage, cube_path)
assert get_visibility_attribute(stage, cube_path).Get() == "invisible"
show_prim(stage, cube_path)
assert get_visibility_attribute(stage, cube_path).Get() == "inherited"
# Print the USDA out
usda = stage.GetRootLayer().ExportToString()
print(usda)
| 2,246 | Python | 30.647887 | 98 | 0.702137 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_usd.md | You can use the USD API [Usd.Prim.GetAttribute()](https://openusd.org/release/api/class_usd_prim.html#a31225ac7165f58726f000ab1d67e9e61) to get an attribute of a prim and then use [Usd.Attribute.Set()](https://openusd.org/release/api/class_usd_attribute.html#a151e6fde58bbd911da8322911a3c0079) to change the value. The attribute name for visibility is `visibility` and you can set it to the value of `inherited` or `invisible`.
``` {literalinclude} py_usd.py
:language: py
``` | 477 | Markdown | 94.599981 | 427 | 0.779874 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
from pxr import Sdf
def hide_prim(prim_path: str):
"""Hide a prim
Args:
prim_path (str, required): The prim path of the prim to hide
"""
set_prim_visibility_attribute(prim_path, "invisible")
def show_prim(prim_path: str):
"""Show a prim
Args:
prim_path (str, required): The prim path of the prim to show
"""
set_prim_visibility_attribute(prim_path, "inherited")
def set_prim_visibility_attribute(prim_path: str, value: str):
"""Set the prim visibility attribute at prim_path to value
Args:
prim_path (str, required): The path of the prim to modify
value (str, required): The value of the visibility attribute
"""
# You can reference attributes using the path syntax by appending the
# attribute name with a leading `.`
prop_path = f"{prim_path}.visibility"
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path(prop_path), value=value, prev=None
)
"""
Full Usage
"""
# Path to a prim in the open stage
prim_path = "/World/Cube"
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(prim_path)
assert prim.IsValid()
# Manually confirm in the viewport that the prim is hidden after calling
# hide_prim. To do so, comment out the show_prim call and assert below.
hide_prim(prim_path)
assert prim.GetAttribute("visibility").Get() == "invisible"
# Manually confirm that the prim is visible in the viewport after calling
# show_prim
show_prim(prim_path)
assert prim.GetAttribute("visibility").Get() == "inherited"
| 1,738 | Python | 27.508196 | 98 | 0.698504 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/usda.md | This is an example USDA result from creating a Cube and setting the visibility property to `inherited`. You can edit the value to `invisible` to hide the prim.
``` {literalinclude} usda.usda
:language: usd
``` | 211 | Markdown | 41.399992 | 159 | 0.753554 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/config.toml | [core]
# The title for this code sample. Used to name the page.
title = "Show or Hide a Prim"
[metadata]
#A concise description of the code sample for SEO.
description = "Universal Scene Description (OpenUSD) code samples that demonstrates how to a show or hide a prim."
# Put in SEO keywords relevant to this code sample.
keywords = ["OpenUSD", "USD", "Python", "visibility", "show prim", "hide prim"] | 403 | TOML | 43.888884 | 114 | 0.73201 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/py_kit_cmds.md | ``` {literalinclude} py_kit_cmds.py
:language: py
``` | 53 | Markdown | 16.999994 | 35 | 0.660377 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Sdf
def add_sub_layer(sub_layer_path: str, root_layer: Sdf.Layer) -> Sdf.Layer:
sub_layer: Sdf.Layer = Sdf.Layer.CreateNew(sub_layer_path)
# You can use standard python list.insert to add the subLayer to any position in the list
root_layer.subLayerPaths.append(sub_layer.identifier)
return sub_layer
#############
# Full Usage
#############
from pxr import Usd
# Get the root layer
stage: Usd.Stage = Usd.Stage.CreateInMemory()
root_layer: Sdf.Layer = stage.GetRootLayer()
# Add the sub layer to the root layer
sub_layer = add_sub_layer(r"C:/path/to/sublayer.usd", root_layer)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check to see if the sublayer is loaded
loaded_layers = root_layer.GetLoadedLayers()
assert sub_layer in loaded_layers | 921 | Python | 28.741935 | 98 | 0.726384 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
```
| 49 | Markdown | 11.499997 | 30 | 0.632653 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
# Get the stage currently open in Kit; the command references its root layer.
stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute("CreateSublayer",
layer_identifier=stage.GetRootLayer().identifier,
# This example prepends to the subLayers list
sublayer_position=0,
new_layer_path=r"C:/path/to/sublayer.usd",
transfer_root_content=False,
# When True, it will create the layer file for you too.
create_or_insert=True
)
| 506 | Python | 30.687498 | 98 | 0.741107 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/config.toml | [core]
title = "Add a SubLayer"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding an Inherit composition arc to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "layer", "SubLayer", "composition", "composition arc"] | 280 | TOML | 45.833326 | 120 | 0.717857 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/py_kit_cmds.md | The `CreatePayload` command is a convenient wrapper that creates an Xform prim and adds a Payload to it all at once. If you don't need the two steps batched together, you may want to [add a Payload](add-payload) to an existing prim via Kit Commands or USD Python API.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 322 | Markdown | 63.599987 | 267 | 0.757764 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
from pxr import Usd, Sdf
def create_payload(usd_context: omni.usd.UsdContext, path_to: Sdf.Path, asset_path: str, prim_path: Sdf.Path) -> Usd.Prim:
omni.kit.commands.execute("CreatePayload",
usd_context=usd_context,
path_to=path_to, # Prim path for where to create the prim with the payload
asset_path=asset_path, # The file path to the payload USD. Relative paths are accepted too.
prim_path=prim_path # OPTIONAL: Prim path to a prim in the payloaded USD, if not provided the default prim is used
)
return usd_context.get_stage().GetPrimAtPath(path_to)
#############
# Full Usage
#############
# Get the USD context from kit
context: omni.usd.UsdContext = omni.usd.get_context()
# Create and add an external payload to a specific prim
payload_prim: Usd.Prim = create_payload(context, Sdf.Path("/World/payload_prim"), "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Get the existing USD stage from kit
stage: Usd.Stage = context.get_stage()
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check that the payload prim was created
assert payload_prim.IsValid()
assert payload_prim.GetPrimStack()[0].payloadList.prependedItems[0] == Sdf.Payload(assetPath="file:/C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target")) | 1,469 | Python | 39.833332 | 162 | 0.721579 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/usda.md | This is an example USDA result from creating a reference with the `CreateReference` command.
``` {literalinclude} usda.usda
:language: usd
``` | 143 | Markdown | 34.999991 | 92 | 0.762238 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/config.toml | [core]
title = "Create a Payload"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for creating an Xform prim and adding a Payload in Omniverse Kit in one step."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "payload", "CreatePayload"] | 292 | TOML | 47.833325 | 144 | 0.726027 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_kit_cmds.md | The `AddPayload` command in Kit can add payloads to a prim.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 113 | Markdown | 27.499993 | 59 | 0.716814 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf
def add_payload(prim: Usd.Prim, payload_asset_path: str, payload_target_path: Sdf.Path) -> None:
payloads: Usd.Payloads = prim.GetPayloads()
payloads.AddPayload(
assetPath=payload_asset_path,
        primPath=payload_target_path # OPTIONAL: Payload a specific target prim. Otherwise, uses the payloaded layer's defaultPrim.
)
#############
# Full Usage
#############
from pxr import UsdGeom
# Create new USD stage for this sample
stage: Usd.Stage = Usd.Stage.CreateInMemory()
# Create and define default prim
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create an xform which should hold all payloads in this sample
payload_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World/payload_prim")).GetPrim()
# Add an external payload
add_payload(payload_prim, "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Add another external payload that uses the default prim
add_payload(payload_prim, "C:/path/to/other/file.usd", Sdf.Path.emptyPath)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Get a list of all prepended payloads
payloads = []
for prim_spec in payload_prim.GetPrimStack():
payloads.extend(prim_spec.payloadList.prependedItems)
# Check that the payload prim was created and that the payloads are correct
assert payload_prim.IsValid()
assert payloads[0] == Sdf.Payload(assetPath="C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target"))
assert payloads[1] == Sdf.Payload(assetPath="C:/path/to/other/file.usd")
| 1,698 | Python | 35.148935 | 130 | 0.73616 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_usd.md | With the USD API, you can use `Usd.Prim.GetPayloads()` to receive the payloads and add a new one with `Usd.Payloads.AddPayload()`.
``` {literalinclude} py_usd.py
:language: py
``` | 179 | Markdown | 43.999989 | 130 | 0.72067 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Usd, Sdf
def add_payload(prim: Usd.Prim, payload_asset_path: str, payload_target_path: Sdf.Path) -> None:
omni.kit.commands.execute("AddPayload",
stage=prim.GetStage(),
prim_path = prim.GetPath(), # an existing prim to add the payload to.
payload=Sdf.Payload(
assetPath = payload_asset_path,
primPath = payload_target_path
)
)
#############
# Full Usage
#############
from pxr import UsdGeom
import omni.usd
# Create new USD stage for this sample in OV
context: omni.usd.UsdContext = omni.usd.get_context()
success: bool = context.new_stage()
stage: Usd.Stage = context.get_stage()
# Create and define default prim, so this file can be easily referenced again
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create an xform which should hold all payloads in this sample
payload_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World/payload_prim")).GetPrim()
# Add a payload targeting a specific prim
add_payload(payload_prim, "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Add another payload that uses the default prim
add_payload(payload_prim, "C:/path/to/other/file.usd", Sdf.Path.emptyPath)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Get a list of all prepended payloads
payloads = []
for prim_spec in payload_prim.GetPrimStack():
payloads.extend(prim_spec.payloadList.prependedItems)
# Check that the payload prim was created and that the payloads are correct
assert payload_prim.IsValid()
assert payloads[0] == Sdf.Payload(assetPath="C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target"))
assert payloads[1] == Sdf.Payload(assetPath="C:/path/to/other/file.usd") | 1,908 | Python | 35.018867 | 107 | 0.719078 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/usda.md | This is an example USDA result from creating an `Xform` and adding two `Payloads` to it. The first payload target prim in this case is in the file `C:/path/to/file.usd` with the prim path `/World/some/target` and the second is the default prim in the file `C:/path/to/other/file.usd`.
``` {literalinclude} usda.usda
:language: usd
``` | 335 | Markdown | 82.999979 | 284 | 0.737313 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/config.toml | [core]
title = "Add a Payload"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding a Payload to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "payload", "AddPayload"] | 240 | TOML | 39.16666 | 98 | 0.7 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-reference/py_kit_cmds.md | The `CreateReference` command is a convenient wrapper that creates an Xform prim and adds a Reference to it all at once. If you don't need the two steps batched together, you may want to [add a Reference](add-reference) to an existing prim via Kit Commands or USD API.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 323 | Markdown | 63.799987 | 268 | 0.76161 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-reference/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
from pxr import Usd, Sdf
def create_reference(usd_context: omni.usd.UsdContext, path_to: Sdf.Path, asset_path: str, prim_path: Sdf.Path) -> Usd.Prim:
omni.kit.commands.execute("CreateReference",
usd_context=usd_context,
path_to=path_to, # Prim path for where to create the prim with the reference
asset_path=asset_path, # The file path to reference. Relative paths are accepted too.
prim_path=prim_path # OPTIONAL: Prim path to a prim in the referenced USD, if not provided the default prim is used
)
return usd_context.get_stage().GetPrimAtPath(path_to)
#############
# Full Usage
#############
# Get the USD context from kit
context: omni.usd.UsdContext = omni.usd.get_context()
# Create and add an external reference to a specific prim
ref_prim: Usd.Prim = create_reference(context, Sdf.Path("/World/ref_prim"), "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Get the existing USD stage from kit
stage: Usd.Stage = context.get_stage()
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check that the reference prim was created
assert ref_prim.IsValid()
assert ref_prim.GetPrimStack()[0].referenceList.prependedItems[0] == Sdf.Reference(assetPath="file:/C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target")) | 1,464 | Python | 39.694443 | 162 | 0.721995 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-reference/usda.md | This is an example USDA result from creating a reference with the `CreateReference` command.
``` {literalinclude} usda.usda
:language: usd
``` | 143 | Markdown | 34.999991 | 92 | 0.762238 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-reference/config.toml | [core]
title = "Create a Reference"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for creating an Xform prim and adding a Reference in Omniverse Kit in one step."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "reference", "CreateReference"] | 300 | TOML | 49.166658 | 146 | 0.733333 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/py_kit_cmds.md | The `AddReference` command in Kit can add internal and external references to a prim.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 139 | Markdown | 33.999992 | 85 | 0.748201 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf
def add_int_reference(prim: Usd.Prim, ref_target_path: Sdf.Path) -> None:
references: Usd.References = prim.GetReferences()
references.AddInternalReference(ref_target_path)
def add_ext_reference(prim: Usd.Prim, ref_asset_path: str, ref_target_path: Sdf.Path) -> None:
references: Usd.References = prim.GetReferences()
references.AddReference(
assetPath=ref_asset_path,
primPath=ref_target_path # OPTIONAL: Reference a specific target prim. Otherwise, uses the referenced layer's defaultPrim.
)
#############
# Full Usage
#############
from pxr import UsdGeom
# Create new USD stage for this sample
stage: Usd.Stage = Usd.Stage.CreateInMemory()
# Create and define default prim, so this file can be easily referenced again
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create an xform which should hold all references in this sample
ref_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World/ref_prim")).GetPrim()
# Add an internal reference
intern_target_path: Sdf.Path = Sdf.Path("/World/intern_target")
target_prim: Usd.Prim = UsdGeom.Xform.Define(stage, intern_target_path).GetPrim()
add_int_reference(ref_prim, intern_target_path)
# Add an external reference to a specific prim
add_ext_reference(ref_prim, "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Add another external reference that uses the default prim
add_ext_reference(ref_prim, "C:/path/to/other/file.usd", Sdf.Path.emptyPath)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Get a list of all prepended references
references = []
for prim_spec in ref_prim.GetPrimStack():
references.extend(prim_spec.referenceList.prependedItems)
# Check that the reference prim was created and that the references are correct
assert ref_prim.IsValid()
assert references[0] == Sdf.Reference(primPath=intern_target_path)
assert references[1] == Sdf.Reference(assetPath="C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target"))
assert references[2] == Sdf.Reference(assetPath="C:/path/to/other/file.usd")
| 2,250 | Python | 38.491227 | 130 | 0.744 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/py_usd.md | With the USD API, you can use `Usd.Prim.GetReferences()` to receive the references and add a new one with `Usd.References.AddReference()`.
``` {literalinclude} py_usd.py
:language: py
``` | 187 | Markdown | 45.999989 | 138 | 0.73262 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Usd, Sdf
def add_int_reference(prim: Usd.Prim, ref_target_path: Sdf.Path) -> None:
omni.kit.commands.execute("AddReference",
stage=prim.GetStage(),
prim_path = prim.GetPath(), # an existing prim to add the reference to.
reference=Sdf.Reference(
primPath = ref_target_path
)
)
def add_ext_reference(prim: Usd.Prim, ref_asset_path: str, ref_target_path: Sdf.Path) -> None:
omni.kit.commands.execute("AddReference",
stage=prim.GetStage(),
prim_path = prim.GetPath(), # an existing prim to add the reference to.
reference=Sdf.Reference(
assetPath = ref_asset_path,
primPath = ref_target_path
)
)
#############
# Full Usage
#############
import omni.usd
from pxr import UsdGeom
# Create new USD stage for this sample in OV
context: omni.usd.UsdContext = omni.usd.get_context()
success: bool = context.new_stage()
stage: Usd.Stage = context.get_stage()
# Create and define default prim, so this file can be easily referenced again
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create an xform which should hold all references in this sample
ref_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World/ref_prim")).GetPrim()
# Add an internal reference
intern_target_path: Sdf.Path = Sdf.Path("/World/intern_target")
target_prim: Usd.Prim = UsdGeom.Xform.Define(stage, intern_target_path).GetPrim()
add_int_reference(ref_prim, intern_target_path)
# Add an external reference to specific prim
add_ext_reference(ref_prim, "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Add other external reference to default prim
add_ext_reference(ref_prim, "C:/path/to/other/file.usd", Sdf.Path.emptyPath)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Get a list of all prepended references
references = []
for prim_spec in ref_prim.GetPrimStack():
references.extend(prim_spec.referenceList.prependedItems)
# Check that the reference prim was created and that the references are correct
assert ref_prim.IsValid()
assert references[0] == Sdf.Reference(primPath=intern_target_path)
assert references[1] == Sdf.Reference(assetPath="C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target"))
assert references[2] == Sdf.Reference(assetPath="C:/path/to/other/file.usd") | 2,547 | Python | 36.470588 | 111 | 0.712996 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/config.toml | [core]
title = "Add a Reference"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding a Reference to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "reference", "AddReference"] | 248 | TOML | 40.499993 | 102 | 0.709677 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/author-variant-data/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# `prim` is an existing Usd.Prim and `variant_name` an existing variant name (both assumed to be defined).
shading_varset = prim.GetVariantSets().GetVariantSet("shading")
selected_variant = shading_varset.GetVariantSelection()
shading_varset.SetVariantSelection(variant_name)
with shading_varset.GetVariantEditContext():
# Specs authored within this context are authored just for the variant.
...
# Set the variant selection back to the previously selected variant.
# Alternatively, you can use Usd.VariantSet.ClearVariantSelection()
# if you know that there isn't a variant selection in the current EditTarget.
if selected_variant:
shading_varset.SetVariantSelection(selected_variant)
| 731 | Python | 42.058821 | 98 | 0.79617 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/author-variant-data/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
``` | 48 | Markdown | 15.333328 | 30 | 0.645833 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/author-variant-data/config.toml | [core]
# The title for this code sample. Used to name the page.
title = "Author Data for a Particular Variant"
[metadata]
#A concise description of the code sample for SEO.
description = "Universal Scene Description (OpenUSD) code samples for authoring data for a particular variant set."
# Put in SEO keywords relevant to this code sample.
keywords = ["OpenUSD", "USD", "code sample", "Python", "C++", "variant set", "composition", "variant"] | 444 | TOML | 48.444439 | 115 | 0.736486 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/author-variant-data/header.md | Opinions (i.e. data) for a particular variant can be authored on different layers. This shows how you can author opinions for an existing variant that
might have been authored on a different layer. | 197 | Markdown | 97.999951 | 150 | 0.807107 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/select-variant/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def select_variant_from_variant_set(prim: Usd.Prim, variant_set_name: str, variant_name: str) -> None:
variant_set = prim.GetVariantSets().GetVariantSet(variant_set_name)
variant_set.SetVariantSelection(variant_name)
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
# Create the Variant Set
shading_varset: Usd.VariantSet = default_prim.GetVariantSets().AddVariantSet("shading")
# Add Variants to the Variant Set
shading_varset.AddVariant("cell_shading")
shading_varset.AddVariant("realistic")
select_variant_from_variant_set(default_prim, "shading", "realistic")
usda = stage.GetRootLayer().ExportToString()
print(usda)
assert default_prim.GetVariantSets().GetVariantSet("shading").GetVariantSelection() == "realistic" | 1,150 | Python | 33.878787 | 102 | 0.753043 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/select-variant/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
``` | 48 | Markdown | 15.333328 | 30 | 0.645833 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/select-variant/usda.md | This is an example USDA result from creating a Variant Set, adding two Variants to the set, and selecting the current Variant to `realistic`.
``` {literalinclude} usda.usda
:language: usd
``` | 192 | Markdown | 47.249988 | 141 | 0.760417 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/select-variant/config.toml | [core]
# The title for this code sample. Used to name the page.
title = "Select a Variant for a Variant Set"
[metadata]
#A concise description of the code sample for SEO.
description = "Universal Scene Description (OpenUSD) code samples for selecting a variant belonging to a variant set."
# Put in SEO keywords relevant to this code sample.
keywords = ["OpenUSD", "USD", "code sample", "Python", "C++", "variant set", "composition", "variant"] | 445 | TOML | 48.55555 | 118 | 0.732584 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/create-variant-set/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def create_variant_set(prim: Usd.Prim, variant_set_name: str, variants: list) -> Usd.VariantSet:
variant_set = prim.GetVariantSets().AddVariantSet(variant_set_name)
for variant in variants:
variant_set.AddVariant(variant)
return variant_set
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
# Create the variant set and add your variants to it.
variants = ["red", "blue", "green"]
shading_varset: Usd.VariantSet = create_variant_set(default_prim, "shading", variants)
usda = stage.GetRootLayer().ExportToString()
print(usda)
assert default_prim.GetVariantSets().HasVariantSet("shading")
| 1,027 | Python | 33.266666 | 98 | 0.730282 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/create-variant-set/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
```
| 49 | Markdown | 11.499997 | 30 | 0.632653 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/create-variant-set/usda.md | This is an example USDA result from creating a Variant Set and adding Variants to the Set.
``` {literalinclude} usda.usda
:language: usd
``` | 141 | Markdown | 34.499991 | 90 | 0.751773 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/create-variant-set/config.toml | [core]
title = "Create a Variant Set"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples showing how to create a variant set."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "variant set", "composition", "create variant set", "variant"] | 282 | TOML | 46.166659 | 128 | 0.716312 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/specializes/add-specialize/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def add_specialize_to(base_prim: Usd.Prim, specializes: Usd.Specializes) -> bool:
return specializes.AddSpecialize(base_prim.GetPath())
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
prim: Usd.Prim = UsdGeom.Xform.Define(stage, default_prim.GetPath().AppendPath("prim")).GetPrim()
base: Usd.Prim = UsdGeom.Xform.Define(stage, default_prim.GetPath().AppendPath("base")).GetPrim()
specializes: Usd.Specializes = prim.GetSpecializes()
added_successfully = add_specialize_to(base, specializes)
usda = stage.GetRootLayer().ExportToString()
print(usda)
assert added_successfully | 999 | Python | 34.714284 | 98 | 0.746747 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/specializes/add-specialize/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
``` | 48 | Markdown | 15.333328 | 30 | 0.645833 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/specializes/add-specialize/usda.md | This is an example USDA result adding an Specialize Arc to a prim.
``` {literalinclude} usda.usda
:language: usd
``` | 117 | Markdown | 28.499993 | 66 | 0.735043 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/specializes/add-specialize/config.toml | [core]
title = "Add a Specialize"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding a Specialize composition arc to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "specialize", "composition", "composition arc"] | 277 | TOML | 45.333326 | 117 | 0.729242 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/py_kit_cmds.md | ``` {literalinclude} py_kit_cmds.py
:language: py
``` | 53 | Markdown | 16.999994 | 35 | 0.660377 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/py_omni_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.usd
prim_path = "/World/My/Prim"
ctx = omni.usd.get_context()
# The second arg is unused. Any boolean can be used.
ctx.get_selection().set_selected_prim_paths([prim_path], True) | 328 | Python | 35.555552 | 98 | 0.75 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
prim_path = "/World/My/Prim"
ctx = omni.usd.get_context()
old_selection = ctx.get_selection().get_selected_prim_paths()
omni.kit.commands.execute('SelectPrimsCommand',
old_selected_paths=old_selection,
new_selected_paths=[prim_path],
expand_in_stage=True) #DEPRECATED: Used only for backwards compatibility. | 500 | Python | 34.785712 | 98 | 0.76 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/config.toml | [core]
title = "Select a Prim by Prim Path"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples showing how to select a prim using its prim path."
keywords = ["OpenUSD", "USD", "Python", "code sample", "prim", "selection", "by path", "path", "prim path"] | 281 | TOML | 45.999992 | 116 | 0.697509 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/py_omni_usd.md | ``` {literalinclude} py_omni_usd.py
:language: py
``` | 53 | Markdown | 16.999994 | 35 | 0.660377 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/check-prim-exists/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def check_prim_exists(prim: Usd.Prim) -> bool:
    return prim.IsValid()
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
# Create one valid prim and one empty (invalid) prim
cube: Usd.Prim = UsdGeom.Cube.Define(stage, Sdf.Path("/World/Cube")).GetPrim()
empty_prim = Usd.Prim()
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check if prims exist
assert check_prim_exists(default_prim)
assert check_prim_exists(cube)
assert not check_prim_exists(empty_prim) | 893 | Python | 26.937499 | 98 | 0.718925 |