NVIDIA-Omniverse/PhysX/flow/source/nvfloweditor/Camera.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "Camera.h"
#include "NvFlowMath.h"
struct NvFlowCamera
{
// Settings
NvFlowCameraConfig config = {};
// Camera state
NvFlowCameraState state = {};
// Mouse state
int mouseXprev = 0;
int mouseYprev = 0;
NvFlowBool32 rotationActive = false;
NvFlowBool32 zoomActive = false;
NvFlowBool32 translateActive = false;
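// bit mask of held translation keys (see NvFlowCameraKeyUpdate):
// 2u = up, 4u = down, 8u = left, 16u = right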
NvFlowUint keyTranslateActiveMask = 0u;
};
void NvFlowCamera_computeRotationBasis(NvFlowCamera* ptr, NvFlowFloat4* pXAxis, NvFlowFloat4* pYAxis, NvFlowFloat4* pZAxis)
{
using namespace NvFlowMath;
NvFlowFloat4 zAxis = vector3Normalize(make_float4(ptr->state.eyeDirection, 0.f));
// RH Z is negative going into the screen, so reverse the eye direction vector after building the basis
if (ptr->config.isProjectionRH)
{
zAxis.x = -zAxis.x;
zAxis.y = -zAxis.y;
zAxis.z = -zAxis.z;
}
NvFlowFloat4 yAxis = make_float4(ptr->state.eyeUp, 0.f);
// force yAxis to be orthogonal to zAxis
yAxis = vector3Normalize(yAxis - vector3Dot(zAxis, yAxis) * zAxis);
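// (Gram-Schmidt step: subtracting the zAxis component keeps the basis
// orthonormal even when eyeUp is not exactly perpendicular to eyeDirection)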
// generate third basis vector
NvFlowFloat4 xAxis = vector3Cross(yAxis, zAxis);
if (pXAxis)
{
*pXAxis = xAxis;
}
if (pYAxis)
{
*pYAxis = yAxis;
}
if (pZAxis)
{
*pZAxis = zAxis;
}
}
NvFlowCamera* NvFlowCameraCreate(int winw, int winh)
{
auto ptr = new NvFlowCamera();
NvFlowCameraGetDefaultState(&ptr->state, false);
NvFlowCameraGetDefaultConfig(&ptr->config);
return ptr;
}
void NvFlowCameraDestroy(NvFlowCamera* ptr)
{
delete ptr;
}
void NvFlowCameraGetDefaultState(NvFlowCameraState* ptr, bool yUp)
{
ptr->position = { 0.f, 0.f, 0.f };
if (yUp)
{
ptr->eyeDirection = { 0.f, 0.f, 1.f };
ptr->eyeUp = { 0.f, 1.f, 0.f };
}
else
{
ptr->eyeDirection = { 0.f, 1.f, 0.f };
ptr->eyeUp = { 0.f, 0.f, 1.f };
}
ptr->eyeDistanceFromPosition = -700.f;
}
void NvFlowCameraGetDefaultConfig(NvFlowCameraConfig* ptr)
{
using namespace NvFlowMath;
ptr->isProjectionRH = NV_FLOW_TRUE;
ptr->isOrthographic = NV_FLOW_FALSE;
ptr->isReverseZ = NV_FLOW_TRUE;
ptr->nearPlane = 0.1f;
ptr->farPlane = INFINITY;
ptr->fovAngleY = pi / 4.f;
ptr->orthographicY = 500.f;
ptr->panRate = 1.f;
ptr->tiltRate = 1.f;
ptr->zoomRate = 1.f;
ptr->keyTranslationRate = 800.f;
}
void NvFlowCameraGetState(NvFlowCamera* ptr, NvFlowCameraState* state)
{
*state = ptr->state;
}
void NvFlowCameraSetState(NvFlowCamera* ptr, const NvFlowCameraState* state)
{
ptr->state = *state;
}
void NvFlowCameraGetConfig(NvFlowCamera* ptr, NvFlowCameraConfig* config)
{
*config = ptr->config;
}
void NvFlowCameraSetConfig(NvFlowCamera* ptr, const NvFlowCameraConfig* config)
{
ptr->config = *config;
}
void NvFlowCameraGetView(NvFlowCamera* ptr, NvFlowFloat4x4* viewMatrix)
{
using namespace NvFlowMath;
auto state = &ptr->state;
float eyeDistanceWithDepth = state->eyeDistanceFromPosition;
NvFlowFloat3 eyePosition = state->position;
eyePosition.x -= state->eyeDirection.x * state->eyeDistanceFromPosition;
eyePosition.y -= state->eyeDirection.y * state->eyeDistanceFromPosition;
eyePosition.z -= state->eyeDirection.z * state->eyeDistanceFromPosition;
NvFlowFloat4x4 translate = matrixTranslation(eyePosition.x, eyePosition.y, eyePosition.z);
// derive rotation from eyeDirection, eyeUp vectors
NvFlowFloat4x4 rotation = {};
{
NvFlowFloat4 zAxis = {};
NvFlowFloat4 xAxis = {};
NvFlowFloat4 yAxis = {};
NvFlowCamera_computeRotationBasis(ptr, &xAxis, &yAxis, &zAxis);
rotation = NvFlowFloat4x4{
xAxis.x, yAxis.x, zAxis.x, 0.f,
xAxis.y, yAxis.y, zAxis.y, 0.f,
xAxis.z, yAxis.z, zAxis.z, 0.f,
0.f, 0.f, 0.f, 1.f
};
}
NvFlowFloat4x4 view = matrixMultiply(translate, rotation);
*viewMatrix = view;
}
void NvFlowCameraGetProjection(NvFlowCamera* ptr, NvFlowFloat4x4* projMatrix, float aspectWidth, float aspectHeight)
{
using namespace NvFlowMath;
float aspectRatio = aspectWidth / aspectHeight;
NvFlowFloat4x4 projection = {};
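// with reverse-Z, the near/far arguments are swapped in the calls below so the
// far plane lands at depth 0, the standard reverse-Z trick for better
// floating-point depth precision; it also permits farPlane = INFINITY in the
// perspective case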
if (ptr->config.isOrthographic)
{
if (ptr->config.isProjectionRH)
{
if (ptr->config.isReverseZ)
{
projection = matrixOrthographicRH(ptr->config.orthographicY * aspectRatio, ptr->config.orthographicY, ptr->config.farPlane, ptr->config.nearPlane);
}
else
{
projection = matrixOrthographicRH(ptr->config.orthographicY * aspectRatio, ptr->config.orthographicY, ptr->config.nearPlane, ptr->config.farPlane);
}
*projMatrix = projection;
}
else
{
if (ptr->config.isReverseZ)
{
projection = matrixOrthographicLH(ptr->config.orthographicY * aspectRatio, ptr->config.orthographicY, ptr->config.farPlane, ptr->config.nearPlane);
}
else
{
projection = matrixOrthographicLH(ptr->config.orthographicY * aspectRatio, ptr->config.orthographicY, ptr->config.nearPlane, ptr->config.farPlane);
}
*projMatrix = projection;
}
}
else
{
if (ptr->config.isProjectionRH)
{
if (ptr->config.isReverseZ)
{
projection = matrixPerspectiveFovRH(ptr->config.fovAngleY, aspectRatio, ptr->config.farPlane, ptr->config.nearPlane);
}
else
{
projection = matrixPerspectiveFovRH(ptr->config.fovAngleY, aspectRatio, ptr->config.nearPlane, ptr->config.farPlane);
}
*projMatrix = projection;
}
else
{
if (ptr->config.isReverseZ)
{
projection = matrixPerspectiveFovLH(ptr->config.fovAngleY, aspectRatio, ptr->config.farPlane, ptr->config.nearPlane);
}
else
{
projection = matrixPerspectiveFovLH(ptr->config.fovAngleY, aspectRatio, ptr->config.nearPlane, ptr->config.farPlane);
}
*projMatrix = projection;
}
}
}
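// A minimal usage sketch of the camera API above (illustrative only, not part
// of the original file; window/input plumbing is assumed):
//
//   NvFlowCamera* camera = NvFlowCameraCreate(winw, winh);
//   // per frame:
//   NvFlowCameraMouseUpdate(camera, button, action, mouseX, mouseY, winw, winh);
//   NvFlowCameraAnimationTick(camera, deltaTime);
//   NvFlowFloat4x4 view = {};
//   NvFlowFloat4x4 projection = {};
//   NvFlowCameraGetView(camera, &view);
//   NvFlowCameraGetProjection(camera, &projection, float(winw), float(winh));
//   // ... render with view/projection ...
//   NvFlowCameraDestroy(camera);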
void NvFlowCameraMouseUpdate(NvFlowCamera* ptr, NvFlowCameraMouseButton button, NvFlowCameraAction action, int mouseX, int mouseY, int winw, int winh)
{
using namespace NvFlowMath;
// transient mouse state
float rotationDx = 0.f;
float rotationDy = 0.f;
float translateDx = 0.f;
float translateDy = 0.f;
int translateWinW = 1024;
int translateWinH = 1024;
float zoomDy = 0.f;
// process event
if (action == eNvFlowCameraAction_down)
{
if (button == eNvFlowCameraMouseButton_left)
{
ptr->rotationActive = true;
rotationDx = 0.f;
rotationDy = 0.f;
}
else if (button == eNvFlowCameraMouseButton_middle)
{
ptr->translateActive = true;
translateDx = 0.f;
translateDy = 0.f;
}
else if (button == eNvFlowCameraMouseButton_right)
{
ptr->zoomActive = true;
zoomDy = 0.f;
}
}
else if (action == eNvFlowCameraAction_up)
{
if (button == eNvFlowCameraMouseButton_left)
{
ptr->rotationActive = false;
rotationDx = 0.f;
rotationDy = 0.f;
}
else if (button == eNvFlowCameraMouseButton_middle)
{
ptr->translateActive = false;
translateDx = 0.f;
translateDy = 0.f;
}
else if (button == eNvFlowCameraMouseButton_right)
{
ptr->zoomActive = false;
zoomDy = 0.f;
}
}
else if (action == eNvFlowCameraAction_unknown)
{
if (ptr->rotationActive)
{
int dx = +(mouseX - ptr->mouseXprev);
int dy = +(mouseY - ptr->mouseYprev);
rotationDx = float(dx) * 2.f * 3.14f / (winw);
rotationDy = float(dy) * 2.f * 3.14f / (winh);
}
if (ptr->translateActive)
{
float dx = float(mouseX - ptr->mouseXprev);
float dy = -float(mouseY - ptr->mouseYprev);
translateDx = dx * 2.f / (winw);
translateDy = dy * 2.f / (winh);
translateWinW = winw;
translateWinH = winh;
}
if (ptr->zoomActive)
{
float dy = -float(mouseY - ptr->mouseYprev);
zoomDy = dy * 3.14f / float(winh);
}
}
// store the current mouse position as the previous position for the next event
ptr->mouseXprev = mouseX;
ptr->mouseYprev = mouseY;
// apply rotation
if (rotationDx != 0.f || rotationDy != 0.f)
{
float dx = rotationDx;
float dy = rotationDy;
if (ptr->config.isProjectionRH)
{
dx = -dx;
dy = -dy;
}
float rotTilt = ptr->config.tiltRate * float(dy);
float rotPan = ptr->config.panRate * float(dx);
const float eyeDotLimit = 0.99f;
// tilt
{
NvFlowFloat4 eyeDirection4 = make_float4(ptr->state.eyeDirection, 0.f);
NvFlowFloat4 rotVec = {};
NvFlowCamera_computeRotationBasis(ptr, &rotVec, nullptr, nullptr);
const float angle = rotTilt;
NvFlowFloat4x4 dtilt = matrixRotationAxis(rotVec, angle);
eyeDirection4 = vector4Transform(eyeDirection4, dtilt);
// make sure eye direction stays normalized
eyeDirection4 = vector3Normalize(eyeDirection4);
// check dot of eyeDirection and eyeUp, and skip the commit when the direction would become nearly parallel to eyeUp
float eyeDot = fabsf(
eyeDirection4.x * ptr->state.eyeUp.x +
eyeDirection4.y * ptr->state.eyeUp.y +
eyeDirection4.z * ptr->state.eyeUp.z
);
if (eyeDot < eyeDotLimit)
{
ptr->state.eyeDirection = float4_to_float3(eyeDirection4);
}
}
// pan
{
NvFlowFloat4 eyeDirection4 = make_float4(ptr->state.eyeDirection, 0.f);
NvFlowFloat4 rotVec = make_float4(ptr->state.eyeUp, 0.f);
const float angle = rotPan;
NvFlowFloat4x4 dpan = matrixRotationAxis(rotVec, angle);
eyeDirection4 = vector4Transform(eyeDirection4, dpan);
// make sure eye direction stays normalized
eyeDirection4 = vector3Normalize(eyeDirection4);
ptr->state.eyeDirection = float4_to_float3(eyeDirection4);
}
}
// apply translation
if (translateDx != 0.f || translateDy != 0.f)
{
// the goal here is to apply an NDC offset to the position value in world space
NvFlowFloat4x4 projection = {};
NvFlowCameraGetProjection(ptr, &projection, float(translateWinW), float(translateWinH));
NvFlowFloat4x4 view = {};
NvFlowCameraGetView(ptr, &view);
// project position to NDC
NvFlowFloat4 positionNDC = make_float4(ptr->state.position, 1.f);
positionNDC = vector4Transform(positionNDC, view);
positionNDC = vector4Transform(positionNDC, projection);
// normalize
if (positionNDC.w > 0.f)
{
positionNDC = positionNDC / vectorSplatW(positionNDC);
}
// offset using mouse data
positionNDC.x += translateDx;
positionNDC.y += translateDy;
// move back to world space
NvFlowFloat4x4 projViewInverse = matrixInverse(
matrixMultiply(view, projection)
);
NvFlowFloat4 positionWorld = vector4Transform(positionNDC, projViewInverse);
// normalize
if (positionWorld.w > 0.f)
{
positionWorld = positionWorld / vectorSplatW(positionWorld);
}
// commit update
ptr->state.position = float4_to_float3(positionWorld);
}
// apply zoom
if (zoomDy != 0.f)
{
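// multiplicative zoom: scaling the orbit distance keeps zoom speed
// proportional to the current distance from the focus position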
ptr->state.eyeDistanceFromPosition *= (1.f + ptr->config.zoomRate * zoomDy);
}
}
void NvFlowCameraKeyUpdate(NvFlowCamera* ptr, NvFlowCameraKey key, NvFlowCameraAction action)
{
if (action == eNvFlowCameraAction_down)
{
if (key == eNvFlowCameraKey_up)
{
ptr->keyTranslateActiveMask |= 2u;
}
if (key == eNvFlowCameraKey_down)
{
ptr->keyTranslateActiveMask |= 4u;
}
if (key == eNvFlowCameraKey_left)
{
ptr->keyTranslateActiveMask |= 8u;
}
if (key == eNvFlowCameraKey_right)
{
ptr->keyTranslateActiveMask |= 16u;
}
}
else if (action == eNvFlowCameraAction_up)
{
if (key == eNvFlowCameraKey_up)
{
ptr->keyTranslateActiveMask &= ~2u;
}
if (key == eNvFlowCameraKey_down)
{
ptr->keyTranslateActiveMask &= ~4u;
}
if (key == eNvFlowCameraKey_left)
{
ptr->keyTranslateActiveMask &= ~8u;
}
if (key == eNvFlowCameraKey_right)
{
ptr->keyTranslateActiveMask &= ~16u;
}
}
}
void NvFlowCameraAnimationTick(NvFlowCamera* ptr, float deltaTime)
{
using namespace NvFlowMath;
float x = 0.f;
float y = 0.f;
float z = 0.f;
float rate = ptr->config.keyTranslationRate * deltaTime;
if (ptr->keyTranslateActiveMask & 2u)
{
z += -rate;
}
if (ptr->keyTranslateActiveMask & 4u)
{
z += +rate;
}
if (ptr->keyTranslateActiveMask & 8)
{
x += +rate;
}
if (ptr->keyTranslateActiveMask & 16)
{
x += -rate;
}
if (ptr->keyTranslateActiveMask)
{
ptr->state.position.x += ptr->state.eyeDirection.x * z;
ptr->state.position.y += ptr->state.eyeDirection.y * z;
ptr->state.position.z += ptr->state.eyeDirection.z * z;
// compute xaxis
NvFlowFloat4 xAxis{};
NvFlowCamera_computeRotationBasis(ptr, &xAxis, nullptr, nullptr);
ptr->state.position.x += xAxis.x * x;
ptr->state.position.y += xAxis.y * x;
ptr->state.position.z += xAxis.z * x;
}
}
NVIDIA-Omniverse/PhysX/flow/source/nvfloweditor/EditorCompute.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "EditorCommon.h"
void editorCompute_init(EditorCompute* ptr, const NvFlowSwapchainDesc* swapchainDesc, NvFlowBool32 headless)
{
ptr->headless = headless;
// initialize benchmarking if enabled
if (ptr->benchmarkFrameCount)
{
ptr->vsync = NV_FLOW_FALSE;
appTimerInit(&ptr->benchmarkTimerCPU);
appTimerBegin(&ptr->benchmarkTimerCPU);
fopen_s(&ptr->outputFile, ptr->outputFilename, "w");
if (ptr->outputFile)
{
fprintf(ptr->outputFile, "FrameID, FrameTime, CPUTime, GPUTime, ActiveBlockCount\n");
}
}
NvFlowLoaderInitDeviceAPI(&ptr->loader, printError, nullptr, ptr->contextApi);
NvFlowBool32 validation = NV_FLOW_TRUE;
ptr->deviceManager = ptr->loader.deviceInterface.createDeviceManager(validation, nullptr, ptr->threadCount);
NvFlowDeviceDesc deviceDesc = {};
deviceDesc.deviceIndex = 0u;
deviceDesc.enableExternalUsage = NV_FLOW_TRUE;
deviceDesc.logPrint = editorCompute_logPrint;
ptr->device = ptr->loader.deviceInterface.createDevice(ptr->deviceManager, &deviceDesc);
ptr->deviceQueue = ptr->loader.deviceInterface.getDeviceQueue(ptr->device);
if (!ptr->headless)
{
ptr->swapchain = ptr->loader.deviceInterface.createSwapchain(ptr->deviceQueue, swapchainDesc);
}
NvFlowContextInterface_duplicate(&ptr->contextInterface, ptr->loader.deviceInterface.getContextInterface(ptr->deviceQueue));
// testing external semaphore
NvFlowUint64 testHandle = 0u;
NvFlowDeviceSemaphore* testSemaphore = ptr->loader.deviceInterface.createSemaphore(ptr->device);
ptr->loader.deviceInterface.getSemaphoreExternalHandle(testSemaphore, &testHandle, sizeof(testHandle));
printf("Test semaphore handle = %llu\n", testHandle);
ptr->loader.deviceInterface.closeSemaphoreExternalHandle(testSemaphore, &testHandle, sizeof(testHandle));
ptr->loader.deviceInterface.destroySemaphore(testSemaphore);
}
void editorCompute_destroy(EditorCompute* ptr)
{
if (ptr->swapchain)
{
ptr->loader.deviceInterface.destroySwapchain(ptr->swapchain);
}
ptr->loader.deviceInterface.destroyDevice(ptr->deviceManager, ptr->device);
ptr->loader.deviceInterface.destroyDeviceManager(ptr->deviceManager);
NvFlowLoaderDestroy(&ptr->loader);
// destroy benchmarking if enabled
if (ptr->benchmarkFrameCount)
{
appTimerEnd(&ptr->benchmarkTimerCPU);
if (ptr->outputFile)
{
fclose(ptr->outputFile);
}
}
}
void editorCompute_reportEntries(void* userdata, NvFlowUint64 captureID, NvFlowUint numEntries, NvFlowProfilerEntry* entries)
{
EditorCompute* ptr = (EditorCompute*)userdata;
if (ptr->benchmarkFrameCount)
{
appTimerEnd(&ptr->benchmarkTimerCPU);
float deltaTime = 0.f;
appTimerGetResults(&ptr->benchmarkTimerCPU, &deltaTime);
float cpuSum = 0.f;
float gpuSum = 0.f;
for (NvFlowUint entryIdx = 0u; entryIdx < numEntries; entryIdx++)
{
cpuSum += entries[entryIdx].cpuDeltaTime;
gpuSum += entries[entryIdx].gpuDeltaTime;
}
if (ptr->outputFile && ptr->benchmarkFrameID > 0u)
{
fprintf(ptr->outputFile, "%d, %f, %f, %f, %d\n", ptr->benchmarkFrameID, 1000.f * deltaTime, 1000.f * cpuSum, 1000.f * gpuSum, ptr->benchmarkActiveBlockCount);
}
ptr->benchmarkFrameID++;
if (ptr->benchmarkFrameID > ptr->benchmarkFrameCount)
{
ptr->benchmarkShouldRun = false;
}
appTimerBegin(&ptr->benchmarkTimerCPU);
}
// reset active mask
for (NvFlowUint entryIdx = 0u; entryIdx < ptr->statEntries_active.size; entryIdx++)
{
ptr->statEntries_active[entryIdx] = NV_FLOW_FALSE;
}
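// greedy label matching: each profiler entry is matched to the first inactive
// stat slot with the same label, so repeated labels within one frame land in
// distinct slots and per-label CPU/GPU times accumulate into running averages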
NvFlowUint minInactiveEntry = 0u;
for (NvFlowUint profEntryIdx = 0u; profEntryIdx < numEntries; profEntryIdx++)
{
const NvFlowProfilerEntry profEntry = entries[profEntryIdx];
// update minInactiveEntry
for (; minInactiveEntry < ptr->statEntries_active.size; minInactiveEntry++)
{
if (!ptr->statEntries_active[minInactiveEntry])
{
break;
}
}
// search for matching label
NvFlowUint64 entryIdx = minInactiveEntry;
for (; entryIdx < ptr->statEntries_label.size; entryIdx++)
{
if (!ptr->statEntries_active[entryIdx] && strcmp(profEntry.label, ptr->statEntries_label[entryIdx]) == 0)
{
break;
}
}
// allocate new if needed
if (entryIdx >= ptr->statEntries_label.size)
{
entryIdx = ptr->statEntries_label.size;
ptr->statEntries_label.pushBack(profEntry.label);
ptr->statEntries_active.pushBack(NV_FLOW_FALSE);
ptr->statEntries_cpuDeltaTime_sum.pushBack(0.f);
ptr->statEntries_cpuDeltaTime_count.pushBack(0.f);
ptr->statEntries_gpuDeltaTime_sum.pushBack(0.f);
ptr->statEntries_gpuDeltaTime_count.pushBack(0.f);
}
// update entry
{
ptr->statEntries_active[entryIdx] = NV_FLOW_TRUE;
ptr->statEntries_cpuDeltaTime_sum[entryIdx] += profEntry.cpuDeltaTime;
ptr->statEntries_cpuDeltaTime_count[entryIdx] += 1.f;
ptr->statEntries_gpuDeltaTime_sum[entryIdx] += profEntry.gpuDeltaTime;
ptr->statEntries_gpuDeltaTime_count[entryIdx] += 1.f;
}
}
// subsample by default, to avoid a massive log
if ((captureID % 15) == 0)
{
ptr->statOut_label.size = 0u;
ptr->statOut_cpu.size = 0u;
ptr->statOut_gpu.size = 0u;
ptr->statOut_label.pushBack("Total");
ptr->statOut_cpu.pushBack(0.f);
ptr->statOut_gpu.pushBack(0.f);
float cpuSum = 0.f;
float gpuSum = 0.f;
fprintf(ptr->perflog, "\nFrame[%llu] : label cpuTimeMS gpuTimeMS\n", captureID);
for (NvFlowUint entryIdx = 0u; entryIdx < ptr->statEntries_label.size; entryIdx++)
{
const char* label = ptr->statEntries_label[entryIdx];
float cpuDeltaTime = ptr->statEntries_cpuDeltaTime_sum[entryIdx] / ptr->statEntries_cpuDeltaTime_count[entryIdx];
float gpuDeltaTime = ptr->statEntries_gpuDeltaTime_sum[entryIdx] / ptr->statEntries_gpuDeltaTime_count[entryIdx];
fprintf(ptr->perflog, "%s, %f, %f\n", label, 1000.f * cpuDeltaTime, 1000.f * gpuDeltaTime);
ptr->statOut_label.pushBack(label);
ptr->statOut_cpu.pushBack(1000.f * cpuDeltaTime);
ptr->statOut_gpu.pushBack(1000.f * gpuDeltaTime);
cpuSum += cpuDeltaTime;
gpuSum += gpuDeltaTime;
}
ptr->statOut_cpu[0] = 1000.f * cpuSum;
ptr->statOut_gpu[0] = 1000.f * gpuSum;
// reset stats
ptr->statEntries_label.size = 0u;
ptr->statEntries_active.size = 0u;
ptr->statEntries_cpuDeltaTime_sum.size = 0u;
ptr->statEntries_cpuDeltaTime_count.size = 0u;
ptr->statEntries_gpuDeltaTime_sum.size = 0u;
ptr->statEntries_gpuDeltaTime_count.size = 0u;
}
}
void editorCompute_logPrint(NvFlowLogLevel level, const char* format, ...)
{
va_list args;
va_start(args, format);
const char* prefix = "Unknown";
if (level == eNvFlowLogLevel_error)
{
prefix = "Error";
}
else if (level == eNvFlowLogLevel_warning)
{
prefix = "Warning";
}
else if (level == eNvFlowLogLevel_info)
{
prefix = "Info";
}
printf("%s: ", prefix);
vprintf(format, args);
printf("\n");
va_end(args);
}
NVIDIA-Omniverse/PhysX/flow/source/nvfloweditor/Loader.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#define GLFW_DLL
#if defined(_WIN32)
#define GLFW_EXPOSE_NATIVE_WIN32
#else
#define GLFW_EXPOSE_NATIVE_X11
#endif
#include <GLFW/glfw3.h>
#include <GLFW/glfw3native.h>
#define NV_FLOW_SWAPCHAIN_DESC 1
#include "NvFlowLoader.h"
#define GLFW_PTR(X) decltype(&X) p_##X = nullptr
#define GLFW_PTR_LOAD(X) ptr->p_##X = (decltype(&X))GlfwLoader_loadFunction(ptr, #X)
struct GlfwLoader
{
void* module = nullptr;
GLFW_PTR(glfwInit);
GLFW_PTR(glfwWindowHint);
GLFW_PTR(glfwCreateWindow);
GLFW_PTR(glfwGetPrimaryMonitor);
GLFW_PTR(glfwGetVideoMode);
GLFW_PTR(glfwSetWindowUserPointer);
GLFW_PTR(glfwSetWindowPos);
GLFW_PTR(glfwSetWindowSizeCallback);
GLFW_PTR(glfwSetKeyCallback);
GLFW_PTR(glfwSetCharCallback);
GLFW_PTR(glfwSetMouseButtonCallback);
GLFW_PTR(glfwSetCursorPosCallback);
GLFW_PTR(glfwSetScrollCallback);
#if defined(_WIN32)
GLFW_PTR(glfwGetWin32Window);
#else
GLFW_PTR(glfwGetX11Display);
GLFW_PTR(glfwGetX11Window);
#endif
GLFW_PTR(glfwDestroyWindow);
GLFW_PTR(glfwTerminate);
GLFW_PTR(glfwPollEvents);
GLFW_PTR(glfwWindowShouldClose);
GLFW_PTR(glfwGetWindowUserPointer);
GLFW_PTR(glfwSetWindowMonitor);
GLFW_PTR(glfwGetMouseButton);
};
inline void* GlfwLoader_loadFunction(GlfwLoader* ptr, const char* name)
{
return NvFlowGetProcAddress(ptr->module, name);
}
inline void GlfwLoader_init(GlfwLoader* ptr)
{
#if defined(__aarch64__)
ptr->module = NvFlowLoadLibrary("glfw3.dll", "libglfw_aarch64.so.3.3");
#else
ptr->module = NvFlowLoadLibrary("glfw3.dll", "libglfw.so.3");
#endif
GLFW_PTR_LOAD(glfwInit);
GLFW_PTR_LOAD(glfwWindowHint);
GLFW_PTR_LOAD(glfwCreateWindow);
GLFW_PTR_LOAD(glfwGetPrimaryMonitor);
GLFW_PTR_LOAD(glfwGetVideoMode);
GLFW_PTR_LOAD(glfwSetWindowUserPointer);
GLFW_PTR_LOAD(glfwSetWindowPos);
GLFW_PTR_LOAD(glfwSetWindowSizeCallback);
GLFW_PTR_LOAD(glfwSetKeyCallback);
GLFW_PTR_LOAD(glfwSetCharCallback);
GLFW_PTR_LOAD(glfwSetMouseButtonCallback);
GLFW_PTR_LOAD(glfwSetCursorPosCallback);
GLFW_PTR_LOAD(glfwSetScrollCallback);
#if defined(_WIN32)
GLFW_PTR_LOAD(glfwGetWin32Window);
#else
GLFW_PTR_LOAD(glfwGetX11Display);
GLFW_PTR_LOAD(glfwGetX11Window);
#endif
GLFW_PTR_LOAD(glfwDestroyWindow);
GLFW_PTR_LOAD(glfwTerminate);
GLFW_PTR_LOAD(glfwPollEvents);
GLFW_PTR_LOAD(glfwWindowShouldClose);
GLFW_PTR_LOAD(glfwGetWindowUserPointer);
GLFW_PTR_LOAD(glfwSetWindowMonitor);
GLFW_PTR_LOAD(glfwGetMouseButton);
}
inline void GlfwLoader_destroy(GlfwLoader* ptr)
{
NvFlowFreeLibrary(ptr->module);
}
NVIDIA-Omniverse/PhysX/flow/source/nvfloweditor/EditorFlow.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "EditorCommon.h"
struct NvFlowDatabasePrim
{
NvFlowDatabasePrim* parent;
const char* path;
const char* name;
NvFlowStringHashTable<NvFlowDatabaseAttr*> attrMap;
};
NV_FLOW_INLINE NvFlowDatabasePrim* createPrim(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowDatabasePrim* parent,
const char* displayTypename,
const char* path,
const char* name)
{
auto prim = new NvFlowDatabasePrim();
prim->parent = parent;
prim->path = path;
prim->name = name;
//printf("Create prim: displayTypename(%s), path(%s) name(%s)\n", displayTypename, path, name);
// register prim
EditorFlow* ptr = (EditorFlow*)context;
NvFlowBool32 success = NV_FLOW_FALSE;
ptr->primMap.insert(path, NvFlowStringHashFNV(path), prim, &success);
if (!success)
{
editorCompute_logPrint(eNvFlowLogLevel_warning, "Prim register failed, existing prim at path %s", path);
}
return prim;
}
NV_FLOW_INLINE void updatePrim(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowUint64 minActiveVersion,
NvFlowDatabasePrim* prim)
{
}
NV_FLOW_INLINE void markDestroyedPrim(NvFlowDatabaseContext* context, NvFlowDatabasePrim* prim)
{
// unregister prim
EditorFlow* ptr = (EditorFlow*)context;
if (!ptr->primMap.erase(prim->path, NvFlowStringHashFNV(prim->path)))
{
editorCompute_logPrint(eNvFlowLogLevel_warning, "Prim unregister failed, prim not registered %s", prim->path);
}
//printf("MarkDestroyed prim: path(%s) name(%s)\n", prim->path, prim->name);
}
NV_FLOW_INLINE void destroyPrim(NvFlowDatabaseContext* context, NvFlowDatabasePrim* prim)
{
//printf("Destroy prim: path(%s) name(%s)\n", prim->path, prim->name);
delete prim;
}
struct NvFlowDatabaseValue
{
NvFlowArray<NvFlowUint8> data;
NvFlowUint64 version;
NvFlowUint64 lastUsed;
};
struct NvFlowDatabaseAttr
{
NvFlowDatabasePrim* prim = nullptr;
NvFlowRingBufferPointer<NvFlowDatabaseValue*> values;
const char* name = nullptr;
NvFlowUint64 commandIdx = ~0llu;
};
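// Each attribute keeps a ring buffer of versioned array snapshots; updateAttr()
// pops snapshots once minActiveVersion has passed their lastUsed version,
// always keeping at least one to allow copy/migration.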
NV_FLOW_INLINE NvFlowDatabaseValue* copyArray(
NvFlowUint64 version,
NvFlowUint64 minActiveVersion,
NvFlowDatabaseAttr* attr,
const NvFlowReflectData* reflectData,
NvFlowUint8* mappedData,
const void* srcData,
NvFlowUint64 srcDataSizeInBytes
)
{
auto value = attr->values.allocateBackPointer();
value->version = version;
value->lastUsed = version;
value->data.size = 0u;
NvFlowUint8** pData = (NvFlowUint8**)(mappedData + reflectData->dataOffset);
NvFlowUint64* pArraySize = (NvFlowUint64*)(mappedData + reflectData->arraySizeOffset);
value->data.reserve(srcDataSizeInBytes);
value->data.size = srcDataSizeInBytes;
if (srcData)
{
memcpy(value->data.data, srcData, srcDataSizeInBytes);
}
else
{
memset(value->data.data, 0, srcDataSizeInBytes);
}
// override to owned copy
*pData = value->data.data;
*pArraySize = srcDataSizeInBytes / reflectData->dataType->elementSize;
if (reflectData->reflectMode == eNvFlowReflectMode_arrayVersioned)
{
NvFlowUint64* pVersion = (NvFlowUint64*)(mappedData + reflectData->versionOffset);
// aligning array version to commit version, convenient by not required
*pVersion = version;
}
return value;
}
NV_FLOW_INLINE NvFlowDatabaseAttr* createAttr(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowDatabasePrim* prim,
const NvFlowReflectData* reflectData,
NvFlowUint8* mappedData)
{
auto attr = new NvFlowDatabaseAttr();
attr->prim = prim;
attr->name = reflectData->name;
// make a copy of any read-only arrays to allow in-place edits
if (reflectData->reflectMode == eNvFlowReflectMode_array ||
reflectData->reflectMode == eNvFlowReflectMode_arrayVersioned)
{
NvFlowUint8** pData = (NvFlowUint8**)(mappedData + reflectData->dataOffset);
NvFlowUint64* pArraySize = (NvFlowUint64*)(mappedData + reflectData->arraySizeOffset);
NvFlowUint8* data = *pData;
NvFlowUint64 arraySizeInBytes = (*pArraySize) * reflectData->dataType->elementSize;
copyArray(version, version, attr, reflectData, mappedData, data, arraySizeInBytes);
}
// register attribute
EditorFlow* ptr = (EditorFlow*)context;
NvFlowBool32 success = NV_FLOW_FALSE;
prim->attrMap.insert(attr->name, NvFlowStringHashFNV(attr->name), attr, &success);
if (!success)
{
editorCompute_logPrint(eNvFlowLogLevel_warning, "Attribute register failed, existing attribute with name %s", reflectData->name);
}
return attr;
}
NV_FLOW_INLINE void updateAttr(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowUint64 minActiveVersion,
NvFlowDatabaseAttr* attr,
const NvFlowReflectData* reflectData,
NvFlowUint8* mappedData)
{
EditorFlow* ptr = (EditorFlow*)context;
// recycle before update to maximize the chance of reuse
if (reflectData->reflectMode == eNvFlowReflectMode_array ||
reflectData->reflectMode == eNvFlowReflectMode_arrayVersioned)
{
// leave 1 to allow copy/migrate
while (attr->values.activeCount() > 1u && attr->values.front()->lastUsed < minActiveVersion)
{
//printf("Popping %s version %llu lastUsed %llu\n", reflectData->name, attr->values.front()->version, attr->values.front()->lastUsed);
attr->values.popFront();
}
}
if (attr->commandIdx < ptr->commands.size)
{
EditorFlowCommand* cmd = &ptr->commands[attr->commandIdx];
if (reflectData->reflectMode == eNvFlowReflectMode_value ||
reflectData->reflectMode == eNvFlowReflectMode_valueVersioned)
{
if (reflectData->dataType->elementSize == cmd->dataSize)
{
memcpy(mappedData + reflectData->dataOffset, cmd->data, cmd->dataSize);
}
}
else if (reflectData->reflectMode == eNvFlowReflectMode_array ||
reflectData->reflectMode == eNvFlowReflectMode_arrayVersioned)
{
copyArray(version, minActiveVersion, attr, reflectData, mappedData, cmd->data, cmd->dataSize);
}
// invalidate command
attr->commandIdx = ~0llu;
}
// free at end, in case new array allows old to free
if (reflectData->reflectMode == eNvFlowReflectMode_array ||
reflectData->reflectMode == eNvFlowReflectMode_arrayVersioned)
{
if (attr->values.activeCount() > 0u)
{
attr->values.back()->lastUsed = version;
}
// leave 1 to allow copy/migrate
while (attr->values.activeCount() > 1u && attr->values.front()->lastUsed < minActiveVersion)
{
//printf("Popping %s version %llu lastUsed %llu\n", reflectData->name, attr->values.front()->version, attr->values.front()->lastUsed);
attr->values.popFront();
}
}
}
NV_FLOW_INLINE void markDestroyedAttr(NvFlowDatabaseContext* context, NvFlowDatabaseAttr* attr)
{
// unregister attribute
EditorFlow* ptr = (EditorFlow*)context;
if (!attr->prim->attrMap.erase(attr->name, NvFlowStringHashFNV(attr->name)))
{
editorCompute_logPrint(eNvFlowLogLevel_warning, "Attribute unregister failed, attribute not registered %s", attr->name);
}
}
NV_FLOW_INLINE void destroyAttr(NvFlowDatabaseContext* context, NvFlowDatabaseAttr* attr)
{
delete attr;
}
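// Callback table handed to the Flow param database; gridParamsSet invokes these
// as prims and attributes are created, updated, and destroyed during update().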
static const NvFlowDatabaseInterface iface = {
createPrim, updatePrim, markDestroyedPrim, destroyPrim,
createAttr, updateAttr, markDestroyedAttr, destroyAttr
};
void editorFlow_init(EditorCompute* ctx, EditorFlow* ptr)
{
NvFlowContext* context = ctx->loader.deviceInterface.getContext(ctx->deviceQueue);
ptr->commandStringPool = NvFlowStringPoolCreate();
NvFlowGridDesc gridDesc = NvFlowGridDesc_default;
ptr->maxLocations = ptr->targetMaxLocations;
gridDesc.maxLocations = ptr->maxLocations;
ptr->grid = ctx->loader.gridInterface.createGrid(&ctx->contextInterface, context, &ctx->loader.opList, &ctx->loader.extOpList, &gridDesc);
ptr->gridParamsServer = ctx->loader.gridParamsInterface.createGridParamsNamed(nullptr);
ptr->gridParamsClient = ctx->loader.gridParamsInterface.createGridParamsNamed(nullptr);
ptr->gridParams = ctx->loader.gridParamsInterface.mapGridParamsNamed(ptr->gridParamsServer);
//ptr->loader.gridInterface.setResourceMinLifetime(context, ptr->grid, 0u);
editorCompute_logPrint(eNvFlowLogLevel_info, "Initialized Flow Grid");
NvFlowUint64 typeCount = 0u;
ctx->loader.gridParamsInterface.enumerateParamTypes(ptr->gridParams, nullptr, nullptr, nullptr, &typeCount);
ptr->typenames.reserve(typeCount);
ptr->typenames.size = typeCount;
ptr->displayTypenames.reserve(typeCount);
ptr->displayTypenames.size = typeCount;
ptr->dataTypes.reserve(typeCount);
ptr->dataTypes.size = typeCount;
ctx->loader.gridParamsInterface.enumerateParamTypes(ptr->gridParams, ptr->typenames.data, ptr->displayTypenames.data, ptr->dataTypes.data, &typeCount);
// register types
ptr->types.size = 0u;
for (NvFlowUint64 typeIdx = 0u; typeIdx < ptr->dataTypes.size; typeIdx++)
{
ptr->types.pushBack(ptr->gridParamsSet.createType(ptr->dataTypes[typeIdx], ptr->displayTypenames[typeIdx]));
}
const EditorFlowStage** builtinStages = nullptr;
NvFlowUint64 builtinStageCount = 0u;
editorFlowStage_getBuiltinStages(&builtinStages, &builtinStageCount);
for (NvFlowUint idx = 0u; idx < builtinStageCount; idx++)
{
ptr->stages.pushBack(builtinStages[idx]);
}
// command line stage selection
if (ptr->cmdStage)
{
for (NvFlowUint64 idx = 0u; idx < ptr->stages.size; idx++)
{
if (strcmp(ptr->stages[idx]->stageName, ptr->cmdStage) == 0)
{
ptr->targetStageIdx = idx;
}
}
}
if (ptr->stages.size > 0u)
{
ptr->targetStageIdx = ptr->targetStageIdx % ptr->stages.size;
const EditorFlowStage* targetStage = ptr->stages[ptr->targetStageIdx];
ptr->currentStage = targetStage;
ptr->stageUserdata = ptr->currentStage->init(ptr);
editorFlowStage_applyOverrides(ptr, ptr->cellsizeOverride, ptr->smallBlocksOverride);
}
}
void editorFlow_presimulate(EditorCompute* ctx, EditorFlow* ptr, float deltaTime, NvFlowBool32 isPaused)
{
NvFlowGridParamsDesc nullGridParamsDesc = {};
ptr->gridParamsDesc = nullGridParamsDesc;
ptr->absoluteSimTime += deltaTime;
float simDeltaTime = isPaused ? 0.f : deltaTime;
ptr->animationTime += simDeltaTime;
NvFlowBool32 globalForceClear = NV_FLOW_FALSE;
if (ptr->stages.size > 0u)
{
ptr->targetStageIdx = ptr->targetStageIdx % ptr->stages.size;
const EditorFlowStage* targetStage = ptr->stages[ptr->targetStageIdx];
if (ptr->currentStage != targetStage)
{
if (ptr->currentStage)
{
if (ptr->currentStage->destroy)
{
ptr->currentStage->destroy(ptr, ptr->stageUserdata);
ptr->stageUserdata = nullptr;
}
}
editorFlow_clearStage(ptr);
globalForceClear = NV_FLOW_TRUE;
ptr->currentStage = targetStage;
ptr->stageUserdata = ptr->currentStage->init(ptr);
editorFlowStage_applyOverrides(ptr, ptr->cellsizeOverride, ptr->smallBlocksOverride);
}
}
if (ptr->currentStage)
{
if (ptr->currentStage->update)
{
ptr->currentStage->update(ptr, ptr->stageUserdata, ptr->animationTime, simDeltaTime);
}
}
//auto testParams = ptr->loader.gridParamsInterface.createAbstractParams(ptr->gridParams, 0u, "test");
NvFlowUint64 stagingVersion = 0llu;
NvFlowUint64 minActiveVersion = 0llu;
ctx->loader.gridParamsInterface.getVersion(ptr->gridParams, &stagingVersion, &minActiveVersion);
// process commands
for (NvFlowUint64 idx = 0u; idx < ptr->commands.size; idx++)
{
EditorFlowCommand* cmd = &ptr->commands[idx];
if (strcmp(cmd->cmd, "clearStage") == 0)
{
ptr->gridParamsSet.markAllInstancesForDestroy<&iface>((NvFlowDatabaseContext*)ptr);
}
else if (strcmp(cmd->cmd, "definePrim") == 0)
{
NvFlowUint64 typenameIdx = 0u;
for (; typenameIdx < ptr->typenames.size; typenameIdx++)
{
if (NvFlowReflectStringCompare(ptr->displayTypenames[typenameIdx], cmd->type) == 0 ||
NvFlowReflectStringCompare(ptr->typenames[typenameIdx], cmd->type) == 0)
{
break;
}
}
if (typenameIdx < ptr->typenames.size)
{
ptr->gridParamsSet.createInstance<&iface>((NvFlowDatabaseContext*)ptr, stagingVersion, ptr->types[typenameIdx], cmd->path, cmd->name);
}
else
{
editorCompute_logPrint(eNvFlowLogLevel_warning, "definePrim(%s, %s) failed, type not recognized", cmd->type, cmd->path);
}
}
else if (strcmp(cmd->cmd, "setAttribute") == 0)
{
NvFlowBool32 success = NV_FLOW_FALSE;
NvFlowUint64 primFindIdx = ptr->primMap.find(cmd->path, NvFlowStringHashFNV(cmd->path));
if (primFindIdx != ~0llu)
{
NvFlowDatabasePrim* prim = ptr->primMap.values[primFindIdx];
NvFlowUint64 attrFindIdx = prim->attrMap.find(cmd->name, NvFlowStringHashFNV(cmd->name));
if (attrFindIdx != ~0llu)
{
NvFlowDatabaseAttr* attr = prim->attrMap.values[attrFindIdx];
attr->commandIdx = idx;
success = NV_FLOW_TRUE;
}
}
if (!success)
{
editorCompute_logPrint(eNvFlowLogLevel_warning,
"setAttribute(%s, %s) failed, attribute does not exist.",
cmd->path, cmd->name
);
}
}
}
ptr->gridParamsSet.update<&iface>((NvFlowDatabaseContext*)ptr, stagingVersion, minActiveVersion);
// reset command queue
ptr->commands.size = 0u;
NvFlowStringPoolReset(ptr->commandStringPool);
NvFlowGridParamsDescSnapshot snapshot = {};
ptr->gridParamsSet.getSnapshot(&snapshot.snapshot, stagingVersion);
snapshot.absoluteSimTime = ptr->absoluteSimTime;
snapshot.deltaTime = simDeltaTime;
snapshot.globalForceClear = globalForceClear;
ctx->loader.gridParamsInterface.commitParams(ptr->gridParams, &snapshot);
ptr->clientGridParams = ctx->loader.gridParamsInterface.mapGridParamsNamed(ptr->gridParamsClient);
ptr->paramsSnapshot = ctx->loader.gridParamsInterface.getParamsSnapshot(ptr->clientGridParams, ptr->absoluteSimTime, 0llu);
if (!ctx->loader.gridParamsInterface.mapParamsDesc(ptr->clientGridParams, ptr->paramsSnapshot, &ptr->gridParamsDesc))
{
printf("GridParams map failed!!!!!!!!!\n");
}
}
void editorFlow_simulate(EditorCompute* ctx, EditorFlow* ptr, float deltaTime, NvFlowBool32 isPaused)
{
NvFlowContext* context = ctx->loader.deviceInterface.getContext(ctx->deviceQueue);
{
if (ptr->maxLocations != ptr->targetMaxLocations)
{
ptr->maxLocations = ptr->targetMaxLocations;
NvFlowGridDesc gridDesc = NvFlowGridDesc_default;
gridDesc.maxLocations = ptr->maxLocations;
ctx->loader.gridInterface.resetGrid(
context,
ptr->grid,
&gridDesc
);
}
ctx->loader.gridInterface.simulate(
context,
ptr->grid,
&ptr->gridParamsDesc,
NV_FLOW_FALSE
);
NvFlowDatabaseSnapshot databaseSnapshot = {};
if (ptr->gridParamsDesc.snapshotCount > 0u)
{
databaseSnapshot = ptr->gridParamsDesc.snapshots[ptr->gridParamsDesc.snapshotCount - 1u].snapshot;
}
NV_FLOW_DATABASE_SNAPSHOT_FIND_TYPE_ARRAY(&databaseSnapshot, NvFlowGridSimulateLayerParams)
ptr->activeBlockCount = ctx->loader.gridInterface.getActiveBlockCount(ptr->grid);
ctx->benchmarkActiveBlockCount = ptr->activeBlockCount;
ptr->activeBlockDim = { 32u, 16u, 16u };
for (NvFlowUint64 layerParamIdx = 0u; layerParamIdx < NvFlowGridSimulateLayerParams_elementCount; layerParamIdx++)
{
if (NvFlowGridSimulateLayerParams_elements[layerParamIdx]->enableSmallBlocks)
{
ptr->activeBlockDim = { 16u, 8u, 8u };
}
}
ctx->loader.gridInterface.updateIsosurface(
context,
ptr->grid,
&ptr->gridParamsDesc
);
ptr->activeBlockCountIsosurface = ctx->loader.gridInterface.getActiveBlockCountIsosurface(ptr->grid);
}
// test grid export
{
NvFlowGridRenderData renderData = {};
ctx->loader.gridInterface.getRenderData(context, ptr->grid, &renderData);
static int writeFrame = 0;
writeFrame++;
if (writeFrame % 1000 == 999)
{
if (renderData.nanoVdb.readbackCount > 0u)
{
NvFlowUint64 lastGlobalFrameCompleted = ctx->contextInterface.getLastGlobalFrameCompleted(context);
NvFlowSparseNanoVdbExportReadback* readback = &renderData.nanoVdb.readbacks[0u];
if (readback->globalFrameCompleted <= lastGlobalFrameCompleted && readback->smokeNanoVdbReadback)
{
const char* path = "../../../data/capture0.nvdb.raw";
FILE* file = nullptr;
fopen_s(&file, path, "wb");
if (file)
{
printf("Writing out capture0.nvdb.raw...\n");
fwrite(readback->smokeNanoVdbReadback, 1u, readback->smokeNanoVdbReadbackSize, file);
fclose(file);
}
}
}
}
}
}
void editorFlow_offscreen(EditorCompute* ctx, EditorFlow* ptr)
{
NvFlowContext* context = ctx->loader.deviceInterface.getContext(ctx->deviceQueue);
ctx->loader.gridInterface.offscreen(
context,
ptr->grid,
&ptr->gridParamsDesc
);
}
void editorFlow_render(
EditorCompute* ctx,
EditorFlow* ptr,
NvFlowTextureTransient** colorFrontTransient,
NvFlowTextureTransient* offscreenDepthTransient,
NvFlowUint windowWidth,
NvFlowUint windowHeight,
const NvFlowFloat4x4* view,
const NvFlowFloat4x4* projection
)
{
NvFlowContext* context = ctx->loader.deviceInterface.getContext(ctx->deviceQueue);
ctx->loader.gridInterface.render(
context,
ptr->grid,
&ptr->gridParamsDesc,
view,
projection,
projection,
windowWidth,
windowHeight,
windowWidth,
windowHeight,
1.f,
offscreenDepthTransient,
eNvFlowFormat_r16g16b16a16_float,
*colorFrontTransient,
colorFrontTransient
);
ctx->loader.gridInterface.renderIsosurface(
context,
ptr->grid,
&ptr->gridParamsDesc,
view,
projection,
projection,
windowWidth,
windowHeight,
windowWidth,
windowHeight,
1.f,
offscreenDepthTransient,
eNvFlowFormat_r16g16b16a16_float,
*colorFrontTransient,
colorFrontTransient
);
}
void editorFlow_unmap(EditorCompute* ctx, EditorFlow* ptr)
{
ctx->loader.gridParamsInterface.unmapParamsDesc(ptr->clientGridParams, ptr->paramsSnapshot);
// invalidate mapped gridParamsDesc
NvFlowGridParamsDesc nullGridParamsDesc = {};
ptr->gridParamsDesc = nullGridParamsDesc;
}
void editorFlow_destroy(EditorCompute* ctx, EditorFlow* ptr)
{
NvFlowContext* context = ctx->loader.deviceInterface.getContext(ctx->deviceQueue);
if (ptr->currentStage)
{
if (ptr->currentStage->destroy)
{
ptr->currentStage->destroy(ptr, ptr->stageUserdata);
ptr->stageUserdata = nullptr;
}
}
ctx->loader.gridInterface.destroyGrid(context, ptr->grid);
ctx->loader.gridParamsInterface.destroyGridParamsNamed(ptr->gridParamsServer);
ctx->loader.gridParamsInterface.destroyGridParamsNamed(ptr->gridParamsClient);
ptr->gridParamsSet.markAllInstancesForDestroy<&iface>((NvFlowDatabaseContext*)ptr);
ptr->gridParamsSet.destroy<&iface>((NvFlowDatabaseContext*)ptr);
if (ptr->primMap.keyCount > 0u)
{
editorCompute_logPrint(eNvFlowLogLevel_warning, "Warning primMap not fully unregistered");
}
NvFlowStringPoolDestroy(ptr->commandStringPool);
editorCompute_logPrint(eNvFlowLogLevel_info, "Destroyed Grid");
}
void editorFlow_clearStage(EditorFlow* ptr)
{
EditorFlowCommand command = {};
command.cmd = "clearStage";
ptr->commands.pushBack(command);
}
void editorFlow_definePrim(EditorFlow* ptr, const char* type, const char* path, const char* name)
{
EditorFlowCommand command = {};
command.cmd = "definePrim";
command.path = NvFlowStringDup(ptr->commandStringPool, path);
command.name = NvFlowStringDup(ptr->commandStringPool, name);
command.type = NvFlowStringDup(ptr->commandStringPool, type);
ptr->commands.pushBack(command);
}
void editorFlow_setAttribute(EditorFlow* ptr, const char* primPath, const char* name, const void* data, NvFlowUint64 sizeInBytes)
{
char* commandData = NvFlowStringPoolAllocate(ptr->commandStringPool, sizeInBytes);
memcpy(commandData, data, sizeInBytes);
EditorFlowCommand command = {};
command.cmd = "setAttribute";
command.path = NvFlowStringDup(ptr->commandStringPool, primPath);
command.name = NvFlowStringDup(ptr->commandStringPool, name);
command.data = (NvFlowUint8*)commandData;
command.dataSize = sizeInBytes;
ptr->commands.pushBack(command);
}
void editorFlow_setAttributeFloat(EditorFlow* ptr, const char* primPath, const char* name, float value)
{
editorFlow_setAttribute(ptr, primPath, name, &value, sizeof(float));
}
void editorFlow_setAttributeInt(EditorFlow* ptr, const char* primPath, const char* name, int value)
{
editorFlow_setAttribute(ptr, primPath, name, &value, sizeof(int));
}
void editorFlow_setAttributeUint(EditorFlow* ptr, const char* primPath, const char* name, NvFlowUint value)
{
editorFlow_setAttribute(ptr, primPath, name, &value, sizeof(unsigned int));
}
void editorFlow_setAttributeBool(EditorFlow* ptr, const char* primPath, const char* name, NvFlowBool32 value)
{
editorFlow_setAttribute(ptr, primPath, name, &value, sizeof(NvFlowBool32));
}
void editorFlow_setAttributeFloat3(EditorFlow* ptr, const char* primPath, const char* name, NvFlowFloat3 value)
{
editorFlow_setAttribute(ptr, primPath, name, &value, sizeof(NvFlowFloat3));
}
void editorFlow_setAttributeFloat3Array(EditorFlow* ptr, const char* primPath, const char* name, const NvFlowFloat3* values, NvFlowUint64 elementCount)
{
editorFlow_setAttribute(ptr, primPath, name, values, elementCount * sizeof(NvFlowFloat3));
}
void editorFlow_setAttributeFloat4Array(EditorFlow* ptr, const char* primPath, const char* name, const NvFlowFloat4* values, NvFlowUint64 elementCount)
{
editorFlow_setAttribute(ptr, primPath, name, values, elementCount * sizeof(NvFlowFloat4));
}
void editorFlow_setAttributeIntArray(EditorFlow* ptr, const char* primPath, const char* name, const int* values, NvFlowUint64 elementCount)
{
editorFlow_setAttribute(ptr, primPath, name, values, elementCount * sizeof(int));
}
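// Illustrative command-queue usage (hypothetical prim type/path names, not
// taken from this file). Commands are buffered in ptr->commands and consumed
// by the next editorFlow_presimulate() pass against the staging version:
//
//   editorFlow_definePrim(ptr, "FlowEmitterSphere", "/World/emitter0", "emitter0");
//   editorFlow_setAttributeFloat(ptr, "/World/emitter0", "coupleRateTemperature", 10.f);
//   editorFlow_setAttributeFloat3(ptr, "/World/emitter0", "position", NvFlowFloat3{ 0.f, 0.f, 100.f });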
NVIDIA-Omniverse/PhysX/flow/source/nvfloweditor/ShapeRenderer.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
struct NvFlowShapeRenderer;
struct NvFlowShapeRendererParams
{
NvFlowUint numSpheres;
NvFlowFloat4* spherePositionRadius;
};
struct NvFlowShapeRendererInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowShapeRenderer*(NV_FLOW_ABI* create)(NvFlowContextInterface* contextInterface, NvFlowContext* context);
void(NV_FLOW_ABI* destroy)(NvFlowContext* context, NvFlowShapeRenderer* renderer);
void(NV_FLOW_ABI* render)(
NvFlowContext* context,
NvFlowShapeRenderer* renderer, const NvFlowShapeRendererParams* params,
const NvFlowFloat4x4* view,
const NvFlowFloat4x4* projection,
NvFlowUint textureWidth,
NvFlowUint textureHeight,
NvFlowTextureTransient* depthOut,
NvFlowTextureTransient* colorOut
);
};
#define NV_FLOW_REFLECT_TYPE NvFlowShapeRendererInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(create, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroy, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(render, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
NvFlowShapeRendererInterface* NvFlowGetShapeRendererInterface();
NVIDIA-Omniverse/PhysX/flow/source/nvfloweditor/EditorGlfw.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "EditorCommon.h"
GlfwLoader glfwLoader{};
void windowSizeCallback(GLFWwindow* win, int width, int height);
void keyboardCallback(GLFWwindow* win, int key, int scanCode, int action, int modifiers);
void charInputCallback(GLFWwindow* win, uint32_t input);
void mouseMoveCallback(GLFWwindow* win, double mouseX, double mouseY);
void mouseButtonCallback(GLFWwindow* win, int button, int action, int modifiers);
void mouseWheelCallback(GLFWwindow* win, double scrollX, double scrollY);
int editorGlfw_init(App* ptr)
{
GlfwLoader_init(&glfwLoader);
if (!glfwLoader.p_glfwInit)
{
fprintf(stderr, "GLFW binary is missing!\n");
return 1;
}
if (!glfwLoader.p_glfwInit())
{
fprintf(stderr, "Failed to init GLFW\n");
return 1;
}
glfwLoader.p_glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
const char* windowName = "NVIDIA Flow 2 Editor";
ptr->window = glfwLoader.p_glfwCreateWindow((int)ptr->windowWidth, (int)ptr->windowHeight, windowName, nullptr, nullptr);
if (!ptr->window)
{
fprintf(stderr, "Failed to create GLFW window\n");
return 1;
}
GLFWmonitor* monitor = glfwLoader.p_glfwGetPrimaryMonitor();
const GLFWvidmode* mode = glfwLoader.p_glfwGetVideoMode(monitor);
glfwLoader.p_glfwSetWindowUserPointer(ptr->window, ptr);
glfwLoader.p_glfwSetWindowPos(ptr->window, mode->width / 2 - ((int)ptr->windowWidth) / 2, mode->height / 2 - ((int)ptr->windowHeight) / 2);
glfwLoader.p_glfwSetWindowSizeCallback(ptr->window, windowSizeCallback);
glfwLoader.p_glfwSetKeyCallback(ptr->window, keyboardCallback);
glfwLoader.p_glfwSetCharCallback(ptr->window, charInputCallback);
glfwLoader.p_glfwSetMouseButtonCallback(ptr->window, mouseButtonCallback);
glfwLoader.p_glfwSetCursorPosCallback(ptr->window, mouseMoveCallback);
glfwLoader.p_glfwSetScrollCallback(ptr->window, mouseWheelCallback);
return 0;
}
void editorGlfw_getSwapchainDesc(App* ptr, NvFlowSwapchainDesc* outDesc)
{
NvFlowSwapchainDesc swapchainDesc = {};
swapchainDesc.format = eNvFlowFormat_b8g8r8a8_unorm;
#if defined(_WIN32)
swapchainDesc.hwnd = glfwLoader.p_glfwGetWin32Window(ptr->window);
swapchainDesc.hinstance = (HINSTANCE)GetWindowLongPtr(swapchainDesc.hwnd, GWLP_HINSTANCE);
#else
swapchainDesc.dpy = glfwLoader.p_glfwGetX11Display();
swapchainDesc.window = glfwLoader.p_glfwGetX11Window(ptr->window);
#endif
*outDesc = swapchainDesc;
}
int editorGlfw_processEvents(App* ptr)
{
glfwLoader.p_glfwPollEvents();
if (glfwLoader.p_glfwWindowShouldClose(ptr->window))
{
editorCompute_logPrint(eNvFlowLogLevel_info, "GLFW Close Window.");
return 1;
}
return 0;
}
void editorGlfw_destroy(App* ptr)
{
glfwLoader.p_glfwDestroyWindow(ptr->window);
glfwLoader.p_glfwTerminate();
GlfwLoader_destroy(&glfwLoader);
}
void editorGlfw_newFrame(App* ptr, float deltaTime)
{
ImGuiIO& io = ImGui::GetIO();
io.DisplaySize = ImVec2(float(ptr->windowWidth), float(ptr->windowHeight));
io.DeltaTime = deltaTime;
for (int i = 0; i < IM_ARRAYSIZE(io.MouseDown); i++)
{
io.MouseDown[i] = ptr->mouseJustPressed[i] != 0u || glfwLoader.p_glfwGetMouseButton(ptr->window, i) != 0;
ptr->mouseJustPressed[i] = NV_FLOW_FALSE;
}
io.MousePos.x = (float)ptr->mouseX;
io.MousePos.y = (float)ptr->mouseY;
ImGui::NewFrame();
}
void windowSizeCallback(GLFWwindow* win, int width, int height)
{
auto ptr = (App*)glfwLoader.p_glfwGetWindowUserPointer(win);
// resize
ptr->compute.loader.deviceInterface.resizeSwapchain(ptr->compute.swapchain, (NvFlowUint)width, (NvFlowUint)height);
if (width == 0 || height == 0)
{
return;
}
ptr->windowWidth = width;
ptr->windowHeight = height;
}
void keyboardCallback(GLFWwindow* win, int key, int scanCode, int action, int modifiers)
{
auto ptr = (App*)glfwLoader.p_glfwGetWindowUserPointer(win);
ImGuiIO& io = ImGui::GetIO();
if (!io.WantCaptureKeyboard)
{
if (action == GLFW_PRESS)
{
if (key == GLFW_KEY_ESCAPE)
{
ptr->shouldRun = false;
}
else if (key == GLFW_KEY_H)
{
NvFlowCameraConfig config = {};
NvFlowCameraGetConfig(ptr->camera, &config);
config.isProjectionRH = !config.isProjectionRH;
NvFlowCameraSetConfig(ptr->camera, &config);
}
else if (key == GLFW_KEY_O)
{
NvFlowCameraConfig config = {};
NvFlowCameraGetConfig(ptr->camera, &config);
config.isOrthographic = !config.isOrthographic;
if (config.isOrthographic)
{
config.farPlane = 10000.f;
}
if (!config.isOrthographic && config.isReverseZ)
{
config.farPlane = INFINITY;
}
NvFlowCameraSetConfig(ptr->camera, &config);
}
else if (key == GLFW_KEY_J)
{
NvFlowCameraConfig config = {};
NvFlowCameraGetConfig(ptr->camera, &config);
config.isReverseZ = !config.isReverseZ;
if (config.isReverseZ)
{
config.farPlane = INFINITY;
}
else
{
config.farPlane = 10000.f;
}
if (config.isOrthographic)
{
config.farPlane = 10000.f;
}
NvFlowCameraSetConfig(ptr->camera, &config);
}
else if (key == GLFW_KEY_K)
{
NvFlowCameraState state = {};
NvFlowCameraGetState(ptr->camera, &state);
bool isZup = state.eyeUp.z > 0.5f;
NvFlowCameraGetDefaultState(&state, isZup);
NvFlowCameraSetState(ptr->camera, &state);
}
else if (key == GLFW_KEY_V)
{
ptr->compute.vsync ^= NV_FLOW_TRUE;
}
else if (key == GLFW_KEY_P)
{
ptr->isPaused ^= NV_FLOW_TRUE;
}
else if (key == GLFW_KEY_G)
{
ptr->overlayEnabled ^= NV_FLOW_TRUE;
}
else if (key == GLFW_KEY_E)
{
ptr->editorEnabled ^= NV_FLOW_TRUE;
}
else if (key == GLFW_KEY_C)
{
ptr->captureEnabled ^= NV_FLOW_TRUE;
}
else if (key == GLFW_KEY_F11)
{
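// fullscreenState machine: 0 = windowed, 1 = F11 held entering fullscreen,
// 2 = fullscreen, 3 = F11 held leaving fullscreen; advanced here on press
// and in the GLFW_RELEASE branch below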
if (ptr->fullscreenState == 0)
{
GLFWmonitor* monitor = glfwLoader.p_glfwGetPrimaryMonitor();
const GLFWvidmode* mode = glfwLoader.p_glfwGetVideoMode(monitor);
ptr->windowWidthOld = ptr->windowWidth;
ptr->windowHeightOld = ptr->windowHeight;
glfwLoader.p_glfwSetWindowMonitor(ptr->window, monitor, 0, 0, mode->width, mode->height, mode->refreshRate);
ptr->fullscreenState = 1;
}
else if (ptr->fullscreenState == 2)
{
glfwLoader.p_glfwSetWindowMonitor(ptr->window, nullptr,
(int)(ptr->windowWidth / 2 - ptr->windowWidthOld / 2),
(int)(ptr->windowHeight / 2 - ptr->windowHeightOld / 2),
(int)(ptr->windowWidthOld), (int)(ptr->windowHeightOld),
GLFW_DONT_CARE
);
ptr->fullscreenState = 3;
}
}
}
else if (action == GLFW_RELEASE)
{
if (key == GLFW_KEY_F11)
{
if (ptr->fullscreenState == 1)
{
ptr->fullscreenState = 2;
}
else if (ptr->fullscreenState == 3)
{
ptr->fullscreenState = 0;
}
}
}
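// Arrow keys feed camera translation; this is additionally gated on
// WantCaptureMouse so camera motion stays suppressed while the pointer is
// interacting with ImGui widgets.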
if (!ImGui::GetIO().WantCaptureMouse)
{
NvFlowCameraAction nvfAction = eNvFlowCameraAction_unknown;
if (action == GLFW_PRESS)
{
nvfAction = eNvFlowCameraAction_down;
}
else if (action == GLFW_RELEASE)
{
nvfAction = eNvFlowCameraAction_up;
}
NvFlowCameraKey flowKey = eNvFlowCameraKey_unknown;
if (key == GLFW_KEY_UP)
{
flowKey = eNvFlowCameraKey_up;
}
else if (key == GLFW_KEY_DOWN)
{
flowKey = eNvFlowCameraKey_down;
}
else if (key == GLFW_KEY_LEFT)
{
flowKey = eNvFlowCameraKey_left;
}
else if (key == GLFW_KEY_RIGHT)
{
flowKey = eNvFlowCameraKey_right;
}
NvFlowCameraKeyUpdate(ptr->camera, flowKey, nvfAction);
}
}
// imgui always captures
{
if (action == GLFW_PRESS)
{
io.KeysDown[key] = true;
}
else if (action == GLFW_RELEASE)
{
io.KeysDown[key] = false;
}
io.KeyCtrl = io.KeysDown[GLFW_KEY_LEFT_CONTROL] || io.KeysDown[GLFW_KEY_RIGHT_CONTROL];
io.KeyShift = io.KeysDown[GLFW_KEY_LEFT_SHIFT] || io.KeysDown[GLFW_KEY_RIGHT_SHIFT];
io.KeyAlt = io.KeysDown[GLFW_KEY_LEFT_ALT] || io.KeysDown[GLFW_KEY_RIGHT_ALT];
io.KeySuper = io.KeysDown[GLFW_KEY_LEFT_SUPER] || io.KeysDown[GLFW_KEY_RIGHT_SUPER];
}
}
void charInputCallback(GLFWwindow* win, uint32_t input)
{
auto ptr = (App*)glfwLoader.p_glfwGetWindowUserPointer(win);
ImGuiIO& io = ImGui::GetIO();
// imgui always captures
{
io.AddInputCharacter(input);
}
}
void mouseMoveCallback(GLFWwindow* win, double mouseX, double mouseY)
{
auto ptr = (App*)glfwLoader.p_glfwGetWindowUserPointer(win);
int x = int(mouseX);
int y = int(mouseY);
ptr->mouseX = x;
ptr->mouseY = y;
ptr->mouseYInv = ptr->windowHeight - 1 - y;
if (!ImGui::GetIO().WantCaptureMouse)
{
NvFlowCameraMouseUpdate(ptr->camera, eNvFlowCameraMouseButton_unknown, eNvFlowCameraAction_unknown, ptr->mouseX, ptr->mouseY, (int)(ptr->windowWidth), (int)(ptr->windowHeight));
}
}
void mouseButtonCallback(GLFWwindow* win, int button, int action, int modifiers)
{
auto ptr = (App*)glfwLoader.p_glfwGetWindowUserPointer(win);
if (!ImGui::GetIO().WantCaptureMouse)
{
NvFlowCameraAction nvfAction = eNvFlowCameraAction_unknown;
if (action == GLFW_PRESS)
{
nvfAction = eNvFlowCameraAction_down;
}
else if (action == GLFW_RELEASE)
{
nvfAction = eNvFlowCameraAction_up;
}
NvFlowCameraMouseButton nvfMouse = eNvFlowCameraMouseButton_unknown;
if (button == GLFW_MOUSE_BUTTON_LEFT)
{
nvfMouse = eNvFlowCameraMouseButton_left;
}
else if (button == GLFW_MOUSE_BUTTON_MIDDLE)
{
nvfMouse = eNvFlowCameraMouseButton_middle;
}
else if (button == GLFW_MOUSE_BUTTON_RIGHT)
{
nvfMouse = eNvFlowCameraMouseButton_right;
}
NvFlowCameraMouseUpdate(ptr->camera, nvfMouse, nvfAction, ptr->mouseX, ptr->mouseY, (int)ptr->windowWidth, (int)ptr->windowHeight);
}
// imgui
if (action == GLFW_PRESS && button >= 0 && button < 5)
{
ptr->mouseJustPressed[button] = NV_FLOW_TRUE;
}
}
void mouseWheelCallback(GLFWwindow* win, double scrollX, double scrollY)
{
auto ptr = (App*)glfwLoader.p_glfwGetWindowUserPointer(win);
// imgui
ImGuiIO& io = ImGui::GetIO();
io.MouseWheelH += (float)scrollX;
io.MouseWheel += (float)scrollY;
}
NVIDIA-Omniverse/PhysX/flow/source/nvfloweditor/ImguiRenderer.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "imgui.h"
#include "NvFlowLoader.h"
#include "ImguiRenderer.h"
#include "NvFlowUploadBuffer.h"
#include "NvFlowDynamicBuffer.h"
#include "shaders/ImguiParams.h"
#include "shaders/ImguiCS.hlsl.h"
#include "shaders/ImguiBuildCS.hlsl.h"
#include "shaders/ImguiTileCS.hlsl.h"
#include "shaders/ImguiTileCountCS.hlsl.h"
#include "shaders/ImguiTileScanCS.hlsl.h"
namespace NvFlowImguiRendererDefault
{
struct Renderer
{
NvFlowContextInterface contextInterface = {};
ImguiCS_Pipeline imguiCS = {};
ImguiBuildCS_Pipeline imguiBuildCS = {};
ImguiTileCS_Pipeline imguiTileCS = {};
ImguiTileCountCS_Pipeline imguiTileCountCS = {};
ImguiTileScanCS_Pipeline imguiTileScanCS = {};
NvFlowUploadBuffer vertexPosTexCoordBuffer = {};
NvFlowUploadBuffer vertexColorBuffer = {};
NvFlowUploadBuffer indicesBuffer = {};
NvFlowUploadBuffer drawCmdsBuffer = {};
NvFlowUploadBuffer constantBuffer = {};
NvFlowUploadBuffer textureUpload = {};
NvFlowTexture* textureDevice = nullptr;
NvFlowSampler* samplerLinear = nullptr;
NvFlowDynamicBuffer treeBuffer = {};
NvFlowDynamicBuffer tileCountBuffer = {};
NvFlowDynamicBuffer triangleBuffer = {};
NvFlowDynamicBuffer triangleRangeBuffer = {};
NvFlowBuffer* totalCountBuffer = nullptr;
};
NV_FLOW_CAST_PAIR(NvFlowImguiRenderer, Renderer)
NvFlowImguiRenderer* create(
NvFlowContextInterface* contextInterface,
NvFlowContext* context,
unsigned char* pixels,
int texWidth,
int texHeight
)
{
auto ptr = new Renderer();
NvFlowContextInterface_duplicate(&ptr->contextInterface, contextInterface);
ImguiCS_init(&ptr->contextInterface, context, &ptr->imguiCS);
ImguiBuildCS_init(&ptr->contextInterface, context, &ptr->imguiBuildCS);
ImguiTileCS_init(&ptr->contextInterface, context, &ptr->imguiTileCS);
ImguiTileCountCS_init(&ptr->contextInterface, context, &ptr->imguiTileCountCS);
ImguiTileScanCS_init(&ptr->contextInterface, context, &ptr->imguiTileScanCS);
NvFlowBufferUsageFlags bufferUsage = eNvFlowBufferUsage_structuredBuffer | eNvFlowBufferUsage_bufferCopySrc;
NvFlowUploadBuffer_init(&ptr->contextInterface, context, &ptr->vertexPosTexCoordBuffer, bufferUsage, eNvFlowFormat_unknown, sizeof(NvFlowFloat4));
NvFlowUploadBuffer_init(&ptr->contextInterface, context, &ptr->vertexColorBuffer, bufferUsage, eNvFlowFormat_unknown, sizeof(NvFlowUint));
NvFlowUploadBuffer_init(&ptr->contextInterface, context, &ptr->indicesBuffer, bufferUsage, eNvFlowFormat_unknown, sizeof(NvFlowUint));
NvFlowUploadBuffer_init(&ptr->contextInterface, context, &ptr->drawCmdsBuffer, bufferUsage, eNvFlowFormat_unknown, sizeof(ImguiRendererDrawCmd));
NvFlowUploadBuffer_init(&ptr->contextInterface, context, &ptr->constantBuffer, eNvFlowBufferUsage_constantBuffer, eNvFlowFormat_unknown, 0u);
NvFlowUploadBuffer_init(&ptr->contextInterface, context, &ptr->textureUpload, eNvFlowBufferUsage_bufferCopySrc, eNvFlowFormat_unknown, 0u);
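// Font atlas upload: map the staging upload buffer, copy the CPU pixel data,
// then unmap to get a transient buffer that a copy pass moves into the
// GPU-resident RGBA8 texture created below.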
NvFlowUint numBytes = NvFlowUint(texWidth * texHeight * 4u * sizeof(unsigned char));
auto mapped = (unsigned char*)NvFlowUploadBuffer_map(context, &ptr->textureUpload, numBytes);
for (NvFlowUint idx = 0u; idx < numBytes; idx++)
{
mapped[idx] = pixels[idx];
}
NvFlowBufferTransient* bufferTransient = NvFlowUploadBuffer_unmap(context, &ptr->textureUpload);
NvFlowTextureDesc texDesc = {};
texDesc.textureType = eNvFlowTextureType_2d;
texDesc.usageFlags = eNvFlowTextureUsage_textureCopyDst | eNvFlowTextureUsage_texture;
texDesc.format = eNvFlowFormat_r8g8b8a8_unorm;
texDesc.width = texWidth;
texDesc.height = texHeight;
texDesc.depth = 1u;
texDesc.mipLevels = 1u;
texDesc.optimizedClearValue = NvFlowFloat4{0.f, 0.f, 0.f, 0.f};
ptr->textureDevice = ptr->contextInterface.createTexture(context, &texDesc);
NvFlowSamplerDesc samplerDesc = {};
samplerDesc.filterMode = eNvFlowSamplerFilterMode_linear;
samplerDesc.addressModeU = eNvFlowSamplerAddressMode_wrap;
samplerDesc.addressModeV = eNvFlowSamplerAddressMode_wrap;
samplerDesc.addressModeW = eNvFlowSamplerAddressMode_wrap;
ptr->samplerLinear = ptr->contextInterface.createSampler(context, &samplerDesc);
NvFlowTextureTransient* textureTransient = ptr->contextInterface.registerTextureAsTransient(context, ptr->textureDevice);
NvFlowPassCopyBufferToTextureParams copyParams = {};
copyParams.bufferOffset = 0llu;
copyParams.bufferRowPitch = texWidth * 4u * sizeof(unsigned char);
copyParams.bufferDepthPitch = numBytes;
copyParams.textureMipLevel = 0u;
copyParams.textureOffset = NvFlowUint3{0u, 0u, 0u};
copyParams.textureExtent = NvFlowUint3{NvFlowUint(texWidth), NvFlowUint(texHeight), 1u};
copyParams.src = bufferTransient;
copyParams.dst = textureTransient;
copyParams.debugLabel = "ImguiUploadTexture";
ptr->contextInterface.addPassCopyBufferToTexture(context, &copyParams);
NvFlowBufferUsageFlags deviceBufUsage = eNvFlowBufferUsage_rwStructuredBuffer | eNvFlowBufferUsage_structuredBuffer;
NvFlowDynamicBuffer_init(&ptr->contextInterface, context, &ptr->treeBuffer, deviceBufUsage, eNvFlowFormat_unknown, sizeof(NvFlowInt4));
NvFlowDynamicBuffer_init(&ptr->contextInterface, context, &ptr->tileCountBuffer, deviceBufUsage, eNvFlowFormat_unknown, sizeof(NvFlowUint));
NvFlowDynamicBuffer_init(&ptr->contextInterface, context, &ptr->triangleBuffer, deviceBufUsage, eNvFlowFormat_unknown, sizeof(NvFlowUint));
NvFlowDynamicBuffer_init(&ptr->contextInterface, context, &ptr->triangleRangeBuffer, deviceBufUsage, eNvFlowFormat_unknown, sizeof(NvFlowUint2));
NvFlowBufferDesc totalCountDesc = {};
totalCountDesc.usageFlags = deviceBufUsage;
totalCountDesc.format = eNvFlowFormat_unknown;
totalCountDesc.structureStride = sizeof(NvFlowUint);
totalCountDesc.sizeInBytes = 1024u * sizeof(NvFlowUint);
ptr->totalCountBuffer = ptr->contextInterface.createBuffer(context, eNvFlowMemoryType_readback, &totalCountDesc);
return cast(ptr);
}
void destroy(NvFlowContext* context, NvFlowImguiRenderer* renderer)
{
auto ptr = cast(renderer);
NvFlowUploadBuffer_destroy(context, &ptr->vertexPosTexCoordBuffer);
NvFlowUploadBuffer_destroy(context, &ptr->vertexColorBuffer);
NvFlowUploadBuffer_destroy(context, &ptr->indicesBuffer);
NvFlowUploadBuffer_destroy(context, &ptr->drawCmdsBuffer);
NvFlowUploadBuffer_destroy(context, &ptr->constantBuffer);
NvFlowUploadBuffer_destroy(context, &ptr->textureUpload);
ptr->contextInterface.destroyTexture(context, ptr->textureDevice);
ptr->contextInterface.destroySampler(context, ptr->samplerLinear);
NvFlowDynamicBuffer_destroy(context, &ptr->treeBuffer);
NvFlowDynamicBuffer_destroy(context, &ptr->tileCountBuffer);
NvFlowDynamicBuffer_destroy(context, &ptr->triangleBuffer);
NvFlowDynamicBuffer_destroy(context, &ptr->triangleRangeBuffer);
ptr->contextInterface.destroyBuffer(context, ptr->totalCountBuffer);
ImguiCS_destroy(context, &ptr->imguiCS);
ImguiBuildCS_destroy(context, &ptr->imguiBuildCS);
ImguiTileCS_destroy(context, &ptr->imguiTileCS);
ImguiTileCountCS_destroy(context, &ptr->imguiTileCountCS);
ImguiTileScanCS_destroy(context, &ptr->imguiTileScanCS);
delete ptr;
}
void render(NvFlowContext* context, NvFlowImguiRenderer* renderer, ImDrawData* drawData, NvFlowUint width, NvFlowUint height, NvFlowTextureTransient* colorIn, NvFlowTextureTransient* colorOut)
{
auto ptr = cast(renderer);
NvFlowUint numVertices = drawData->TotalVtxCount;
NvFlowUint numIndices = drawData->TotalIdxCount;
NvFlowUint numDrawCmds = 0u;
for (int listIdx = 0; listIdx < drawData->CmdListsCount; listIdx++)
{
numDrawCmds += drawData->CmdLists[listIdx]->CmdBuffer.Size;
}
NvFlowUint numTriangles = numIndices / 3u;
NvFlowUint trianglesPerBlock = 256u;
NvFlowUint numBlocks = (numTriangles + trianglesPerBlock - 1u) / trianglesPerBlock;
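// Triangles are binned into blocks of 256; each block appears to hold a
// 4-ary hierarchy of 1 + 4 + 16 + 64 + 256 = 341 NvFlowInt4 nodes, filled by
// the build pass below and traversed by the tile passes for culling.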
NvFlowUint64 treeNumBytes = numBlocks * (1u + 4u + 16u + 64u + 256u) * sizeof(NvFlowInt4);
NvFlowDynamicBuffer_resize(context, &ptr->treeBuffer, treeNumBytes);
NvFlowUint tileDimBits = 4u;
NvFlowUint tileDim = 1u << tileDimBits;
NvFlowUint tileGridDim_x = (width + tileDim - 1u) / tileDim;
NvFlowUint tileGridDim_y = (height + tileDim - 1u) / tileDim;
NvFlowUint tileGridDim_xy = tileGridDim_x * tileGridDim_y;
NvFlowUint numTileBuckets = (tileGridDim_xy + 255u) / 256u;
NvFlowUint numTileBucketPasses = (numTileBuckets + 255u) / 256u;
NvFlowUint64 tileCountNumBytes = tileGridDim_x * tileGridDim_y * 3u * sizeof(NvFlowUint);
NvFlowDynamicBuffer_resize(context, &ptr->tileCountBuffer, tileCountNumBytes);
NvFlowUint maxTriangles = 4u * 256u * 1024u;
NvFlowUint64 triangleBufferNumBytes = maxTriangles * sizeof(NvFlowUint);
NvFlowDynamicBuffer_resize(context, &ptr->triangleBuffer, triangleBufferNumBytes);
NvFlowUint64 triangleRangeBufferNumBytes = tileGridDim_xy * sizeof(NvFlowUint2);
NvFlowDynamicBuffer_resize(context, &ptr->triangleRangeBuffer, triangleRangeBufferNumBytes);
NvFlowUint64 numBytesPosTex = (numVertices + 1u) * sizeof(NvFlowFloat4);
NvFlowUint64 numBytesColor = (numVertices + 1u) * sizeof(NvFlowUint);
NvFlowUint64 numBytesIndices = (numIndices + 1u) * sizeof(NvFlowUint);
NvFlowUint64 numBytesDrawCmds = (numDrawCmds + 1u) * sizeof(ImguiRendererDrawCmd);
auto mappedPosTex = (NvFlowFloat4*)NvFlowUploadBuffer_map(context, &ptr->vertexPosTexCoordBuffer, numBytesPosTex);
auto mappedColor = (NvFlowUint*)NvFlowUploadBuffer_map(context, &ptr->vertexColorBuffer, numBytesColor);
auto mappedIndices = (NvFlowUint*)NvFlowUploadBuffer_map(context, &ptr->indicesBuffer, numBytesIndices);
auto mappedDrawCmds = (ImguiRendererDrawCmd*)NvFlowUploadBuffer_map(context, &ptr->drawCmdsBuffer, numBytesDrawCmds);
auto mapped = (ImguiRendererParams*)NvFlowUploadBuffer_map(context, &ptr->constantBuffer, sizeof(ImguiRendererParams));
NvFlowUint vertexOffset = 0u;
NvFlowUint indexOffset = 0u;
NvFlowUint drawCmdOffset = 0u;
for (int cmdListIdx = 0u; cmdListIdx < drawData->CmdListsCount; cmdListIdx++)
{
ImDrawList* cmdList = drawData->CmdLists[cmdListIdx];
// copy vertices
for (int vertIdx = 0; vertIdx < cmdList->VtxBuffer.Size; vertIdx++)
{
NvFlowUint writeIdx = vertIdx + vertexOffset;
mappedPosTex[writeIdx].x = cmdList->VtxBuffer[vertIdx].pos.x;
mappedPosTex[writeIdx].y = cmdList->VtxBuffer[vertIdx].pos.y;
mappedPosTex[writeIdx].z = cmdList->VtxBuffer[vertIdx].uv.x;
mappedPosTex[writeIdx].w = cmdList->VtxBuffer[vertIdx].uv.y;
mappedColor[writeIdx] = cmdList->VtxBuffer[vertIdx].col;
}
// copy indices
for (int indexIdx = 0; indexIdx < cmdList->IdxBuffer.Size; indexIdx++)
{
NvFlowUint writeIdx = indexIdx + indexOffset;
mappedIndices[writeIdx] = cmdList->IdxBuffer[indexIdx] + vertexOffset; // apply vertex offset on CPU
}
// copy drawCmds
NvFlowUint indexOffsetLocal = indexOffset;
for (int drawCmdIdx = 0; drawCmdIdx < cmdList->CmdBuffer.Size; drawCmdIdx++)
{
NvFlowUint writeIdx = drawCmdIdx + drawCmdOffset;
auto& dst = mappedDrawCmds[writeIdx];
auto& src = cmdList->CmdBuffer[drawCmdIdx];
dst.clipRect.x = src.ClipRect.x;
dst.clipRect.y = src.ClipRect.y;
dst.clipRect.z = src.ClipRect.z;
dst.clipRect.w = src.ClipRect.w;
dst.elemCount = src.ElemCount;
dst.userTexture = *((NvFlowUint*)(&src.TextureId));
dst.vertexOffset = 0u; // vertex offset already applied
dst.indexOffset = indexOffsetLocal;
indexOffsetLocal += src.ElemCount;
}
vertexOffset += NvFlowUint(cmdList->VtxBuffer.Size);
indexOffset += NvFlowUint(cmdList->IdxBuffer.Size);
drawCmdOffset += NvFlowUint(cmdList->CmdBuffer.Size);
}
mapped->numVertices = numVertices;
mapped->numIndices = numIndices;
mapped->numDrawCmds = numDrawCmds;
mapped->numBlocks = numBlocks;
mapped->width = float(width);
mapped->height = float(height);
mapped->widthInv = 1.f / float(width);
mapped->heightInv = 1.f / float(height);
mapped->tileGridDim_x = tileGridDim_x;
mapped->tileGridDim_y = tileGridDim_y;
mapped->tileGridDim_xy = tileGridDim_xy;
mapped->tileDimBits = tileDimBits;
mapped->maxTriangles = maxTriangles;
mapped->tileNumTrianglesOffset = 0u;
mapped->tileLocalScanOffset = tileGridDim_xy;
mapped->tileLocalTotalOffset = 2u * tileGridDim_xy;
mapped->tileGlobalScanOffset = 2u * tileGridDim_xy + numTileBuckets;
mapped->numTileBuckets = numTileBuckets;
mapped->numTileBucketPasses = numTileBucketPasses;
mapped->pad3 = 0u;
//NvFlowBufferTransient* vertexPosTexCoordTransient = NvFlowUploadBuffer_unmapDevice(context, &ptr->vertexPosTexCoordBuffer, 0llu, numBytesPosTex);
//NvFlowBufferTransient* vertexColorTransient = NvFlowUploadBuffer_unmapDevice(context, &ptr->vertexColorBuffer, 0llu, numBytesColor);
//NvFlowBufferTransient* indicesTransient = NvFlowUploadBuffer_unmapDevice(context, &ptr->indicesBuffer, 0llu, numBytesIndices);
//NvFlowBufferTransient* drawCmdsInTransient = NvFlowUploadBuffer_unmapDevice(context, &ptr->drawCmdsBuffer, 0llu, numBytesDrawCmds);
//NvFlowBufferTransient* paramsInTransient = NvFlowUploadBuffer_unmap(context, &ptr->constantBuffer);
NvFlowBufferTransient* vertexPosTexCoordTransient = NvFlowUploadBuffer_unmap(context, &ptr->vertexPosTexCoordBuffer);
NvFlowBufferTransient* vertexColorTransient = NvFlowUploadBuffer_unmap(context, &ptr->vertexColorBuffer);
NvFlowBufferTransient* indicesTransient = NvFlowUploadBuffer_unmap(context, &ptr->indicesBuffer);
NvFlowBufferTransient* drawCmdsInTransient = NvFlowUploadBuffer_unmap(context, &ptr->drawCmdsBuffer);
NvFlowBufferTransient* paramsInTransient = NvFlowUploadBuffer_unmap(context, &ptr->constantBuffer);
NvFlowTextureTransient* textureTransient = ptr->contextInterface.registerTextureAsTransient(context, ptr->textureDevice);
NvFlowBufferTransient* treeTransient = NvFlowDynamicBuffer_getTransient(context, &ptr->treeBuffer);
NvFlowBufferTransient* tileCountTransient = NvFlowDynamicBuffer_getTransient(context, &ptr->tileCountBuffer);
NvFlowBufferTransient* triangleTransient = NvFlowDynamicBuffer_getTransient(context, &ptr->triangleBuffer);
NvFlowBufferTransient* triangleRangeTransient = NvFlowDynamicBuffer_getTransient(context, &ptr->triangleRangeBuffer);
auto totalCountMapped = (NvFlowUint*)ptr->contextInterface.mapBuffer(context, ptr->totalCountBuffer);
ptr->contextInterface.unmapBuffer(context, ptr->totalCountBuffer);
NvFlowBufferTransient* totalCountTransient = ptr->contextInterface.registerBufferAsTransient(context, ptr->totalCountBuffer);
// build acceleration structure
{
NvFlowUint3 gridDim = {
numBlocks,
1u,
1u
};
ImguiBuildCS_PassParams passParams = {};
passParams.paramsIn = paramsInTransient;
passParams.vertexPosTexCoordIn = vertexPosTexCoordTransient;
passParams.vertexColorIn = vertexColorTransient;
passParams.indicesIn = indicesTransient;
passParams.drawCmdsIn = drawCmdsInTransient;
passParams.treeOut = treeTransient;
ImguiBuildCS_addPassCompute(context, &ptr->imguiBuildCS, gridDim, &passParams);
}
// count triangles per tile
{
NvFlowUint3 gridDim = {
(tileGridDim_xy + 255u) / 256u,
1u,
1u
};
ImguiTileCountCS_PassParams passParams = {};
passParams.paramsIn = paramsInTransient;
passParams.treeIn = treeTransient;
passParams.tileCountOut = tileCountTransient;
ImguiTileCountCS_addPassCompute(context, &ptr->imguiTileCountCS, gridDim, &passParams);
}
// scan buckets
{
NvFlowUint3 gridDim = {
1u,
1u,
1u
};
ImguiTileScanCS_PassParams passParams = {};
passParams.paramsIn = paramsInTransient;
passParams.tileCountOut = tileCountTransient;
passParams.totalCountOut = totalCountTransient;
ImguiTileScanCS_addPassCompute(context, &ptr->imguiTileScanCS, gridDim, &passParams);
}
// generate tile data
{
NvFlowUint3 gridDim = {
(tileGridDim_xy + 255u) / 256u,
1u,
1u
};
ImguiTileCS_PassParams passParams = {};
passParams.paramsIn = paramsInTransient;
passParams.treeIn = treeTransient;
passParams.tileCountIn = tileCountTransient;
passParams.drawCmdsIn = drawCmdsInTransient;
passParams.triangleOut = triangleTransient;
passParams.triangleRangeOut = triangleRangeTransient;
ImguiTileCS_addPassCompute(context, &ptr->imguiTileCS, gridDim, &passParams);
}
// render
{
NvFlowUint3 gridDim = {
(width + 7u) / 8u,
(height + 7u) / 8u,
1u
};
ImguiCS_PassParams passParams = {};
passParams.paramsIn = paramsInTransient;
passParams.vertexPosTexCoordIn = vertexPosTexCoordTransient;
passParams.vertexColorIn = vertexColorTransient;
passParams.indicesIn = indicesTransient;
passParams.drawCmdsIn = drawCmdsInTransient;
passParams.textureIn = textureTransient;
passParams.samplerIn = ptr->samplerLinear;
passParams.triangleIn = triangleTransient;
passParams.triangleRangeIn = triangleRangeTransient;
passParams.colorIn = colorIn;
passParams.colorOut = colorOut;
ImguiCS_addPassCompute(context, &ptr->imguiCS, gridDim, &passParams);
}
}
}
NvFlowImguiRendererInterface* NvFlowGetImguiRendererInterface()
{
using namespace NvFlowImguiRendererDefault;
static NvFlowImguiRendererInterface iface = { NV_FLOW_REFLECT_INTERFACE_INIT(NvFlowImguiRendererInterface) };
iface.create = create;
iface.destroy = destroy;
iface.render = render;
return &iface;
}
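// A hedged usage sketch of this interface (signatures taken from this file;
// acquisition of colorIn/colorOut and the context objects is application-
// specific and assumed):
//
// NvFlowImguiRendererInterface* iface = NvFlowGetImguiRendererInterface();
// NvFlowImguiRenderer* imgui = iface->create(contextInterface, context, fontPixels, fontWidth, fontHeight);
// // each frame, after ImGui::Render():
// iface->render(context, imgui, ImGui::GetDrawData(), width, height, colorIn, colorOut);
// // at shutdown:
// iface->destroy(context, imgui);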
NVIDIA-Omniverse/PhysX/flow/include/nvflow/NvFlowReflect.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_REFLECT_H
#define NV_FLOW_REFLECT_H
#include "NvFlowTypes.h"
/// ************************* NvFlowReflect **********************************
NV_FLOW_INLINE int NvFlowReflectStringCompare(const char* a, const char* b)
{
a = a ? a : "\0";
b = b ? b : "\0";
int idx = 0;
while (a[idx] || b[idx])
{
if (a[idx] != b[idx])
{
return a[idx] < b[idx] ? -1 : +1;
}
idx++;
}
return 0;
}
NV_FLOW_INLINE const char* NvFlowTypeToString(NvFlowType type)
{
switch (type)
{
case eNvFlowType_unknown: return "unknown";
case eNvFlowType_void: return "void";
case eNvFlowType_function: return "function";
case eNvFlowType_struct: return "struct";
case eNvFlowType_int: return "int";
case eNvFlowType_int2: return "int2";
case eNvFlowType_int3: return "int3";
case eNvFlowType_int4: return "int4";
case eNvFlowType_uint: return "uint";
case eNvFlowType_uint2: return "uint2";
case eNvFlowType_uint3: return "uint3";
case eNvFlowType_uint4: return "uint4";
case eNvFlowType_float: return "float";
case eNvFlowType_float2: return "float2";
case eNvFlowType_float3: return "float3";
case eNvFlowType_float4: return "float4";
case eNvFlowType_float4x4: return "float4x4";
case eNvFlowType_bool32: return "bool32";
case eNvFlowType_uint8: return "uint8";
case eNvFlowType_uint16: return "uint16";
case eNvFlowType_uint64: return "uint64";
case eNvFlowType_char: return "char";
case eNvFlowType_double: return "double";
default: return "unknown";
}
}
NV_FLOW_INLINE NvFlowType NvFlowTypeFromString(const char* name)
{
if (NvFlowReflectStringCompare(name, "unknown") == 0) { return eNvFlowType_unknown; }
else if (NvFlowReflectStringCompare(name, "struct") == 0) { return eNvFlowType_struct; }
else if (NvFlowReflectStringCompare(name, "void") == 0) { return eNvFlowType_void; }
else if (NvFlowReflectStringCompare(name, "function") == 0) { return eNvFlowType_function; }
else if (NvFlowReflectStringCompare(name, "int") == 0) { return eNvFlowType_int; }
else if (NvFlowReflectStringCompare(name, "int2") == 0) { return eNvFlowType_int2; }
else if (NvFlowReflectStringCompare(name, "int3") == 0) { return eNvFlowType_int3; }
else if (NvFlowReflectStringCompare(name, "int4") == 0) { return eNvFlowType_int4; }
else if (NvFlowReflectStringCompare(name, "uint") == 0) { return eNvFlowType_uint; }
else if (NvFlowReflectStringCompare(name, "uint2") == 0) { return eNvFlowType_uint2; }
else if (NvFlowReflectStringCompare(name, "uint3") == 0) { return eNvFlowType_uint3; }
else if (NvFlowReflectStringCompare(name, "uint4") == 0) { return eNvFlowType_uint4; }
else if (NvFlowReflectStringCompare(name, "float") == 0) { return eNvFlowType_float; }
else if (NvFlowReflectStringCompare(name, "float2") == 0) { return eNvFlowType_float2; }
else if (NvFlowReflectStringCompare(name, "float3") == 0) { return eNvFlowType_float3; }
else if (NvFlowReflectStringCompare(name, "float4") == 0) { return eNvFlowType_float4; }
else if (NvFlowReflectStringCompare(name, "float4x4") == 0) { return eNvFlowType_float4x4; }
else if (NvFlowReflectStringCompare(name, "bool32") == 0) { return eNvFlowType_bool32; }
else if (NvFlowReflectStringCompare(name, "uint8") == 0) { return eNvFlowType_uint8; }
else if (NvFlowReflectStringCompare(name, "uint16") == 0) { return eNvFlowType_uint16; }
else if (NvFlowReflectStringCompare(name, "uint64") == 0) { return eNvFlowType_uint64; }
else if (NvFlowReflectStringCompare(name, "char") == 0) { return eNvFlowType_char; }
else if (NvFlowReflectStringCompare(name, "double") == 0) { return eNvFlowType_double; }
else return eNvFlowType_unknown;
}
NV_FLOW_INLINE void NvFlowReflectMemcpy(void* dst, const void* src, NvFlowUint64 numBytes)
{
for (NvFlowUint64 byteIdx = 0u; byteIdx < numBytes; byteIdx++)
{
((NvFlowUint8*)dst)[byteIdx] = ((const NvFlowUint8*)src)[byteIdx];
}
}
NV_FLOW_INLINE void NvFlowReflectClear(void* dst, NvFlowUint64 numBytes)
{
for (NvFlowUint64 byteIdx = 0u; byteIdx < numBytes; byteIdx++)
{
((NvFlowUint8*)dst)[byteIdx] = 0;
}
}
typedef NvFlowUint NvFlowReflectHintFlags;
typedef enum NvFlowReflectHint
{
eNvFlowReflectHint_none = 0x00,
eNvFlowReflectHint_transient = 0x00000001, // Hint to not serialize
eNvFlowReflectHint_noEdit = 0x00000002, // Hint to not expose to editor
eNvFlowReflectHint_transientNoEdit = 0x00000003,
eNvFlowReflectHint_resource = 0x0000000C, // Mask for resource hints
eNvFlowReflectHint_asset = 0x0000000C, // Hint to serialize as external asset, instead of inlined
eNvFlowReflectHint_bufferId = 0x00000004, // Hint to treat NvFlowUint64 as bufferId, allowing conversion from paths to ids
eNvFlowReflectHint_textureId = 0x00000008, // Hint to treat NvFlowUint64 as textureId, allowing conversion from paths to ids
eNvFlowReflectHint_pinEnabled = 0x00010000,
eNvFlowReflectHint_pinGlobal = 0x00020000,
eNvFlowReflectHint_pinEnabledGlobal = 0x00030000,
eNvFlowReflectHint_pinMutable = 0x00040000,
eNvFlowReflectHint_pinEnabledMutable = 0x00050000,
eNvFlowReflectHint_pinGroup = 0x00080000,
eNvFlowReflectHint_maxEnum = 0x7FFFFFFF
}NvFlowReflectHint;
typedef NvFlowUint NvFlowReflectModeFlags;
typedef enum NvFlowReflectMode
{
eNvFlowReflectMode_value = 0x00,
eNvFlowReflectMode_pointer = 0x01,
eNvFlowReflectMode_array = 0x02,
eNvFlowReflectMode_pointerArray = 0x03,
eNvFlowReflectMode_valueVersioned = 0x04,
eNvFlowReflectMode_pointerVersioned = 0x05,
eNvFlowReflectMode_arrayVersioned = 0x06,
eNvFlowReflectMode_pointerArrayVersioned = 0x07,
eNvFlowReflectMode_maxEnum = 0x7FFFFFFF
}NvFlowReflectMode;
struct NvFlowReflectDataType;
typedef struct NvFlowReflectDataType NvFlowReflectDataType;
typedef struct NvFlowReflectData
{
NvFlowReflectHintFlags reflectHints;
NvFlowReflectModeFlags reflectMode;
const NvFlowReflectDataType* dataType;
const char* name;
NvFlowUint64 dataOffset;
NvFlowUint64 arraySizeOffset;
NvFlowUint64 versionOffset;
const char* metadata;
}NvFlowReflectData;
typedef struct NvFlowReflectDataType
{
NvFlowType dataType;
NvFlowUint64 elementSize;
const char* structTypename;
const NvFlowReflectData* childReflectDatas;
NvFlowUint64 childReflectDataCount;
const void* defaultValue;
}NvFlowReflectDataType;
typedef void(NV_FLOW_ABI* NvFlowReflectProcess_t)(NvFlowUint8* data, const NvFlowReflectDataType* dataType, void* userdata);
NV_FLOW_INLINE void NvFlowReflectCopyByName(
void* dstData, const NvFlowReflectDataType* dstType,
const void* srcData, const NvFlowReflectDataType* srcType
)
{
NvFlowUint8* dstData8 = (NvFlowUint8*)dstData;
const NvFlowUint8* srcData8 = (const NvFlowUint8*)srcData;
// For safety, take min of elementSize
NvFlowUint64 safeCopySize = srcType->elementSize < dstType->elementSize ? srcType->elementSize : dstType->elementSize;
// Start with raw copy, to potentially cover non-reflect data
NvFlowReflectMemcpy(dstData, srcData, safeCopySize);
// Single level copy by name, enough to cover interfaces
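// The src scan below resumes from the previous match (srcIdx persists and
// wraps), so fields declared in the same order across both types match in
// roughly linear time instead of a full quadratic name search.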
if (dstType != srcType)
{
NvFlowUint64 srcIdx = 0u;
for (NvFlowUint64 dstIdx = 0u; dstIdx < dstType->childReflectDataCount; dstIdx++)
{
for (NvFlowUint64 srcCount = 0u; srcCount < srcType->childReflectDataCount; srcCount++)
{
const NvFlowReflectData* childDst = dstType->childReflectDatas + dstIdx;
const NvFlowReflectData* childSrc = srcType->childReflectDatas + srcIdx;
if (childDst->name == childSrc->name ||
NvFlowReflectStringCompare(childDst->name, childSrc->name) == 0)
{
// only copy if not covered by bulk memcpy
if (childDst->dataOffset != childSrc->dataOffset)
{
NvFlowReflectMemcpy(
dstData8 + childDst->dataOffset,
srcData8 + childSrc->dataOffset,
(childDst->reflectMode & eNvFlowReflectMode_pointerArray) ? sizeof(void*) : childDst->dataType->elementSize
);
}
if (childDst->reflectMode & eNvFlowReflectMode_array)
{
if (childDst->arraySizeOffset != childSrc->arraySizeOffset)
{
NvFlowReflectMemcpy(
dstData8 + childDst->arraySizeOffset,
srcData8 + childSrc->arraySizeOffset,
sizeof(NvFlowUint64)
);
}
}
if (childDst->reflectMode & eNvFlowReflectMode_valueVersioned)
{
if (childDst->versionOffset != childSrc->versionOffset)
{
NvFlowReflectMemcpy(
dstData8 + childDst->versionOffset,
srcData8 + childSrc->versionOffset,
sizeof(NvFlowUint64)
);
}
}
srcCount = srcType->childReflectDataCount - 1u;
}
srcIdx++;
if (srcIdx >= srcType->childReflectDataCount)
{
srcIdx = 0u;
}
}
}
}
}
// Reflect blocks must start with #define NV_FLOW_REFLECT_TYPE typename
// And end with #undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_XSTR(X) NV_FLOW_REFLECT_STR(X)
#define NV_FLOW_REFLECT_STR(X) #X
#define NV_FLOW_REFLECT_XCONCAT(A, B) NV_FLOW_REFLECT_CONCAT(A, B)
#define NV_FLOW_REFLECT_CONCAT(A, B) A##B
#define NV_FLOW_REFLECT_VALIDATE(type) \
NV_FLOW_INLINE type* NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtr)(const type* v) { return (type*)v; } \
NV_FLOW_INLINE type** NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtr)(const type* const * v) { return (type**)v; } \
NV_FLOW_INLINE type*** NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtrPtr)(const type*const *const * v) { return (type***)v; }
#if defined(__cplusplus)
#define NV_FLOW_REFLECT_VALIDATE_value(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointer(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_array(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointerArray(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtrPtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_valueVersioned(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointerVersioned(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_arrayVersioned(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointerArrayVersioned(type, name) NV_FLOW_REFLECT_XCONCAT(type,_NvFlowValidatePtrPtrPtr)(&((NV_FLOW_REFLECT_TYPE*)0)->name)
#else
#define NV_FLOW_REFLECT_VALIDATE_value(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointer(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_array(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointerArray(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_valueVersioned(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointerVersioned(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_arrayVersioned(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#define NV_FLOW_REFLECT_VALIDATE_pointerArrayVersioned(type, name) (&((NV_FLOW_REFLECT_TYPE*)0)->name)
#endif
#define NV_FLOW_REFLECT_BUILTIN_IMPL(enumName, typeName) \
static const NvFlowReflectDataType NV_FLOW_REFLECT_XCONCAT(typeName,_NvFlowReflectDataType) = { enumName, sizeof(typeName), 0, 0, 0, 0 }; \
NV_FLOW_REFLECT_VALIDATE(typeName)
#define NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(name) \
static const NvFlowReflectDataType NV_FLOW_REFLECT_XCONCAT(name,_NvFlowReflectDataType) = { eNvFlowType_struct, 0llu, #name, 0, 0, 0 }; \
NV_FLOW_REFLECT_VALIDATE(name)
#define NV_FLOW_REFLECT_BEGIN() \
static const NvFlowReflectData NV_FLOW_REFLECT_XCONCAT(NV_FLOW_REFLECT_TYPE,_reflectDatas)[] = {
#define NV_FLOW_REFLECT_END(defaultValue) \
}; \
static const NvFlowReflectDataType NV_FLOW_REFLECT_XCONCAT(NV_FLOW_REFLECT_TYPE,_NvFlowReflectDataType) = { \
eNvFlowType_struct, \
sizeof(NV_FLOW_REFLECT_TYPE), \
NV_FLOW_REFLECT_XSTR(NV_FLOW_REFLECT_TYPE), \
NV_FLOW_REFLECT_XCONCAT(NV_FLOW_REFLECT_TYPE,_reflectDatas), \
sizeof(NV_FLOW_REFLECT_XCONCAT(NV_FLOW_REFLECT_TYPE,_reflectDatas)) / sizeof(NvFlowReflectData), \
defaultValue \
}; \
NV_FLOW_REFLECT_VALIDATE(NV_FLOW_REFLECT_TYPE)
#define NV_FLOW_REFLECT_TYPE_ALIAS(SRC, DST) \
typedef SRC DST; \
static const NvFlowReflectDataType NV_FLOW_REFLECT_XCONCAT(DST,_NvFlowReflectDataType) = { \
eNvFlowType_struct, \
sizeof(SRC), \
#DST, \
NV_FLOW_REFLECT_XCONCAT(SRC,_reflectDatas), \
sizeof(NV_FLOW_REFLECT_XCONCAT(SRC,_reflectDatas)) / sizeof(NvFlowReflectData), \
&NV_FLOW_REFLECT_XCONCAT(SRC,_default) \
}; \
NV_FLOW_REFLECT_VALIDATE(DST)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_int, int)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_int2, NvFlowInt2)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_int3, NvFlowInt3)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_int4, NvFlowInt4)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_uint, NvFlowUint)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_uint2, NvFlowUint2)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_uint3, NvFlowUint3)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_uint4, NvFlowUint4)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_float, float)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_float2, NvFlowFloat2)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_float3, NvFlowFloat3)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_float4, NvFlowFloat4)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_float4x4, NvFlowFloat4x4)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_bool32, NvFlowBool32)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_uint8, NvFlowUint8)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_uint16, NvFlowUint16)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_uint64, NvFlowUint64)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_char, char)
NV_FLOW_REFLECT_BUILTIN_IMPL(eNvFlowType_double, double)
#if defined(__cplusplus)
#define NV_FLOW_REFLECT_SIZE_OFFSET(name_size) (NvFlowUint64)NV_FLOW_REFLECT_VALIDATE_value(NvFlowUint64, name_size)
#define NV_FLOW_REFLECT_VERSION_OFFSET(version) (NvFlowUint64)NV_FLOW_REFLECT_VALIDATE_value(NvFlowUint64, version)
#else
#define NV_FLOW_REFLECT_SIZE_OFFSET(name_size) (NvFlowUint64)(&((NV_FLOW_REFLECT_TYPE*)0)->name_size)
#define NV_FLOW_REFLECT_VERSION_OFFSET(version) (NvFlowUint64)(&((NV_FLOW_REFLECT_TYPE*)0)->version)
#endif
/// Builtin
#define NV_FLOW_REFLECT_GENERIC(reflectMode, type, name, ARRAY, VERSION, reflectHints, metadata) { \
reflectHints, \
NV_FLOW_REFLECT_XCONCAT(eNvFlowReflectMode_,reflectMode), \
&NV_FLOW_REFLECT_XCONCAT(type,_NvFlowReflectDataType), \
#name, \
(NvFlowUint64)NV_FLOW_REFLECT_XCONCAT(NV_FLOW_REFLECT_VALIDATE_,reflectMode)(type, name), \
ARRAY, \
VERSION, \
metadata \
},
#define NV_FLOW_REFLECT_VALUE(type, name, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(value, type, name, 0, 0, reflectHints, metadata)
#define NV_FLOW_REFLECT_POINTER(type, name, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(pointer, type, name, 0, 0, reflectHints, metadata)
#define NV_FLOW_REFLECT_ARRAY(type, name, name_size, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(array, type, name, NV_FLOW_REFLECT_SIZE_OFFSET(name_size), 0, reflectHints, metadata)
#define NV_FLOW_REFLECT_POINTER_ARRAY(type, name, name_size, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(pointerArray, type, name, NV_FLOW_REFLECT_SIZE_OFFSET(name_size), 0, reflectHints, metadata)
#define NV_FLOW_REFLECT_VALUE_VERSIONED(type, name, version, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(valueVersioned, type, name, 0, NV_FLOW_REFLECT_VERSION_OFFSET(version), reflectHints, metadata)
#define NV_FLOW_REFLECT_POINTER_VERSIONED(type, name, version, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(pointerVersioned, type, name, 0, NV_FLOW_REFLECT_VERSION_OFFSET(version), reflectHints, metadata)
#define NV_FLOW_REFLECT_ARRAY_VERSIONED(type, name, name_size, version, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(arrayVersioned, type, name, NV_FLOW_REFLECT_SIZE_OFFSET(name_size), NV_FLOW_REFLECT_VERSION_OFFSET(version), reflectHints, metadata)
#define NV_FLOW_REFLECT_POINTER_ARRAY_VERSIONED(type, name, name_size, version, reflectHints, metadata) NV_FLOW_REFLECT_GENERIC(pointerArrayVersioned, type, name, NV_FLOW_REFLECT_SIZE_OFFSET(name_size), NV_FLOW_REFLECT_VERSION_OFFSET(version), reflectHints, metadata)
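// Example (editor sketch, hypothetical struct): a complete reflect block
// built from the macros above, following the begin/end convention noted
// earlier in this header:
//
// typedef struct MyParams
// {
// float gravity;
// NvFlowUint count;
// }MyParams;
// static const MyParams MyParams_default = { -9.8f, 0u };
// #define NV_FLOW_REFLECT_TYPE MyParams
// NV_FLOW_REFLECT_BEGIN()
// NV_FLOW_REFLECT_VALUE(float, gravity, 0, 0)
// NV_FLOW_REFLECT_VALUE(NvFlowUint, count, 0, 0)
// NV_FLOW_REFLECT_END(&MyParams_default)
// #undef NV_FLOW_REFLECT_TYPE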
/// Function Pointer
static const NvFlowReflectDataType function_NvFlowReflectDataType = { eNvFlowType_function, 0llu, 0, 0, 0, 0 };
#define NV_FLOW_REFLECT_FUNCTION_POINTER(name, reflectHints, metadata) { \
reflectHints, \
eNvFlowReflectMode_pointer, \
&function_NvFlowReflectDataType, \
#name, \
(NvFlowUint64)(&((NV_FLOW_REFLECT_TYPE*)0)->name), \
0, \
0, \
metadata \
},
/// Void
static const NvFlowReflectDataType void_NvFlowReflectDataType = { eNvFlowType_void, 0llu, 0, 0, 0, 0 };
#define NV_FLOW_REFLECT_VOID_POINTER(name, reflectHints, metadata) { \
reflectHints, \
eNvFlowReflectMode_pointer, \
&void_NvFlowReflectDataType, \
#name, \
(NvFlowUint64)(&((NV_FLOW_REFLECT_TYPE*)0)->name), \
0, \
0, \
metadata \
},
/// Enum
#define NV_FLOW_REFLECT_ENUM(name, reflectHints, metadata) { \
reflectHints, \
eNvFlowReflectMode_value, \
&NvFlowUint_NvFlowReflectDataType, \
#name, \
(NvFlowUint64)(&((NV_FLOW_REFLECT_TYPE*)0)->name), \
0, \
0, \
metadata \
},
#define NV_FLOW_REFLECT_INTERFACE() const NvFlowReflectDataType* interface_NvFlowReflectDataType
#define NV_FLOW_REFLECT_INTERFACE_INIT(type) &NV_FLOW_REFLECT_XCONCAT(type,_NvFlowReflectDataType)
#define NV_FLOW_REFLECT_INTERFACE_IMPL() \
NV_FLOW_INLINE void NV_FLOW_REFLECT_XCONCAT(NV_FLOW_REFLECT_TYPE,_duplicate)(NV_FLOW_REFLECT_TYPE* dst, const NV_FLOW_REFLECT_TYPE* src) \
{ \
dst->interface_NvFlowReflectDataType = &NV_FLOW_REFLECT_XCONCAT(NV_FLOW_REFLECT_TYPE,_NvFlowReflectDataType); \
NvFlowReflectCopyByName( \
dst, dst->interface_NvFlowReflectDataType, \
src, src->interface_NvFlowReflectDataType \
); \
}
#define NV_FLOW_REFLECT_TYPE NvFlowReflectData
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint, reflectHints, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, reflectMode, 0, 0)
NV_FLOW_REFLECT_VOID_POINTER(/*NvFlowReflectDataType,*/ dataType, 0, 0) // void to break circular reference
NV_FLOW_REFLECT_POINTER(char, name, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint64, dataOffset, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint64, arraySizeOffset, 0, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowReflectDataType
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_ENUM(dataType, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint64, elementSize, 0, 0)
NV_FLOW_REFLECT_POINTER(char, structTypename, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowReflectData, childReflectDatas, childReflectDataCount, 0, 0)
NV_FLOW_REFLECT_VOID_POINTER(defaultValue, 0, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
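// Trims a leading "Nv" from a typename for display, e.g. "NvFlowUint" -> "FlowUint".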
NV_FLOW_INLINE const char* NvFlowReflectTrimPrefix(const char* name)
{
if (name && name[0] == 'N')
{
name++;
}
if (name && name[0] == 'v')
{
name++;
}
return name;
}
#define NV_FLOW_CAST_PAIR(X, Y) \
NV_FLOW_INLINE X* cast(Y* ptr) { return (X*)ptr; } \
NV_FLOW_INLINE Y* cast(X* ptr) { return (Y*)ptr; } \
NV_FLOW_INLINE const X* cast(const Y* ptr) { return (X*)ptr; } \
NV_FLOW_INLINE const Y* cast(const X* ptr) { return (Y*)ptr; }
#define NV_FLOW_CAST_PAIR_NAMED(name, X, Y) \
NV_FLOW_INLINE X* name##_cast(Y* ptr) { return (X*)ptr; } \
NV_FLOW_INLINE Y* name##_cast(X* ptr) { return (Y*)ptr; } \
NV_FLOW_INLINE const X* name##_cast(const Y* ptr) { return (X*)ptr; } \
NV_FLOW_INLINE const Y* name##_cast(const X* ptr) { return (Y*)ptr; }
typedef struct NvFlowDatabaseTypeSnapshot
{
NvFlowUint64 version;
const NvFlowReflectDataType* dataType;
NvFlowUint8** instanceDatas;
NvFlowUint64 instanceCount;
}NvFlowDatabaseTypeSnapshot;
typedef struct NvFlowDatabaseSnapshot
{
NvFlowUint64 version;
NvFlowDatabaseTypeSnapshot* typeSnapshots;
NvFlowUint64 typeSnapshotCount;
}NvFlowDatabaseSnapshot;
NV_FLOW_INLINE void NvFlowDatabaseSnapshot_findType(const NvFlowDatabaseSnapshot* snapshot, const NvFlowReflectDataType* findDataType, NvFlowDatabaseTypeSnapshot** pSnapshot)
{
// try to find matching pointer first
for (NvFlowUint64 idx = 0u; idx < snapshot->typeSnapshotCount; idx++)
{
if (snapshot->typeSnapshots[idx].dataType == findDataType)
{
*pSnapshot = &snapshot->typeSnapshots[idx];
return;
}
}
// try to find by matching size and name
for (NvFlowUint64 idx = 0u; idx < snapshot->typeSnapshotCount; idx++)
{
if (snapshot->typeSnapshots[idx].dataType->elementSize == findDataType->elementSize &&
NvFlowReflectStringCompare(snapshot->typeSnapshots[idx].dataType->structTypename, findDataType->structTypename) == 0)
{
*pSnapshot = &snapshot->typeSnapshots[idx];
return;
}
}
*pSnapshot = 0;
}
NV_FLOW_INLINE void NvFlowDatabaseSnapshot_findTypeArray(const NvFlowDatabaseSnapshot* snapshot, const NvFlowReflectDataType* findDataType, void*** pData, NvFlowUint64* pCount)
{
NvFlowDatabaseTypeSnapshot* typeSnapshot = 0;
NvFlowDatabaseSnapshot_findType(snapshot, findDataType, &typeSnapshot);
if (typeSnapshot)
{
*pData = (void**)typeSnapshot->instanceDatas;
*pCount = typeSnapshot->instanceCount;
}
else
{
*pData = 0;
*pCount = 0llu;
}
}
#define NV_FLOW_DATABASE_SNAPSHOT_FIND_TYPE_ARRAY(snapshot, type) \
type** type##_elements = 0; \
NvFlowUint64 type##_elementCount = 0llu; \
NvFlowDatabaseSnapshot_findTypeArray(snapshot, &type##_NvFlowReflectDataType, (void***)&type##_elements, &type##_elementCount);
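// Example (editor sketch): typical use of the macro above, assuming a type
// declared with the reflect macros (such as the MyParams sketch earlier):
//
// const NvFlowDatabaseSnapshot* snapshot = /* provided by the runtime */;
// NV_FLOW_DATABASE_SNAPSHOT_FIND_TYPE_ARRAY(snapshot, MyParams)
// for (NvFlowUint64 idx = 0u; idx < MyParams_elementCount; idx++)
// {
// MyParams* instance = MyParams_elements[idx];
// /* read instance fields */
// }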
#endif
NVIDIA-Omniverse/PhysX/flow/include/nvflow/NvFlowTypes.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_TYPES_H
#define NV_FLOW_TYPES_H
#include "shaders/NvFlowShaderTypes.h"
//! \cond HIDDEN_SYMBOLS
#if defined(_WIN32)
#if defined(__cplusplus)
#define NV_FLOW_API extern "C" __declspec(dllexport)
#else
#define NV_FLOW_API __declspec(dllexport)
#endif
#define NV_FLOW_ABI __cdecl
#else
#if defined(__cplusplus)
#define NV_FLOW_API extern "C"
#else
#define NV_FLOW_API
#endif
#define NV_FLOW_ABI
#endif
#if defined(__cplusplus)
#define NV_FLOW_INLINE inline
#else
#define NV_FLOW_INLINE static
#endif
#if defined(_WIN32)
#define NV_FLOW_FORCE_INLINE inline __forceinline
#else
#define NV_FLOW_FORCE_INLINE inline __attribute__((always_inline))
#endif
// #define NV_FLOW_DEBUG_ALLOC
//! \endcond
typedef enum NvFlowLogLevel
{
eNvFlowLogLevel_error = 0,
eNvFlowLogLevel_warning = 1,
eNvFlowLogLevel_info = 2,
eNvFlowLogLevel_maxEnum = 0x7FFFFFFF
}NvFlowLogLevel;
typedef void(NV_FLOW_ABI* NvFlowLogPrint_t)(NvFlowLogLevel level, const char* format, ...);
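// Example (editor sketch): a minimal NvFlowLogPrint_t implementation built on
// the C standard library:
//
// #include <stdarg.h>
// #include <stdio.h>
// static void NV_FLOW_ABI appLogPrint(NvFlowLogLevel level, const char* format, ...)
// {
// va_list args;
// va_start(args, format);
// fprintf(stderr, "[%d] ", (int)level);
// vfprintf(stderr, format, args);
// fprintf(stderr, "\n");
// va_end(args);
// }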
#define NV_FLOW_FALSE 0
#define NV_FLOW_TRUE 1
typedef enum NvFlowType
{
eNvFlowType_unknown = 0,
eNvFlowType_void = 1,
eNvFlowType_function = 2,
eNvFlowType_struct = 3,
eNvFlowType_int = 4,
eNvFlowType_int2 = 5,
eNvFlowType_int3 = 6,
eNvFlowType_int4 = 7,
eNvFlowType_uint = 8,
eNvFlowType_uint2 = 9,
eNvFlowType_uint3 = 10,
eNvFlowType_uint4 = 11,
eNvFlowType_float = 12,
eNvFlowType_float2 = 13,
eNvFlowType_float3 = 14,
eNvFlowType_float4 = 15,
eNvFlowType_float4x4 = 16,
eNvFlowType_bool32 = 17,
eNvFlowType_uint8 = 18,
eNvFlowType_uint16 = 19,
eNvFlowType_uint64 = 20,
eNvFlowType_char = 21,
eNvFlowType_double = 22,
eNvFlowType_count = 23,
eNvFlowType_maxEnum = 0x7FFFFFFF
}NvFlowType;
typedef enum NvFlowContextApi
{
eNvFlowContextApi_abstract = 0,
eNvFlowContextApi_vulkan = 1,
eNvFlowContextApi_d3d12 = 2,
eNvFlowContextApi_cpu = 3,
eNvFlowContextApi_count = 4,
eNvFlowContextApi_maxEnum = 0x7FFFFFFF
}NvFlowContextApi;
typedef enum NvFlowFormat
{
eNvFlowFormat_unknown = 0,
eNvFlowFormat_r32g32b32a32_float = 1,
eNvFlowFormat_r32g32b32a32_uint = 2,
eNvFlowFormat_r32g32b32a32_sint = 3,
eNvFlowFormat_r32g32b32_float = 4,
eNvFlowFormat_r32g32b32_uint = 5,
eNvFlowFormat_r32g32b32_sint = 6,
eNvFlowFormat_r16g16b16a16_float = 7,
eNvFlowFormat_r16g16b16a16_unorm = 8,
eNvFlowFormat_r16g16b16a16_uint = 9,
eNvFlowFormat_r16g16b16a16_snorm = 10,
eNvFlowFormat_r16g16b16a16_sint = 11,
eNvFlowFormat_r32g32_float = 12,
eNvFlowFormat_r32g32_uint = 13,
eNvFlowFormat_r32g32_sint = 14,
eNvFlowFormat_r10g10b10a2_unorm = 15,
eNvFlowFormat_r10g10b10a2_uint = 16,
eNvFlowFormat_r11g11b10_float = 17,
eNvFlowFormat_r8g8b8a8_unorm = 18,
eNvFlowFormat_r8g8b8a8_unorm_srgb = 19,
eNvFlowFormat_r8g8b8a8_uint = 20,
eNvFlowFormat_r8g8b8a8_snorm = 21,
eNvFlowFormat_r8g8b8a8_sint = 22,
eNvFlowFormat_r16g16_float = 23,
eNvFlowFormat_r16g16_unorm = 24,
eNvFlowFormat_r16g16_uint = 25,
eNvFlowFormat_r16g16_snorm = 26,
eNvFlowFormat_r16g16_sint = 27,
eNvFlowFormat_r32_float = 28,
eNvFlowFormat_r32_uint = 29,
eNvFlowFormat_r32_sint = 30,
eNvFlowFormat_r8g8_unorm = 31,
eNvFlowFormat_r8g8_uint = 32,
eNvFlowFormat_r8g8_snorm = 33,
eNvFlowFormat_r8g8_sint = 34,
eNvFlowFormat_r16_float = 35,
eNvFlowFormat_r16_unorm = 36,
eNvFlowFormat_r16_uint = 37,
eNvFlowFormat_r16_snorm = 38,
eNvFlowFormat_r16_sint = 39,
eNvFlowFormat_r8_unorm = 40,
eNvFlowFormat_r8_uint = 41,
eNvFlowFormat_r8_snorm = 42,
eNvFlowFormat_r8_sint = 43,
eNvFlowFormat_b8g8r8a8_unorm = 44,
eNvFlowFormat_b8g8r8a8_unorm_srgb = 45,
eNvFlowFormat_count = 256,
eNvFlowFormat_maxEnum = 0x7FFFFFFF
}NvFlowFormat;
#endif
NVIDIA-Omniverse/PhysX/flow/include/nvflow/NvFlow.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_H
#define NV_FLOW_H
#include "NvFlowContext.h"
#include "shaders/NvFlowShaderTypes.h"
/// ********************************* Op ***************************************
typedef enum NvFlowPinDir
{
eNvFlowPinDir_in = 0,
eNvFlowPinDir_out = 1,
eNvFlowPinDir_count = 2,
eNvFlowPinDir_maxEnum = 0x7FFFFFFF
}NvFlowPinDir;
struct NvFlowOp;
typedef struct NvFlowOp NvFlowOp;
struct NvFlowOpGraph;
typedef struct NvFlowOpGraph NvFlowOpGraph;
struct NvFlowOpGenericPinsIn;
typedef struct NvFlowOpGenericPinsIn NvFlowOpGenericPinsIn;
struct NvFlowOpGenericPinsOut;
typedef struct NvFlowOpGenericPinsOut NvFlowOpGenericPinsOut;
struct NvFlowOpExecuteGroup;
typedef struct NvFlowOpExecuteGroup NvFlowOpExecuteGroup;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowOpGraph)
typedef struct NvFlowOpExecuteGroupDesc
{
NvFlowOpExecuteGroup* group;
const char* name;
}NvFlowOpExecuteGroupDesc;
struct NvFlowOpInterface;
typedef struct NvFlowOpInterface NvFlowOpInterface;
typedef struct NvFlowOpInterface
{
NV_FLOW_REFLECT_INTERFACE();
const char* opTypename;
const NvFlowOpGraph* opGraph;
const NvFlowReflectDataType* pinsIn;
const NvFlowReflectDataType* pinsOut;
const NvFlowOpExecuteGroupDesc* executeGroupDescs;
NvFlowUint64 executeGroupCount;
NvFlowOp*(NV_FLOW_ABI* create)(const NvFlowOpInterface* opInterface, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out);
void(NV_FLOW_ABI* destroy)(NvFlowOp* op, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out);
void(NV_FLOW_ABI* execute)(NvFlowOp* op, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out);
void(NV_FLOW_ABI* executeGroup)(NvFlowOp* op, NvFlowOpExecuteGroup* group, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out);
}NvFlowOpInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowOpInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(char, opTypename, 0, 0)
NV_FLOW_REFLECT_POINTER(NvFlowOpGraph, opGraph, 0, 0)
NV_FLOW_REFLECT_POINTER(NvFlowReflectDataType, pinsIn, 0, 0)
NV_FLOW_REFLECT_POINTER(NvFlowReflectDataType, pinsOut, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(create, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroy, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(execute, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(executeGroup, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_OP_IMPL(name, nameImpl) \
NvFlowOp* NvFlowOp_##nameImpl##_createGeneric(const NvFlowOpInterface* opInterface, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out) \
{ \
return (NvFlowOp*)nameImpl##_create(opInterface, (const name##PinsIn*)in, (name##PinsOut*)out); \
} \
void NvFlowOp_##nameImpl##_destroyGeneric(NvFlowOp* op, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out) \
{ \
nameImpl##_destroy((nameImpl*)op, (const name##PinsIn*)in, (name##PinsOut*)out); \
} \
void NvFlowOp_##nameImpl##_executeGeneric(NvFlowOp* op, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out) \
{ \
nameImpl##_execute((nameImpl*)op, (const name##PinsIn*)in, (name##PinsOut*)out); \
} \
void NvFlowOp_##nameImpl##_executeGroupGeneric(NvFlowOp* op, NvFlowOpExecuteGroup* group, const NvFlowOpGenericPinsIn* in, NvFlowOpGenericPinsOut* out) \
{ \
nameImpl##_execute((nameImpl*)op, (const name##PinsIn*)in, (name##PinsOut*)out); \
} \
NvFlowOpInterface* NvFlowOp_##nameImpl##_getOpInterface() \
{ \
static const NvFlowOpExecuteGroupDesc executeGroupDesc = {0, 0}; \
static NvFlowOpInterface iface = { \
NV_FLOW_REFLECT_INTERFACE_INIT(NvFlowOpInterface), \
#name, \
0, \
&name##PinsIn_NvFlowReflectDataType, \
&name##PinsOut_NvFlowReflectDataType, \
&executeGroupDesc, \
1u, \
NvFlowOp_##nameImpl##_createGeneric, \
NvFlowOp_##nameImpl##_destroyGeneric, \
NvFlowOp_##nameImpl##_executeGeneric, \
NvFlowOp_##nameImpl##_executeGroupGeneric \
}; \
return &iface; \
}
#define NV_FLOW_OP_TYPED(name) \
typedef struct name \
{ \
NvFlowOpInterface opInterface; \
NvFlowOp* op; \
}name; \
NV_FLOW_INLINE NvFlowBool32 NV_FLOW_REFLECT_XCONCAT(name,_init)(name* ptr, NvFlowOpInterface* opInterface, const NV_FLOW_REFLECT_XCONCAT(name,PinsIn)* pinsIn, NV_FLOW_REFLECT_XCONCAT(name,PinsOut)* pinsOut) \
{ \
NvFlowOpInterface_duplicate(&ptr->opInterface, opInterface); \
if (NvFlowReflectStringCompare(ptr->opInterface.opTypename, #name) == 0) \
{ \
ptr->op = ptr->opInterface.create(&ptr->opInterface, (const NvFlowOpGenericPinsIn*)pinsIn, (NvFlowOpGenericPinsOut*)pinsOut); \
return NV_FLOW_TRUE; \
} \
ptr->opInterface.create = 0; \
ptr->opInterface.destroy = 0; \
ptr->opInterface.execute = 0; \
ptr->opInterface.executeGroup = 0; \
ptr->op = 0; \
return NV_FLOW_FALSE; \
} \
NV_FLOW_INLINE void name##_destroy(name* ptr, const NV_FLOW_REFLECT_XCONCAT(name,PinsIn)* pinsIn, NV_FLOW_REFLECT_XCONCAT(name,PinsOut)* pinsOut) \
{ \
ptr->opInterface.destroy(ptr->op, (const NvFlowOpGenericPinsIn*)pinsIn, (NvFlowOpGenericPinsOut*)pinsOut); \
} \
NV_FLOW_INLINE void name##_execute(name* ptr, const NV_FLOW_REFLECT_XCONCAT(name,PinsIn)* pinsIn, NV_FLOW_REFLECT_XCONCAT(name,PinsOut)* pinsOut) \
{ \
ptr->opInterface.execute(ptr->op, (const NvFlowOpGenericPinsIn*)pinsIn, (NvFlowOpGenericPinsOut*)pinsOut); \
}
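// Example (editor sketch, hypothetical op): given reflect-declared pin
// structs MyOpPinsIn/MyOpPinsOut and an implementation struct MyOpImpl with
// MyOpImpl_create/_destroy/_execute functions, the macros above generate the
// generic plumbing and a typed wrapper:
//
// NV_FLOW_OP_IMPL(MyOp, MyOpImpl) // emits NvFlowOp_MyOpImpl_getOpInterface()
// NV_FLOW_OP_TYPED(MyOp) // emits struct MyOp with MyOp_init/_destroy/_execute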
/// ********************************* OpGraph ***************************************
// Reserved, not in use yet
struct NvFlowOpGraphInterface;
typedef struct NvFlowOpGraphInterface NvFlowOpGraphInterface;
/// ********************************* OpRuntime ***************************************
// Reserved, not in use yet
struct NvFlowOpRuntimeInterface;
typedef struct NvFlowOpRuntimeInterface NvFlowOpRuntimeInterface;
/// ********************************* Sparse ***************************************
struct NvFlowSparse;
typedef struct NvFlowSparse NvFlowSparse;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowSparse)
typedef struct NvFlowSparseParams
{
NvFlowSparseLayerParams* layers;
NvFlowUint layerCount;
NvFlowSparseLevelParams* levels;
NvFlowUint levelCount;
NvFlowInt4* locations;
NvFlowUint64 locationCount;
NvFlowUint2* tableRanges;
NvFlowUint64 tableRangeCount;
}NvFlowSparseParams;
NV_FLOW_INLINE NvFlowUint NvFlowSparseParams_layerToLayerParamIdx(const NvFlowSparseParams* params, int layer)
{
NvFlowUint retLayerParamIdx = ~0u;
for (NvFlowUint layerParamIdx = 0u; layerParamIdx < params->layerCount; layerParamIdx++)
{
if (params->layers[layerParamIdx].layer == layer)
{
retLayerParamIdx = layerParamIdx;
break;
}
}
return retLayerParamIdx;
}
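/* Lookup sketch: layers are matched by linear scan and ~0u is the "not present"
   sentinel, so callers must test the result before indexing params->layers. */
NV_FLOW_INLINE NvFlowBool32 NvFlowSparseParams_exampleLayerIsActive(const NvFlowSparseParams* params, int layer)
{
	return NvFlowSparseParams_layerToLayerParamIdx(params, layer) != ~0u ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}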
NV_FLOW_INLINE NvFlowUint NvFlowSparseParams_locationToBlockIdx(const NvFlowSparseParams* params, NvFlowInt4 location)
{
	const NvFlowSparseLevelParams* tableParams = &params->levels[0u];
NvFlowUint3 bucketIdx = {
(NvFlowUint)location.x & tableParams->tableDimLessOne.x,
(NvFlowUint)location.y & tableParams->tableDimLessOne.y,
(NvFlowUint)location.z & tableParams->tableDimLessOne.z
};
NvFlowUint bucketIdx1D = (bucketIdx.z << (tableParams->tableDimBits_xy)) |
(bucketIdx.y << tableParams->tableDimBits_x) |
(bucketIdx.x);
NvFlowUint2 range = params->tableRanges[bucketIdx1D];
NvFlowUint outBlockIdx = ~0u;
for (NvFlowUint blockIdx = range.x; blockIdx < range.y; blockIdx++)
{
NvFlowInt4 compareLocation = params->locations[blockIdx];
if (compareLocation.x == location.x &&
compareLocation.y == location.y &&
compareLocation.z == location.z &&
compareLocation.w == location.w)
{
outBlockIdx = blockIdx;
break;
}
}
return outBlockIdx;
}
NV_FLOW_INLINE NvFlowBool32 NvFlowBlockIdxToLocation(const NvFlowSparseParams* params, NvFlowUint blockIdx, NvFlowInt4* out_location)
{
NvFlowBool32 ret;
if (blockIdx < params->locationCount)
{
*out_location = params->locations[blockIdx];
ret = NV_FLOW_TRUE;
}
else
{
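		/* out of range: write a far-out-of-domain sentinel location */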
out_location->x = 0x40000000;
out_location->y = 0x40000000;
out_location->z = 0x40000000;
out_location->w = 0x40000000;
ret = NV_FLOW_FALSE;
}
return ret;
}
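/* Round-trip sketch: for a resident block, blockIdx -> location -> blockIdx is
   the identity, since the power-of-two spatial hash above (tableDimLessOne
   masks, tableDimBits shifts, per-bucket [range.x, range.y) slices of the
   locations array) stores each location once; out-of-range indices fail. */
NV_FLOW_INLINE NvFlowBool32 NvFlowSparseParams_exampleRoundTrip(const NvFlowSparseParams* params, NvFlowUint blockIdx)
{
	NvFlowInt4 location;
	if (!NvFlowBlockIdxToLocation(params, blockIdx, &location))
	{
		return NV_FLOW_FALSE;
	}
	return NvFlowSparseParams_locationToBlockIdx(params, location) == blockIdx ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}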
// TODO: expand reflection to cover the array fields of NvFlowSparseParams
#define NV_FLOW_REFLECT_TYPE NvFlowSparseParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint, layerCount, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, levelCount, 0, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
// TODO: expand reflection to cover the remaining NvFlowSparseLevelParams fields
#define NV_FLOW_REFLECT_TYPE NvFlowSparseLevelParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint, numLocations, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, numLayers, 0, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowSparseTexture
{
NvFlowTextureTransient* textureTransient;
NvFlowBufferTransient* sparseBuffer;
NvFlowSparseParams sparseParams;
NvFlowUint levelIdx;
NvFlowFormat format;
}NvFlowSparseTexture;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowTextureTransient)
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowBufferTransient)
#define NV_FLOW_REFLECT_TYPE NvFlowSparseTexture
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowTextureTransient, textureTransient, 0, 0)
NV_FLOW_REFLECT_POINTER(NvFlowBufferTransient, sparseBuffer, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, levelIdx, 0, 0)
NV_FLOW_REFLECT_ENUM(format, 0, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_INLINE void NvFlowSparseTexture_passThrough(NvFlowSparseTexture* dst, const NvFlowSparseTexture* src)
{
*dst = *src;
}
NV_FLOW_INLINE void NvFlowSparseTexture_duplicateWithFormat(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowSparseTexture* dst, const NvFlowSparseTexture* src, NvFlowFormat format)
{
*dst = *src;
NvFlowSparseLevelParams* levelParams = &dst->sparseParams.levels[dst->levelIdx];
	NvFlowTextureDesc texDesc = { eNvFlowTextureType_3d };
texDesc.usageFlags = eNvFlowTextureUsage_rwTexture | eNvFlowTextureUsage_texture;
texDesc.format = format;
texDesc.width = levelParams->dim.x;
texDesc.height = levelParams->dim.y;
texDesc.depth = levelParams->dim.z;
texDesc.mipLevels = 1u;
dst->textureTransient = contextInterface->getTextureTransient(context, &texDesc);
}
NV_FLOW_INLINE void NvFlowSparseTexture_duplicate(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowSparseTexture* dst, const NvFlowSparseTexture* src)
{
NvFlowSparseTexture_duplicateWithFormat(contextInterface, context, dst, src, src->format);
}
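/* Sketch: choosing between the helpers above. passThrough aliases src's
   transient for in-place chains; duplicate allocates a fresh transient with
   matching dims, level, and format whose contents are undefined until a pass
   writes them. */
NV_FLOW_INLINE void NvFlowSparseTexture_exampleSelectOutput(
	NvFlowContextInterface* contextInterface, NvFlowContext* context,
	NvFlowSparseTexture* dst, const NvFlowSparseTexture* src, NvFlowBool32 inPlace)
{
	if (inPlace)
	{
		NvFlowSparseTexture_passThrough(dst, src);
	}
	else
	{
		NvFlowSparseTexture_duplicate(contextInterface, context, dst, src);
	}
}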
typedef struct NvFlowSparseUpdateLayerParams
{
NvFlowFloat3 blockSizeWorld;
int layer;
NvFlowBool32 forceClear;
NvFlowBool32 forceDisableEmitters;
NvFlowBool32 forceDisableCoreSimulation;
}NvFlowSparseUpdateLayerParams;
typedef struct NvFlowSparseInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowSparse*(NV_FLOW_ABI* create)(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowUint maxLocations);
void(NV_FLOW_ABI* destroy)(NvFlowContext* context, NvFlowSparse* sparse);
void(NV_FLOW_ABI* reset)(NvFlowContext* context, NvFlowSparse* sparse, NvFlowUint maxLocations);
void(NV_FLOW_ABI* updateLayers)(NvFlowSparse* sparse, NvFlowSparseUpdateLayerParams* layers, NvFlowUint numLayers);
void(NV_FLOW_ABI* updateLocations)(NvFlowSparse* sparse, NvFlowInt4* locations, NvFlowUint numLocations, NvFlowUint3 baseBlockDimBits, NvFlowUint minLifetime);
void(NV_FLOW_ABI* updateLayerDeltaTimes)(NvFlowSparse* sparse, float* layerDeltaTimes, NvFlowUint64 layerDeltaTimeCount);
NvFlowBool32(NV_FLOW_ABI* getParams)(NvFlowSparse* sparse, NvFlowSparseParams* out);
void(NV_FLOW_ABI* addPasses)(NvFlowContext* context, NvFlowSparse* sparse, NvFlowBufferTransient** pBufferTransient);
void(NV_FLOW_ABI* addPassesNanoVdb)(
NvFlowContext* context,
NvFlowSparse* sparse,
NvFlowUint gridType,
NvFlowUint levelIdx,
NvFlowSparseNanoVdbParams* pParams,
NvFlowBufferTransient** pNanoVdbBufferTransient,
NvFlowBufferTransient** pCacheBufferTransient
);
void(NV_FLOW_ABI* addPassesNanoVdbComputeStats)(
NvFlowContext* context,
NvFlowSparse* sparse,
const NvFlowSparseNanoVdbParams* params,
NvFlowBufferTransient* nanoVdbBufferTransient,
NvFlowBufferTransient* cacheBufferTransient,
NvFlowBufferTransient* targetNanoVdbBuffer
);
void(NV_FLOW_ABI* addPassesMigrate)(
NvFlowContext* context,
NvFlowSparse* sparse,
const int* clearLayers,
NvFlowUint64 clearLayerCount,
NvFlowBool32* clearedNoMigrateOut,
NvFlowTextureTransient* oldTextureTransient,
const NvFlowTextureDesc* oldTexDesc,
NvFlowFormat targetFormat,
NvFlowUint targetLevelIdx,
NvFlowSparseTexture* valueOut,
NvFlowTextureDesc* texDescOut
);
NvFlowBufferTransient*(NV_FLOW_ABI* getSparseBuffer)(NvFlowContext* context, NvFlowSparse* sparse);
}NvFlowSparseInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowSparseInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(create, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroy, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(reset, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(updateLayers, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(updateLocations, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(updateLayerDeltaTimes, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getParams, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPasses, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassesNanoVdb, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassesNanoVdbComputeStats, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassesMigrate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getSparseBuffer, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
/// ********************************* SparseNanoVdbExport ***************************************
typedef struct NvFlowSparseNanoVdbExportParams
{
NvFlowBool32 enabled;
NvFlowBool32 statisticsEnabled;
NvFlowBool32 readbackEnabled;
NvFlowBool32 temperatureEnabled;
NvFlowBool32 fuelEnabled;
NvFlowBool32 burnEnabled;
NvFlowBool32 smokeEnabled;
NvFlowBool32 velocityEnabled;
NvFlowBool32 divergenceEnabled;
}NvFlowSparseNanoVdbExportParams;
#define NvFlowSparseNanoVdbExportParams_default_init { \
NV_FLOW_FALSE, /*enabled*/ \
NV_FLOW_TRUE, /*statisticsEnabled*/ \
NV_FLOW_FALSE, /*readbackEnabled*/ \
NV_FLOW_FALSE, /*temperatureEnabled*/ \
NV_FLOW_FALSE, /*fuelEnabled*/ \
NV_FLOW_FALSE, /*burnEnabled*/ \
NV_FLOW_TRUE, /*smokeEnabled*/ \
NV_FLOW_FALSE, /*velocityEnabled*/ \
NV_FLOW_FALSE, /*divergenceEnabled*/ \
}
static const NvFlowSparseNanoVdbExportParams NvFlowSparseNanoVdbExportParams_default = NvFlowSparseNanoVdbExportParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowSparseNanoVdbExportParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, statisticsEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, readbackEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, temperatureEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, fuelEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, burnEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, smokeEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, velocityEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, divergenceEnabled, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowSparseNanoVdbExportParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowSparseNanoVdbExportPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseInterface* sparseInterface;
NvFlowSparse* sparse;
const NvFlowSparseNanoVdbExportParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture velocity;
NvFlowSparseTexture density;
}NvFlowSparseNanoVdbExportPinsIn;
typedef struct NvFlowSparseNanoVdbExportReadback
{
NvFlowUint64 globalFrameCompleted;
NvFlowUint8* temperatureNanoVdbReadback;
NvFlowUint64 temperatureNanoVdbReadbackSize;
NvFlowUint8* fuelNanoVdbReadback;
NvFlowUint64 fuelNanoVdbReadbackSize;
NvFlowUint8* burnNanoVdbReadback;
NvFlowUint64 burnNanoVdbReadbackSize;
NvFlowUint8* smokeNanoVdbReadback;
NvFlowUint64 smokeNanoVdbReadbackSize;
NvFlowUint8* velocityNanoVdbReadback;
NvFlowUint64 velocityNanoVdbReadbackSize;
NvFlowUint8* divergenceNanoVdbReadback;
NvFlowUint64 divergenceNanoVdbReadbackSize;
}NvFlowSparseNanoVdbExportReadback;
#define NvFlowSparseNanoVdbExportReadback_default_init { \
~0llu, /*globalFrameCompleted*/ \
0, /*temperatureNanoVdbReadback*/ \
0, /*temperatureNanoVdbReadbackSize*/ \
0, /*fuelNanoVdbReadback*/ \
0, /*fuelNanoVdbReadbackSize*/ \
0, /*burnNanoVdbReadback*/ \
0, /*burnNanoVdbReadbackSize*/ \
0, /*smokeNanoVdbReadback*/ \
0, /*smokeNanoVdbReadbackSize*/ \
0, /*velocityNanoVdbReadback*/ \
0, /*velocityNanoVdbReadbackSize*/ \
0, /*divergenceNanoVdbReadback*/ \
0, /*divergenceNanoVdbReadbackSize*/ \
}
static const NvFlowSparseNanoVdbExportReadback NvFlowSparseNanoVdbExportReadback_default = NvFlowSparseNanoVdbExportReadback_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowSparseNanoVdbExportReadback
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, globalFrameCompleted, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowUint8, temperatureNanoVdbReadback, temperatureNanoVdbReadbackSize, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowUint8, fuelNanoVdbReadback, fuelNanoVdbReadbackSize, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowUint8, burnNanoVdbReadback, burnNanoVdbReadbackSize, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowUint8, smokeNanoVdbReadback, smokeNanoVdbReadbackSize, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowUint8, velocityNanoVdbReadback, velocityNanoVdbReadbackSize, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowUint8, divergenceNanoVdbReadback, divergenceNanoVdbReadbackSize, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowSparseNanoVdbExportReadback_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowSparseNanoVdbExportPinsOut
{
NvFlowBufferTransient* temperatureNanoVdb;
NvFlowBufferTransient* fuelNanoVdb;
NvFlowBufferTransient* burnNanoVdb;
NvFlowBufferTransient* smokeNanoVdb;
NvFlowBufferTransient* velocityNanoVdb;
NvFlowBufferTransient* divergenceNanoVdb;
NvFlowSparseNanoVdbExportReadback* readbacks;
NvFlowUint64 readbackCount;
}NvFlowSparseNanoVdbExportPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowSparseNanoVdbExportPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowSparseInterface, sparseInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowSparse, sparse, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowSparseNanoVdbExportParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, density, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowSparseNanoVdbExportPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowBufferTransient, temperatureNanoVdb, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowBufferTransient, fuelNanoVdb, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowBufferTransient, burnNanoVdb, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowBufferTransient, smokeNanoVdb, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowBufferTransient, velocityNanoVdb, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowBufferTransient, divergenceNanoVdb, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowSparseNanoVdbExportReadback, readbacks, readbackCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowSparseNanoVdbExport)
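/* Polling sketch for the readback pins: a readback is treated as ready once the
   context's last completed global frame has reached the frame recorded in it
   (an assumption consistent with getLastGlobalFrameCompleted in
   NvFlowContextInterface). Returns the first ready smoke grid found, or null. */
NV_FLOW_INLINE const NvFlowUint8* NvFlowSparseNanoVdbExport_exampleLatestSmoke(
	NvFlowContextInterface* contextInterface, NvFlowContext* context,
	const NvFlowSparseNanoVdbExportPinsOut* pinsOut, NvFlowUint64* pSizeOut)
{
	NvFlowUint64 lastCompleted = contextInterface->getLastGlobalFrameCompleted(context);
	for (NvFlowUint64 idx = 0u; idx < pinsOut->readbackCount; idx++)
	{
		const NvFlowSparseNanoVdbExportReadback* readback = &pinsOut->readbacks[idx];
		if (readback->globalFrameCompleted <= lastCompleted && readback->smokeNanoVdbReadback)
		{
			*pSizeOut = readback->smokeNanoVdbReadbackSize;
			return readback->smokeNanoVdbReadback;
		}
	}
	*pSizeOut = 0llu;
	return 0;
}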
/// ********************************* Advect ***************************************
typedef struct NvFlowAdvectionChannelParams
{
float secondOrderBlendThreshold;
float secondOrderBlendFactor;
float damping;
float fade;
}NvFlowAdvectionChannelParams;
#define NvFlowAdvectionChannelParams_default_init { \
0.5f, /*secondOrderBlendThreshold*/ \
0.001f, /*secondOrderBlendFactor*/ \
0.01f, /*damping*/ \
0.f /*fade*/ \
}
static const NvFlowAdvectionChannelParams NvFlowAdvectionChannelParams_default = NvFlowAdvectionChannelParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionChannelParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(float, secondOrderBlendThreshold, 0, 0)
NV_FLOW_REFLECT_VALUE(float, secondOrderBlendFactor, 0, 0)
NV_FLOW_REFLECT_VALUE(float, damping, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fade, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowAdvectionChannelParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowAdvectionCombustionParams
{
NvFlowBool32 enabled; //!< Allows advection to be disabled when not in use
NvFlowBool32 downsampleEnabled; //!< Allows density downsample in velocity advection to be disabled
NvFlowBool32 combustionEnabled; //!< Allows combustion to be disabled
NvFlowBool32 forceFadeEnabled; //!< Force fade to apply even when advection disabled
NvFlowAdvectionChannelParams velocity;
NvFlowAdvectionChannelParams divergence;
NvFlowAdvectionChannelParams temperature;
NvFlowAdvectionChannelParams fuel;
NvFlowAdvectionChannelParams burn;
NvFlowAdvectionChannelParams smoke;
float ignitionTemp; //!< Minimum temperature for combustion
float burnPerTemp; //!< Burn amount per unit temperature above ignitionTemp
float fuelPerBurn; //!< Fuel consumed per unit burn
float tempPerBurn; //!< Temperature increase per unit burn
float smokePerBurn; //!< Density increase per unit burn
float divergencePerBurn; //!< Expansion per unit burn
float buoyancyPerTemp; //!< Buoyant force per unit temperature
float buoyancyPerSmoke; //!< Buoyant force per unit smoke
float buoyancyMaxSmoke; //!< Smoke clamp value applied before computing smoke buoyancy
float coolingRate; //!< Cooling rate, exponential
NvFlowFloat3 gravity;
NvFlowBool32 globalFetch; //!< Global fetch, removes velocity clamping
}NvFlowAdvectionCombustionParams;
#define NvFlowAdvectionCombustionParams_default_init { \
NV_FLOW_TRUE, /*enabled*/ \
NV_FLOW_TRUE, /*downsampleEnabled*/ \
NV_FLOW_TRUE, /*combustionEnabled*/ \
NV_FLOW_FALSE, /*forceFadeEnabled*/ \
{0.001f, 0.5f, 0.01f, 1.00f}, /*velocity : {secondOrderBlendThreshold, secondOrderBlendFactor, damping, fade}*/ \
{0.001f, 0.5f, 0.01f, 1.00f}, /*divergence : {secondOrderBlendThreshold, secondOrderBlendFactor, damping, fade}*/ \
{0.001f, 0.9f, 0.00f, 0.00f}, /*temperature : {secondOrderBlendThreshold, secondOrderBlendFactor, damping, fade}*/ \
{0.001f, 0.9f, 0.00f, 0.00f}, /*fuel : {secondOrderBlendThreshold, secondOrderBlendFactor, damping, fade}*/ \
{0.001f, 0.9f, 0.00f, 0.00f}, /*burn : {secondOrderBlendThreshold, secondOrderBlendFactor, damping, fade}*/ \
{0.001f, 0.9f, 0.30f, 0.65f}, /*smoke : {secondOrderBlendThreshold, secondOrderBlendFactor, damping, fade}*/ \
0.05f, /*ignitionTemp*/ \
4.f, /*burnPerTemp*/ \
0.25f, /*fuelPerBurn*/ \
5.f, /*tempPerBurn*/ \
3.f, /*smokePerBurn*/ \
0.f, /*divergencePerBurn*/ \
2.f, /*buoyancyPerTemp*/ \
0.f, /*buoyancyPerSmoke*/ \
1.f, /*buoyancyMaxSmoke*/ \
1.5f, /*coolingRate*/ \
0.f, /*gravity.x*/ \
0.f, /*gravity.y*/ \
-100.f, /*gravity.z*/ \
0u /*globalFetch*/ \
}
static const NvFlowAdvectionCombustionParams NvFlowAdvectionCombustionParams_default = NvFlowAdvectionCombustionParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionCombustionParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, downsampleEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, combustionEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, forceFadeEnabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowAdvectionChannelParams, velocity, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowAdvectionChannelParams, divergence, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowAdvectionChannelParams, temperature, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowAdvectionChannelParams, fuel, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowAdvectionChannelParams, burn, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowAdvectionChannelParams, smoke, 0, 0)
NV_FLOW_REFLECT_VALUE(float, ignitionTemp, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burnPerTemp, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuelPerBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, tempPerBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smokePerBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergencePerBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, buoyancyPerTemp, 0, 0)
NV_FLOW_REFLECT_VALUE(float, buoyancyPerSmoke, 0, 0)
NV_FLOW_REFLECT_VALUE(float, buoyancyMaxSmoke, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coolingRate, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, gravity, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, globalFetch, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowAdvectionCombustionParams_default)
#undef NV_FLOW_REFLECT_TYPE
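/* Reading the parameter docs above as a per-cell update, a sketch of the
   intended combustion model (exact integration lives in the op implementation,
   so treat this as an interpretation, not the shader code):
     burn         = burnPerTemp * max(temperature - ignitionTemp, 0), limited by available fuel
     fuel        -= fuelPerBurn * burn
     temperature += tempPerBurn * burn, then decays exponentially via coolingRate
     smoke       += smokePerBurn * burn
     divergence  += divergencePerBurn * burn
     buoyancy     = -gravity * (buoyancyPerTemp * temperature
                    + buoyancyPerSmoke * min(smoke, buoyancyMaxSmoke)) */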
typedef struct NvFlowAdvectionSimplePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
NvFlowSparseTexture velocity;
}NvFlowAdvectionSimplePinsIn;
typedef struct NvFlowAdvectionSimplePinsOut
{
NvFlowSparseTexture velocity;
}NvFlowAdvectionSimplePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionSimplePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionSimplePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowAdvectionSimple)
typedef struct NvFlowAdvectionCombustionDensityPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowAdvectionCombustionParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture velocity;
NvFlowSparseTexture density;
NvFlowSparseTexture densityTemp;
}NvFlowAdvectionCombustionDensityPinsIn;
typedef struct NvFlowAdvectionCombustionDensityPinsOut
{
NvFlowSparseTexture density;
}NvFlowAdvectionCombustionDensityPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionCombustionDensityPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowAdvectionCombustionParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, density, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, densityTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionCombustionDensityPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, density, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowAdvectionCombustionDensity)
typedef struct NvFlowAdvectionCombustionVelocityPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowAdvectionCombustionParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture velocity;
NvFlowSparseTexture velocityTemp;
NvFlowSparseTexture density;
NvFlowSparseTexture densityCoarse;
}NvFlowAdvectionCombustionVelocityPinsIn;
typedef struct NvFlowAdvectionCombustionVelocityPinsOut
{
NvFlowSparseTexture velocity;
NvFlowSparseTexture densityCoarse;
}NvFlowAdvectionCombustionVelocityPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionCombustionVelocityPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowAdvectionCombustionParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocityTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, density, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, densityCoarse, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowAdvectionCombustionVelocityPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, densityCoarse, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowAdvectionCombustionVelocity)
/// ********************************* Pressure ***************************************
typedef struct NvFlowPressureParams
{
NvFlowBool32 enabled;
}NvFlowPressureParams;
#define NvFlowPressureParams_default_init { \
NV_FLOW_TRUE, /*enabled*/ \
}
static const NvFlowPressureParams NvFlowPressureParams_default = NvFlowPressureParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowPressureParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowPressureParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowPressurePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
const NvFlowPressureParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture velocity;
}NvFlowPressurePinsIn;
typedef struct NvFlowPressurePinsOut
{
NvFlowSparseTexture velocity;
}NvFlowPressurePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowPressurePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowPressureParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowPressurePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowPressure)
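/* Lifecycle sketch for the wrapper generated just above. The interface would
   come from NvFlowOpList::pPressure() (declared at the end of this header);
   pin population is elided, and real code would create once, execute per frame,
   and destroy at shutdown rather than per call. */
NV_FLOW_INLINE void NvFlowPressure_exampleStep(NvFlowOpInterface* opInterface, NvFlowPressurePinsIn* pinsIn, NvFlowPressurePinsOut* pinsOut)
{
	NvFlowPressure op;
	if (NvFlowPressure_init(&op, opInterface, pinsIn, pinsOut))
	{
		NvFlowPressure_execute(&op, pinsIn, pinsOut);
		NvFlowPressure_destroy(&op, pinsIn, pinsOut);
	}
}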
/// ********************************* Vorticity ***************************************
typedef struct NvFlowVorticityParams
{
NvFlowBool32 enabled;
float forceScale;
float velocityMask;
float constantMask;
float densityMask;
float velocityLogScale;
float velocityLinearMask;
float temperatureMask;
float fuelMask;
float burnMask;
float smokeMask;
}NvFlowVorticityParams;
#define NvFlowVorticityParams_default_init { \
NV_FLOW_TRUE, /*enabled*/ \
0.6f, /*forceScale*/ \
1.f, /*velocityMask*/ \
0.f, /*constantMask*/ \
0.f, /*densityMask*/ \
1.f, /*velocityLogScale*/ \
0.f, /*velocityLinearMask*/ \
0.f, /*temperatureMask*/ \
0.f, /*fuelMask*/ \
0.f, /*burnMask*/ \
0.f /*smokeMask*/ \
}
static const NvFlowVorticityParams NvFlowVorticityParams_default = NvFlowVorticityParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowVorticityParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(float, forceScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, velocityMask, 0, 0)
NV_FLOW_REFLECT_VALUE(float, constantMask, 0, 0)
NV_FLOW_REFLECT_VALUE(float, densityMask, 0, 0)
NV_FLOW_REFLECT_VALUE(float, velocityLogScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, velocityLinearMask, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperatureMask, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuelMask, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burnMask, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smokeMask, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowVorticityParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowVorticityPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowVorticityParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture velocity;
NvFlowSparseTexture coarseDensity;
}NvFlowVorticityPinsIn;
typedef struct NvFlowVorticityPinsOut
{
NvFlowSparseTexture velocity;
}NvFlowVorticityPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowVorticityPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowVorticityParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, coarseDensity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowVorticityPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowVorticity)
/// ********************************* Summary ***************************************
typedef struct NvFlowSummaryAllocateParams
{
float smokeThreshold;
float speedThreshold;
float speedThresholdMinSmoke;
NvFlowBool32 enableNeighborAllocation;
}NvFlowSummaryAllocateParams;
#define NvFlowSummaryAllocateParams_default_init { \
0.02f, /*smokeThreshold*/ \
1.f, /*speedThreshold*/ \
0.f, /*speedThresholdMinSmoke*/ \
NV_FLOW_TRUE, /*enableNeighborAllocation*/ \
}
static const NvFlowSummaryAllocateParams NvFlowSummaryAllocateParams_default = NvFlowSummaryAllocateParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowSummaryAllocateParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(float, smokeThreshold, 0, 0)
NV_FLOW_REFLECT_VALUE(float, speedThreshold, 0, 0)
NV_FLOW_REFLECT_VALUE(float, speedThresholdMinSmoke, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableNeighborAllocation, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowSummaryAllocateParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowSummaryFeedback
{
void* data;
}NvFlowSummaryFeedback;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowSummaryFeedback)
typedef struct NvFlowSummaryPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSummaryFeedback feedback;
NvFlowSparseTexture velocity;
NvFlowSparseTexture densityCoarse;
const NvFlowSummaryAllocateParams** params;
NvFlowUint64 paramCount;
}NvFlowSummaryPinsIn;
typedef struct NvFlowSummaryPinsOut
{
NvFlowUint unused;
}NvFlowSummaryPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowSummaryPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSummaryFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, densityCoarse, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowSummaryAllocateParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowSummaryPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint, unused, eNvFlowReflectHint_none, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowSummary)
typedef struct NvFlowSummaryAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
const NvFlowSummaryAllocateParams** params;
NvFlowUint64 paramCount;
}NvFlowSummaryAllocatePinsIn;
typedef struct NvFlowSummaryAllocatePinsOut
{
NvFlowSummaryFeedback feedback;
NvFlowInt4* locations;
NvFlowUint64 locationCount;
}NvFlowSummaryAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowSummaryAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowSummaryAllocateParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowSummaryAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSummaryFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowSummaryAllocate)
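/* The pin types above imply a feedback loop (a sketch of the intent, not a
   documented contract): NvFlowSummaryAllocate runs first, emitting requested
   locations (suitable for NvFlowSparseInterface::updateLocations) plus an
   opaque feedback handle; after simulation, NvFlowSummary scans
   velocity/densityCoarse against the allocate thresholds and reports through
   that handle, driving the next frame's allocation. */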
/// ********************************* NvFlowOpList ***************************************
typedef struct NvFlowOpList
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowOpGraphInterface* (NV_FLOW_ABI* getOpGraphInterface)();
NvFlowOpRuntimeInterface* (NV_FLOW_ABI* getOpRuntimeInterface)();
NvFlowSparseInterface* (NV_FLOW_ABI* getSparseInterface)();
NvFlowOpInterface* (NV_FLOW_ABI* pSparseNanoVdbExport)();
NvFlowOpInterface* (NV_FLOW_ABI* pAdvectionSimple)();
NvFlowOpInterface* (NV_FLOW_ABI* pAdvectionCombustionDensity)();
NvFlowOpInterface* (NV_FLOW_ABI* pAdvectionCombustionVelocity)();
NvFlowOpInterface* (NV_FLOW_ABI* pPressure)();
NvFlowOpInterface* (NV_FLOW_ABI* pVorticity)();
NvFlowOpInterface* (NV_FLOW_ABI* pSummary)();
NvFlowOpInterface* (NV_FLOW_ABI* pSummaryAllocate)();
}NvFlowOpList;
#define NV_FLOW_REFLECT_TYPE NvFlowOpList
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(getOpGraphInterface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getOpRuntimeInterface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getSparseInterface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pSparseNanoVdbExport, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pAdvectionSimple, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pAdvectionCombustionDensity, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pAdvectionCombustionVelocity, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pPressure, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pVorticity, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pSummary, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pSummaryAllocate, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowOpList* (NV_FLOW_ABI* PFN_NvFlowGetOpList)();
NV_FLOW_API NvFlowOpList* NvFlowGetOpList();
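/* Sketch: resolving an op interface through the exported op list. In practice
   PFN_NvFlowGetOpList is fetched from the Flow module via a
   dlsym/GetProcAddress-style loader (loader specifics are not part of this
   header). */
NV_FLOW_INLINE NvFlowOpInterface* NvFlowOpList_examplePressure(PFN_NvFlowGetOpList getOpList)
{
	NvFlowOpList* opList = getOpList ? getOpList() : 0;
	return opList ? opList->pPressure() : 0;
}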
#endif
NVIDIA-Omniverse/PhysX/flow/include/nvflow/NvFlowContext.h

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_CONTEXT_H
#define NV_FLOW_CONTEXT_H
#include "NvFlowReflect.h"
/// ************************* NvFlowContext **********************************
struct NvFlowContext;
typedef struct NvFlowContext NvFlowContext;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowContext)
typedef enum NvFlowTextureBindingType
{
eNvFlowTextureBindingType_separateSampler = 0,
eNvFlowTextureBindingType_combinedSampler = 1,
eNvFlowTextureBindingType_count = 2,
eNvFlowTextureBindingType_maxEnum = 0x7FFFFFFF
}NvFlowTextureBindingType;
typedef struct NvFlowContextConfig
{
NvFlowContextApi api;
NvFlowTextureBindingType textureBinding;
}NvFlowContextConfig;
typedef struct NvFlowBytecode
{
const void* data;
NvFlowUint64 sizeInBytes;
}NvFlowBytecode;
struct NvFlowBuffer;
typedef struct NvFlowBuffer NvFlowBuffer;
struct NvFlowBufferTransient;
typedef struct NvFlowBufferTransient NvFlowBufferTransient;
struct NvFlowBufferAcquire;
typedef struct NvFlowBufferAcquire NvFlowBufferAcquire;
typedef enum NvFlowMemoryType
{
eNvFlowMemoryType_device = 0,
eNvFlowMemoryType_upload = 1,
eNvFlowMemoryType_readback = 2,
eNvFlowMemoryType_maxEnum = 0x7FFFFFFF
}NvFlowMemoryType;
typedef NvFlowUint NvFlowBufferUsageFlags;
typedef enum NvFlowBufferUsage
{
eNvFlowBufferUsage_constantBuffer = 0x01,
eNvFlowBufferUsage_structuredBuffer = 0x02,
eNvFlowBufferUsage_buffer = 0x04,
eNvFlowBufferUsage_rwStructuredBuffer = 0x08,
eNvFlowBufferUsage_rwBuffer = 0x10,
eNvFlowBufferUsage_indirectBuffer = 0x20,
eNvFlowBufferUsage_bufferCopySrc = 0x40,
eNvFlowBufferUsage_bufferCopyDst = 0x80,
eNvFlowBufferUsage_maxEnum = 0x7FFFFFFF
}NvFlowBufferUsage;
typedef struct NvFlowBufferDesc
{
NvFlowBufferUsageFlags usageFlags;
NvFlowFormat format;
NvFlowUint structureStride;
NvFlowUint64 sizeInBytes;
}NvFlowBufferDesc;
struct NvFlowTexture;
typedef struct NvFlowTexture NvFlowTexture;
struct NvFlowTextureTransient;
typedef struct NvFlowTextureTransient NvFlowTextureTransient;
struct NvFlowTextureAcquire;
typedef struct NvFlowTextureAcquire NvFlowTextureAcquire;
struct NvFlowSampler;
typedef struct NvFlowSampler NvFlowSampler;
typedef enum NvFlowTextureType
{
eNvFlowTextureType_1d = 0,
eNvFlowTextureType_2d = 1,
eNvFlowTextureType_3d = 2,
eNvFlowTextureType_maxEnum = 0x7FFFFFFF
}NvFlowTextureType;
typedef NvFlowUint NvFlowTextureUsageFlags;
typedef enum NvFlowTextureUsage
{
eNvFlowTextureUsage_texture = 0x01,
eNvFlowTextureUsage_rwTexture = 0x02,
eNvFlowTextureUsage_textureCopySrc = 0x04,
eNvFlowTextureUsage_textureCopyDst = 0x08,
eNvFlowTextureUsage_maxEnum = 0x7FFFFFFF
}NvFlowTextureUsage;
typedef struct NvFlowTextureDesc
{
NvFlowTextureType textureType;
NvFlowTextureUsageFlags usageFlags;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NvFlowUint depth;
NvFlowUint mipLevels;
NvFlowFloat4 optimizedClearValue;
}NvFlowTextureDesc;
typedef enum NvFlowSamplerAddressMode
{
eNvFlowSamplerAddressMode_wrap = 0,
eNvFlowSamplerAddressMode_clamp = 1,
eNvFlowSamplerAddressMode_mirror = 2,
eNvFlowSamplerAddressMode_border = 3,
eNvFlowSamplerAddressMode_count = 4,
eNvFlowSamplerAddressMode_maxEnum = 0x7FFFFFFF
}NvFlowSamplerAddressMode;
typedef enum NvFlowSamplerFilterMode
{
eNvFlowSamplerFilterMode_point = 0,
eNvFlowSamplerFilterMode_linear = 1,
eNvFlowSamplerFilterMode_count = 2,
eNvFlowSamplerFilterMode_maxEnum = 0x7FFFFFFF
}NvFlowSamplerFilterMode;
typedef struct NvFlowSamplerDesc
{
NvFlowSamplerAddressMode addressModeU;
NvFlowSamplerAddressMode addressModeV;
NvFlowSamplerAddressMode addressModeW;
NvFlowSamplerFilterMode filterMode;
}NvFlowSamplerDesc;
typedef enum NvFlowDescriptorType
{
eNvFlowDescriptorType_unknown = 0,
/// Explicit in NFSL shader code
eNvFlowDescriptorType_constantBuffer = 1, // HLSL register b
eNvFlowDescriptorType_structuredBuffer = 2, // HLSL register t
eNvFlowDescriptorType_buffer = 3, // HLSL register t
eNvFlowDescriptorType_texture = 4, // HLSL register t
eNvFlowDescriptorType_sampler = 5, // HLSL register s
eNvFlowDescriptorType_rwStructuredBuffer = 6, // HLSL register u
eNvFlowDescriptorType_rwBuffer = 7, // HLSL register u
eNvFlowDescriptorType_rwTexture = 8, // HLSL register u
/// If requiresCombinedTextureSampler, uses TextureSampler instead of separate texture and sampler
eNvFlowDescriptorType_textureSampler = 9, // Vulkan only
/// Descriptors not explicitly mentioned in shaders
eNvFlowDescriptorType_indirectBuffer = 10, // No register
eNvFlowDescriptorType_bufferCopySrc = 11, // No register
eNvFlowDescriptorType_bufferCopyDst = 12, // No register
eNvFlowDescriptorType_textureCopySrc = 13, // No register
eNvFlowDescriptorType_textureCopyDst = 14, // No register
eNvFlowDescriptorType_count = 15,
eNvFlowDescriptorType_maxEnum = 0x7FFFFFFF
}NvFlowDescriptorType;
typedef struct NvFlowResource
{
NvFlowBufferTransient* bufferTransient;
NvFlowTextureTransient* textureTransient;
NvFlowSampler* sampler;
}NvFlowResource;
typedef enum NvFlowRegisterHlsl
{
eNvFlowRegisterHlsl_unknown = 0,
eNvFlowRegisterHlsl_b = 1,
eNvFlowRegisterHlsl_t = 2,
eNvFlowRegisterHlsl_s = 3,
eNvFlowRegisterHlsl_u = 4,
eNvFlowRegisterHlsl_count = 5,
eNvFlowRegisterHlsl_maxEnum = 0x7FFFFFFF
}NvFlowRegisterHlsl;
typedef struct NvFlowDescriptorWriteD3D12
{
NvFlowRegisterHlsl registerHlsl;
NvFlowUint registerIndex;
NvFlowUint space;
}NvFlowDescriptorWriteD3D12;
typedef struct NvFlowDescriptorWriteVulkan
{
NvFlowUint binding;
NvFlowUint arrayIndex;
NvFlowUint set;
}NvFlowDescriptorWriteVulkan;
typedef struct NvFlowDescriptorWriteUnion
{
NvFlowDescriptorWriteD3D12 d3d12;
NvFlowDescriptorWriteVulkan vulkan;
}NvFlowDescriptorWriteUnion;
typedef struct NvFlowDescriptorWrite
{
NvFlowDescriptorType type;
NvFlowDescriptorWriteUnion write;
}NvFlowDescriptorWrite;
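/* Fill-in sketch for one descriptor write targeting the Vulkan backend; under
   D3D12 the d3d12 union member would be populated instead, keyed off
   getContextConfig (see NvFlowContextInterface below). */
NV_FLOW_INLINE NvFlowDescriptorWrite NvFlowDescriptorWrite_exampleVulkanTexture(NvFlowUint binding, NvFlowUint set)
{
	NvFlowDescriptorWrite write = { eNvFlowDescriptorType_texture };
	write.write.vulkan.binding = binding;
	write.write.vulkan.arrayIndex = 0u;
	write.write.vulkan.set = set;
	return write;
}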
typedef struct NvFlowBindingDescD3D12
{
NvFlowRegisterHlsl registerHlsl;
NvFlowUint registerBegin;
NvFlowUint numRegisters;
NvFlowUint space;
}NvFlowBindingDescD3D12;
typedef struct NvFlowBindingDescVulkan
{
NvFlowUint binding;
NvFlowUint descriptorCount;
NvFlowUint set;
}NvFlowBindingDescVulkan;
typedef struct NvFlowBindingDescUnion
{
NvFlowBindingDescD3D12 d3d12;
NvFlowBindingDescVulkan vulkan;
}NvFlowBindingDescUnion;
typedef struct NvFlowBindingDesc
{
NvFlowDescriptorType type;
NvFlowBindingDescUnion bindingDesc;
}NvFlowBindingDesc;
struct NvFlowComputePipeline;
typedef struct NvFlowComputePipeline NvFlowComputePipeline;
typedef struct NvFlowComputePipelineDesc
{
NvFlowUint numBindingDescs;
NvFlowBindingDesc* bindingDescs;
NvFlowBytecode bytecode;
}NvFlowComputePipelineDesc;
typedef struct NvFlowPassComputeParams
{
NvFlowComputePipeline* pipeline;
NvFlowUint3 gridDim;
NvFlowUint numDescriptorWrites;
const NvFlowDescriptorWrite* descriptorWrites;
const NvFlowResource* resources;
const char* debugLabel;
}NvFlowPassComputeParams;
typedef struct NvFlowPassCopyBufferParams
{
NvFlowUint64 srcOffset;
NvFlowUint64 dstOffset;
NvFlowUint64 numBytes;
NvFlowBufferTransient* src;
NvFlowBufferTransient* dst;
const char* debugLabel;
}NvFlowPassCopyBufferParams;
typedef struct NvFlowPassCopyBufferToTextureParams
{
NvFlowUint64 bufferOffset;
NvFlowUint bufferRowPitch;
NvFlowUint bufferDepthPitch;
NvFlowUint textureMipLevel;
NvFlowUint3 textureOffset;
NvFlowUint3 textureExtent;
NvFlowBufferTransient* src;
NvFlowTextureTransient* dst;
const char* debugLabel;
}NvFlowPassCopyBufferToTextureParams;
typedef struct NvFlowPassCopyTextureToBufferParams
{
NvFlowUint64 bufferOffset;
NvFlowUint bufferRowPitch;
NvFlowUint bufferDepthPitch;
NvFlowUint textureMipLevel;
NvFlowUint3 textureOffset;
NvFlowUint3 textureExtent;
NvFlowTextureTransient* src;
NvFlowBufferTransient* dst;
const char* debugLabel;
}NvFlowPassCopyTextureToBufferParams;
typedef struct NvFlowPassCopyTextureParams
{
NvFlowUint srcMipLevel;
NvFlowUint3 srcOffset;
NvFlowUint dstMipLevel;
NvFlowUint3 dstOffset;
NvFlowUint3 extent;
NvFlowTextureTransient* src;
NvFlowTextureTransient* dst;
const char* debugLabel;
}NvFlowPassCopyTextureParams;
typedef void(*NvFlowContextThreadPoolTask_t)(NvFlowUint taskIdx, NvFlowUint threadIdx, void* sharedMem, void* userdata);
typedef struct NvFlowContextInterface
{
NV_FLOW_REFLECT_INTERFACE();
void(NV_FLOW_ABI* getContextConfig)(NvFlowContext* context, NvFlowContextConfig* config);
NvFlowUint64(NV_FLOW_ABI* getCurrentFrame)(NvFlowContext* context);
NvFlowUint64(NV_FLOW_ABI* getLastFrameCompleted)(NvFlowContext* context);
NvFlowUint64(NV_FLOW_ABI* getCurrentGlobalFrame)(NvFlowContext* context);
NvFlowUint64(NV_FLOW_ABI* getLastGlobalFrameCompleted)(NvFlowContext* context);
NvFlowLogPrint_t(NV_FLOW_ABI* getLogPrint)(NvFlowContext* context);
void(NV_FLOW_ABI* executeTasks)(NvFlowContext* context, NvFlowUint taskCount, NvFlowUint taskGranularity, NvFlowContextThreadPoolTask_t task, void* userdata);
NvFlowBuffer*(NV_FLOW_ABI* createBuffer)(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc);
void(NV_FLOW_ABI* destroyBuffer)(NvFlowContext* context, NvFlowBuffer* buffer);
NvFlowBufferTransient*(NV_FLOW_ABI* getBufferTransient)(NvFlowContext* context, const NvFlowBufferDesc* desc);
NvFlowBufferTransient*(NV_FLOW_ABI* registerBufferAsTransient)(NvFlowContext* context, NvFlowBuffer* buffer);
NvFlowBufferAcquire*(NV_FLOW_ABI* enqueueAcquireBuffer)(NvFlowContext* context, NvFlowBufferTransient* buffer);
NvFlowBool32(NV_FLOW_ABI* getAcquiredBuffer)(NvFlowContext* context, NvFlowBufferAcquire* acquire, NvFlowBuffer** outBuffer);
void*(NV_FLOW_ABI* mapBuffer)(NvFlowContext* context, NvFlowBuffer* buffer);
void(NV_FLOW_ABI* unmapBuffer)(NvFlowContext* context, NvFlowBuffer* buffer);
NvFlowBufferTransient*(NV_FLOW_ABI* getBufferTransientById)(NvFlowContext* context, NvFlowUint64 bufferId);
NvFlowTexture*(NV_FLOW_ABI* createTexture)(NvFlowContext* context, const NvFlowTextureDesc* desc);
void(NV_FLOW_ABI* destroyTexture)(NvFlowContext* context, NvFlowTexture* texture);
NvFlowTextureTransient*(NV_FLOW_ABI* getTextureTransient)(NvFlowContext* context, const NvFlowTextureDesc* desc);
NvFlowTextureTransient*(NV_FLOW_ABI* registerTextureAsTransient)(NvFlowContext* context, NvFlowTexture* texture);
NvFlowTextureAcquire*(NV_FLOW_ABI* enqueueAcquireTexture)(NvFlowContext* context, NvFlowTextureTransient* texture);
NvFlowBool32(NV_FLOW_ABI* getAcquiredTexture)(NvFlowContext* context, NvFlowTextureAcquire* acquire, NvFlowTexture** outTexture);
NvFlowTextureTransient*(NV_FLOW_ABI* getTextureTransientById)(NvFlowContext* context, NvFlowUint64 textureId);
NvFlowSampler*(NV_FLOW_ABI* createSampler)(NvFlowContext* context, const NvFlowSamplerDesc* desc);
NvFlowSampler*(NV_FLOW_ABI* getDefaultSampler)(NvFlowContext* context);
void(NV_FLOW_ABI* destroySampler)(NvFlowContext* context, NvFlowSampler* sampler);
NvFlowComputePipeline*(NV_FLOW_ABI* createComputePipeline)(NvFlowContext* context, const NvFlowComputePipelineDesc* desc);
void(NV_FLOW_ABI* destroyComputePipeline)(NvFlowContext* context, NvFlowComputePipeline* pipeline);
void(NV_FLOW_ABI* addPassCompute)(NvFlowContext* context, const NvFlowPassComputeParams* params);
void(NV_FLOW_ABI* addPassCopyBuffer)(NvFlowContext* context, const NvFlowPassCopyBufferParams* params);
void(NV_FLOW_ABI* addPassCopyBufferToTexture)(NvFlowContext* context, const NvFlowPassCopyBufferToTextureParams* params);
void(NV_FLOW_ABI* addPassCopyTextureToBuffer)(NvFlowContext* context, const NvFlowPassCopyTextureToBufferParams* params);
void(NV_FLOW_ABI* addPassCopyTexture)(NvFlowContext* context, const NvFlowPassCopyTextureParams* params);
}NvFlowContextInterface;
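/* Dispatch sketch using only entry points declared above: allocate a transient
   RW target and record one compute pass. The pipeline, descriptor writes, and
   resources table (which would reference the target) are supplied by the
   caller. */
NV_FLOW_INLINE void NvFlowContext_exampleDispatch(
	NvFlowContextInterface* contextInterface, NvFlowContext* context,
	NvFlowComputePipeline* pipeline, const NvFlowTextureDesc* targetDesc,
	NvFlowUint numDescriptorWrites, const NvFlowDescriptorWrite* descriptorWrites,
	const NvFlowResource* resources, NvFlowUint3 gridDim)
{
	NvFlowTextureTransient* target = contextInterface->getTextureTransient(context, targetDesc);
	(void)target; /* in real use, one resources[] entry carries this transient */
	NvFlowPassComputeParams passParams = { 0 };
	passParams.pipeline = pipeline;
	passParams.gridDim = gridDim;
	passParams.numDescriptorWrites = numDescriptorWrites;
	passParams.descriptorWrites = descriptorWrites;
	passParams.resources = resources;
	passParams.debugLabel = "NvFlowContext_exampleDispatch";
	contextInterface->addPassCompute(context, &passParams);
}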
#define NV_FLOW_REFLECT_TYPE NvFlowContextInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(getContextConfig, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getCurrentFrame, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getLastFrameCompleted, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getCurrentGlobalFrame, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getLastGlobalFrameCompleted, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getLogPrint, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(executeTasks, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getBufferTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(registerBufferAsTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(enqueueAcquireBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getAcquiredBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(mapBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(unmapBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getBufferTransientById, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getTextureTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(registerTextureAsTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(enqueueAcquireTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getAcquiredTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getTextureTransientById, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createSampler, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getDefaultSampler, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroySampler, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createComputePipeline, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyComputePipeline, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassCompute, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassCopyBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassCopyBufferToTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassCopyTextureToBuffer, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(addPassCopyTexture, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
#endif
NVIDIA-Omniverse/PhysX/flow/include/nvflow/shaders/PNanoVDBWrite.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/*!
\file PNanoVDBWrite.h
\author Andrew Reidmeyer
\brief This file is a portable (e.g. pointer-less) C99/GLSL/HLSL port
of NanoVDBWrite.h, which is compatible with most graphics APIs.
*/
#ifndef NANOVDB_PNANOVDB_WRITE_H_HAS_BEEN_INCLUDED
#define NANOVDB_PNANOVDB_WRITE_H_HAS_BEEN_INCLUDED
#if defined(PNANOVDB_BUF_C)
#if defined(PNANOVDB_ADDRESS_32)
PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint32_t byte_offset, uint32_t value)
{
uint32_t wordaddress = (byte_offset >> 2u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
if (wordaddress < buf.size_in_words)
{
buf.data[wordaddress] = value;
}
#else
buf.data[wordaddress] = value;
#endif
}
PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint32_t byte_offset, uint64_t value)
{
uint64_t* data64 = (uint64_t*)buf.data;
uint32_t wordaddress64 = (byte_offset >> 3u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
uint64_t size_in_words64 = buf.size_in_words >> 1u;
if (wordaddress64 < size_in_words64)
{
data64[wordaddress64] = value;
}
#else
data64[wordaddress64] = value;
#endif
}
#elif defined(PNANOVDB_ADDRESS_64)
PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint64_t byte_offset, uint32_t value)
{
uint64_t wordaddress = (byte_offset >> 2u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
if (wordaddress < buf.size_in_words)
{
buf.data[wordaddress] = value;
}
#else
buf.data[wordaddress] = value;
#endif
}
PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint64_t byte_offset, uint64_t value)
{
uint64_t* data64 = (uint64_t*)buf.data;
uint64_t wordaddress64 = (byte_offset >> 3u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
uint64_t size_in_words64 = buf.size_in_words >> 1u;
if (wordaddress64 < size_in_words64)
{
data64[wordaddress64] = value;
}
#else
data64[wordaddress64] = value;
#endif
}
#endif
#endif
#if defined(PNANOVDB_C)
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return *((pnanovdb_uint32_t*)(&v)); }
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { return *((pnanovdb_uint64_t*)(&v)); }
#elif defined(PNANOVDB_HLSL)
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return asuint(v); }
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { uint2 ret; asuint(v, ret.x, ret.y); return ret; }
#elif defined(PNANOVDB_GLSL)
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return floatBitsToUint(v); }
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { return unpackDouble2x32(v); }
#endif
PNANOVDB_FORCE_INLINE void pnanovdb_write_uint32(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint32_t value)
{
pnanovdb_buf_write_uint32(buf, address.byte_offset, value);
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_uint64(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint64_t value)
{
pnanovdb_buf_write_uint64(buf, address.byte_offset, value);
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_int32(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_int32_t value)
{
pnanovdb_write_uint32(buf, address, pnanovdb_int32_as_uint32(value));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_int64(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_int64_t value)
{
pnanovdb_buf_write_uint64(buf, address.byte_offset, pnanovdb_int64_as_uint64(value));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_float(pnanovdb_buf_t buf, pnanovdb_address_t address, float value)
{
pnanovdb_write_uint32(buf, address, pnanovdb_float_as_uint32(value));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_double(pnanovdb_buf_t buf, pnanovdb_address_t address, double value)
{
pnanovdb_write_uint64(buf, address, pnanovdb_double_as_uint64(value));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_coord(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) value)
{
pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 0u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).x));
pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 4u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).y));
pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 8u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).z));
}
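/* Write-side mirror of the PNanoVDB read API: a sketch that stores one float
   voxel value and a coordinate at caller-computed addresses (the address math
   comes from PNanoVDB.h and is assumed here). */
PNANOVDB_FORCE_INLINE void pnanovdb_example_write_voxel(pnanovdb_buf_t buf,
	pnanovdb_address_t value_addr, pnanovdb_address_t coord_addr,
	float value, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
	pnanovdb_write_float(buf, value_addr, value);
	pnanovdb_write_coord(buf, coord_addr, ijk);
}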
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_leaf) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF), node_offset_leaf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_lower) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER), node_offset_lower);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_upper) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER), node_offset_upper);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_root) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT), node_offset_root);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_leaf) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LEAF), node_count_leaf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_lower) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LOWER), node_count_lower);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_upper) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_UPPER), node_count_upper);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_leaf) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LEAF), tile_count_leaf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_lower) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LOWER), tile_count_lower);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_upper) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_UPPER), tile_count_upper);
}
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_voxel_count(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t voxel_count) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_VOXEL_COUNT), voxel_count);
}
PNANOVDB_FORCE_INLINE void pnanovdb_root_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) {
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MIN), bbox_min);
}
PNANOVDB_FORCE_INLINE void pnanovdb_root_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) {
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MAX), bbox_max);
}
PNANOVDB_FORCE_INLINE void pnanovdb_root_set_tile_count(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, pnanovdb_uint32_t tile_count) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_TABLE_SIZE), tile_count);
}
PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_key(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_uint64_t key) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_KEY), key);
}
PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_child(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_int64_t child) {
pnanovdb_write_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_CHILD), child);
}
PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_state(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_uint32_t state) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_STATE), state);
}
PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) {
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MIN), bbox_min);
}
PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) {
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MAX), bbox_max);
}
PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_child_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index, pnanovdb_bool_t value) {
pnanovdb_address_t addr = pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_CHILD_MASK + 4u * (bit_index >> 5u));
pnanovdb_uint32_t valueMask = pnanovdb_read_uint32(buf, addr);
    if (value) { valueMask |= (1u << (bit_index & 31u)); }
    else { valueMask &= ~(1u << (bit_index & 31u)); }
pnanovdb_write_uint32(buf, addr, valueMask);
}
PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n, pnanovdb_int64_t child)
{
pnanovdb_address_t bufAddress = pnanovdb_upper_get_table_address(grid_type, buf, node, n);
pnanovdb_write_int64(buf, bufAddress, child);
}
PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) {
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MIN), bbox_min);
}
PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) {
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MAX), bbox_max);
}
PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_child_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index, pnanovdb_bool_t value) {
pnanovdb_address_t addr = pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_CHILD_MASK + 4u * (bit_index >> 5u));
pnanovdb_uint32_t valueMask = pnanovdb_read_uint32(buf, addr);
    if (value) { valueMask |= (1u << (bit_index & 31u)); }
    else { valueMask &= ~(1u << (bit_index & 31u)); }
pnanovdb_write_uint32(buf, addr, valueMask);
}
PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n, pnanovdb_int64_t child)
{
pnanovdb_address_t table_address = pnanovdb_lower_get_table_address(grid_type, buf, node, n);
pnanovdb_write_int64(buf, table_address, child);
}
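// Illustrative sketch (not part of the original header): attaching a child to a
// lower interior node means flipping its child-mask bit and storing the child's
// byte offset in the matching table slot (PNanoVDB.h's pnanovdb_lower_get_child
// reads this offset back relative to the node). pnanovdb_example_* and
// PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_C) && defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE void pnanovdb_example_lower_link_child(
    pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf,
    pnanovdb_lower_handle_t node, pnanovdb_uint32_t n, pnanovdb_int64_t child_byte_offset)
{
    pnanovdb_lower_set_child_mask(buf, node, n, PNANOVDB_TRUE);
    pnanovdb_lower_set_table_child(grid_type, buf, node, n, child_byte_offset);
}
#endif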
PNANOVDB_FORCE_INLINE void pnanovdb_leaf_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) {
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_MIN), bbox_min);
}
PNANOVDB_FORCE_INLINE void pnanovdb_leaf_set_bbox_dif_and_flags(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, pnanovdb_uint32_t bbox_dif_and_flags) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS), bbox_dif_and_flags);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_matf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float matf) {
pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATF + 4u * index), matf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_invmatf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float invmatf) {
pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATF + 4u * index), invmatf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_vecf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float vecf) {
pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECF + 4u * index), vecf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_taperf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float taperf) {
    // index is unused: taperf is a single scalar (see pnanovdb_map_t)
    pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERF), taperf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_matd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double matd) {
pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATD + 8u * index), matd);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_invmatd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double invmatd) {
pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATD + 8u * index), invmatd);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_vecd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double vecd) {
pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECD + 8u * index), vecd);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_taperd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double taperd) {
    // index is unused: taperd is a single scalar (see pnanovdb_map_t)
    pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERD), taperd);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_magic(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t magic) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAGIC), magic);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_checksum(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t checksum) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_CHECKSUM), checksum);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_version(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t version) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VERSION), version);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_flags(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t flags) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_FLAGS), flags);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_index(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_index) {
    pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_INDEX), grid_index);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_count) {
    pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_COUNT), grid_count);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t grid_size) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_SIZE), grid_size);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_name(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, pnanovdb_uint32_t grid_name) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_NAME + 4u * index), grid_name);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_world_bbox(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, double world_bbox) {
pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_WORLD_BBOX + 8u * index), world_bbox);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_voxel_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, double voxel_size) {
pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VOXEL_SIZE + 8u * index), voxel_size);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_class(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_class) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_CLASS), grid_class);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_type(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_type) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_TYPE), grid_type);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_blind_metadata_offset(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t blind_metadata_offset) {
pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET), blind_metadata_offset);
}
PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_blind_metadata_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t metadata_count) {
pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT), metadata_count);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_make_version(pnanovdb_uint32_t major, pnanovdb_uint32_t minor, pnanovdb_uint32_t patch)
{
return (major << 21u) | (minor << 10u) | (patch);
}
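// Worked example (illustrative, not part of the original header): the packing
// above allocates 11 bits to major, 11 to minor, and 10 to patch, matching the
// pnanovdb_version_get_* decoders in PNanoVDB.h. For version 32.3.0:
//   (32 << 21) | (3 << 10) | 0 = 67108864 + 3072 = 67111936.
// pnanovdb_example_* and PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_C) && defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_example_version_32_3_0(void)
{
    return pnanovdb_make_version(32u, 3u, 0u); // == 67111936u
}
#endif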
#endif
NVIDIA-Omniverse/PhysX/flow/include/nvflow/shaders/NvFlowShaderTypes.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_SHADER_TYPES_H
#define NV_FLOW_SHADER_TYPES_H
#ifndef NV_FLOW_CPU
#if defined(__cplusplus)
#define NV_FLOW_CPU 1
#endif
#endif
#ifdef NV_FLOW_CPU_SHADER
// For CPU Shader, basic types defined at global scope before shader
#elif defined(NV_FLOW_CPU)
typedef unsigned int NvFlowBool32;
typedef unsigned int NvFlowUint;
typedef unsigned char NvFlowUint8;
typedef unsigned short NvFlowUint16;
typedef unsigned long long NvFlowUint64;
typedef struct NvFlowUint2
{
NvFlowUint x, y;
}NvFlowUint2;
typedef struct NvFlowUint3
{
NvFlowUint x, y, z;
}NvFlowUint3;
typedef struct NvFlowUint4
{
NvFlowUint x, y, z, w;
}NvFlowUint4;
typedef struct NvFlowInt2
{
int x, y;
}NvFlowInt2;
typedef struct NvFlowInt3
{
int x, y, z;
}NvFlowInt3;
typedef struct NvFlowInt4
{
int x, y, z, w;
}NvFlowInt4;
typedef struct NvFlowFloat2
{
float x, y;
}NvFlowFloat2;
typedef struct NvFlowFloat3
{
float x, y, z;
}NvFlowFloat3;
typedef struct NvFlowFloat4
{
float x, y, z, w;
}NvFlowFloat4;
typedef struct NvFlowFloat4x4
{
NvFlowFloat4 x, y, z, w;
}NvFlowFloat4x4;
#else
#define NvFlowUint uint
#define NvFlowUint2 uint2
#define NvFlowUint3 uint3
#define NvFlowUint4 uint4
#define NvFlowInt2 int2
#define NvFlowInt3 int3
#define NvFlowInt4 int4
#define NvFlowFloat2 float2
#define NvFlowFloat3 float3
#define NvFlowFloat4 float4
#define NvFlowFloat4x4 float4x4
#define NvFlowBool32 uint
#endif
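// Illustrative sketch (not part of the original header): building an identity
// NvFlowFloat4x4 with the CPU-side types above. NvFlowExampleIdentity4x4 and
// NV_FLOW_EXAMPLES are hypothetical names; the guard keeps shader compilation
// from ever seeing this.
#if defined(NV_FLOW_CPU) && defined(NV_FLOW_EXAMPLES)
static inline NvFlowFloat4x4 NvFlowExampleIdentity4x4(void)
{
    NvFlowFloat4x4 m = {
        { 1.f, 0.f, 0.f, 0.f },
        { 0.f, 1.f, 0.f, 0.f },
        { 0.f, 0.f, 1.f, 0.f },
        { 0.f, 0.f, 0.f, 1.f }
    };
    return m;
}
#endif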
struct NvFlowSparseLevelParams
{
NvFlowUint3 blockDimLessOne;
NvFlowUint threadsPerBlock;
NvFlowUint3 blockDimBits;
NvFlowUint numLocations;
NvFlowUint3 tableDimLessOne;
NvFlowUint tableDim3;
NvFlowUint tableDimBits_x;
NvFlowUint tableDimBits_xy;
NvFlowUint tableDimBits_z;
NvFlowUint locationOffset;
NvFlowUint allocationOffset;
NvFlowUint newListOffset;
NvFlowUint blockLevelOffsetGlobal;
NvFlowUint blockLevelOffsetLocal;
NvFlowUint layerParamIdxOffset;
NvFlowUint numLayers;
NvFlowUint pad0;
NvFlowUint pad1;
NvFlowUint3 dim;
NvFlowUint maxLocations;
NvFlowFloat3 dimInv;
NvFlowUint numNewLocations;
NvFlowInt4 globalLocationMin;
NvFlowInt4 globalLocationMax;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowSparseLevelParams NvFlowSparseLevelParams;
#endif
struct NvFlowSparseLayerParams
{
NvFlowFloat3 blockSizeWorld;
float blockSizeWorld3;
NvFlowFloat3 blockSizeWorldInv;
int layer;
NvFlowInt4 locationMin;
NvFlowInt4 locationMax;
NvFlowFloat3 worldMin;
NvFlowUint forceClear;
NvFlowFloat3 worldMax;
NvFlowUint forceDisableEmitters;
NvFlowUint numLocations;
float deltaTime;
NvFlowUint forceDisableCoreSimulation;
NvFlowUint gridReset;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowSparseLayerParams NvFlowSparseLayerParams;
#endif
struct NvFlowSparseNanoVdbParams
{
NvFlowUint2 nanovdb_size_without_leaves;
NvFlowUint2 nanovdb_size_with_leaves;
NvFlowUint2 list_tile_offset;
NvFlowUint2 list_upper_offset;
NvFlowUint2 list_lower_offset;
NvFlowUint2 list_leaf_offset;
NvFlowUint2 cache_tile_offset;
NvFlowUint2 cache_upper_offset;
NvFlowUint2 cache_lower_offset;
NvFlowUint2 cache_leaf_offset;
NvFlowUint list_tile_count;
NvFlowUint list_upper_count;
NvFlowUint list_lower_count;
NvFlowUint list_leaf_count;
NvFlowUint cache_tile_count;
NvFlowUint cache_upper_count;
NvFlowUint cache_lower_count;
NvFlowUint cache_leaf_count;
NvFlowUint2 cache_size;
NvFlowUint grid_count;
NvFlowUint grid_type;
NvFlowUint3 subGridDimLessOne;
NvFlowUint pad3;
NvFlowUint3 subGridDimBits;
NvFlowUint pad4;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowSparseNanoVdbParams NvFlowSparseNanoVdbParams;
#endif
#endif
NVIDIA-Omniverse/PhysX/flow/include/nvflow/shaders/PNanoVDB.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/*!
\file PNanoVDB.h
\author Andrew Reidmeyer
\brief This file is a portable (e.g. pointer-less) C99/GLSL/HLSL port
of NanoVDB.h, which is compatible with most graphics APIs.
*/
#ifndef NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED
#define NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED
// ------------------------------------------------ Configuration -----------------------------------------------------------
// platforms
//#define PNANOVDB_C
//#define PNANOVDB_HLSL
//#define PNANOVDB_GLSL
// addressing mode
// PNANOVDB_ADDRESS_32
// PNANOVDB_ADDRESS_64
#if defined(PNANOVDB_C)
#ifndef PNANOVDB_ADDRESS_32
#define PNANOVDB_ADDRESS_64
#endif
#elif defined(PNANOVDB_HLSL)
#ifndef PNANOVDB_ADDRESS_64
#define PNANOVDB_ADDRESS_32
#endif
#elif defined(PNANOVDB_GLSL)
#ifndef PNANOVDB_ADDRESS_64
#define PNANOVDB_ADDRESS_32
#endif
#endif
// bounds checking
//#define PNANOVDB_BUF_BOUNDS_CHECK
// enable HDDA by default on HLSL/GLSL, make explicit on C
#if defined(PNANOVDB_C)
//#define PNANOVDB_HDDA
#ifdef PNANOVDB_HDDA
#ifndef PNANOVDB_CMATH
#define PNANOVDB_CMATH
#endif
#endif
#elif defined(PNANOVDB_HLSL)
#define PNANOVDB_HDDA
#elif defined(PNANOVDB_GLSL)
#define PNANOVDB_HDDA
#endif
#ifdef PNANOVDB_CMATH
#include <math.h>
#endif
// ------------------------------------------------ Buffer -----------------------------------------------------------
#if defined(PNANOVDB_BUF_CUSTOM)
// NOP
#elif defined(PNANOVDB_C)
#define PNANOVDB_BUF_C
#elif defined(PNANOVDB_HLSL)
#define PNANOVDB_BUF_HLSL
#elif defined(PNANOVDB_GLSL)
#define PNANOVDB_BUF_GLSL
#endif
#if defined(PNANOVDB_BUF_C)
#include <stdint.h>
#if defined(_WIN32)
#define PNANOVDB_BUF_FORCE_INLINE static inline __forceinline
#else
#define PNANOVDB_BUF_FORCE_INLINE static inline __attribute__((always_inline))
#endif
typedef struct pnanovdb_buf_t
{
uint32_t* data;
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
uint64_t size_in_words;
#endif
}pnanovdb_buf_t;
PNANOVDB_BUF_FORCE_INLINE pnanovdb_buf_t pnanovdb_make_buf(uint32_t* data, uint64_t size_in_words)
{
pnanovdb_buf_t ret;
ret.data = data;
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
ret.size_in_words = size_in_words;
#endif
return ret;
}
#if defined(PNANOVDB_ADDRESS_32)
PNANOVDB_BUF_FORCE_INLINE uint32_t pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint32_t byte_offset)
{
uint32_t wordaddress = (byte_offset >> 2u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
return wordaddress < buf.size_in_words ? buf.data[wordaddress] : 0u;
#else
return buf.data[wordaddress];
#endif
}
PNANOVDB_BUF_FORCE_INLINE uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint32_t byte_offset)
{
uint64_t* data64 = (uint64_t*)buf.data;
uint32_t wordaddress64 = (byte_offset >> 3u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
uint64_t size_in_words64 = buf.size_in_words >> 1u;
return wordaddress64 < size_in_words64 ? data64[wordaddress64] : 0llu;
#else
return data64[wordaddress64];
#endif
}
#elif defined(PNANOVDB_ADDRESS_64)
PNANOVDB_BUF_FORCE_INLINE uint32_t pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint64_t byte_offset)
{
uint64_t wordaddress = (byte_offset >> 2u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
return wordaddress < buf.size_in_words ? buf.data[wordaddress] : 0u;
#else
return buf.data[wordaddress];
#endif
}
PNANOVDB_BUF_FORCE_INLINE uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint64_t byte_offset)
{
uint64_t* data64 = (uint64_t*)buf.data;
uint64_t wordaddress64 = (byte_offset >> 3u);
#ifdef PNANOVDB_BUF_BOUNDS_CHECK
uint64_t size_in_words64 = buf.size_in_words >> 1u;
return wordaddress64 < size_in_words64 ? data64[wordaddress64] : 0llu;
#else
return data64[wordaddress64];
#endif
}
#endif
typedef uint32_t pnanovdb_grid_type_t;
#define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn
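// Illustrative sketch (not part of the original header): wrapping a plain word
// array in the C buffer abstraction and reading it back. This sits inside the
// PNANOVDB_BUF_C branch, so only C builds see it. pnanovdb_example_* and
// PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_EXAMPLES)
PNANOVDB_BUF_FORCE_INLINE uint32_t pnanovdb_example_buf_roundtrip(void)
{
    uint32_t words[4] = { 0xdeadbeefu, 1u, 2u, 3u };
    pnanovdb_buf_t buf = pnanovdb_make_buf(words, 4u);
    return pnanovdb_buf_read_uint32(buf, 8u); // byte offset 8 -> words[2] == 2u
}
#endif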
#elif defined(PNANOVDB_BUF_HLSL)
#if defined(PNANOVDB_ADDRESS_32)
#define pnanovdb_buf_t StructuredBuffer<uint>
uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint byte_offset)
{
return buf[(byte_offset >> 2u)];
}
uint2 pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint byte_offset)
{
uint2 ret;
ret.x = pnanovdb_buf_read_uint32(buf, byte_offset + 0u);
ret.y = pnanovdb_buf_read_uint32(buf, byte_offset + 4u);
return ret;
}
#elif defined(PNANOVDB_ADDRESS_64)
#define pnanovdb_buf_t StructuredBuffer<uint>
uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint64_t byte_offset)
{
return buf[uint(byte_offset >> 2u)];
}
uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint64_t byte_offset)
{
uint64_t ret;
ret = pnanovdb_buf_read_uint32(buf, byte_offset + 0u);
ret = ret + (uint64_t(pnanovdb_buf_read_uint32(buf, byte_offset + 4u)) << 32u);
return ret;
}
#endif
#define pnanovdb_grid_type_t uint
#define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn
#elif defined(PNANOVDB_BUF_GLSL)
struct pnanovdb_buf_t
{
    uint unused; // GLSL requires at least one member; data is read from the global pnanovdb_buf_data instead
};
uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint byte_offset)
{
return pnanovdb_buf_data[(byte_offset >> 2u)];
}
uvec2 pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint byte_offset)
{
uvec2 ret;
ret.x = pnanovdb_buf_read_uint32(buf, byte_offset + 0u);
ret.y = pnanovdb_buf_read_uint32(buf, byte_offset + 4u);
return ret;
}
#define pnanovdb_grid_type_t uint
#define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn
#endif
// ------------------------------------------------ Basic Types -----------------------------------------------------------
// force inline
#if defined(PNANOVDB_C)
#if defined(_WIN32)
#define PNANOVDB_FORCE_INLINE static inline __forceinline
#else
#define PNANOVDB_FORCE_INLINE static inline __attribute__((always_inline))
#endif
#elif defined(PNANOVDB_HLSL)
#define PNANOVDB_FORCE_INLINE
#elif defined(PNANOVDB_GLSL)
#define PNANOVDB_FORCE_INLINE
#endif
// struct typedef, static const, inout
#if defined(PNANOVDB_C)
#define PNANOVDB_STRUCT_TYPEDEF(X) typedef struct X X;
#define PNANOVDB_STATIC_CONST static const
#define PNANOVDB_INOUT(X) X*
#define PNANOVDB_IN(X) const X*
#define PNANOVDB_DEREF(X) (*X)
#define PNANOVDB_REF(X) &X
#elif defined(PNANOVDB_HLSL)
#define PNANOVDB_STRUCT_TYPEDEF(X)
#define PNANOVDB_STATIC_CONST static const
#define PNANOVDB_INOUT(X) inout X
#define PNANOVDB_IN(X) X
#define PNANOVDB_DEREF(X) X
#define PNANOVDB_REF(X) X
#elif defined(PNANOVDB_GLSL)
#define PNANOVDB_STRUCT_TYPEDEF(X)
#define PNANOVDB_STATIC_CONST const
#define PNANOVDB_INOUT(X) inout X
#define PNANOVDB_IN(X) X
#define PNANOVDB_DEREF(X) X
#define PNANOVDB_REF(X) X
#endif
// basic types, type conversion
#if defined(PNANOVDB_C)
#define PNANOVDB_NATIVE_64
#include <stdint.h>
#if !defined(PNANOVDB_MEMCPY_CUSTOM)
#include <string.h>
#define pnanovdb_memcpy memcpy
#endif
typedef uint32_t pnanovdb_uint32_t;
typedef int32_t pnanovdb_int32_t;
typedef int32_t pnanovdb_bool_t;
#define PNANOVDB_FALSE 0
#define PNANOVDB_TRUE 1
typedef uint64_t pnanovdb_uint64_t;
typedef int64_t pnanovdb_int64_t;
typedef struct pnanovdb_coord_t
{
pnanovdb_int32_t x, y, z;
}pnanovdb_coord_t;
typedef struct pnanovdb_vec3_t
{
float x, y, z;
}pnanovdb_vec3_t;
PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return (pnanovdb_int32_t)v; }
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return (pnanovdb_int64_t)v; }
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return (pnanovdb_uint64_t)v; }
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return (pnanovdb_uint32_t)v; }
PNANOVDB_FORCE_INLINE float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { float vf; pnanovdb_memcpy(&vf, &v, sizeof(vf)); return vf; }
PNANOVDB_FORCE_INLINE double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { double vf; pnanovdb_memcpy(&vf, &v, sizeof(vf)); return vf; }
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return (pnanovdb_uint32_t)v; }
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return (pnanovdb_uint32_t)(v >> 32u); }
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return ((pnanovdb_uint64_t)x) | (((pnanovdb_uint64_t)y) << 32u); }
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return ((pnanovdb_uint64_t)x); }
PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return a == b; }
PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a == 0; }
#ifdef PNANOVDB_CMATH
PNANOVDB_FORCE_INLINE float pnanovdb_floor(float v) { return floorf(v); }
#endif
PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return (pnanovdb_int32_t)v; }
PNANOVDB_FORCE_INLINE float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return (float)v; }
PNANOVDB_FORCE_INLINE float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return (float)v; }
PNANOVDB_FORCE_INLINE float pnanovdb_min(float a, float b) { return a < b ? a : b; }
PNANOVDB_FORCE_INLINE float pnanovdb_max(float a, float b) { return a > b ? a : b; }
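// Illustrative sketch (not part of the original header): the *_as_* helpers
// above are bit reinterpretations, not numeric conversions; 0x3f800000 is the
// IEEE-754 encoding of 1.0f, while pnanovdb_float_to_int32 truncates
// numerically. pnanovdb_example_* and PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_example_bitcast(void)
{
    return pnanovdb_uint32_as_float(0x3f800000u) == 1.f && // bit pattern of 1.0f
           pnanovdb_float_to_int32(1.5f) == 1;             // numeric truncation
}
#endif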
#elif defined(PNANOVDB_HLSL)
typedef uint pnanovdb_uint32_t;
typedef int pnanovdb_int32_t;
typedef bool pnanovdb_bool_t;
#define PNANOVDB_FALSE false
#define PNANOVDB_TRUE true
typedef int3 pnanovdb_coord_t;
typedef float3 pnanovdb_vec3_t;
pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return int(v); }
pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return uint(v); }
float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { return asfloat(v); }
float pnanovdb_floor(float v) { return floor(v); }
pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return int(v); }
float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return float(v); }
float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return float(v); }
float pnanovdb_min(float a, float b) { return min(a, b); }
float pnanovdb_max(float a, float b) { return max(a, b); }
#if defined(PNANOVDB_ADDRESS_32)
typedef uint2 pnanovdb_uint64_t;
typedef int2 pnanovdb_int64_t;
pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return int2(v); }
pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uint2(v); }
double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return asdouble(v.x, v.y); }
pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return v.x; }
pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return v.y; }
pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uint2(x, y); }
pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uint2(x, 0); }
bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return (a.x == b.x) && (a.y == b.y); }
bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a.x == 0 && a.y == 0; }
#else
typedef uint64_t pnanovdb_uint64_t;
typedef int64_t pnanovdb_int64_t;
pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return int64_t(v); }
pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uint64_t(v); }
double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return asdouble(uint(v), uint(v >> 32u)); }
pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return uint(v); }
pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return uint(v >> 32u); }
pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uint64_t(x) + (uint64_t(y) << 32u); }
pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uint64_t(x); }
bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return a == b; }
bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a == 0; }
#endif
#elif defined(PNANOVDB_GLSL)
#define pnanovdb_uint32_t uint
#define pnanovdb_int32_t int
#define pnanovdb_bool_t bool
#define PNANOVDB_FALSE false
#define PNANOVDB_TRUE true
#define pnanovdb_uint64_t uvec2
#define pnanovdb_int64_t ivec2
#define pnanovdb_coord_t ivec3
#define pnanovdb_vec3_t vec3
pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return int(v); }
pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return ivec2(v); }
pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uvec2(v); }
pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return uint(v); }
float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { return uintBitsToFloat(v); }
double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return packDouble2x32(uvec2(v.x, v.y)); }
pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return v.x; }
pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return v.y; }
pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uvec2(x, y); }
pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uvec2(x, 0); }
bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return (a.x == b.x) && (a.y == b.y); }
bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a.x == 0 && a.y == 0; }
float pnanovdb_floor(float v) { return floor(v); }
pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return int(v); }
float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return float(v); }
float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return float(v); }
float pnanovdb_min(float a, float b) { return min(a, b); }
float pnanovdb_max(float a, float b) { return max(a, b); }
#endif
// ------------------------------------------------ Coord/Vec3 Utilities -----------------------------------------------------------
#if defined(PNANOVDB_C)
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_uniform(float a)
{
pnanovdb_vec3_t v;
v.x = a;
v.y = a;
v.z = a;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_add(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b)
{
pnanovdb_vec3_t v;
v.x = a.x + b.x;
v.y = a.y + b.y;
v.z = a.z + b.z;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_sub(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b)
{
pnanovdb_vec3_t v;
v.x = a.x - b.x;
v.y = a.y - b.y;
v.z = a.z - b.z;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_mul(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b)
{
pnanovdb_vec3_t v;
v.x = a.x * b.x;
v.y = a.y * b.y;
v.z = a.z * b.z;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_div(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b)
{
pnanovdb_vec3_t v;
v.x = a.x / b.x;
v.y = a.y / b.y;
v.z = a.z / b.z;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_min(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b)
{
pnanovdb_vec3_t v;
v.x = a.x < b.x ? a.x : b.x;
v.y = a.y < b.y ? a.y : b.y;
v.z = a.z < b.z ? a.z : b.z;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_max(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b)
{
pnanovdb_vec3_t v;
v.x = a.x > b.x ? a.x : b.x;
v.y = a.y > b.y ? a.y : b.y;
v.z = a.z > b.z ? a.z : b.z;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_coord_to_vec3(const pnanovdb_coord_t coord)
{
pnanovdb_vec3_t v;
v.x = pnanovdb_int32_to_float(coord.x);
v.y = pnanovdb_int32_to_float(coord.y);
v.z = pnanovdb_int32_to_float(coord.z);
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_coord_uniform(const pnanovdb_int32_t a)
{
pnanovdb_coord_t v;
v.x = a;
v.y = a;
v.z = a;
return v;
}
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b)
{
pnanovdb_coord_t v;
v.x = a.x + b.x;
v.y = a.y + b.y;
v.z = a.z + b.z;
return v;
}
#elif defined(PNANOVDB_HLSL)
pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) { return float3(a, a, a); }
pnanovdb_vec3_t pnanovdb_vec3_add(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a + b; }
pnanovdb_vec3_t pnanovdb_vec3_sub(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a - b; }
pnanovdb_vec3_t pnanovdb_vec3_mul(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a * b; }
pnanovdb_vec3_t pnanovdb_vec3_div(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a / b; }
pnanovdb_vec3_t pnanovdb_vec3_min(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return min(a, b); }
pnanovdb_vec3_t pnanovdb_vec3_max(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return max(a, b); }
pnanovdb_vec3_t pnanovdb_coord_to_vec3(pnanovdb_coord_t coord) { return float3(coord); }
pnanovdb_coord_t pnanovdb_coord_uniform(pnanovdb_int32_t a) { return int3(a, a, a); }
pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) { return a + b; }
#elif defined(PNANOVDB_GLSL)
pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) { return vec3(a, a, a); }
pnanovdb_vec3_t pnanovdb_vec3_add(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a + b; }
pnanovdb_vec3_t pnanovdb_vec3_sub(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a - b; }
pnanovdb_vec3_t pnanovdb_vec3_mul(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a * b; }
pnanovdb_vec3_t pnanovdb_vec3_div(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a / b; }
pnanovdb_vec3_t pnanovdb_vec3_min(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return min(a, b); }
pnanovdb_vec3_t pnanovdb_vec3_max(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return max(a, b); }
pnanovdb_vec3_t pnanovdb_coord_to_vec3(const pnanovdb_coord_t coord) { return vec3(coord); }
pnanovdb_coord_t pnanovdb_coord_uniform(pnanovdb_int32_t a) { return ivec3(a, a, a); }
pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) { return a + b; }
#endif
// ------------------------------------------------ Address Type -----------------------------------------------------------
#if defined(PNANOVDB_ADDRESS_32)
struct pnanovdb_address_t
{
pnanovdb_uint32_t byte_offset;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_address_t)
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
pnanovdb_address_t ret = address;
ret.byte_offset += byte_offset;
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_neg(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
pnanovdb_address_t ret = address;
ret.byte_offset -= byte_offset;
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_product(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset, pnanovdb_uint32_t multiplier)
{
pnanovdb_address_t ret = address;
ret.byte_offset += byte_offset * multiplier;
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset)
{
pnanovdb_address_t ret = address;
// lose high bits on 32-bit
ret.byte_offset += pnanovdb_uint64_low(byte_offset);
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_address_mask(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
return address.byte_offset & mask;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_mask_inv(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
pnanovdb_address_t ret = address;
ret.byte_offset &= (~mask);
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_null()
{
pnanovdb_address_t ret = { 0 };
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_is_null(pnanovdb_address_t address)
{
return address.byte_offset == 0u;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_in_interval(pnanovdb_address_t address, pnanovdb_address_t min_address, pnanovdb_address_t max_address)
{
return address.byte_offset >= min_address.byte_offset && address.byte_offset < max_address.byte_offset;
}
#elif defined(PNANOVDB_ADDRESS_64)
struct pnanovdb_address_t
{
pnanovdb_uint64_t byte_offset;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_address_t)
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
pnanovdb_address_t ret = address;
ret.byte_offset += byte_offset;
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_neg(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
pnanovdb_address_t ret = address;
ret.byte_offset -= byte_offset;
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_product(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset, pnanovdb_uint32_t multiplier)
{
pnanovdb_address_t ret = address;
ret.byte_offset += pnanovdb_uint32_as_uint64_low(byte_offset) * pnanovdb_uint32_as_uint64_low(multiplier);
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset)
{
pnanovdb_address_t ret = address;
ret.byte_offset += byte_offset;
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_address_mask(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
return pnanovdb_uint64_low(address.byte_offset) & mask;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_mask_inv(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
pnanovdb_address_t ret = address;
ret.byte_offset &= (~pnanovdb_uint32_as_uint64_low(mask));
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_null()
{
pnanovdb_address_t ret = { 0 };
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_is_null(pnanovdb_address_t address)
{
return address.byte_offset == 0llu;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_in_interval(pnanovdb_address_t address, pnanovdb_address_t min_address, pnanovdb_address_t max_address)
{
return address.byte_offset >= min_address.byte_offset && address.byte_offset < max_address.byte_offset;
}
#endif
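// Illustrative sketch (not part of the original header): addresses behave the
// same in both addressing modes, and mask/mask_inv give the usual
// round-down-to-alignment split of a byte offset. pnanovdb_example_* and
// PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_C) && defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_example_address_split(void)
{
    pnanovdb_address_t a = pnanovdb_address_offset(pnanovdb_address_null(), 13u);
    pnanovdb_uint32_t lo = pnanovdb_address_mask(a, 3u);        // 13 & 3 == 1
    pnanovdb_address_t base = pnanovdb_address_mask_inv(a, 3u); // byte offset 12
    return lo == 1u && pnanovdb_address_mask(base, 3u) == 0u;
}
#endif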
// ------------------------------------------------ High Level Buffer Read -----------------------------------------------------------
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_read_uint32(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
return pnanovdb_buf_read_uint32(buf, address.byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_read_uint64(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
return pnanovdb_buf_read_uint64(buf, address.byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_read_int32(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
return pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, address));
}
PNANOVDB_FORCE_INLINE float pnanovdb_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
return pnanovdb_uint32_as_float(pnanovdb_read_uint32(buf, address));
}
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_read_int64(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
return pnanovdb_uint64_as_int64(pnanovdb_read_uint64(buf, address));
}
PNANOVDB_FORCE_INLINE double pnanovdb_read_double(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
return pnanovdb_uint64_as_double(pnanovdb_read_uint64(buf, address));
}
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_read_coord(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
pnanovdb_coord_t ret;
ret.x = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 0u)));
ret.y = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 4u)));
ret.z = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 8u)));
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_read_bit(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint32_t bit_offset)
{
pnanovdb_address_t word_address = pnanovdb_address_mask_inv(address, 3u);
pnanovdb_uint32_t bit_index = (pnanovdb_address_mask(address, 3u) << 3u) + bit_offset;
pnanovdb_uint32_t value_word = pnanovdb_buf_read_uint32(buf, word_address.byte_offset);
return ((value_word >> bit_index) & 1) != 0u;
}
#if defined(PNANOVDB_C)
// note: the C path returns the raw 16-bit pattern; the HLSL/GLSL paths below decode to float
PNANOVDB_FORCE_INLINE short pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address);
return (short)(raw >> (pnanovdb_address_mask(address, 2) << 3));
}
#elif defined(PNANOVDB_HLSL)
PNANOVDB_FORCE_INLINE float pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address);
return f16tof32(raw >> (pnanovdb_address_mask(address, 2) << 3));
}
#elif defined(PNANOVDB_GLSL)
PNANOVDB_FORCE_INLINE float pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address);
return unpackHalf2x16(raw >> (pnanovdb_address_mask(address, 2) << 3)).x;
}
#endif
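// Illustrative sketch (not part of the original header): the typed readers
// compose the raw buffer reads above; a coord, for instance, is three
// consecutive int32 words. Assumes the C buffer backend; pnanovdb_example_* and
// PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_C) && defined(PNANOVDB_BUF_C) && defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_example_read_coord(void)
{
    uint32_t words[3] = { 1u, 2u, 3u };
    pnanovdb_buf_t buf = pnanovdb_make_buf(words, 3u);
    return pnanovdb_read_coord(buf, pnanovdb_address_null()); // {1, 2, 3}
}
#endif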
// ------------------------------------------------ Core Structures -----------------------------------------------------------
#define PNANOVDB_MAGIC_NUMBER 0x304244566f6e614eUL // "NanoVDB0" in hex - little endian (uint64_t)
#define PNANOVDB_MAJOR_VERSION_NUMBER 32 // reflects changes to the ABI
#define PNANOVDB_MINOR_VERSION_NUMBER 3 // reflects changes to the API but not ABI
#define PNANOVDB_PATCH_VERSION_NUMBER 0 // reflects bug-fixes with no ABI or API changes
#define PNANOVDB_GRID_TYPE_UNKNOWN 0
#define PNANOVDB_GRID_TYPE_FLOAT 1
#define PNANOVDB_GRID_TYPE_DOUBLE 2
#define PNANOVDB_GRID_TYPE_INT16 3
#define PNANOVDB_GRID_TYPE_INT32 4
#define PNANOVDB_GRID_TYPE_INT64 5
#define PNANOVDB_GRID_TYPE_VEC3F 6
#define PNANOVDB_GRID_TYPE_VEC3D 7
#define PNANOVDB_GRID_TYPE_MASK 8
#define PNANOVDB_GRID_TYPE_HALF 9
#define PNANOVDB_GRID_TYPE_UINT32 10
#define PNANOVDB_GRID_TYPE_BOOLEAN 11
#define PNANOVDB_GRID_TYPE_RGBA8 12
#define PNANOVDB_GRID_TYPE_FP4 13
#define PNANOVDB_GRID_TYPE_FP8 14
#define PNANOVDB_GRID_TYPE_FP16 15
#define PNANOVDB_GRID_TYPE_FPN 16
#define PNANOVDB_GRID_TYPE_VEC4F 17
#define PNANOVDB_GRID_TYPE_VEC4D 18
#define PNANOVDB_GRID_TYPE_END 19
#define PNANOVDB_GRID_CLASS_UNKNOWN 0
#define PNANOVDB_GRID_CLASS_LEVEL_SET 1 // narrow band levelset, e.g. SDF
#define PNANOVDB_GRID_CLASS_FOG_VOLUME 2 // fog volume, e.g. density
#define PNANOVDB_GRID_CLASS_STAGGERED 3 // staggered MAC grid, e.g. velocity
#define PNANOVDB_GRID_CLASS_POINT_INDEX 4 // point index grid
#define PNANOVDB_GRID_CLASS_POINT_DATA 5 // point data grid
#define PNANOVDB_GRID_CLASS_TOPOLOGY 6 // grid with active states only (no values)
#define PNANOVDB_GRID_CLASS_VOXEL_VOLUME 7 // volume of geometric cubes, e.g. minecraft
#define PNANOVDB_GRID_CLASS_END 8
#define PNANOVDB_GRID_FLAGS_HAS_LONG_GRID_NAME (1 << 0)
#define PNANOVDB_GRID_FLAGS_HAS_BBOX (1 << 1)
#define PNANOVDB_GRID_FLAGS_HAS_MIN_MAX (1 << 2)
#define PNANOVDB_GRID_FLAGS_HAS_AVERAGE (1 << 3)
#define PNANOVDB_GRID_FLAGS_HAS_STD_DEVIATION (1 << 4)
#define PNANOVDB_GRID_FLAGS_IS_BREADTH_FIRST (1 << 5)
#define PNANOVDB_GRID_FLAGS_END (1 << 6)
#define PNANOVDB_LEAF_TYPE_DEFAULT 0
#define PNANOVDB_LEAF_TYPE_LITE 1
#define PNANOVDB_LEAF_TYPE_FP 2
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_value_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 96, 192, 0, 16, 32, 1, 32, 4, 8, 16, 0, 128, 256 };
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_table_strides_bits[PNANOVDB_GRID_TYPE_END] = { 64, 64, 64, 64, 64, 64, 128, 192, 64, 64, 64, 64, 64, 64, 64, 64, 64, 128, 256 };
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_minmax_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 96, 192, 8, 16, 32, 8, 32, 32, 32, 32, 32, 128, 256 };
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_minmax_aligns_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 32, 64, 8, 16, 32, 8, 32, 32, 32, 32, 32, 32, 64 };
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_stat_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 32, 32, 64, 32, 64, 8, 32, 32, 8, 32, 32, 32, 32, 32, 32, 64 };
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_leaf_type[PNANOVDB_GRID_TYPE_END] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 2, 2, 2, 2, 0, 0 };
struct pnanovdb_map_t
{
float matf[9];
float invmatf[9];
float vecf[3];
float taperf;
double matd[9];
double invmatd[9];
double vecd[3];
double taperd;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_map_t)
struct pnanovdb_map_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_map_handle_t)
#define PNANOVDB_MAP_SIZE 264
#define PNANOVDB_MAP_OFF_MATF 0
#define PNANOVDB_MAP_OFF_INVMATF 36
#define PNANOVDB_MAP_OFF_VECF 72
#define PNANOVDB_MAP_OFF_TAPERF 84
#define PNANOVDB_MAP_OFF_MATD 88
#define PNANOVDB_MAP_OFF_INVMATD 160
#define PNANOVDB_MAP_OFF_VECD 232
#define PNANOVDB_MAP_OFF_TAPERD 256
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_matf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATF + 4u * index));
}
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_invmatf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATF + 4u * index));
}
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_vecf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECF + 4u * index));
}
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_taperf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    // index is unused: taperf is a single scalar (see pnanovdb_map_t)
    return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERF));
}
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_matd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATD + 8u * index));
}
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_invmatd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATD + 8u * index));
}
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_vecd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECD + 8u * index));
}
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_taperd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    // index is unused: taperd is a single scalar (see pnanovdb_map_t)
    return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERD));
}
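// Illustrative sketch (not part of the original header): applying the
// single-precision affine map to an index-space position, mirroring the
// row-major 3x3-plus-translation convention of pnanovdb_map_apply in the full
// header. pnanovdb_example_* and PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_C) && defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_example_map_apply(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, pnanovdb_vec3_t p)
{
    pnanovdb_vec3_t dst;
    dst.x = p.x * pnanovdb_map_get_matf(buf, map, 0u) + p.y * pnanovdb_map_get_matf(buf, map, 1u) + p.z * pnanovdb_map_get_matf(buf, map, 2u) + pnanovdb_map_get_vecf(buf, map, 0u);
    dst.y = p.x * pnanovdb_map_get_matf(buf, map, 3u) + p.y * pnanovdb_map_get_matf(buf, map, 4u) + p.z * pnanovdb_map_get_matf(buf, map, 5u) + pnanovdb_map_get_vecf(buf, map, 1u);
    dst.z = p.x * pnanovdb_map_get_matf(buf, map, 6u) + p.y * pnanovdb_map_get_matf(buf, map, 7u) + p.z * pnanovdb_map_get_matf(buf, map, 8u) + pnanovdb_map_get_vecf(buf, map, 2u);
    return dst;
}
#endif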
struct pnanovdb_grid_t
{
pnanovdb_uint64_t magic; // 8 bytes, 0
pnanovdb_uint64_t checksum; // 8 bytes, 8
pnanovdb_uint32_t version; // 4 bytes, 16
pnanovdb_uint32_t flags; // 4 bytes, 20
pnanovdb_uint32_t grid_index; // 4 bytes, 24
pnanovdb_uint32_t grid_count; // 4 bytes, 28
pnanovdb_uint64_t grid_size; // 8 bytes, 32
pnanovdb_uint32_t grid_name[256 / 4]; // 256 bytes, 40
pnanovdb_map_t map; // 264 bytes, 296
double world_bbox[6]; // 48 bytes, 560
double voxel_size[3]; // 24 bytes, 608
pnanovdb_uint32_t grid_class; // 4 bytes, 632
pnanovdb_uint32_t grid_type; // 4 bytes, 636
pnanovdb_int64_t blind_metadata_offset; // 8 bytes, 640
pnanovdb_uint32_t blind_metadata_count; // 4 bytes, 648
pnanovdb_uint32_t pad[5]; // 20 bytes, 652
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_t)
struct pnanovdb_grid_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_handle_t)
#define PNANOVDB_GRID_SIZE 672
#define PNANOVDB_GRID_OFF_MAGIC 0
#define PNANOVDB_GRID_OFF_CHECKSUM 8
#define PNANOVDB_GRID_OFF_VERSION 16
#define PNANOVDB_GRID_OFF_FLAGS 20
#define PNANOVDB_GRID_OFF_GRID_INDEX 24
#define PNANOVDB_GRID_OFF_GRID_COUNT 28
#define PNANOVDB_GRID_OFF_GRID_SIZE 32
#define PNANOVDB_GRID_OFF_GRID_NAME 40
#define PNANOVDB_GRID_OFF_MAP 296
#define PNANOVDB_GRID_OFF_WORLD_BBOX 560
#define PNANOVDB_GRID_OFF_VOXEL_SIZE 608
#define PNANOVDB_GRID_OFF_GRID_CLASS 632
#define PNANOVDB_GRID_OFF_GRID_TYPE 636
#define PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET 640
#define PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT 648
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_magic(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAGIC));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_checksum(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_CHECKSUM));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_version(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VERSION));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_flags(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_FLAGS));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_index(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_INDEX));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_COUNT));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_grid_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_SIZE));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_name(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_NAME + 4u * index));
}
PNANOVDB_FORCE_INLINE pnanovdb_map_handle_t pnanovdb_grid_get_map(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
pnanovdb_map_handle_t ret;
ret.address = pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAP);
return ret;
}
PNANOVDB_FORCE_INLINE double pnanovdb_grid_get_world_bbox(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_WORLD_BBOX + 8u * index));
}
PNANOVDB_FORCE_INLINE double pnanovdb_grid_get_voxel_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VOXEL_SIZE + 8u * index));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_class(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_CLASS));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_type(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_TYPE));
}
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_grid_get_blind_metadata_offset(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_blind_metadata_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT));
}
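// Illustrative sketch (not part of the original header): the grid name is
// stored as 256 bytes packed into uint32 words; this copies it into a char
// buffer. PNANOVDB_GRID_FLAGS_HAS_LONG_GRID_NAME signals that the full name
// lives elsewhere (in blind metadata). pnanovdb_example_* and
// PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_C) && defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE void pnanovdb_example_grid_name(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, char out_name[256])
{
    pnanovdb_uint32_t i;
    for (i = 0u; i < 256u / 4u; i++)
    {
        pnanovdb_uint32_t word = pnanovdb_grid_get_grid_name(buf, grid, i);
        pnanovdb_memcpy(out_name + 4u * i, &word, 4u);
    }
}
#endif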
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_major(pnanovdb_uint32_t version)
{
return (version >> 21u) & ((1u << 11u) - 1u);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_minor(pnanovdb_uint32_t version)
{
return (version >> 10u) & ((1u << 11u) - 1u);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_patch(pnanovdb_uint32_t version)
{
return version & ((1u << 10u) - 1u);
}
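// Illustrative sketch (not part of the original header): per the comment on
// PNANOVDB_MAJOR_VERSION_NUMBER above, a loaded grid is ABI-compatible only
// when its major version matches this header's. pnanovdb_example_* and
// PNANOVDB_EXAMPLES are hypothetical names.
#if defined(PNANOVDB_C) && defined(PNANOVDB_EXAMPLES)
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_example_version_compatible(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid)
{
    pnanovdb_uint32_t version = pnanovdb_grid_get_version(buf, grid);
    return pnanovdb_version_get_major(version) == PNANOVDB_MAJOR_VERSION_NUMBER;
}
#endif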
struct pnanovdb_gridblindmetadata_t
{
pnanovdb_int64_t byte_offset; // 8 bytes, 0
pnanovdb_uint64_t element_count; // 8 bytes, 8
pnanovdb_uint32_t flags; // 4 bytes, 16
pnanovdb_uint32_t semantic; // 4 bytes, 20
pnanovdb_uint32_t data_class; // 4 bytes, 24
pnanovdb_uint32_t data_type; // 4 bytes, 28
pnanovdb_uint32_t name[256 / 4]; // 256 bytes, 32
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_gridblindmetadata_t)
struct pnanovdb_gridblindmetadata_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_gridblindmetadata_handle_t)
#define PNANOVDB_GRIDBLINDMETADATA_SIZE 288
#define PNANOVDB_GRIDBLINDMETADATA_OFF_BYTE_OFFSET 0
#define PNANOVDB_GRIDBLINDMETADATA_OFF_ELEMENT_COUNT 8
#define PNANOVDB_GRIDBLINDMETADATA_OFF_FLAGS 16
#define PNANOVDB_GRIDBLINDMETADATA_OFF_SEMANTIC 20
#define PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_CLASS 24
#define PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_TYPE 28
#define PNANOVDB_GRIDBLINDMETADATA_OFF_NAME 32
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_gridblindmetadata_get_byte_offset(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) {
return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_BYTE_OFFSET));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_gridblindmetadata_get_element_count(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_ELEMENT_COUNT));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_flags(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_FLAGS));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_semantic(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_SEMANTIC));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_data_class(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_CLASS));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_data_type(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_TYPE));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_name(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p, pnanovdb_uint32_t index) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_NAME + 4u * index));
}
struct pnanovdb_tree_t
{
pnanovdb_uint64_t node_offset_leaf;
pnanovdb_uint64_t node_offset_lower;
pnanovdb_uint64_t node_offset_upper;
pnanovdb_uint64_t node_offset_root;
pnanovdb_uint32_t node_count_leaf;
pnanovdb_uint32_t node_count_lower;
pnanovdb_uint32_t node_count_upper;
pnanovdb_uint32_t tile_count_leaf;
pnanovdb_uint32_t tile_count_lower;
pnanovdb_uint32_t tile_count_upper;
pnanovdb_uint64_t voxel_count;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_tree_t)
struct pnanovdb_tree_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_tree_handle_t)
#define PNANOVDB_TREE_SIZE 64
#define PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF 0
#define PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER 8
#define PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER 16
#define PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT 24
#define PNANOVDB_TREE_OFF_NODE_COUNT_LEAF 32
#define PNANOVDB_TREE_OFF_NODE_COUNT_LOWER 36
#define PNANOVDB_TREE_OFF_NODE_COUNT_UPPER 40
#define PNANOVDB_TREE_OFF_TILE_COUNT_LEAF 44
#define PNANOVDB_TREE_OFF_TILE_COUNT_LOWER 48
#define PNANOVDB_TREE_OFF_TILE_COUNT_UPPER 52
#define PNANOVDB_TREE_OFF_VOXEL_COUNT 56
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LEAF));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LOWER));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_UPPER));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LEAF));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LOWER));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_UPPER));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_voxel_count(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_VOXEL_COUNT));
}
struct pnanovdb_root_t
{
pnanovdb_coord_t bbox_min;
pnanovdb_coord_t bbox_max;
pnanovdb_uint32_t table_size;
pnanovdb_uint32_t pad1; // background can start here
// background, min, max
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_t)
struct pnanovdb_root_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_handle_t)
#define PNANOVDB_ROOT_BASE_SIZE 28
#define PNANOVDB_ROOT_OFF_BBOX_MIN 0
#define PNANOVDB_ROOT_OFF_BBOX_MAX 12
#define PNANOVDB_ROOT_OFF_TABLE_SIZE 24
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_root_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) {
return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MIN));
}
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_root_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) {
return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MAX));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_get_tile_count(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_TABLE_SIZE));
}
struct pnanovdb_root_tile_t
{
pnanovdb_uint64_t key;
pnanovdb_int64_t child; // signed byte offset from root to the child node, 0 means it is a constant tile, so use value
pnanovdb_uint32_t state;
pnanovdb_uint32_t pad1; // value can start here
// value
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_tile_t)
struct pnanovdb_root_tile_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_tile_handle_t)
#define PNANOVDB_ROOT_TILE_BASE_SIZE 20
#define PNANOVDB_ROOT_TILE_OFF_KEY 0
#define PNANOVDB_ROOT_TILE_OFF_CHILD 8
#define PNANOVDB_ROOT_TILE_OFF_STATE 16
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_tile_get_key(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_KEY));
}
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_root_tile_get_child(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) {
return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_CHILD));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_tile_get_state(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_STATE));
}
struct pnanovdb_upper_t
{
pnanovdb_coord_t bbox_min;
pnanovdb_coord_t bbox_max;
pnanovdb_uint64_t flags;
pnanovdb_uint32_t value_mask[1024];
pnanovdb_uint32_t child_mask[1024];
// min, max
// alignas(32) pnanovdb_uint32_t table[];
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_upper_t)
struct pnanovdb_upper_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_upper_handle_t)
#define PNANOVDB_UPPER_TABLE_COUNT 32768
#define PNANOVDB_UPPER_BASE_SIZE 8224
#define PNANOVDB_UPPER_OFF_BBOX_MIN 0
#define PNANOVDB_UPPER_OFF_BBOX_MAX 12
#define PNANOVDB_UPPER_OFF_FLAGS 24
#define PNANOVDB_UPPER_OFF_VALUE_MASK 32
#define PNANOVDB_UPPER_OFF_CHILD_MASK 4128
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_upper_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) {
return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MIN));
}
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_upper_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) {
return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MAX));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_upper_get_flags(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_FLAGS));
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_get_value_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index) {
pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_VALUE_MASK + 4u * (bit_index >> 5u)));
return ((value >> (bit_index & 31u)) & 1) != 0u;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_get_child_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index) {
pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_CHILD_MASK + 4u * (bit_index >> 5u)));
return ((value >> (bit_index & 31u)) & 1) != 0u;
}
struct pnanovdb_lower_t
{
pnanovdb_coord_t bbox_min;
pnanovdb_coord_t bbox_max;
pnanovdb_uint64_t flags;
pnanovdb_uint32_t value_mask[128];
pnanovdb_uint32_t child_mask[128];
// min, max
// alignas(32) pnanovdb_uint32_t table[];
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_lower_t)
struct pnanovdb_lower_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_lower_handle_t)
#define PNANOVDB_LOWER_TABLE_COUNT 4096
#define PNANOVDB_LOWER_BASE_SIZE 1056
#define PNANOVDB_LOWER_OFF_BBOX_MIN 0
#define PNANOVDB_LOWER_OFF_BBOX_MAX 12
#define PNANOVDB_LOWER_OFF_FLAGS 24
#define PNANOVDB_LOWER_OFF_VALUE_MASK 32
#define PNANOVDB_LOWER_OFF_CHILD_MASK 544
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_lower_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) {
return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MIN));
}
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_lower_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) {
return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MAX));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_lower_get_flags(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) {
return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_FLAGS));
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_get_value_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index) {
pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_VALUE_MASK + 4u * (bit_index >> 5u)));
return ((value >> (bit_index & 31u)) & 1) != 0u;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_get_child_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index) {
pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_CHILD_MASK + 4u * (bit_index >> 5u)));
return ((value >> (bit_index & 31u)) & 1) != 0u;
}
struct pnanovdb_leaf_t
{
pnanovdb_coord_t bbox_min;
pnanovdb_uint32_t bbox_dif_and_flags;
pnanovdb_uint32_t value_mask[16];
// min, max
// alignas(32) pnanovdb_uint32_t values[];
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_leaf_t)
struct pnanovdb_leaf_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_leaf_handle_t)
#define PNANOVDB_LEAF_TABLE_COUNT 512
#define PNANOVDB_LEAF_BASE_SIZE 80
#define PNANOVDB_LEAF_OFF_BBOX_MIN 0
#define PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS 12
#define PNANOVDB_LEAF_OFF_VALUE_MASK 16
#define PNANOVDB_LEAF_TABLE_NEG_OFF_BBOX_DIF_AND_FLAGS 84
#define PNANOVDB_LEAF_TABLE_NEG_OFF_MINIMUM 16
#define PNANOVDB_LEAF_TABLE_NEG_OFF_QUANTUM 12
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_leaf_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p) {
return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_MIN));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_get_bbox_dif_and_flags(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p) {
return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS));
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_get_value_mask(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, pnanovdb_uint32_t bit_index) {
pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 4u * (bit_index >> 5u)));
return ((value >> (bit_index & 31u)) & 1) != 0u;
}
struct pnanovdb_grid_type_constants_t
{
pnanovdb_uint32_t root_off_background;
pnanovdb_uint32_t root_off_min;
pnanovdb_uint32_t root_off_max;
pnanovdb_uint32_t root_off_ave;
pnanovdb_uint32_t root_off_stddev;
pnanovdb_uint32_t root_size;
pnanovdb_uint32_t value_stride_bits;
pnanovdb_uint32_t table_stride;
pnanovdb_uint32_t root_tile_off_value;
pnanovdb_uint32_t root_tile_size;
pnanovdb_uint32_t upper_off_min;
pnanovdb_uint32_t upper_off_max;
pnanovdb_uint32_t upper_off_ave;
pnanovdb_uint32_t upper_off_stddev;
pnanovdb_uint32_t upper_off_table;
pnanovdb_uint32_t upper_size;
pnanovdb_uint32_t lower_off_min;
pnanovdb_uint32_t lower_off_max;
pnanovdb_uint32_t lower_off_ave;
pnanovdb_uint32_t lower_off_stddev;
pnanovdb_uint32_t lower_off_table;
pnanovdb_uint32_t lower_size;
pnanovdb_uint32_t leaf_off_min;
pnanovdb_uint32_t leaf_off_max;
pnanovdb_uint32_t leaf_off_ave;
pnanovdb_uint32_t leaf_off_stddev;
pnanovdb_uint32_t leaf_off_table;
pnanovdb_uint32_t leaf_size;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_type_constants_t)
PNANOVDB_STATIC_CONST pnanovdb_grid_type_constants_t pnanovdb_grid_type_constants[PNANOVDB_GRID_TYPE_END] =
{
{28, 28, 28, 28, 28, 32, 0, 8, 20, 32, 8224, 8224, 8224, 8224, 8224, 270368, 1056, 1056, 1056, 1056, 1056, 33824, 80, 80, 80, 80, 96, 96},
{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144},
{32, 40, 48, 56, 64, 96, 64, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 104, 128, 4224},
{28, 30, 32, 36, 40, 64, 16, 8, 20, 32, 8224, 8226, 8228, 8232, 8256, 270400, 1056, 1058, 1060, 1064, 1088, 33856, 80, 82, 84, 88, 96, 1120},
{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144},
{32, 40, 48, 56, 64, 96, 64, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 104, 128, 4224},
{28, 40, 52, 64, 68, 96, 96, 16, 20, 32, 8224, 8236, 8248, 8252, 8256, 532544, 1056, 1068, 1080, 1084, 1088, 66624, 80, 92, 104, 108, 128, 6272},
{32, 56, 80, 104, 112, 128, 192, 24, 24, 64, 8224, 8248, 8272, 8280, 8288, 794720, 1056, 1080, 1104, 1112, 1120, 99424, 80, 104, 128, 136, 160, 12448},
{28, 29, 30, 31, 32, 64, 0, 8, 20, 32, 8224, 8225, 8226, 8227, 8256, 270400, 1056, 1057, 1058, 1059, 1088, 33856, 80, 80, 80, 80, 96, 96},
{28, 30, 32, 36, 40, 64, 16, 8, 20, 32, 8224, 8226, 8228, 8232, 8256, 270400, 1056, 1058, 1060, 1064, 1088, 33856, 80, 82, 84, 88, 96, 1120},
{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144},
{28, 29, 30, 31, 32, 64, 1, 8, 20, 32, 8224, 8225, 8226, 8227, 8256, 270400, 1056, 1057, 1058, 1059, 1088, 33856, 80, 80, 80, 80, 96, 160},
{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144},
{28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 352},
{28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 608},
{28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 1120},
{28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 96},
{28, 44, 60, 76, 80, 96, 128, 16, 20, 64, 8224, 8240, 8256, 8260, 8288, 532576, 1056, 1072, 1088, 1092, 1120, 66656, 80, 96, 112, 116, 128, 8320},
{32, 64, 96, 128, 136, 160, 256, 32, 24, 64, 8224, 8256, 8288, 8296, 8320, 1056896, 1056, 1088, 1120, 1128, 1152, 132224, 80, 112, 144, 152, 160, 16544},
};
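// Illustrative sketch (not part of the API): PNANOVDB_GRID_TYPE_GET(grid_type, field) indexes
// the table above by grid type. Assuming the PNANOVDB_GRID_TYPE_FLOAT constant defined earlier
// in this header, the byte offset of a float leaf's value table resolves as:
//   pnanovdb_uint32_t leaf_table_off = PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_FLOAT, leaf_off_table); // 96
//   // leaf_size for float is 2144 = 96 + 512 * sizeof(float), i.e. the leaf header plus one value per voxel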
// ------------------------------------------------ Basic Lookup -----------------------------------------------------------
PNANOVDB_FORCE_INLINE pnanovdb_gridblindmetadata_handle_t pnanovdb_grid_get_gridblindmetadata(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, pnanovdb_uint32_t index)
{
pnanovdb_gridblindmetadata_handle_t meta = { grid.address };
pnanovdb_uint64_t byte_offset = pnanovdb_grid_get_blind_metadata_offset(buf, grid);
meta.address = pnanovdb_address_offset64(meta.address, byte_offset);
meta.address = pnanovdb_address_offset_product(meta.address, PNANOVDB_GRIDBLINDMETADATA_SIZE, index);
return meta;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_grid_get_gridblindmetadata_value_address(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, pnanovdb_uint32_t index)
{
pnanovdb_gridblindmetadata_handle_t meta = pnanovdb_grid_get_gridblindmetadata(buf, grid, index);
pnanovdb_int64_t byte_offset = pnanovdb_gridblindmetadata_get_byte_offset(buf, meta);
pnanovdb_address_t address = grid.address;
address = pnanovdb_address_offset64(address, pnanovdb_int64_as_uint64(byte_offset));
return address;
}
PNANOVDB_FORCE_INLINE pnanovdb_tree_handle_t pnanovdb_grid_get_tree(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid)
{
pnanovdb_tree_handle_t tree = { grid.address };
tree.address = pnanovdb_address_offset(tree.address, PNANOVDB_GRID_SIZE);
return tree;
}
PNANOVDB_FORCE_INLINE pnanovdb_root_handle_t pnanovdb_tree_get_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t tree)
{
pnanovdb_root_handle_t root = { tree.address };
pnanovdb_uint64_t byte_offset = pnanovdb_tree_get_node_offset_root(buf, tree);
root.address = pnanovdb_address_offset64(root.address, byte_offset);
return root;
}
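// Illustrative usage sketch (names local to this example): with the grid assumed to start at
// byte 0 of the buffer, the handle chain down to the root is obtained as:
//   pnanovdb_grid_handle_t grid = { pnanovdb_address_null() };
//   pnanovdb_tree_handle_t tree = pnanovdb_grid_get_tree(buf, grid);
//   pnanovdb_root_handle_t root = pnanovdb_tree_get_root(buf, tree);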
PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_get_tile(pnanovdb_grid_type_t grid_type, pnanovdb_root_handle_t root, pnanovdb_uint32_t n)
{
pnanovdb_root_tile_handle_t tile = { root.address };
tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_size));
tile.address = pnanovdb_address_offset_product(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_size), n);
return tile;
}
PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_get_tile_zero(pnanovdb_grid_type_t grid_type, pnanovdb_root_handle_t root)
{
pnanovdb_root_tile_handle_t tile = { root.address };
tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_size));
return tile;
}
PNANOVDB_FORCE_INLINE pnanovdb_upper_handle_t pnanovdb_root_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, pnanovdb_root_tile_handle_t tile)
{
pnanovdb_upper_handle_t upper = { root.address };
upper.address = pnanovdb_address_offset64(upper.address, pnanovdb_int64_as_uint64(pnanovdb_root_tile_get_child(buf, tile)));
return upper;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_coord_to_key(PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
#if defined(PNANOVDB_NATIVE_64)
pnanovdb_uint64_t iu = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x) >> 12u;
pnanovdb_uint64_t ju = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).y) >> 12u;
pnanovdb_uint64_t ku = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).z) >> 12u;
return (ku) | (ju << 21u) | (iu << 42u);
#else
pnanovdb_uint32_t iu = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x) >> 12u;
pnanovdb_uint32_t ju = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).y) >> 12u;
pnanovdb_uint32_t ku = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).z) >> 12u;
pnanovdb_uint32_t key_x = ku | (ju << 21);
pnanovdb_uint32_t key_y = (iu << 10) | (ju >> 11);
return pnanovdb_uint32_as_uint64(key_x, key_y);
#endif
}
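// The key packs the upper-node coordinate (ijk >> 12, since an upper node spans
// 32 * 16 * 8 = 4096 voxels per axis) into 21 bits per axis: key = k | (j << 21) | (i << 42).
// The non-64-bit path above builds the identical bit layout across two 32-bit words.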
PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_find_tile(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    pnanovdb_uint32_t tile_count = pnanovdb_root_get_tile_count(buf, root);
pnanovdb_root_tile_handle_t tile = pnanovdb_root_get_tile_zero(grid_type, root);
pnanovdb_uint64_t key = pnanovdb_coord_to_key(ijk);
for (pnanovdb_uint32_t i = 0u; i < tile_count; i++)
{
if (pnanovdb_uint64_is_equal(key, pnanovdb_root_tile_get_key(buf, tile)))
{
return tile;
}
tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_size));
}
pnanovdb_root_tile_handle_t null_handle = { pnanovdb_address_null() };
return null_handle;
}
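// Note: this is a linear scan over the root's tile table, so lookup cost grows with
// table_size. Spatially coherent queries can avoid repeated scans via the read accessor
// defined later in this header.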
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
return (((PNANOVDB_DEREF(ijk).x & 7) >> 0) << (2 * 3)) +
(((PNANOVDB_DEREF(ijk).y & 7) >> 0) << (3)) +
((PNANOVDB_DEREF(ijk).z & 7) >> 0);
}
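// A leaf spans 8x8x8 voxels, so the offset is (x & 7) * 64 + (y & 7) * 8 + (z & 7).
// For example (illustrative values), ijk = (9, 2, 5) maps to local (1, 2, 5) and offset
// 1 * 64 + 2 * 8 + 5 = 85.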
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_min);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_max);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_ave);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_stddev);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node, pnanovdb_uint32_t n)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_table) + ((PNANOVDB_GRID_TYPE_GET(grid_type, value_stride_bits) * n) >> 3u);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
return pnanovdb_leaf_get_table_address(grid_type, buf, leaf, n);
}
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t value_log_bits)
{
// value_log_bits // 2 3 4
pnanovdb_uint32_t value_bits = 1u << value_log_bits; // 4 8 16
pnanovdb_uint32_t value_mask = (1u << value_bits) - 1u; // 0xF 0xFF 0xFFFF
pnanovdb_uint32_t values_per_word_bits = 5u - value_log_bits; // 3 2 1
pnanovdb_uint32_t values_per_word_mask = (1u << values_per_word_bits) - 1u; // 7 3 1
pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
float minimum = pnanovdb_read_float(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_MINIMUM));
float quantum = pnanovdb_read_float(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_QUANTUM));
pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, ((n >> values_per_word_bits) << 2u)));
pnanovdb_uint32_t value_compressed = (raw >> ((n & values_per_word_mask) << value_log_bits)) & value_mask;
return pnanovdb_uint32_to_float(value_compressed) * quantum + minimum;
}
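// Decoding sketch: for value_log_bits = 2 (Fp4), each 32-bit word packs eight 4-bit codes;
// a code c reconstructs to float as c * quantum + minimum, with quantum and minimum read at
// negative offsets from the value table (the PNANOVDB_LEAF_TABLE_NEG_OFF_* constants above).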
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp4_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
return pnanovdb_leaf_fp_read_float(buf, address, ijk, 2u);
}
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp8_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
return pnanovdb_leaf_fp_read_float(buf, address, ijk, 3u);
}
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp16_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
return pnanovdb_leaf_fp_read_float(buf, address, ijk, 4u);
}
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fpn_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
pnanovdb_uint32_t bbox_dif_and_flags = pnanovdb_read_uint32(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_BBOX_DIF_AND_FLAGS));
pnanovdb_uint32_t flags = bbox_dif_and_flags >> 24u;
pnanovdb_uint32_t value_log_bits = flags >> 5; // b = 0, 1, 2, 3, 4 corresponding to 1, 2, 4, 8, 16 bits
return pnanovdb_leaf_fp_read_float(buf, address, ijk, value_log_bits);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_lower_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
return (((PNANOVDB_DEREF(ijk).x & 127) >> 3) << (2 * 4)) +
(((PNANOVDB_DEREF(ijk).y & 127) >> 3) << (4)) +
((PNANOVDB_DEREF(ijk).z & 127) >> 3);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_min);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_max);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_ave);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_stddev);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_table) + PNANOVDB_GRID_TYPE_GET(grid_type, table_stride) * n;
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_lower_get_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n)
{
pnanovdb_address_t table_address = pnanovdb_lower_get_table_address(grid_type, buf, node, n);
return pnanovdb_read_int64(buf, table_address);
}
PNANOVDB_FORCE_INLINE pnanovdb_leaf_handle_t pnanovdb_lower_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, pnanovdb_uint32_t n)
{
pnanovdb_leaf_handle_t leaf = { lower.address };
leaf.address = pnanovdb_address_offset64(leaf.address, pnanovdb_int64_as_uint64(pnanovdb_lower_get_table_child(grid_type, buf, lower, n)));
return leaf;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk);
pnanovdb_address_t value_address;
if (pnanovdb_lower_get_child_mask(buf, lower, n))
{
pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n);
value_address = pnanovdb_leaf_get_value_address(grid_type, buf, child, ijk);
PNANOVDB_DEREF(level) = 0u;
}
else
{
value_address = pnanovdb_lower_get_table_address(grid_type, buf, lower, n);
PNANOVDB_DEREF(level) = 1u;
}
return value_address;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
pnanovdb_uint32_t level;
return pnanovdb_lower_get_value_address_and_level(grid_type, buf, lower, ijk, PNANOVDB_REF(level));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_upper_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
return (((PNANOVDB_DEREF(ijk).x & 4095) >> 7) << (2 * 5)) +
(((PNANOVDB_DEREF(ijk).y & 4095) >> 7) << (5)) +
((PNANOVDB_DEREF(ijk).z & 4095) >> 7);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_min);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_max);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_ave);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_stddev);
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_table) + PNANOVDB_GRID_TYPE_GET(grid_type, table_stride) * n;
return pnanovdb_address_offset(node.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_upper_get_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n)
{
    pnanovdb_address_t table_address = pnanovdb_upper_get_table_address(grid_type, buf, node, n);
    return pnanovdb_read_int64(buf, table_address);
}
PNANOVDB_FORCE_INLINE pnanovdb_lower_handle_t pnanovdb_upper_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, pnanovdb_uint32_t n)
{
pnanovdb_lower_handle_t lower = { upper.address };
lower.address = pnanovdb_address_offset64(lower.address, pnanovdb_int64_as_uint64(pnanovdb_upper_get_table_child(grid_type, buf, upper, n)));
return lower;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk);
pnanovdb_address_t value_address;
if (pnanovdb_upper_get_child_mask(buf, upper, n))
{
pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n);
value_address = pnanovdb_lower_get_value_address_and_level(grid_type, buf, child, ijk, level);
}
else
{
value_address = pnanovdb_upper_get_table_address(grid_type, buf, upper, n);
PNANOVDB_DEREF(level) = 2u;
}
return value_address;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
pnanovdb_uint32_t level;
return pnanovdb_upper_get_value_address_and_level(grid_type, buf, upper, ijk, PNANOVDB_REF(level));
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_min);
return pnanovdb_address_offset(root.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_max);
return pnanovdb_address_offset(root.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_ave);
return pnanovdb_address_offset(root.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_stddev);
return pnanovdb_address_offset(root.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_tile_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t root_tile)
{
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value);
return pnanovdb_address_offset(root_tile.address, byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk);
pnanovdb_address_t ret;
if (pnanovdb_address_is_null(tile.address))
{
ret = pnanovdb_address_offset(root.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_off_background));
PNANOVDB_DEREF(level) = 4u;
}
else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile)))
{
ret = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value));
PNANOVDB_DEREF(level) = 3u;
}
else
{
pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile);
ret = pnanovdb_upper_get_value_address_and_level(grid_type, buf, child, ijk, level);
}
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
pnanovdb_uint32_t level;
return pnanovdb_root_get_value_address_and_level(grid_type, buf, root, ijk, PNANOVDB_REF(level));
}
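// Illustrative end-to-end read (a sketch, assuming a float grid and the handles from the
// lookup chain above; names local to this example):
//   pnanovdb_coord_t ijk = { 0, 0, 0 };
//   pnanovdb_address_t addr = pnanovdb_root_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, root, PNANOVDB_REF(ijk));
//   float value = pnanovdb_read_float(buf, addr);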
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_bit(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) bit_index)
{
pnanovdb_uint32_t level;
pnanovdb_address_t address = pnanovdb_root_get_value_address_and_level(grid_type, buf, root, ijk, PNANOVDB_REF(level));
PNANOVDB_DEREF(bit_index) = level == 0u ? pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x & 7) : 0u;
return address;
}
PNANOVDB_FORCE_INLINE float pnanovdb_root_fp4_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level)
{
float ret;
if (level == 0)
{
ret = pnanovdb_leaf_fp4_read_float(buf, address, ijk);
}
else
{
ret = pnanovdb_read_float(buf, address);
}
return ret;
}
PNANOVDB_FORCE_INLINE float pnanovdb_root_fp8_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level)
{
float ret;
if (level == 0)
{
ret = pnanovdb_leaf_fp8_read_float(buf, address, ijk);
}
else
{
ret = pnanovdb_read_float(buf, address);
}
return ret;
}
PNANOVDB_FORCE_INLINE float pnanovdb_root_fp16_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level)
{
float ret;
if (level == 0)
{
ret = pnanovdb_leaf_fp16_read_float(buf, address, ijk);
}
else
{
ret = pnanovdb_read_float(buf, address);
}
return ret;
}
PNANOVDB_FORCE_INLINE float pnanovdb_root_fpn_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level)
{
float ret;
if (level == 0)
{
ret = pnanovdb_leaf_fpn_read_float(buf, address, ijk);
}
else
{
ret = pnanovdb_read_float(buf, address);
}
return ret;
}
// ------------------------------------------------ ReadAccessor -----------------------------------------------------------
struct pnanovdb_readaccessor_t
{
pnanovdb_coord_t key;
pnanovdb_leaf_handle_t leaf;
pnanovdb_lower_handle_t lower;
pnanovdb_upper_handle_t upper;
pnanovdb_root_handle_t root;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_readaccessor_t)
PNANOVDB_FORCE_INLINE void pnanovdb_readaccessor_init(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, pnanovdb_root_handle_t root)
{
PNANOVDB_DEREF(acc).key.x = 0x7FFFFFFF;
PNANOVDB_DEREF(acc).key.y = 0x7FFFFFFF;
PNANOVDB_DEREF(acc).key.z = 0x7FFFFFFF;
PNANOVDB_DEREF(acc).leaf.address = pnanovdb_address_null();
PNANOVDB_DEREF(acc).lower.address = pnanovdb_address_null();
PNANOVDB_DEREF(acc).upper.address = pnanovdb_address_null();
PNANOVDB_DEREF(acc).root = root;
}
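// Typical usage sketch (names local to this example): initialize once, then reuse the
// accessor across spatially coherent queries so repeated lookups hit the cached nodes:
//   pnanovdb_readaccessor_t acc;
//   pnanovdb_readaccessor_init(PNANOVDB_REF(acc), root);
//   pnanovdb_address_t addr = pnanovdb_readaccessor_get_value_address(grid_type, buf, PNANOVDB_REF(acc), PNANOVDB_REF(ijk));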
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached0(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty)
{
if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).leaf.address)) { return PNANOVDB_FALSE; }
if ((dirty & ~((1u << 3) - 1u)) != 0)
{
PNANOVDB_DEREF(acc).leaf.address = pnanovdb_address_null();
return PNANOVDB_FALSE;
}
return PNANOVDB_TRUE;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached1(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty)
{
if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).lower.address)) { return PNANOVDB_FALSE; }
if ((dirty & ~((1u << 7) - 1u)) != 0)
{
PNANOVDB_DEREF(acc).lower.address = pnanovdb_address_null();
return PNANOVDB_FALSE;
}
return PNANOVDB_TRUE;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached2(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty)
{
if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).upper.address)) { return PNANOVDB_FALSE; }
if ((dirty & ~((1u << 12) - 1u)) != 0)
{
PNANOVDB_DEREF(acc).upper.address = pnanovdb_address_null();
return PNANOVDB_FALSE;
}
return PNANOVDB_TRUE;
}
PNANOVDB_FORCE_INLINE int pnanovdb_readaccessor_computedirty(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
return (PNANOVDB_DEREF(ijk).x ^ PNANOVDB_DEREF(acc).key.x) | (PNANOVDB_DEREF(ijk).y ^ PNANOVDB_DEREF(acc).key.y) | (PNANOVDB_DEREF(ijk).z ^ PNANOVDB_DEREF(acc).key.z);
}
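// The XOR above leaves a bit set wherever ijk differs from the cached key. The iscached
// helpers mask away the low bits covered by each cached node (3 bits for an 8^3 leaf,
// 7 bits for a 128^3 lower node, 12 bits for a 4096^3 upper node); any remaining set bit
// means the query left that node and the cache entry is invalidated.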
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
return pnanovdb_leaf_get_table_address(grid_type, buf, leaf, n);
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk);
pnanovdb_address_t value_address;
if (pnanovdb_lower_get_child_mask(buf, lower, n))
{
pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n);
PNANOVDB_DEREF(acc).leaf = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
value_address = pnanovdb_leaf_get_value_address_and_cache(grid_type, buf, child, ijk, acc);
PNANOVDB_DEREF(level) = 0u;
}
else
{
value_address = pnanovdb_lower_get_table_address(grid_type, buf, lower, n);
PNANOVDB_DEREF(level) = 1u;
}
return value_address;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t level;
return pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, lower, ijk, acc, PNANOVDB_REF(level));
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk);
pnanovdb_address_t value_address;
if (pnanovdb_upper_get_child_mask(buf, upper, n))
{
pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n);
PNANOVDB_DEREF(acc).lower = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
value_address = pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, child, ijk, acc, level);
}
else
{
value_address = pnanovdb_upper_get_table_address(grid_type, buf, upper, n);
PNANOVDB_DEREF(level) = 2u;
}
return value_address;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t level;
return pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, upper, ijk, acc, PNANOVDB_REF(level));
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk);
pnanovdb_address_t ret;
if (pnanovdb_address_is_null(tile.address))
{
ret = pnanovdb_address_offset(root.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_off_background));
PNANOVDB_DEREF(level) = 4u;
}
else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile)))
{
ret = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value));
PNANOVDB_DEREF(level) = 3u;
}
else
{
pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile);
PNANOVDB_DEREF(acc).upper = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
ret = pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, child, ijk, acc, level);
}
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t level;
return pnanovdb_root_get_value_address_and_level_and_cache(grid_type, buf, root, ijk, acc, PNANOVDB_REF(level));
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
int dirty = pnanovdb_readaccessor_computedirty(acc, ijk);
pnanovdb_address_t value_address;
if (pnanovdb_readaccessor_iscached0(acc, dirty))
{
value_address = pnanovdb_leaf_get_value_address_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc);
PNANOVDB_DEREF(level) = 0u;
}
else if (pnanovdb_readaccessor_iscached1(acc, dirty))
{
value_address = pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc, level);
}
else if (pnanovdb_readaccessor_iscached2(acc, dirty))
{
value_address = pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc, level);
}
else
{
value_address = pnanovdb_root_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc, level);
}
return value_address;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
pnanovdb_uint32_t level;
return pnanovdb_readaccessor_get_value_address_and_level(grid_type, buf, acc, ijk, PNANOVDB_REF(level));
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address_bit(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) bit_index)
{
pnanovdb_uint32_t level;
pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address_and_level(grid_type, buf, acc, ijk, PNANOVDB_REF(level));
PNANOVDB_DEREF(bit_index) = level == 0u ? pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x & 7) : 0u;
return address;
}
// ------------------------------------------------ ReadAccessor GetDim -----------------------------------------------------------
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
return 1u;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_lower_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk);
pnanovdb_uint32_t ret;
if (pnanovdb_lower_get_child_mask(buf, lower, n))
{
pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n);
PNANOVDB_DEREF(acc).leaf = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
ret = pnanovdb_leaf_get_dim_and_cache(grid_type, buf, child, ijk, acc);
}
else
{
ret = (1u << (3u)); // node 0 dim
}
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_upper_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk);
pnanovdb_uint32_t ret;
if (pnanovdb_upper_get_child_mask(buf, upper, n))
{
pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n);
PNANOVDB_DEREF(acc).lower = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
ret = pnanovdb_lower_get_dim_and_cache(grid_type, buf, child, ijk, acc);
}
else
{
ret = (1u << (4u + 3u)); // node 1 dim
}
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk);
pnanovdb_uint32_t ret;
if (pnanovdb_address_is_null(tile.address))
{
ret = 1u << (5u + 4u + 3u); // background, node 2 dim
}
else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile)))
{
ret = 1u << (5u + 4u + 3u); // tile value, node 2 dim
}
else
{
pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile);
PNANOVDB_DEREF(acc).upper = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
ret = pnanovdb_upper_get_dim_and_cache(grid_type, buf, child, ijk, acc);
}
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_readaccessor_get_dim(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
int dirty = pnanovdb_readaccessor_computedirty(acc, ijk);
pnanovdb_uint32_t dim;
if (pnanovdb_readaccessor_iscached0(acc, dirty))
{
dim = pnanovdb_leaf_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc);
}
else if (pnanovdb_readaccessor_iscached1(acc, dirty))
{
dim = pnanovdb_lower_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc);
}
else if (pnanovdb_readaccessor_iscached2(acc, dirty))
{
dim = pnanovdb_upper_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc);
}
else
{
dim = pnanovdb_root_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc);
}
return dim;
}
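// get_dim reports the edge length, in voxels, of the finest structure covering ijk:
// 1 inside a leaf, 8 for a leaf-sized tile in a lower node, 128 for a lower-sized tile in
// an upper node, and 4096 for a root tile or background. The HDDA below uses this to take
// correspondingly larger steps through uniform regions.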
// ------------------------------------------------ ReadAccessor IsActive -----------------------------------------------------------
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
return pnanovdb_leaf_get_value_mask(buf, leaf, n);
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk);
pnanovdb_bool_t is_active;
if (pnanovdb_lower_get_child_mask(buf, lower, n))
{
pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n);
PNANOVDB_DEREF(acc).leaf = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
is_active = pnanovdb_leaf_is_active_and_cache(grid_type, buf, child, ijk, acc);
}
else
{
is_active = pnanovdb_lower_get_value_mask(buf, lower, n);
}
return is_active;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk);
pnanovdb_bool_t is_active;
if (pnanovdb_upper_get_child_mask(buf, upper, n))
{
pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n);
PNANOVDB_DEREF(acc).lower = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
is_active = pnanovdb_lower_is_active_and_cache(grid_type, buf, child, ijk, acc);
}
else
{
is_active = pnanovdb_upper_get_value_mask(buf, upper, n);
}
return is_active;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_root_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk);
pnanovdb_bool_t is_active;
if (pnanovdb_address_is_null(tile.address))
{
is_active = PNANOVDB_FALSE; // background
}
else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile)))
{
pnanovdb_uint32_t state = pnanovdb_root_tile_get_state(buf, tile);
is_active = state != 0u; // tile value
}
else
{
pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile);
PNANOVDB_DEREF(acc).upper = child;
PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
is_active = pnanovdb_upper_is_active_and_cache(grid_type, buf, child, ijk, acc);
}
return is_active;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_is_active(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
int dirty = pnanovdb_readaccessor_computedirty(acc, ijk);
pnanovdb_bool_t is_active;
if (pnanovdb_readaccessor_iscached0(acc, dirty))
{
is_active = pnanovdb_leaf_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc);
}
else if (pnanovdb_readaccessor_iscached1(acc, dirty))
{
is_active = pnanovdb_lower_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc);
}
else if (pnanovdb_readaccessor_iscached2(acc, dirty))
{
is_active = pnanovdb_upper_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc);
}
else
{
is_active = pnanovdb_root_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc);
}
return is_active;
}
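// Sketch (names local to this example): activity can be tested through the same cached path:
//   pnanovdb_bool_t on = pnanovdb_readaccessor_is_active(grid_type, buf, PNANOVDB_REF(acc), PNANOVDB_REF(ijk));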
// ------------------------------------------------ Map Transforms -----------------------------------------------------------
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_vec3_t dst;
float sx = PNANOVDB_DEREF(src).x;
float sy = PNANOVDB_DEREF(src).y;
float sz = PNANOVDB_DEREF(src).z;
dst.x = sx * pnanovdb_map_get_matf(buf, map, 0) + sy * pnanovdb_map_get_matf(buf, map, 1) + sz * pnanovdb_map_get_matf(buf, map, 2) + pnanovdb_map_get_vecf(buf, map, 0);
dst.y = sx * pnanovdb_map_get_matf(buf, map, 3) + sy * pnanovdb_map_get_matf(buf, map, 4) + sz * pnanovdb_map_get_matf(buf, map, 5) + pnanovdb_map_get_vecf(buf, map, 1);
dst.z = sx * pnanovdb_map_get_matf(buf, map, 6) + sy * pnanovdb_map_get_matf(buf, map, 7) + sz * pnanovdb_map_get_matf(buf, map, 8) + pnanovdb_map_get_vecf(buf, map, 2);
return dst;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_inverse(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_vec3_t dst;
float sx = PNANOVDB_DEREF(src).x - pnanovdb_map_get_vecf(buf, map, 0);
float sy = PNANOVDB_DEREF(src).y - pnanovdb_map_get_vecf(buf, map, 1);
float sz = PNANOVDB_DEREF(src).z - pnanovdb_map_get_vecf(buf, map, 2);
dst.x = sx * pnanovdb_map_get_invmatf(buf, map, 0) + sy * pnanovdb_map_get_invmatf(buf, map, 1) + sz * pnanovdb_map_get_invmatf(buf, map, 2);
dst.y = sx * pnanovdb_map_get_invmatf(buf, map, 3) + sy * pnanovdb_map_get_invmatf(buf, map, 4) + sz * pnanovdb_map_get_invmatf(buf, map, 5);
dst.z = sx * pnanovdb_map_get_invmatf(buf, map, 6) + sy * pnanovdb_map_get_invmatf(buf, map, 7) + sz * pnanovdb_map_get_invmatf(buf, map, 8);
return dst;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_jacobi(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_vec3_t dst;
float sx = PNANOVDB_DEREF(src).x;
float sy = PNANOVDB_DEREF(src).y;
float sz = PNANOVDB_DEREF(src).z;
dst.x = sx * pnanovdb_map_get_matf(buf, map, 0) + sy * pnanovdb_map_get_matf(buf, map, 1) + sz * pnanovdb_map_get_matf(buf, map, 2);
dst.y = sx * pnanovdb_map_get_matf(buf, map, 3) + sy * pnanovdb_map_get_matf(buf, map, 4) + sz * pnanovdb_map_get_matf(buf, map, 5);
dst.z = sx * pnanovdb_map_get_matf(buf, map, 6) + sy * pnanovdb_map_get_matf(buf, map, 7) + sz * pnanovdb_map_get_matf(buf, map, 8);
return dst;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_inverse_jacobi(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_vec3_t dst;
float sx = PNANOVDB_DEREF(src).x;
float sy = PNANOVDB_DEREF(src).y;
float sz = PNANOVDB_DEREF(src).z;
dst.x = sx * pnanovdb_map_get_invmatf(buf, map, 0) + sy * pnanovdb_map_get_invmatf(buf, map, 1) + sz * pnanovdb_map_get_invmatf(buf, map, 2);
dst.y = sx * pnanovdb_map_get_invmatf(buf, map, 3) + sy * pnanovdb_map_get_invmatf(buf, map, 4) + sz * pnanovdb_map_get_invmatf(buf, map, 5);
dst.z = sx * pnanovdb_map_get_invmatf(buf, map, 6) + sy * pnanovdb_map_get_invmatf(buf, map, 7) + sz * pnanovdb_map_get_invmatf(buf, map, 8);
return dst;
}
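// The map stores a row-major 3x3 matrix M (matf), its inverse (invmatf), and a translation
// t (vecf): apply computes M * src + t, apply_inverse computes M^-1 * (src - t), and the
// *_jacobi variants omit t, making them suitable for transforming directions rather than points.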
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_world_to_indexf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
return pnanovdb_map_apply_inverse(buf, map, src);
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_index_to_worldf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
return pnanovdb_map_apply(buf, map, src);
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_world_to_index_dirf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
return pnanovdb_map_apply_inverse_jacobi(buf, map, src);
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_index_to_world_dirf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
return pnanovdb_map_apply_jacobi(buf, map, src);
}
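// Sketch of mapping a world-space position to a voxel coordinate (names local to this
// example; pnanovdb_hdda_pos_to_ijk requires PNANOVDB_HDDA, see below):
//   pnanovdb_vec3_t world = { 1.5f, 0.f, -2.f };
//   pnanovdb_vec3_t indexf = pnanovdb_grid_world_to_indexf(buf, grid, PNANOVDB_REF(world));
//   pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(indexf));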
// ------------------------------------------------ DitherLUT -----------------------------------------------------------
// This table was generated with
/**************
static constexpr inline uint32
SYSwang_inthash(uint32 key)
{
// From http://www.concentric.net/~Ttwang/tech/inthash.htm
key += ~(key << 16);
key ^= (key >> 5);
key += (key << 3);
key ^= (key >> 13);
key += ~(key << 9);
key ^= (key >> 17);
return key;
}
static void
ut_initDitherR(float *pattern, float offset,
int x, int y, int z, int res, int goalres)
{
// These offsets are designed to maximize the difference between
// dither values in nearby voxels within a given 2x2x2 cell, without
// producing axis-aligned artifacts. They are organized in row-major
// order.
static const float theDitherOffset[] = {0,4,6,2,5,1,3,7};
static const float theScale = 0.125F;
int key = (((z << res) + y) << res) + x;
if (res == goalres)
{
pattern[key] = offset;
return;
}
// Randomly flip (on each axis) the dithering patterns used by the
// subcells. This key is xor'd with the subcell index below before
// looking up in the dither offset list.
key = SYSwang_inthash(key) & 7;
x <<= 1;
y <<= 1;
z <<= 1;
offset *= theScale;
for (int i = 0; i < 8; i++)
ut_initDitherR(pattern, offset+theDitherOffset[i ^ key]*theScale,
x+(i&1), y+((i&2)>>1), z+((i&4)>>2), res+1, goalres);
}
// This is a compact algorithm that accomplishes essentially the same thing
// as ut_initDither() above. We should eventually switch to use this and
// clean the dead code.
static fpreal32 *
ut_initDitherRecursive(int goalres)
{
const int nfloat = 1 << (goalres*3);
float *pattern = new float[nfloat];
ut_initDitherR(pattern, 1.0F, 0, 0, 0, 0, goalres);
// This has built an even spacing from 1/nfloat to 1.0.
// However, our dither pattern should be 1/(nfloat+1) to nfloat/(nfloat+1).
// So we do a correction here. Note that the earlier calculations are
// done with powers of 2 so are exact, so it does make sense to delay
// the renormalization to this pass.
float correctionterm = nfloat / (nfloat+1.0F);
for (int i = 0; i < nfloat; i++)
pattern[i] *= correctionterm;
return pattern;
}
theDitherMatrix = ut_initDitherRecursive(3);
for (int i = 0; i < 512/8; i ++)
{
for (int j = 0; j < 8; j ++)
std::cout << theDitherMatrix[i*8+j] << "f, ";
std::cout << std::endl;
}
**************/
PNANOVDB_STATIC_CONST float pnanovdb_dither_lut[512] =
{
0.14425f, 0.643275f, 0.830409f, 0.331384f, 0.105263f, 0.604289f, 0.167641f, 0.666667f,
0.892788f, 0.393762f, 0.0818713f, 0.580897f, 0.853801f, 0.354776f, 0.916179f, 0.417154f,
0.612086f, 0.11306f, 0.79922f, 0.300195f, 0.510721f, 0.0116959f, 0.947368f, 0.448343f,
0.362573f, 0.861598f, 0.0506823f, 0.549708f, 0.261209f, 0.760234f, 0.19883f, 0.697856f,
0.140351f, 0.639376f, 0.576998f, 0.0779727f, 0.522417f, 0.0233918f, 0.460039f, 0.959064f,
0.888889f, 0.389864f, 0.327485f, 0.826511f, 0.272904f, 0.77193f, 0.709552f, 0.210526f,
0.483431f, 0.982456f, 0.296296f, 0.795322f, 0.116959f, 0.615984f, 0.0545809f, 0.553606f,
0.732943f, 0.233918f, 0.545809f, 0.0467836f, 0.865497f, 0.366472f, 0.803119f, 0.304094f,
0.518519f, 0.0194932f, 0.45614f, 0.955166f, 0.729045f, 0.230019f, 0.54191f, 0.042885f,
0.269006f, 0.768031f, 0.705653f, 0.206628f, 0.479532f, 0.978558f, 0.292398f, 0.791423f,
0.237817f, 0.736842f, 0.424951f, 0.923977f, 0.136452f, 0.635478f, 0.323587f, 0.822612f,
0.986355f, 0.487329f, 0.674464f, 0.175439f, 0.88499f, 0.385965f, 0.573099f, 0.0740741f,
0.51462f, 0.0155945f, 0.202729f, 0.701754f, 0.148148f, 0.647174f, 0.834308f, 0.335283f,
0.265107f, 0.764133f, 0.951267f, 0.452242f, 0.896686f, 0.397661f, 0.08577f, 0.584795f,
0.8577f, 0.358674f, 0.920078f, 0.421053f, 0.740741f, 0.241715f, 0.678363f, 0.179337f,
0.109162f, 0.608187f, 0.17154f, 0.670565f, 0.491228f, 0.990253f, 0.42885f, 0.927875f,
0.0662768f, 0.565302f, 0.62768f, 0.128655f, 0.183236f, 0.682261f, 0.744639f, 0.245614f,
0.814815f, 0.315789f, 0.378168f, 0.877193f, 0.931774f, 0.432749f, 0.495127f, 0.994152f,
0.0350877f, 0.534113f, 0.97076f, 0.471735f, 0.214425f, 0.71345f, 0.526316f, 0.0272904f,
0.783626f, 0.2846f, 0.222222f, 0.721248f, 0.962963f, 0.463938f, 0.276803f, 0.775828f,
0.966862f, 0.467836f, 0.405458f, 0.904483f, 0.0701754f, 0.569201f, 0.881092f, 0.382066f,
0.218324f, 0.717349f, 0.654971f, 0.155945f, 0.818713f, 0.319688f, 0.132554f, 0.631579f,
0.0623782f, 0.561404f, 0.748538f, 0.249513f, 0.912281f, 0.413255f, 0.974659f, 0.475634f,
0.810916f, 0.311891f, 0.499025f, 0.998051f, 0.163743f, 0.662768f, 0.226121f, 0.725146f,
0.690058f, 0.191033f, 0.00389864f, 0.502924f, 0.557505f, 0.0584795f, 0.120858f, 0.619883f,
0.440546f, 0.939571f, 0.752437f, 0.253411f, 0.307992f, 0.807018f, 0.869396f, 0.37037f,
0.658869f, 0.159844f, 0.346979f, 0.846004f, 0.588694f, 0.0896686f, 0.152047f, 0.651072f,
0.409357f, 0.908382f, 0.596491f, 0.0974659f, 0.339181f, 0.838207f, 0.900585f, 0.401559f,
0.34308f, 0.842105f, 0.779727f, 0.280702f, 0.693957f, 0.194932f, 0.25731f, 0.756335f,
0.592593f, 0.0935673f, 0.0311891f, 0.530214f, 0.444444f, 0.94347f, 0.506823f, 0.00779727f,
0.68616f, 0.187135f, 0.124756f, 0.623782f, 0.288499f, 0.787524f, 0.350877f, 0.849903f,
0.436647f, 0.935673f, 0.873294f, 0.374269f, 0.538012f, 0.0389864f, 0.60039f, 0.101365f,
0.57115f, 0.0721248f, 0.758285f, 0.259259f, 0.719298f, 0.220273f, 0.532164f, 0.0331384f,
0.321637f, 0.820663f, 0.00974659f, 0.508772f, 0.469786f, 0.968811f, 0.282651f, 0.781676f,
0.539961f, 0.0409357f, 0.727096f, 0.22807f, 0.500975f, 0.00194932f, 0.563353f, 0.0643275f,
0.290448f, 0.789474f, 0.477583f, 0.976608f, 0.251462f, 0.750487f, 0.31384f, 0.812865f,
0.94152f, 0.442495f, 0.879142f, 0.380117f, 0.37232f, 0.871345f, 0.309942f, 0.808967f,
0.192982f, 0.692008f, 0.130604f, 0.62963f, 0.621832f, 0.122807f, 0.559454f, 0.0604289f,
0.660819f, 0.161793f, 0.723197f, 0.224172f, 0.403509f, 0.902534f, 0.840156f, 0.341131f,
0.411306f, 0.910331f, 0.473684f, 0.97271f, 0.653021f, 0.153996f, 0.0916179f, 0.590643f,
0.196881f, 0.695906f, 0.384016f, 0.883041f, 0.0955166f, 0.594542f, 0.157895f, 0.65692f,
0.945419f, 0.446394f, 0.633528f, 0.134503f, 0.844055f, 0.345029f, 0.906433f, 0.407407f,
0.165692f, 0.664717f, 0.103314f, 0.602339f, 0.126706f, 0.625731f, 0.189084f, 0.688109f,
0.91423f, 0.415205f, 0.851852f, 0.352827f, 0.875244f, 0.376218f, 0.937622f, 0.438596f,
0.317739f, 0.816764f, 0.255361f, 0.754386f, 0.996101f, 0.497076f, 0.933723f, 0.434698f,
0.567251f, 0.0682261f, 0.504873f, 0.00584795f, 0.247563f, 0.746589f, 0.185185f, 0.684211f,
0.037037f, 0.536062f, 0.0994152f, 0.598441f, 0.777778f, 0.278752f, 0.465887f, 0.964912f,
0.785575f, 0.28655f, 0.847953f, 0.348928f, 0.0292398f, 0.528265f, 0.7154f, 0.216374f,
0.39961f, 0.898636f, 0.961014f, 0.461988f, 0.0487329f, 0.547758f, 0.111111f, 0.610136f,
0.649123f, 0.150097f, 0.212476f, 0.711501f, 0.797271f, 0.298246f, 0.859649f, 0.360624f,
0.118908f, 0.617934f, 0.0565302f, 0.555556f, 0.329435f, 0.82846f, 0.516569f, 0.0175439f,
0.867446f, 0.368421f, 0.805068f, 0.306043f, 0.578947f, 0.079922f, 0.267057f, 0.766082f,
0.270955f, 0.76998f, 0.707602f, 0.208577f, 0.668616f, 0.169591f, 0.606238f, 0.107212f,
0.520468f, 0.0214425f, 0.45809f, 0.957115f, 0.419103f, 0.918129f, 0.356725f, 0.855751f,
0.988304f, 0.489279f, 0.426901f, 0.925926f, 0.450292f, 0.949318f, 0.512671f, 0.0136452f,
0.239766f, 0.738791f, 0.676413f, 0.177388f, 0.699805f, 0.20078f, 0.263158f, 0.762183f,
0.773879f, 0.274854f, 0.337232f, 0.836257f, 0.672515f, 0.173489f, 0.734893f, 0.235867f,
0.0253411f, 0.524366f, 0.586745f, 0.0877193f, 0.423002f, 0.922027f, 0.48538f, 0.984405f,
0.74269f, 0.243665f, 0.680312f, 0.181287f, 0.953216f, 0.454191f, 0.1423f, 0.641326f,
0.493177f, 0.992203f, 0.430799f, 0.929825f, 0.204678f, 0.703704f, 0.890838f, 0.391813f,
0.894737f, 0.395712f, 0.0838207f, 0.582846f, 0.0448343f, 0.54386f, 0.231969f, 0.730994f,
0.146199f, 0.645224f, 0.832359f, 0.333333f, 0.793372f, 0.294347f, 0.980507f, 0.481481f,
0.364522f, 0.863548f, 0.80117f, 0.302144f, 0.824561f, 0.325536f, 0.138402f, 0.637427f,
0.614035f, 0.11501f, 0.0526316f, 0.551657f, 0.0760234f, 0.575049f, 0.88694f, 0.387914f,
};
PNANOVDB_FORCE_INLINE float pnanovdb_dither_lookup(pnanovdb_bool_t enabled, int offset)
{
return enabled ? pnanovdb_dither_lut[offset & 511] : 0.5f;
}
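// Hypothetical usage sketch (not part of the original header): jittering the
// start of a raymarch with the dither LUT to reduce banding. The 8x8 screen
// tiling and the per-frame rotation through 8 stored patterns are assumptions
// for illustration; pixel_x/pixel_y/frame are caller-supplied values.
PNANOVDB_FORCE_INLINE float pnanovdb_dither_example_jitter(pnanovdb_bool_t enabled, int pixel_x, int pixel_y, int frame)
{
    // 64 entries per pattern, 8 patterns; the & 511 in pnanovdb_dither_lookup
    // keeps any offset in range
    int offset = (pixel_x & 7) + 8 * (pixel_y & 7) + 64 * (frame & 7);
    // returns a value in [0,1); exactly 0.5f when dithering is disabled
    return pnanovdb_dither_lookup(enabled, offset);
}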
// ------------------------------------------------ HDDA -----------------------------------------------------------
#ifdef PNANOVDB_HDDA
// Comment out to disable this explicit round-off check
#define PNANOVDB_ENFORCE_FORWARD_STEPPING
#define PNANOVDB_HDDA_FLOAT_MAX 1e38f
struct pnanovdb_hdda_t
{
pnanovdb_int32_t dim;
float tmin;
float tmax;
pnanovdb_coord_t voxel;
pnanovdb_coord_t step;
pnanovdb_vec3_t delta;
pnanovdb_vec3_t next;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_hdda_t)
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_hdda_pos_to_ijk(PNANOVDB_IN(pnanovdb_vec3_t) pos)
{
pnanovdb_coord_t voxel;
voxel.x = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).x));
voxel.y = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).y));
voxel.z = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).z));
return voxel;
}
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_hdda_pos_to_voxel(PNANOVDB_IN(pnanovdb_vec3_t) pos, int dim)
{
pnanovdb_coord_t voxel;
voxel.x = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).x)) & (~(dim - 1));
voxel.y = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).y)) & (~(dim - 1));
voxel.z = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).z)) & (~(dim - 1));
return voxel;
}
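// Note: dim is assumed to be a power of two; masking with ~(dim - 1) snaps
// each coordinate down to the origin of its dim-aligned node, e.g. with
// dim == 8, coordinate 13 maps to 8 and coordinate -3 maps to -8.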
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_hdda_ray_start(PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction)
{
pnanovdb_vec3_t pos = pnanovdb_vec3_add(
pnanovdb_vec3_mul(PNANOVDB_DEREF(direction), pnanovdb_vec3_uniform(tmin)),
PNANOVDB_DEREF(origin)
);
return pos;
}
PNANOVDB_FORCE_INLINE void pnanovdb_hdda_init(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda, PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction, float tmax, int dim)
{
PNANOVDB_DEREF(hdda).dim = dim;
PNANOVDB_DEREF(hdda).tmin = tmin;
PNANOVDB_DEREF(hdda).tmax = tmax;
pnanovdb_vec3_t pos = pnanovdb_hdda_ray_start(origin, tmin, direction);
pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction));
PNANOVDB_DEREF(hdda).voxel = pnanovdb_hdda_pos_to_voxel(PNANOVDB_REF(pos), dim);
// x
if (PNANOVDB_DEREF(direction).x == 0.f)
{
PNANOVDB_DEREF(hdda).next.x = PNANOVDB_HDDA_FLOAT_MAX;
PNANOVDB_DEREF(hdda).step.x = 0;
PNANOVDB_DEREF(hdda).delta.x = 0.f;
}
else if (dir_inv.x > 0.f)
{
PNANOVDB_DEREF(hdda).step.x = 1;
PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x + dim - pos.x) * dir_inv.x;
PNANOVDB_DEREF(hdda).delta.x = dir_inv.x;
}
else
{
PNANOVDB_DEREF(hdda).step.x = -1;
PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x - pos.x) * dir_inv.x;
PNANOVDB_DEREF(hdda).delta.x = -dir_inv.x;
}
// y
if (PNANOVDB_DEREF(direction).y == 0.f)
{
PNANOVDB_DEREF(hdda).next.y = PNANOVDB_HDDA_FLOAT_MAX;
PNANOVDB_DEREF(hdda).step.y = 0;
PNANOVDB_DEREF(hdda).delta.y = 0.f;
}
else if (dir_inv.y > 0.f)
{
PNANOVDB_DEREF(hdda).step.y = 1;
PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y + dim - pos.y) * dir_inv.y;
PNANOVDB_DEREF(hdda).delta.y = dir_inv.y;
}
else
{
PNANOVDB_DEREF(hdda).step.y = -1;
PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y - pos.y) * dir_inv.y;
PNANOVDB_DEREF(hdda).delta.y = -dir_inv.y;
}
// z
if (PNANOVDB_DEREF(direction).z == 0.f)
{
PNANOVDB_DEREF(hdda).next.z = PNANOVDB_HDDA_FLOAT_MAX;
PNANOVDB_DEREF(hdda).step.z = 0;
PNANOVDB_DEREF(hdda).delta.z = 0.f;
}
else if (dir_inv.z > 0.f)
{
PNANOVDB_DEREF(hdda).step.z = 1;
PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z + dim - pos.z) * dir_inv.z;
PNANOVDB_DEREF(hdda).delta.z = dir_inv.z;
}
else
{
PNANOVDB_DEREF(hdda).step.z = -1;
PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z - pos.z) * dir_inv.z;
PNANOVDB_DEREF(hdda).delta.z = -dir_inv.z;
}
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_update(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda, PNANOVDB_IN(pnanovdb_vec3_t) origin, PNANOVDB_IN(pnanovdb_vec3_t) direction, int dim)
{
if (PNANOVDB_DEREF(hdda).dim == dim)
{
return PNANOVDB_FALSE;
}
PNANOVDB_DEREF(hdda).dim = dim;
pnanovdb_vec3_t pos = pnanovdb_vec3_add(
pnanovdb_vec3_mul(PNANOVDB_DEREF(direction), pnanovdb_vec3_uniform(PNANOVDB_DEREF(hdda).tmin)),
PNANOVDB_DEREF(origin)
);
pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction));
PNANOVDB_DEREF(hdda).voxel = pnanovdb_hdda_pos_to_voxel(PNANOVDB_REF(pos), dim);
if (PNANOVDB_DEREF(hdda).step.x != 0)
{
PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x - pos.x) * dir_inv.x;
if (PNANOVDB_DEREF(hdda).step.x > 0)
{
PNANOVDB_DEREF(hdda).next.x += dim * dir_inv.x;
}
}
if (PNANOVDB_DEREF(hdda).step.y != 0)
{
PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y - pos.y) * dir_inv.y;
if (PNANOVDB_DEREF(hdda).step.y > 0)
{
PNANOVDB_DEREF(hdda).next.y += dim * dir_inv.y;
}
}
if (PNANOVDB_DEREF(hdda).step.z != 0)
{
PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z - pos.z) * dir_inv.z;
if (PNANOVDB_DEREF(hdda).step.z > 0)
{
PNANOVDB_DEREF(hdda).next.z += dim * dir_inv.z;
}
}
return PNANOVDB_TRUE;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_step(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda)
{
pnanovdb_bool_t ret;
if (PNANOVDB_DEREF(hdda).next.x < PNANOVDB_DEREF(hdda).next.y && PNANOVDB_DEREF(hdda).next.x < PNANOVDB_DEREF(hdda).next.z)
{
#ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING
if (PNANOVDB_DEREF(hdda).next.x <= PNANOVDB_DEREF(hdda).tmin)
{
PNANOVDB_DEREF(hdda).next.x += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.x + 1.0e-6f;
}
#endif
PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.x;
PNANOVDB_DEREF(hdda).next.x += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.x;
PNANOVDB_DEREF(hdda).voxel.x += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.x;
ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax;
}
else if (PNANOVDB_DEREF(hdda).next.y < PNANOVDB_DEREF(hdda).next.z)
{
#ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING
if (PNANOVDB_DEREF(hdda).next.y <= PNANOVDB_DEREF(hdda).tmin)
{
PNANOVDB_DEREF(hdda).next.y += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.y + 1.0e-6f;
}
#endif
PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.y;
PNANOVDB_DEREF(hdda).next.y += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.y;
PNANOVDB_DEREF(hdda).voxel.y += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.y;
ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax;
}
else
{
#ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING
if (PNANOVDB_DEREF(hdda).next.z <= PNANOVDB_DEREF(hdda).tmin)
{
PNANOVDB_DEREF(hdda).next.z += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.z + 1.0e-6f;
}
#endif
PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.z;
PNANOVDB_DEREF(hdda).next.z += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.z;
PNANOVDB_DEREF(hdda).voxel.z += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.z;
ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax;
}
return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_ray_clip(
PNANOVDB_IN(pnanovdb_vec3_t) bbox_min,
PNANOVDB_IN(pnanovdb_vec3_t) bbox_max,
PNANOVDB_IN(pnanovdb_vec3_t) origin, PNANOVDB_INOUT(float) tmin,
PNANOVDB_IN(pnanovdb_vec3_t) direction, PNANOVDB_INOUT(float) tmax
)
{
pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction));
pnanovdb_vec3_t t0 = pnanovdb_vec3_mul(pnanovdb_vec3_sub(PNANOVDB_DEREF(bbox_min), PNANOVDB_DEREF(origin)), dir_inv);
pnanovdb_vec3_t t1 = pnanovdb_vec3_mul(pnanovdb_vec3_sub(PNANOVDB_DEREF(bbox_max), PNANOVDB_DEREF(origin)), dir_inv);
pnanovdb_vec3_t tmin3 = pnanovdb_vec3_min(t0, t1);
pnanovdb_vec3_t tmax3 = pnanovdb_vec3_max(t0, t1);
float tnear = pnanovdb_max(tmin3.x, pnanovdb_max(tmin3.y, tmin3.z));
float tfar = pnanovdb_min(tmax3.x, pnanovdb_min(tmax3.y, tmax3.z));
pnanovdb_bool_t hit = tnear <= tfar;
PNANOVDB_DEREF(tmin) = pnanovdb_max(PNANOVDB_DEREF(tmin), tnear);
PNANOVDB_DEREF(tmax) = pnanovdb_min(PNANOVDB_DEREF(tmax), tfar);
return hit;
}
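// Hypothetical usage sketch (not part of the original header): clip a world
// ray against a box, then walk it with the HDDA at a fixed dim of 1, counting
// the voxel boundaries crossed. All pnanovdb_* calls below are defined above;
// the function itself is illustrative only.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_hdda_example_count_voxels(
    PNANOVDB_IN(pnanovdb_vec3_t) bbox_min,
    PNANOVDB_IN(pnanovdb_vec3_t) bbox_max,
    PNANOVDB_IN(pnanovdb_vec3_t) origin,
    PNANOVDB_IN(pnanovdb_vec3_t) direction
)
{
    float tmin = 0.f;
    float tmax = PNANOVDB_HDDA_FLOAT_MAX;
    pnanovdb_uint32_t count = 0u;
    // shrink [tmin, tmax] to the box; early out on a miss
    if (!pnanovdb_hdda_ray_clip(bbox_min, bbox_max, origin, PNANOVDB_REF(tmin), direction, PNANOVDB_REF(tmax)))
    {
        return count;
    }
    pnanovdb_hdda_t hdda;
    pnanovdb_hdda_init(PNANOVDB_REF(hdda), origin, tmin, direction, tmax, 1);
    // each step advances hdda.tmin to the next voxel boundary and updates hdda.voxel
    while (pnanovdb_hdda_step(PNANOVDB_REF(hdda)))
    {
        count++;
    }
    return count;
}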
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_zero_crossing(
pnanovdb_grid_type_t grid_type,
pnanovdb_buf_t buf,
PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc,
PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin,
PNANOVDB_IN(pnanovdb_vec3_t) direction, float tmax,
PNANOVDB_INOUT(float) thit,
PNANOVDB_INOUT(float) v
)
{
pnanovdb_coord_t bbox_min = pnanovdb_root_get_bbox_min(buf, PNANOVDB_DEREF(acc).root);
pnanovdb_coord_t bbox_max = pnanovdb_root_get_bbox_max(buf, PNANOVDB_DEREF(acc).root);
pnanovdb_vec3_t bbox_minf = pnanovdb_coord_to_vec3(bbox_min);
pnanovdb_vec3_t bbox_maxf = pnanovdb_coord_to_vec3(pnanovdb_coord_add(bbox_max, pnanovdb_coord_uniform(1)));
pnanovdb_bool_t hit = pnanovdb_hdda_ray_clip(PNANOVDB_REF(bbox_minf), PNANOVDB_REF(bbox_maxf), origin, PNANOVDB_REF(tmin), direction, PNANOVDB_REF(tmax));
if (!hit || tmax > 1.0e20f)
{
return PNANOVDB_FALSE;
}
pnanovdb_vec3_t pos = pnanovdb_hdda_ray_start(origin, tmin, direction);
pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos));
pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk));
float v0 = pnanovdb_read_float(buf, address);
pnanovdb_int32_t dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)));
pnanovdb_hdda_t hdda;
pnanovdb_hdda_init(PNANOVDB_REF(hdda), origin, tmin, direction, tmax, dim);
while (pnanovdb_hdda_step(PNANOVDB_REF(hdda)))
{
pnanovdb_vec3_t pos_start = pnanovdb_hdda_ray_start(origin, hdda.tmin + 1.0001f, direction);
ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos_start));
dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)));
pnanovdb_hdda_update(PNANOVDB_REF(hdda), origin, direction, dim);
if (hdda.dim > 1 || !pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(ijk)))
{
continue;
}
while (pnanovdb_hdda_step(PNANOVDB_REF(hdda)) && pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(hdda.voxel)))
{
ijk = hdda.voxel;
pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk));
PNANOVDB_DEREF(v) = pnanovdb_read_float(buf, address);
if (PNANOVDB_DEREF(v) * v0 < 0.f)
{
PNANOVDB_DEREF(thit) = hdda.tmin;
return PNANOVDB_TRUE;
}
}
}
return PNANOVDB_FALSE;
}
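// Hypothetical usage sketch (not part of the original header): intersecting a
// ray with the zero level set of a float grid. The read accessor is assumed to
// have been initialized against the grid's root (pnanovdb_readaccessor_init);
// on a hit, t_hit holds the ray parameter at the crossing.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_example_raycast_levelset(
    pnanovdb_buf_t buf,
    PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc,
    PNANOVDB_IN(pnanovdb_vec3_t) origin,
    PNANOVDB_IN(pnanovdb_vec3_t) direction,
    PNANOVDB_INOUT(float) t_hit
)
{
    float v_hit = 0.f;
    // tmin/tmax bracket the search; zero_crossing further clips them to the root bbox
    return pnanovdb_hdda_zero_crossing(
        PNANOVDB_GRID_TYPE_FLOAT, buf, acc,
        origin, 0.f,
        direction, PNANOVDB_HDDA_FLOAT_MAX,
        t_hit, PNANOVDB_REF(v_hit));
}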
#endif
#endif // end of NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED
| 114,655 | C | 44.807431 | 297 | 0.726798 |
NVIDIA-Omniverse/PhysX/flow/include/nvflowext/NvFlowLoader.h | #pragma once
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_LOADER_H
#define NV_FLOW_LOADER_H
#if defined(_WIN32)
#include <Windows.h>
static void* NvFlowLoadLibrary(const char* winName, const char* linuxName)
{
return (void*)LoadLibraryA(winName);
}
static void* NvFlowGetProcAddress(void* module, const char* name)
{
return GetProcAddress((HMODULE)module, name);
}
static void NvFlowFreeLibrary(void* module)
{
FreeLibrary((HMODULE)module);
}
static const char* NvFlowLoadLibraryError()
{
DWORD lastError = GetLastError();
static char buf[1024];
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, lastError, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, sizeof(buf), NULL);
return buf;
}
#else
#include <dlfcn.h>
static void* NvFlowLoadLibrary(const char* winName, const char* linuxName)
{
void* module = dlopen(linuxName, RTLD_NOW);
//if (!module)
//{
// fprintf(stderr, "Module %s failed to load : %s\n", linuxName, dlerror());
//}
return module;
}
static void* NvFlowGetProcAddress(void* module, const char* name)
{
return dlsym(module, name);
}
static void NvFlowFreeLibrary(void* module)
{
dlclose(module);
}
static const char* NvFlowLoadLibraryError()
{
return dlerror();
}
#endif
#include "NvFlowExt.h"
struct NvFlowLoader
{
void* module_nvflow;
void* module_nvflowext;
NvFlowOpList opList;
NvFlowExtOpList extOpList;
NvFlowGridInterface gridInterface;
NvFlowGridParamsInterface gridParamsInterface;
NvFlowContextOptInterface contextOptInterface;
NvFlowDeviceInterface deviceInterface;
};
static void NvFlowLoaderInitDeviceAPI(NvFlowLoader* ptr, void(*printError)(const char* str, void* userdata), void* userdata, NvFlowContextApi deviceAPI)
{
NvFlowReflectClear(ptr, sizeof(NvFlowLoader));
/// Load nvflow and nvflowext
ptr->module_nvflow = NvFlowLoadLibrary("nvflow.dll", "libnvflow.so");
if (ptr->module_nvflow)
{
PFN_NvFlowGetOpList getOpList = (PFN_NvFlowGetOpList)NvFlowGetProcAddress(ptr->module_nvflow, "NvFlowGetOpList");
if (getOpList) { NvFlowOpList_duplicate(&ptr->opList, getOpList()); }
}
else if (printError)
{
printError(NvFlowLoadLibraryError(), userdata);
}
ptr->module_nvflowext = NvFlowLoadLibrary("nvflowext.dll", "libnvflowext.so");
if (ptr->module_nvflowext)
{
PFN_NvFlowGetExtOpList getExtOpList = (PFN_NvFlowGetExtOpList)NvFlowGetProcAddress(ptr->module_nvflowext, "NvFlowGetExtOpList");
PFN_NvFlowGetGridInterface getGridInterface = (PFN_NvFlowGetGridInterface)NvFlowGetProcAddress(ptr->module_nvflowext, "NvFlowGetGridInterface");
PFN_NvFlowGetGridParamsInterface getGridParamsInterface = (PFN_NvFlowGetGridParamsInterface)NvFlowGetProcAddress(ptr->module_nvflowext, "NvFlowGetGridParamsInterface");
PFN_NvFlowGetContextOptInterface getContextOptInterface = (PFN_NvFlowGetContextOptInterface)NvFlowGetProcAddress(ptr->module_nvflowext, "NvFlowGetContextOptInterface");
PFN_NvFlowGetDeviceInterface getDeviceInterface = (PFN_NvFlowGetDeviceInterface)NvFlowGetProcAddress(ptr->module_nvflowext, "NvFlowGetDeviceInterface");
if (getExtOpList) { NvFlowExtOpList_duplicate(&ptr->extOpList, getExtOpList()); }
if (getGridInterface) { NvFlowGridInterface_duplicate(&ptr->gridInterface, getGridInterface()); }
if (getGridParamsInterface) { NvFlowGridParamsInterface_duplicate(&ptr->gridParamsInterface, getGridParamsInterface()); }
if (getContextOptInterface) { NvFlowContextOptInterface_duplicate(&ptr->contextOptInterface, getContextOptInterface()); }
if (getDeviceInterface) { NvFlowDeviceInterface_duplicate(&ptr->deviceInterface, getDeviceInterface(deviceAPI)); }
}
else if (printError)
{
printError(NvFlowLoadLibraryError(), userdata);
}
}
static void NvFlowLoaderInit(NvFlowLoader* ptr, void(*printError)(const char* str, void* userdata), void* userdata)
{
NvFlowLoaderInitDeviceAPI(ptr, printError, userdata, eNvFlowContextApi_vulkan);
}
static void NvFlowLoaderDestroy(NvFlowLoader* ptr)
{
NvFlowFreeLibrary(ptr->module_nvflow);
NvFlowFreeLibrary(ptr->module_nvflowext);
}
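// Hypothetical usage sketch (not part of the original header): load both Flow
// modules, verify they resolved, and unload them when done. The names
// printErrorExample and exampleLoadFlow are illustrative only.
static void printErrorExample(const char* str, void* userdata)
{
    (void)str;
    (void)userdata;
    // forward the loader error string to whatever logging facility is available
}
static int exampleLoadFlow(void)
{
    NvFlowLoader loader;
    NvFlowLoaderInit(&loader, printErrorExample, 0);
    if (!loader.module_nvflow || !loader.module_nvflowext)
    {
        // free whichever module did load before reporting failure
        if (loader.module_nvflow) { NvFlowFreeLibrary(loader.module_nvflow); }
        if (loader.module_nvflowext) { NvFlowFreeLibrary(loader.module_nvflowext); }
        return 1;
    }
    // loader.opList, loader.gridInterface, etc. are now populated
    NvFlowLoaderDestroy(&loader);
    return 0;
}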
#endif | 5,564 | C | 38.190141 | 170 | 0.780194 |
NVIDIA-Omniverse/PhysX/flow/include/nvflowext/NvFlowExt.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_EXT_H
#define NV_FLOW_EXT_H
#include "NvFlowContext.h"
#include "NvFlow.h"
/// ********************************* EmitterSphere ***************************************
typedef struct NvFlowEmitterSphereParams
{
NvFlowUint64 luid;
NvFlowBool32 enabled;
NvFlowFloat4x4 localToWorld;
NvFlowFloat4x4 localToWorldVelocity;
NvFlowBool32 velocityIsWorldSpace;
NvFlowFloat3 position;
int layer;
float radius;
NvFlowBool32 radiusIsWorldSpace;
float allocationScale;
NvFlowFloat3 velocity;
float divergence;
float temperature;
float fuel;
float burn;
float smoke;
float coupleRateVelocity;
float coupleRateDivergence;
float coupleRateTemperature;
float coupleRateFuel;
float coupleRateBurn;
float coupleRateSmoke;
NvFlowUint numSubSteps;
float physicsVelocityScale;
NvFlowBool32 applyPostPressure;
NvFlowBool32 multisample;
}NvFlowEmitterSphereParams;
#define NvFlowEmitterSphereParams_default_init { \
0llu, /*luid*/ \
NV_FLOW_TRUE, /*enabled*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorld*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorldVelocity*/ \
NV_FLOW_FALSE, /*velocityIsWorldSpace*/ \
{0.f, 0.f, 0.f}, /*position*/ \
0, /*layer*/ \
10.f, /*radius*/ \
NV_FLOW_TRUE, /*radiusIsWorldSpace*/ \
1.f, /*allocationScale*/ \
{0.f, 0.f, 400.f}, /*velocity*/ \
0.f, /*divergence*/ \
0.5f, /*temperature*/ \
0.8f, /*fuel*/ \
0.f, /*burn*/ \
0.f, /*smoke*/ \
2.f, /*coupleRateVelocity*/ \
0.f, /*coupleRateDivergence*/ \
2.f, /*coupleRateTemperature*/ \
2.f, /*coupleRateFuel*/ \
0.f, /*coupleRateBurn*/ \
0.f, /*coupleRateSmoke*/ \
1u, /*numSubSteps*/ \
0.f, /*physicsVelocityScale*/ \
NV_FLOW_FALSE, /*applyPostPressure*/ \
NV_FLOW_FALSE, /*multisample*/ \
}
static const NvFlowEmitterSphereParams NvFlowEmitterSphereParams_default = NvFlowEmitterSphereParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterSphereParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorld, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorldVelocity, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, velocityIsWorldSpace, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, position, 0, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(float, radius, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, radiusIsWorldSpace, 0, 0)
NV_FLOW_REFLECT_VALUE(float, allocationScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, velocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smoke, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateVelocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateDivergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateTemperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateFuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateSmoke, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, numSubSteps, 0, 0)
NV_FLOW_REFLECT_VALUE(float, physicsVelocityScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, applyPostPressure, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, multisample, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowEmitterSphereParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowEmitterSpherePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowEmitterSphereParams*const* velocityParams;
NvFlowUint64 velocityParamCount;
const NvFlowEmitterSphereParams*const* densityParams;
NvFlowUint64 densityParamCount;
NvFlowSparseTexture value;
NvFlowSparseTexture valueTemp;
NvFlowBool32 isPostPressure;
}NvFlowEmitterSpherePinsIn;
typedef struct NvFlowEmitterSpherePinsOut
{
NvFlowSparseTexture value;
}NvFlowEmitterSpherePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterSpherePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterSphereParams, velocityParams, velocityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterSphereParams, densityParams, densityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, valueTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, isPostPressure, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterSpherePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterSphere)
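// Hypothetical usage sketch (not part of the original header): configure a
// sphere emitter by copying the defaults defined above and overriding a few
// fields. exampleMakeSphereEmitter is an illustrative name only.
static NvFlowEmitterSphereParams exampleMakeSphereEmitter(void)
{
    NvFlowEmitterSphereParams params = NvFlowEmitterSphereParams_default;
    params.position.y = 50.f;     /* world-space offset */
    params.radius = 5.f;          /* world-space, since radiusIsWorldSpace defaults to true */
    params.velocity.z = 200.f;    /* slower initial jet than the 400.f default */
    params.coupleRateSmoke = 2.f; /* also couple smoke, which defaults to 0.f */
    return params;
}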
/// ********************************* EmitterSphereAllocate ***************************************
typedef struct NvFlowEmitterSphereAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
float deltaTime;
const NvFlowEmitterSphereParams*const* params;
NvFlowUint64 paramCount;
}NvFlowEmitterSphereAllocatePinsIn;
typedef struct NvFlowEmitterSphereAllocatePinsOut
{
NvFlowInt4* locations;
NvFlowUint64 locationCount;
}NvFlowEmitterSphereAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterSphereAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterSphereParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterSphereAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterSphereAllocate)
/// ********************************* EmitterBox ***************************************
typedef struct NvFlowEmitterBoxParams
{
NvFlowUint64 luid;
NvFlowBool32 enabled;
NvFlowFloat4x4 localToWorld;
NvFlowFloat4x4 localToWorldVelocity;
NvFlowBool32 velocityIsWorldSpace;
NvFlowFloat3 position;
int layer;
NvFlowFloat3 halfSize;
float allocationScale;
NvFlowFloat3 velocity;
float divergence;
float temperature;
float fuel;
float burn;
float smoke;
float coupleRateVelocity;
float coupleRateDivergence;
float coupleRateTemperature;
float coupleRateFuel;
float coupleRateBurn;
float coupleRateSmoke;
float physicsVelocityScale;
NvFlowBool32 applyPostPressure;
NvFlowBool32 multisample;
NvFlowFloat4* clippingPlanes;
NvFlowUint64 clippingPlaneCount;
NvFlowUint* clippingPlaneCounts;
NvFlowUint64 clippingPlaneCountCount;
}NvFlowEmitterBoxParams;
#define NvFlowEmitterBoxParams_default_init { \
0llu, /*luid*/ \
NV_FLOW_TRUE, /*enabled*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorld*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorldVelocity*/ \
NV_FLOW_FALSE, /*velocityIsWorldSpace*/ \
{0.f, 0.f, 0.f}, /*position*/ \
0, /*layer*/ \
{10.f, 10.f, 10.f}, /*halfSize*/ \
1.f, /*allocationScale*/ \
{0.f, 0.f, 400.f}, /*velocity*/ \
0.f, /*divergence*/ \
0.5f, /*temperature*/ \
0.8f, /*fuel*/ \
0.f, /*burn*/ \
0.f, /*smoke*/ \
2.f, /*coupleRateVelocity*/ \
0.f, /*coupleRateDivergence*/ \
2.f, /*coupleRateTemperature*/ \
2.f, /*coupleRateFuel*/ \
0.f, /*coupleRateBurn*/ \
0.f, /*coupleRateSmoke*/ \
0.f, /*physicsVelocityScale*/ \
NV_FLOW_FALSE, /*applyPostPressure*/ \
NV_FLOW_FALSE, /*multisample*/ \
0, /*clippingPlanes*/ \
0, /*clippingPlaneCount*/ \
0, /*clippingPlaneCounts*/ \
0 /*clippingPlaneCountCount*/ \
}
static const NvFlowEmitterBoxParams NvFlowEmitterBoxParams_default = NvFlowEmitterBoxParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterBoxParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorld, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorldVelocity, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, velocityIsWorldSpace, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, position, 0, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, halfSize, 0, 0)
NV_FLOW_REFLECT_VALUE(float, allocationScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, velocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smoke, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateVelocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateDivergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateTemperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateFuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateSmoke, 0, 0)
NV_FLOW_REFLECT_VALUE(float, physicsVelocityScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, applyPostPressure, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, multisample, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowFloat4, clippingPlanes, clippingPlaneCount, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowUint, clippingPlaneCounts, clippingPlaneCountCount, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowEmitterBoxParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowEmitterBoxPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowEmitterBoxParams*const* velocityParams;
NvFlowUint64 velocityParamCount;
const NvFlowEmitterBoxParams*const* densityParams;
NvFlowUint64 densityParamCount;
NvFlowSparseTexture value;
NvFlowSparseTexture valueTemp;
NvFlowBool32 isPostPressure;
}NvFlowEmitterBoxPinsIn;
typedef struct NvFlowEmitterBoxPinsOut
{
NvFlowSparseTexture value;
}NvFlowEmitterBoxPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterBoxPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterBoxParams, velocityParams, velocityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterBoxParams, densityParams, densityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, valueTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, isPostPressure, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterBoxPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterBox)
/// ********************************* EmitterBoxAllocate ***************************************
typedef struct NvFlowEmitterBoxAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
float deltaTime;
const NvFlowEmitterBoxParams*const* params;
NvFlowUint64 paramCount;
}NvFlowEmitterBoxAllocatePinsIn;
typedef struct NvFlowEmitterBoxAllocatePinsOut
{
NvFlowInt4* locations;
NvFlowUint64 locationCount;
}NvFlowEmitterBoxAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterBoxAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterBoxParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterBoxAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterBoxAllocate)
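// Hypothetical usage sketch (not part of the original header): attaching a
// single clipping volume of two planes to a box emitter. The interpretation of
// clippingPlaneCounts as planes-per-volume is an assumption for illustration;
// the caller owns the arrays and must keep them alive while the params are in use.
static NvFlowEmitterBoxParams exampleMakeClippedBoxEmitter(NvFlowFloat4* planes2, NvFlowUint* planeCounts1)
{
    NvFlowEmitterBoxParams params = NvFlowEmitterBoxParams_default;
    params.halfSize.x = 20.f;
    params.clippingPlanes = planes2;           /* e.g. two world-space plane equations */
    params.clippingPlaneCount = 2u;
    params.clippingPlaneCounts = planeCounts1; /* one volume containing both planes */
    params.clippingPlaneCountCount = 1u;
    return params;
}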
/// ********************************* EmitterPoint ***************************************
typedef struct NvFlowEmitterPointParams
{
NvFlowUint64 luid;
NvFlowBool32 enabled;
NvFlowFloat4x4 localToWorld;
NvFlowFloat4x4 localToWorldVelocity;
NvFlowBool32 velocityIsWorldSpace;
NvFlowUint numSubSteps;
int layer;
NvFlowBool32 allocateMask;
NvFlowFloat3 velocity;
float divergence;
float temperature;
float fuel;
float burn;
float smoke;
NvFlowBool32 colorIsSrgb;
float velocityScale;
float divergenceScale;
float temperatureScale;
float fuelScale;
float burnScale;
float smokeScale;
float coupleRateVelocity;
float coupleRateDivergence;
float coupleRateTemperature;
float coupleRateFuel;
float coupleRateBurn;
float coupleRateSmoke;
NvFlowFloat3* pointPositions;
NvFlowUint64 pointPositionCount;
NvFlowUint64 pointPositionVersion;
NvFlowBool32* pointAllocateMasks;
NvFlowUint64 pointAllocateMaskCount;
NvFlowUint64 pointAllocateMaskVersion;
NvFlowFloat3* pointVelocities;
NvFlowUint64 pointVelocityCount;
NvFlowUint64 pointVelocityVersion;
float* pointDivergences;
NvFlowUint64 pointDivergenceCount;
NvFlowUint64 pointDivergenceVersion;
NvFlowFloat3* pointColors;
NvFlowUint64 pointColorCount;
NvFlowUint64 pointColorVersion;
float* pointTemperatures;
NvFlowUint64 pointTemperatureCount;
NvFlowUint64 pointTemperatureVersion;
float* pointFuels;
NvFlowUint64 pointFuelCount;
NvFlowUint64 pointFuelVersion;
float* pointBurns;
NvFlowUint64 pointBurnCount;
NvFlowUint64 pointBurnVersion;
float* pointSmokes;
NvFlowUint64 pointSmokeCount;
NvFlowUint64 pointSmokeVersion;
float* pointCoupleRateVelocities;
NvFlowUint64 pointCoupleRateVelocityCount;
NvFlowUint64 pointCoupleRateVelocityVersion;
float* pointCoupleRateDivergences;
NvFlowUint64 pointCoupleRateDivergenceCount;
NvFlowUint64 pointCoupleRateDivergenceVersion;
float* pointCoupleRateTemperatures;
NvFlowUint64 pointCoupleRateTemperatureCount;
NvFlowUint64 pointCoupleRateTemperatureVersion;
float* pointCoupleRateFuels;
NvFlowUint64 pointCoupleRateFuelCount;
NvFlowUint64 pointCoupleRateFuelVersion;
float* pointCoupleRateBurns;
NvFlowUint64 pointCoupleRateBurnCount;
NvFlowUint64 pointCoupleRateBurnVersion;
float* pointCoupleRateSmokes;
NvFlowUint64 pointCoupleRateSmokeCount;
NvFlowUint64 pointCoupleRateSmokeVersion;
NvFlowBool32 applyPostPressure;
NvFlowBool32 enableStreaming;
NvFlowBool32 streamOnce;
NvFlowBool32 streamClearAtStart;
NvFlowUint streamingBatchSize;
NvFlowBool32 updateCoarseDensity;
}NvFlowEmitterPointParams;
#define NvFlowEmitterPointParams_default_init { \
0llu, /*luid*/ \
NV_FLOW_TRUE, /*enabled*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorld*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorldVelocity*/ \
NV_FLOW_FALSE, /*velocityIsWorldSpace*/ \
1u, /*numSubSteps*/ \
0, /*layer*/ \
NV_FLOW_TRUE, /*allocateMask*/ \
{0.f, 0.f, 100.f}, /*velocity*/ \
0.f, /*divergence*/ \
2.f, /*temperature*/ \
2.f, /*fuel*/ \
0.f, /*burn*/ \
2.f, /*smoke*/ \
NV_FLOW_FALSE, /*colorIsSrgb*/ \
1.f, /*velocityScale*/ \
1.f, /*divergenceScale*/ \
1.f, /*temperatureScale*/ \
1.f, /*fuelScale*/ \
1.f, /*burnScale*/ \
1.f, /*smokeScale*/ \
200.f, /*coupleRateVelocity*/ \
0.f, /*coupleRateDivergence*/ \
200.f, /*coupleRateTemperature*/ \
200.f, /*coupleRateFuel*/ \
0.f, /*coupleRateBurn*/ \
200.f, /*coupleRateSmoke*/ \
0, /*pointPositions*/ \
0, /*pointPositionCount*/ \
0, /*pointPositionVersion*/ \
0, /*pointAllocateMasks*/ \
0, /*pointAllocateMaskCount*/ \
0, /*pointAllocateMaskVersion*/ \
0, /*pointVelocities*/ \
0, /*pointVelocityCount*/ \
0, /*pointVelocityVersion*/ \
0, /*pointDivergences*/ \
0, /*pointDivergenceCount*/ \
0, /*pointDivergenceVersion*/ \
0, /*pointColors*/ \
0, /*pointColorCount*/ \
0, /*pointColorVersion*/ \
0, /*pointTemperatures*/ \
0, /*pointTemperatureCount*/ \
0, /*pointTemperatureVersion*/ \
0, /*pointFuels*/ \
0, /*pointFuelCount*/ \
0, /*pointFuelVersion*/ \
0, /*pointBurns*/ \
0, /*pointBurnCount*/ \
0, /*pointBurnVersion*/ \
0, /*pointSmokes*/ \
0, /*pointSmokeCount*/ \
0, /*pointSmokeVersion*/ \
0, /*pointCoupleRateVelocities*/ \
0, /*pointCoupleRateVelocityCount*/ \
0, /*pointCoupleRateVelocityVersion*/ \
0, /*pointCoupleRateDivergences*/ \
0, /*pointCoupleRateDivergenceCount*/ \
0, /*pointCoupleRateDivergenceVersion*/ \
0, /*pointCoupleRateTemperatures*/ \
0, /*pointCoupleRateTemperatureCount*/ \
0, /*pointCoupleRateTemperatureVersion*/ \
0, /*pointCoupleRateFuels*/ \
0, /*pointCoupleRateFuelCount*/ \
0, /*pointCoupleRateFuelVersion*/ \
0, /*pointCoupleRateBurns*/ \
0, /*pointCoupleRateBurnCount*/ \
0, /*pointCoupleRateBurnVersion*/ \
0, /*pointCoupleRateSmokes*/ \
0, /*pointCoupleRateSmokeCount*/ \
0, /*pointCoupleRateSmokeVersion*/ \
NV_FLOW_FALSE, /*applyPostPressure*/ \
NV_FLOW_FALSE, /*enableStreaming*/ \
NV_FLOW_FALSE, /*streamOnce*/ \
NV_FLOW_FALSE, /*streamClearAtStart*/ \
1048576, /*streamingBatchSize*/ \
NV_FLOW_FALSE, /*updateCoarseDensity*/ \
}
static const NvFlowEmitterPointParams NvFlowEmitterPointParams_default = NvFlowEmitterPointParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterPointParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorld, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorldVelocity, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, velocityIsWorldSpace, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, numSubSteps, 0, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, allocateMask, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, velocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smoke, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, colorIsSrgb, 0, 0)
NV_FLOW_REFLECT_VALUE(float, velocityScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergenceScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperatureScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuelScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burnScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smokeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateVelocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateDivergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateTemperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateFuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateSmoke, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowFloat3, pointPositions, pointPositionCount, pointPositionVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowBool32, pointAllocateMasks, pointAllocateMaskCount, pointAllocateMaskVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowFloat3, pointVelocities, pointVelocityCount, pointVelocityVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointDivergences, pointDivergenceCount, pointDivergenceVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowFloat3, pointColors, pointColorCount, pointColorVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointTemperatures, pointTemperatureCount, pointTemperatureVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointFuels, pointFuelCount, pointFuelVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointBurns, pointBurnCount, pointBurnVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointSmokes, pointSmokeCount, pointSmokeVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointCoupleRateVelocities, pointCoupleRateVelocityCount, pointCoupleRateVelocityVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointCoupleRateDivergences, pointCoupleRateDivergenceCount, pointCoupleRateDivergenceVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointCoupleRateTemperatures, pointCoupleRateTemperatureCount, pointCoupleRateTemperatureVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointCoupleRateFuels, pointCoupleRateFuelCount, pointCoupleRateFuelVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointCoupleRateBurns, pointCoupleRateBurnCount, pointCoupleRateBurnVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, pointCoupleRateSmokes, pointCoupleRateSmokeCount, pointCoupleRateSmokeVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_VALUE(NvFlowBool32, applyPostPressure, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableStreaming, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, streamOnce, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, streamClearAtStart, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, streamingBatchSize, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, updateCoarseDensity, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowEmitterPointParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowEmitterPointFeedback
{
void* data;
}NvFlowEmitterPointFeedback;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowEmitterPointFeedback)
typedef struct NvFlowEmitterPointPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowEmitterPointParams*const* velocityParams;
NvFlowUint64 velocityParamCount;
const NvFlowEmitterPointParams*const* densityParams;
NvFlowUint64 densityParamCount;
NvFlowSparseTexture value;
NvFlowSparseTexture valueTemp;
NvFlowSparseTexture coarseDensity;
NvFlowBool32 isPostPressure;
NvFlowEmitterPointFeedback feedback;
}NvFlowEmitterPointPinsIn;
typedef struct NvFlowEmitterPointPinsOut
{
NvFlowSparseTexture value;
}NvFlowEmitterPointPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterPointPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterPointParams, velocityParams, velocityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterPointParams, densityParams, densityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, valueTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, coarseDensity, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, isPostPressure, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowEmitterPointFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterPointPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterPoint)
/// ********************************* EmitterPointAllocate ***************************************
typedef struct NvFlowEmitterPointAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
float deltaTime;
const NvFlowEmitterPointParams*const* params;
NvFlowUint64 paramCount;
NvFlowUint3 baseBlockDimBits;
}NvFlowEmitterPointAllocatePinsIn;
typedef struct NvFlowEmitterPointAllocatePinsOut
{
NvFlowInt4* locations;
NvFlowUint64 locationCount;
NvFlowEmitterPointFeedback feedback;
int* clearLayers;
NvFlowUint64 clearLayerCount;
}NvFlowEmitterPointAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterPointAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterPointParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint3, baseBlockDimBits, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterPointAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowEmitterPointFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_ARRAY(int, clearLayers, clearLayerCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterPointAllocate)
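// Hypothetical usage sketch (not part of the original header): feeding point
// positions to a point emitter. Each versioned array is paired with a version
// value; bumping the version whenever the contents change is the contract the
// *Version fields above appear to imply, assumed here for illustration.
static void exampleUpdatePointPositions(
    NvFlowEmitterPointParams* params,
    NvFlowFloat3* positions,
    NvFlowUint64 positionCount,
    NvFlowUint64* versionCounter)
{
    params->pointPositions = positions;
    params->pointPositionCount = positionCount;
    /* signal that the contents changed so consumers can re-upload */
    (*versionCounter)++;
    params->pointPositionVersion = *versionCounter;
}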
/// ********************************* EmitterMesh ***************************************
typedef struct NvFlowEmitterMeshParams
{
NvFlowUint64 luid;
NvFlowBool32 enabled;
NvFlowFloat4x4 localToWorld;
NvFlowFloat4x4 localToWorldVelocity;
NvFlowBool32 velocityIsWorldSpace;
NvFlowUint numSubSteps;
int layer;
float minDistance;
float maxDistance;
NvFlowBool32 allocateMask;
NvFlowFloat3 velocity;
float divergence;
float temperature;
float fuel;
float burn;
float smoke;
NvFlowBool32 orientationLeftHanded;
NvFlowBool32 colorIsSrgb;
float velocityScale;
float divergenceScale;
float temperatureScale;
float fuelScale;
float burnScale;
float smokeScale;
float coupleRateVelocity;
float coupleRateDivergence;
float coupleRateTemperature;
float coupleRateFuel;
float coupleRateBurn;
float coupleRateSmoke;
int* meshSubsetFaceCounts;
NvFlowUint64 meshSubsetFaceCountCount;
NvFlowUint64 meshSubsetFaceCountVersion;
int* meshSubsetLayers;
NvFlowUint64 meshSubsetLayerCount;
NvFlowUint64 meshSubsetLayerVersion;
NvFlowBool32* meshSubsetEnableds;
NvFlowUint64 meshSubsetEnabledCount;
NvFlowUint64 meshSubsetEnabledVersion;
NvFlowFloat3* meshPositions;
NvFlowUint64 meshPositionCount;
NvFlowUint64 meshPositionVersion;
int* meshFaceVertexIndices;
NvFlowUint64 meshFaceVertexIndexCount;
NvFlowUint64 meshFaceVertexIndexVersion;
int* meshFaceVertexCounts;
NvFlowUint64 meshFaceVertexCountCount;
NvFlowUint64 meshFaceVertexCountVersion;
NvFlowFloat3* meshVelocities;
NvFlowUint64 meshVelocityCount;
NvFlowUint64 meshVelocityVersion;
float* meshDivergences;
NvFlowUint64 meshDivergenceCount;
NvFlowUint64 meshDivergenceVersion;
NvFlowFloat3* meshColors;
NvFlowUint64 meshColorCount;
NvFlowUint64 meshColorVersion;
float* meshTemperatures;
NvFlowUint64 meshTemperatureCount;
NvFlowUint64 meshTemperatureVersion;
float* meshFuels;
NvFlowUint64 meshFuelCount;
NvFlowUint64 meshFuelVersion;
float* meshBurns;
NvFlowUint64 meshBurnCount;
NvFlowUint64 meshBurnVersion;
float* meshSmokes;
NvFlowUint64 meshSmokeCount;
NvFlowUint64 meshSmokeVersion;
float* meshCoupleRateVelocities;
NvFlowUint64 meshCoupleRateVelocityCount;
NvFlowUint64 meshCoupleRateVelocityVersion;
float* meshCoupleRateDivergences;
NvFlowUint64 meshCoupleRateDivergenceCount;
NvFlowUint64 meshCoupleRateDivergenceVersion;
float* meshCoupleRateTemperatures;
NvFlowUint64 meshCoupleRateTemperatureCount;
NvFlowUint64 meshCoupleRateTemperatureVersion;
float* meshCoupleRateFuels;
NvFlowUint64 meshCoupleRateFuelCount;
NvFlowUint64 meshCoupleRateFuelVersion;
float* meshCoupleRateBurns;
NvFlowUint64 meshCoupleRateBurnCount;
NvFlowUint64 meshCoupleRateBurnVersion;
float* meshCoupleRateSmokes;
NvFlowUint64 meshCoupleRateSmokeCount;
NvFlowUint64 meshCoupleRateSmokeVersion;
float physicsVelocityScale;
NvFlowBool32 applyPostPressure;
}NvFlowEmitterMeshParams;
#define NvFlowEmitterMeshParams_default_init { \
0llu, /*luid*/ \
NV_FLOW_TRUE, /*enabled*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorld*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorldVelocity*/ \
NV_FLOW_FALSE, /*velocityIsWorldSpace*/ \
1u, /*numSubSteps*/ \
0, /*layer*/ \
-0.8f, /*minDistance*/ \
0.3f, /*maxDistance*/ \
NV_FLOW_TRUE, /*allocateMask*/ \
{0.f, 0.f, 100.f}, /*velocity*/ \
0.f, /*divergence*/ \
2.f, /*temperature*/ \
0.8f, /*fuel*/ \
0.f, /*burn*/ \
2.f, /*smoke*/ \
NV_FLOW_FALSE, /*orientationLeftHanded*/ \
NV_FLOW_FALSE, /*colorIsSrgb*/ \
1.f, /*velocityScale*/ \
1.f, /*divergenceScale*/ \
1.f, /*temperatureScale*/ \
1.f, /*fuelScale*/ \
1.f, /*burnScale*/ \
1.f, /*smokeScale*/ \
2.f, /*coupleRateVelocity*/ \
0.f, /*coupleRateDivergence*/ \
10.f, /*coupleRateTemperature*/ \
2.f, /*coupleRateFuel*/ \
0.f, /*coupleRateBurn*/ \
2.f, /*coupleRateSmoke*/ \
0, /*meshSubsetFaceCounts*/ \
0, /*meshSubsetFaceCountCount*/ \
0, /*meshSubsetFaceCountVersion*/ \
0, /*meshSubsetLayers*/ \
0, /*meshSubsetLayerCount*/ \
0, /*meshSubsetLayerVersion*/ \
0, /*meshSubsetEnableds*/ \
0, /*meshSubsetEnabledCount*/ \
0, /*meshSubsetEnabledVersion*/ \
0, /*meshPositions*/ \
0, /*meshPositionCount*/ \
0, /*meshPositionVersion*/ \
0, /*meshFaceVertexIndices*/ \
0, /*meshFaceVertexIndexCount*/ \
0, /*meshFaceVertexIndexVersion*/ \
0, /*meshFaceVertexCounts*/ \
0, /*meshFaceVertexCountCount*/ \
0, /*meshFaceVertexCountVersion*/ \
0, /*meshVelocities*/ \
0, /*meshVelocityCount*/ \
0, /*meshVelocityVersion*/ \
0, /*meshDivergences*/ \
0, /*meshDivergenceCount*/ \
0, /*meshDivergenceVersion*/ \
0, /*meshColors*/ \
0, /*meshColorCount*/ \
0, /*meshColorVersion*/ \
0, /*meshTemperatures*/ \
0, /*meshTemperatureCount*/ \
0, /*meshTemperatureVersion*/ \
0, /*meshFuels*/ \
0, /*meshFuelCount*/ \
0, /*meshFuelVersion*/ \
0, /*meshBurns*/ \
0, /*meshBurnCount*/ \
0, /*meshBurnVersion*/ \
0, /*meshSmokes*/ \
0, /*meshSmokeCount*/ \
0, /*meshSmokeVersion*/ \
0, /*meshCoupleRateVelocities*/ \
0, /*meshCoupleRateVelocityCount*/ \
0, /*meshCoupleRateVelocityVersion*/ \
0, /*meshCoupleRateDivergences*/ \
0, /*meshCoupleRateDivergenceCount*/ \
0, /*meshCoupleRateDivergenceVersion*/ \
0, /*meshCoupleRateTemperatures*/ \
0, /*meshCoupleRateTemperatureCount*/ \
0, /*meshCoupleRateTemperatureVersion*/ \
0, /*meshCoupleRateFuels*/ \
0, /*meshCoupleRateFuelCount*/ \
0, /*meshCoupleRateFuelVersion*/ \
0, /*meshCoupleRateBurns*/ \
0, /*meshCoupleRateBurnCount*/ \
0, /*meshCoupleRateBurnVersion*/ \
0, /*meshCoupleRateSmokes*/ \
0, /*meshCoupleRateSmokeCount*/ \
0, /*meshCoupleRateSmokeVersion*/ \
0.f, /*physicsVelocityScale*/ \
NV_FLOW_FALSE, /*applyPostPressure*/ \
}
static const NvFlowEmitterMeshParams NvFlowEmitterMeshParams_default = NvFlowEmitterMeshParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterMeshParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorld, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorldVelocity, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, velocityIsWorldSpace, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, numSubSteps, 0, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(float, minDistance, 0, 0)
NV_FLOW_REFLECT_VALUE(float, maxDistance, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, allocateMask, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, velocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smoke, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, orientationLeftHanded, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, colorIsSrgb, 0, 0)
NV_FLOW_REFLECT_VALUE(float, velocityScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergenceScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperatureScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuelScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burnScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smokeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateVelocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateDivergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateTemperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateFuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateSmoke, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(int, meshSubsetFaceCounts, meshSubsetFaceCountCount, meshSubsetFaceCountVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(int, meshSubsetLayers, meshSubsetLayerCount, meshSubsetLayerVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowBool32, meshSubsetEnableds, meshSubsetEnabledCount, meshSubsetEnabledVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowFloat3, meshPositions, meshPositionCount, meshPositionVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(int, meshFaceVertexIndices, meshFaceVertexIndexCount, meshFaceVertexIndexVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(int, meshFaceVertexCounts, meshFaceVertexCountCount, meshFaceVertexCountVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowFloat3, meshVelocities, meshVelocityCount, meshVelocityVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshDivergences, meshDivergenceCount, meshDivergenceVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowFloat3, meshColors, meshColorCount, meshColorVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshTemperatures, meshTemperatureCount, meshTemperatureVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshFuels, meshFuelCount, meshFuelVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshBurns, meshBurnCount, meshBurnVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshSmokes, meshSmokeCount, meshSmokeVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshCoupleRateVelocities, meshCoupleRateVelocityCount, meshCoupleRateVelocityVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshCoupleRateDivergences, meshCoupleRateDivergenceCount, meshCoupleRateDivergenceVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshCoupleRateTemperatures, meshCoupleRateTemperatureCount, meshCoupleRateTemperatureVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshCoupleRateFuels, meshCoupleRateFuelCount, meshCoupleRateFuelVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshCoupleRateBurns, meshCoupleRateBurnCount, meshCoupleRateBurnVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, meshCoupleRateSmokes, meshCoupleRateSmokeCount, meshCoupleRateSmokeVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_VALUE(float, physicsVelocityScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, applyPostPressure, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowEmitterMeshParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowEmitterMeshFeedback
{
void* data;
}NvFlowEmitterMeshFeedback;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowEmitterMeshFeedback)
typedef struct NvFlowEmitterMeshPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowEmitterMeshParams*const* velocityParams;
NvFlowUint64 velocityParamCount;
const NvFlowEmitterMeshParams*const* densityParams;
NvFlowUint64 densityParamCount;
NvFlowSparseTexture value;
NvFlowSparseTexture valueTemp;
NvFlowBool32 isPostPressure;
NvFlowEmitterMeshFeedback feedback;
}NvFlowEmitterMeshPinsIn;
typedef struct NvFlowEmitterMeshPinsOut
{
NvFlowSparseTexture value;
}NvFlowEmitterMeshPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterMeshPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterMeshParams, velocityParams, velocityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterMeshParams, densityParams, densityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, valueTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, isPostPressure, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowEmitterMeshFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterMeshPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterMesh)
/// ********************************* EmitterMeshAllocate ***************************************
typedef struct NvFlowEmitterMeshAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
float deltaTime;
const NvFlowEmitterMeshParams*const* params;
NvFlowUint64 paramCount;
NvFlowUint3 baseBlockDimBits;
}NvFlowEmitterMeshAllocatePinsIn;
typedef struct NvFlowEmitterMeshAllocatePinsOut
{
NvFlowInt4* locations;
NvFlowUint64 locationCount;
NvFlowEmitterMeshFeedback feedback;
}NvFlowEmitterMeshAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterMeshAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterMeshParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint3, baseBlockDimBits, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterMeshAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowEmitterMeshFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterMeshAllocate)
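// Illustrative sketch (compiled out, not part of the API): minimal population of
// NvFlowEmitterMeshParams for a one-triangle mesh source. Starting from the
// _default instance keeps every unlisted field at its documented default; the
// version counters are assumed to be bumped by the caller whenever the
// corresponding array contents change.
#if 0
static void exampleFillMeshEmitter(NvFlowEmitterMeshParams* params)
{
    static NvFlowFloat3 positions[3] = {
        { -1.f, 0.f, 0.f }, { 1.f, 0.f, 0.f }, { 0.f, 1.f, 0.f }
    };
    static int faceVertexIndices[3] = { 0, 1, 2 };
    static int faceVertexCounts[1] = { 3 };    // one triangle

    *params = NvFlowEmitterMeshParams_default;
    params->meshPositions = positions;
    params->meshPositionCount = 3u;
    params->meshPositionVersion = 1llu;        // bump when positions change
    params->meshFaceVertexIndices = faceVertexIndices;
    params->meshFaceVertexIndexCount = 3u;
    params->meshFaceVertexIndexVersion = 1llu;
    params->meshFaceVertexCounts = faceVertexCounts;
    params->meshFaceVertexCountCount = 1u;
    params->meshFaceVertexCountVersion = 1llu;
}
#endif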
/// ********************************* EmitterTexture ***************************************
typedef struct NvFlowEmitterTextureParams
{
NvFlowUint64 luid;
NvFlowBool32 enabled;
NvFlowFloat4x4 localToWorld;
NvFlowFloat4x4 localToWorldVelocity;
NvFlowBool32 velocityIsWorldSpace;
NvFlowFloat3 position;
int layer;
NvFlowFloat3 halfSize;
float allocationScale;
NvFlowFloat3 velocity;
float divergence;
float temperature;
float fuel;
float burn;
float smoke;
NvFlowBool32 colorIsSrgb;
float velocityScale;
float divergenceScale;
float temperatureScale;
float fuelScale;
float burnScale;
float smokeScale;
float coupleRateVelocity;
float coupleRateDivergence;
float coupleRateTemperature;
float coupleRateFuel;
float coupleRateBurn;
float coupleRateSmoke;
NvFlowUint textureWidth;
NvFlowUint textureHeight;
NvFlowUint textureDepth;
NvFlowUint textureFirstElement;
NvFlowFloat3* textureVelocities;
NvFlowUint64 textureVelocityCount;
NvFlowUint64 textureVelocityVersion;
float* textureDivergences;
NvFlowUint64 textureDivergenceCount;
NvFlowUint64 textureDivergenceVersion;
float* textureTemperatures;
NvFlowUint64 textureTemperatureCount;
NvFlowUint64 textureTemperatureVersion;
float* textureFuels;
NvFlowUint64 textureFuelCount;
NvFlowUint64 textureFuelVersion;
float* textureBurns;
NvFlowUint64 textureBurnCount;
NvFlowUint64 textureBurnVersion;
float* textureSmokes;
NvFlowUint64 textureSmokeCount;
NvFlowUint64 textureSmokeVersion;
float* textureCoupleRateVelocities;
NvFlowUint64 textureCoupleRateVelocityCount;
NvFlowUint64 textureCoupleRateVelocityVersion;
float* textureCoupleRateDivergences;
NvFlowUint64 textureCoupleRateDivergenceCount;
NvFlowUint64 textureCoupleRateDivergenceVersion;
float* textureCoupleRateTemperatures;
NvFlowUint64 textureCoupleRateTemperatureCount;
NvFlowUint64 textureCoupleRateTemperatureVersion;
float* textureCoupleRateFuels;
NvFlowUint64 textureCoupleRateFuelCount;
NvFlowUint64 textureCoupleRateFuelVersion;
float* textureCoupleRateBurns;
NvFlowUint64 textureCoupleRateBurnCount;
NvFlowUint64 textureCoupleRateBurnVersion;
float* textureCoupleRateSmokes;
NvFlowUint64 textureCoupleRateSmokeCount;
NvFlowUint64 textureCoupleRateSmokeVersion;
NvFlowBool32 applyPostPressure;
}NvFlowEmitterTextureParams;
#define NvFlowEmitterTextureParams_default_init { \
0llu, /*luid*/ \
NV_FLOW_TRUE, /*enabled*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorld*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorldVelocity*/ \
NV_FLOW_FALSE, /*velocityIsWorldSpace*/ \
{0.f, 0.f, 0.f}, /*position*/ \
0, /*layer*/ \
{10.f, 10.f, 10.f}, /*halfSize*/ \
	1.f, /*allocationScale*/ \
{0.f, 0.f, 400.f}, /*velocity*/ \
0.f, /*divergence*/ \
0.5f, /*temperature*/ \
0.8f, /*fuel*/ \
0.f, /*burn*/ \
0.f, /*smoke*/ \
NV_FLOW_FALSE, /*colorIsSrgb*/ \
1.f, /*velocityScale*/ \
1.f, /*divergenceScale*/ \
1.f, /*temperatureScale*/ \
1.f, /*fuelScale*/ \
1.f, /*burnScale*/ \
1.f, /*smokeScale*/ \
2.f, /*coupleRateVelocity*/ \
0.f, /*coupleRateDivergence*/ \
2.f, /*coupleRateTemperature*/ \
2.f, /*coupleRateFuel*/ \
0.f, /*coupleRateBurn*/ \
0.f, /*coupleRateSmoke*/ \
0u, /*textureWidth*/ \
0u, /*textureHeight*/ \
0u, /*textureDepth*/ \
0u, /*textureFirstElement*/ \
0, /*textureVelocities*/ \
0, /*textureVelocityCount*/ \
0, /*textureVelocityVersion*/ \
0, /*textureDivergences*/ \
0, /*textureDivergenceCount*/ \
0, /*textureDivergenceVersion*/ \
0, /*textureTemperatures*/ \
0, /*textureTemperatureCount*/ \
0, /*textureTemperatureVersion*/ \
0, /*textureFuels*/ \
0, /*textureFuelCount*/ \
0, /*textureFuelVersion*/ \
0, /*textureBurns*/ \
0, /*textureBurnCount*/ \
0, /*textureBurnVersion*/ \
0, /*textureSmokes*/ \
0, /*textureSmokeCount*/ \
0, /*textureSmokeVersion*/ \
0, /*textureCoupleRateVelocities*/ \
0, /*textureCoupleRateVelocityCount*/ \
0, /*textureCoupleRateVelocityVersion*/ \
0, /*textureCoupleRateDivergences*/ \
0, /*textureCoupleRateDivergenceCount*/ \
0, /*textureCoupleRateDivergenceVersion*/ \
0, /*textureCoupleRateTemperatures*/ \
0, /*textureCoupleRateTemperatureCount*/ \
0, /*textureCoupleRateTemperatureVersion*/ \
0, /*textureCoupleRateFuels*/ \
0, /*textureCoupleRateFuelCount*/ \
0, /*textureCoupleRateFuelVersion*/ \
0, /*textureCoupleRateBurns*/ \
0, /*textureCoupleRateBurnCount*/ \
0, /*textureCoupleRateBurnVersion*/ \
0, /*textureCoupleRateSmokes*/ \
0, /*textureCoupleRateSmokeCount*/ \
0, /*textureCoupleRateSmokeVersion*/ \
NV_FLOW_FALSE /*applyPostPressure*/ \
}
static const NvFlowEmitterTextureParams NvFlowEmitterTextureParams_default = NvFlowEmitterTextureParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterTextureParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorld, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorldVelocity, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, velocityIsWorldSpace, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, position, 0, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, halfSize, 0, 0)
NV_FLOW_REFLECT_VALUE(float, allocationScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, velocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smoke, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, colorIsSrgb, 0, 0)
NV_FLOW_REFLECT_VALUE(float, velocityScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergenceScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperatureScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuelScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burnScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smokeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateVelocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateDivergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateTemperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateFuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateSmoke, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, textureWidth, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, textureHeight, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, textureDepth, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, textureFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowFloat3, textureVelocities, textureVelocityCount, textureVelocityVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureDivergences, textureDivergenceCount, textureDivergenceVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureTemperatures, textureTemperatureCount, textureTemperatureVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureFuels, textureFuelCount, textureFuelVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureBurns, textureBurnCount, textureBurnVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureSmokes, textureSmokeCount, textureSmokeVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureCoupleRateVelocities, textureCoupleRateVelocityCount, textureCoupleRateVelocityVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureCoupleRateDivergences, textureCoupleRateDivergenceCount, textureCoupleRateDivergenceVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureCoupleRateTemperatures, textureCoupleRateTemperatureCount, textureCoupleRateTemperatureVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureCoupleRateFuels, textureCoupleRateFuelCount, textureCoupleRateFuelVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureCoupleRateBurns, textureCoupleRateBurnCount, textureCoupleRateBurnVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY_VERSIONED(float, textureCoupleRateSmokes, textureCoupleRateSmokeCount, textureCoupleRateSmokeVersion, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_VALUE(NvFlowBool32, applyPostPressure, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowEmitterTextureParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowEmitterTexturePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowEmitterTextureParams*const* velocityParams;
NvFlowUint64 velocityParamCount;
const NvFlowEmitterTextureParams*const* densityParams;
NvFlowUint64 densityParamCount;
NvFlowSparseTexture value;
NvFlowSparseTexture valueTemp;
NvFlowBool32 isPostPressure;
}NvFlowEmitterTexturePinsIn;
typedef struct NvFlowEmitterTexturePinsOut
{
NvFlowSparseTexture value;
}NvFlowEmitterTexturePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterTexturePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterTextureParams, velocityParams, velocityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterTextureParams, densityParams, densityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, valueTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, isPostPressure, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterTexturePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterTexture)
/// ********************************* EmitterTextureAllocate ***************************************
typedef struct NvFlowEmitterTextureAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
float deltaTime;
const NvFlowEmitterTextureParams*const* params;
NvFlowUint64 paramCount;
}NvFlowEmitterTextureAllocatePinsIn;
typedef struct NvFlowEmitterTextureAllocatePinsOut
{
NvFlowInt4* locations;
NvFlowUint64 locationCount;
}NvFlowEmitterTextureAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterTextureAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterTextureParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterTextureAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterTextureAllocate)
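// Illustrative sketch (compiled out, not part of the API): binding a small
// caller-owned CPU density volume to NvFlowEmitterTextureParams. The 8x8x8
// smoke array is hypothetical; textureFirstElement stays at its default 0
// because the data starts at the beginning of the array.
#if 0
static void exampleFillTextureEmitter(NvFlowEmitterTextureParams* params,
    float* smokeData)   // hypothetical buffer of 8*8*8 values, owned by the caller
{
    *params = NvFlowEmitterTextureParams_default;
    params->textureWidth = 8u;
    params->textureHeight = 8u;
    params->textureDepth = 8u;
    params->textureSmokes = smokeData;
    params->textureSmokeCount = 8u * 8u * 8u;
    params->textureSmokeVersion = 1llu;   // bump whenever smokeData changes
    params->coupleRateSmoke = 2.f;        // pull grid smoke toward the texture values
}
#endif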
/// ********************************* EmitterNanoVdb ***************************************
typedef struct NvFlowEmitterNanoVdbParams
{
NvFlowUint64 luid;
NvFlowBool32 enabled;
NvFlowFloat4x4 localToWorld;
NvFlowFloat4x4 localToWorldVelocity;
NvFlowBool32 velocityIsWorldSpace;
int layer;
float allocationScale;
NvFlowFloat3 velocity;
float divergence;
float temperature;
float fuel;
float burn;
float smoke;
NvFlowBool32 colorIsSrgb;
float velocityScale;
float divergenceScale;
float temperatureScale;
float fuelScale;
float burnScale;
float smokeScale;
float coupleRateVelocity;
float coupleRateDivergence;
float coupleRateTemperature;
float coupleRateFuel;
float coupleRateBurn;
float coupleRateSmoke;
float minDistance;
float maxDistance;
NvFlowUint* nanoVdbDistances;
NvFlowUint64 nanoVdbDistanceCount;
NvFlowUint64 nanoVdbDistanceVersion;
NvFlowUint64 nanoVdbDistanceFirstElement;
NvFlowUint* nanoVdbVelocities;
NvFlowUint64 nanoVdbVelocityCount;
NvFlowUint64 nanoVdbVelocityVersion;
NvFlowUint64 nanoVdbVelocityFirstElement;
NvFlowUint* nanoVdbDivergences;
NvFlowUint64 nanoVdbDivergenceCount;
NvFlowUint64 nanoVdbDivergenceVersion;
NvFlowUint64 nanoVdbDivergenceFirstElement;
NvFlowUint* nanoVdbTemperatures;
NvFlowUint64 nanoVdbTemperatureCount;
NvFlowUint64 nanoVdbTemperatureVersion;
NvFlowUint64 nanoVdbTemperatureFirstElement;
NvFlowUint* nanoVdbFuels;
NvFlowUint64 nanoVdbFuelCount;
NvFlowUint64 nanoVdbFuelVersion;
NvFlowUint64 nanoVdbFuelFirstElement;
NvFlowUint* nanoVdbBurns;
NvFlowUint64 nanoVdbBurnCount;
NvFlowUint64 nanoVdbBurnVersion;
NvFlowUint64 nanoVdbBurnFirstElement;
NvFlowUint* nanoVdbSmokes;
NvFlowUint64 nanoVdbSmokeCount;
NvFlowUint64 nanoVdbSmokeVersion;
NvFlowUint64 nanoVdbSmokeFirstElement;
NvFlowUint* nanoVdbCoupleRateVelocities;
NvFlowUint64 nanoVdbCoupleRateVelocityCount;
NvFlowUint64 nanoVdbCoupleRateVelocityVersion;
NvFlowUint64 nanoVdbCoupleRateVelocityFirstElement;
NvFlowUint* nanoVdbCoupleRateDivergences;
NvFlowUint64 nanoVdbCoupleRateDivergenceCount;
NvFlowUint64 nanoVdbCoupleRateDivergenceVersion;
NvFlowUint64 nanoVdbCoupleRateDivergenceFirstElement;
NvFlowUint* nanoVdbCoupleRateTemperatures;
NvFlowUint64 nanoVdbCoupleRateTemperatureCount;
NvFlowUint64 nanoVdbCoupleRateTemperatureVersion;
NvFlowUint64 nanoVdbCoupleRateTemperatureFirstElement;
NvFlowUint* nanoVdbCoupleRateFuels;
NvFlowUint64 nanoVdbCoupleRateFuelCount;
NvFlowUint64 nanoVdbCoupleRateFuelVersion;
NvFlowUint64 nanoVdbCoupleRateFuelFirstElement;
NvFlowUint* nanoVdbCoupleRateBurns;
NvFlowUint64 nanoVdbCoupleRateBurnCount;
NvFlowUint64 nanoVdbCoupleRateBurnVersion;
NvFlowUint64 nanoVdbCoupleRateBurnFirstElement;
NvFlowUint* nanoVdbCoupleRateSmokes;
NvFlowUint64 nanoVdbCoupleRateSmokeCount;
NvFlowUint64 nanoVdbCoupleRateSmokeVersion;
NvFlowUint64 nanoVdbCoupleRateSmokeFirstElement;
NvFlowUint* nanoVdbRgba8s;
NvFlowUint64 nanoVdbRgba8Count;
NvFlowUint64 nanoVdbRgba8Version;
NvFlowUint64 nanoVdbRgba8FirstElement;
NvFlowBool32 applyPostPressure;
NvFlowBool32 allocateActiveLeaves;
}NvFlowEmitterNanoVdbParams;
#define NvFlowEmitterNanoVdbParams_default_init { \
0llu, /*luid*/ \
NV_FLOW_TRUE, /*enabled*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorld*/ \
{ \
1.f, 0.f, 0.f, 0.f, \
0.f, 1.f, 0.f, 0.f, \
0.f, 0.f, 1.f, 0.f, \
0.f, 0.f, 0.f, 1.f \
}, /*localToWorldVelocity*/ \
NV_FLOW_FALSE, /*velocityIsWorldSpace*/ \
0, /*layer*/ \
	1.f, /*allocationScale*/ \
{0.f, 0.f, 0.f}, /*velocity*/ \
0.f, /*divergence*/ \
2.f, /*temperature*/ \
0.0f, /*fuel*/ \
0.f, /*burn*/ \
10.f, /*smoke*/ \
NV_FLOW_FALSE, /*colorIsSrgb*/ \
1.f, /*velocityScale*/ \
1.f, /*divergenceScale*/ \
1.f, /*temperatureScale*/ \
1.f, /*fuelScale*/ \
1.f, /*burnScale*/ \
1.f, /*smokeScale*/ \
2.f, /*coupleRateVelocity*/ \
0.f, /*coupleRateDivergence*/ \
2.f, /*coupleRateTemperature*/ \
0.f, /*coupleRateFuel*/ \
0.f, /*coupleRateBurn*/ \
2.f, /*coupleRateSmoke*/ \
-0.25f, /*minDistance*/ \
0.25f, /*maxDistance*/ \
0, /*nanoVdbDistances*/ \
0, /*nanoVdbDistanceCount*/ \
0, /*nanoVdbDistanceVersion*/ \
0, /*nanoVdbDistanceFirstElement*/ \
0, /*nanoVdbVelocities*/ \
0, /*nanoVdbVelocityCount*/ \
0, /*nanoVdbVelocityVersion*/ \
0, /*nanoVdbVelocityFirstElement*/ \
0, /*nanoVdbDivergences*/ \
0, /*nanoVdbDivergenceCount*/ \
0, /*nanoVdbDivergenceVersion*/ \
0, /*nanoVdbDivergenceFirstElement*/ \
0, /*nanoVdbTemperatures*/ \
0, /*nanoVdbTemperatureCount*/ \
0, /*nanoVdbTemperatureVersion*/ \
0, /*nanoVdbTemperatureFirstElement*/ \
0, /*nanoVdbFuels*/ \
0, /*nanoVdbFuelCount*/ \
0, /*nanoVdbFuelVersion*/ \
0, /*nanoVdbFuelFirstElement*/ \
0, /*nanoVdbBurns*/ \
0, /*nanoVdbBurnCount*/ \
0, /*nanoVdbBurnVersion*/ \
0, /*nanoVdbBurnFirstElement*/ \
0, /*nanoVdbSmokes*/ \
0, /*nanoVdbSmokeCount*/ \
0, /*nanoVdbSmokeVersion*/ \
0, /*nanoVdbSmokeFirstElement*/ \
0, /*nanoVdbCoupleRateVelocities*/ \
0, /*nanoVdbCoupleRateVelocityCount*/ \
0, /*nanoVdbCoupleRateVelocityVersion*/ \
0, /*nanoVdbCoupleRateVelocityFirstElement*/ \
0, /*nanoVdbCoupleRateDivergences*/ \
0, /*nanoVdbCoupleRateDivergenceCount*/ \
0, /*nanoVdbCoupleRateDivergenceVersion*/ \
0, /*nanoVdbCoupleRateDivergenceFirstElement*/ \
0, /*nanoVdbCoupleRateTemperatures*/ \
0, /*nanoVdbCoupleRateTemperatureCount*/ \
0, /*nanoVdbCoupleRateTemperatureVersion*/ \
0, /*nanoVdbCoupleRateTemperatureFirstElement*/ \
0, /*nanoVdbCoupleRateFuels*/ \
0, /*nanoVdbCoupleRateFuelCount*/ \
0, /*nanoVdbCoupleRateFuelVersion*/ \
0, /*nanoVdbCoupleRateFuelFirstElement*/ \
0, /*nanoVdbCoupleRateBurns*/ \
0, /*nanoVdbCoupleRateBurnCount*/ \
0, /*nanoVdbCoupleRateBurnVersion*/ \
0, /*nanoVdbCoupleRateBurnFirstElement*/ \
0, /*nanoVdbCoupleRateSmokes*/ \
0, /*nanoVdbCoupleRateSmokeCount*/ \
0, /*nanoVdbCoupleRateSmokeVersion*/ \
0, /*nanoVdbCoupleRateSmokeFirstElement*/ \
0, /*nanoVdbRgba8s*/ \
0, /*nanoVdbRgba8Count*/ \
0, /*nanoVdbRgba8Version*/ \
0, /*nanoVdbRgba8FirstElement*/ \
NV_FLOW_FALSE, /*applyPostPressure*/ \
NV_FLOW_TRUE, /*allocateActiveLeaves*/ \
}
static const NvFlowEmitterNanoVdbParams NvFlowEmitterNanoVdbParams_default = NvFlowEmitterNanoVdbParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterNanoVdbParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorld, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat4x4, localToWorldVelocity, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, velocityIsWorldSpace, 0, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(float, allocationScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, velocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smoke, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, colorIsSrgb, 0, 0)
NV_FLOW_REFLECT_VALUE(float, velocityScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, divergenceScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, temperatureScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fuelScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, burnScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, smokeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateVelocity, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateDivergence, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateTemperature, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateFuel, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateBurn, 0, 0)
NV_FLOW_REFLECT_VALUE(float, coupleRateSmoke, 0, 0)
NV_FLOW_REFLECT_VALUE(float, minDistance, 0, 0)
NV_FLOW_REFLECT_VALUE(float, maxDistance, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbDistances, nanoVdbDistanceCount, nanoVdbDistanceVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbDistanceFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbVelocities, nanoVdbVelocityCount, nanoVdbVelocityVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbVelocityFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbDivergences, nanoVdbDivergenceCount, nanoVdbDivergenceVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbDivergenceFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbTemperatures, nanoVdbTemperatureCount, nanoVdbTemperatureVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbTemperatureFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbFuels, nanoVdbFuelCount, nanoVdbFuelVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbFuelFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbBurns, nanoVdbBurnCount, nanoVdbBurnVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbBurnFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbSmokes, nanoVdbSmokeCount, nanoVdbSmokeVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbSmokeFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbCoupleRateVelocities, nanoVdbCoupleRateVelocityCount, nanoVdbCoupleRateVelocityVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbCoupleRateVelocityFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbCoupleRateDivergences, nanoVdbCoupleRateDivergenceCount, nanoVdbCoupleRateDivergenceVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbCoupleRateDivergenceFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbCoupleRateTemperatures, nanoVdbCoupleRateTemperatureCount, nanoVdbCoupleRateTemperatureVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbCoupleRateTemperatureFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbCoupleRateFuels, nanoVdbCoupleRateFuelCount, nanoVdbCoupleRateFuelVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbCoupleRateFuelFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbCoupleRateBurns, nanoVdbCoupleRateBurnCount, nanoVdbCoupleRateBurnVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbCoupleRateBurnFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbCoupleRateSmokes, nanoVdbCoupleRateSmokeCount, nanoVdbCoupleRateSmokeVersion, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbCoupleRateSmokeFirstElement, 0, 0)
NV_FLOW_REFLECT_ARRAY_VERSIONED(NvFlowUint, nanoVdbRgba8s, nanoVdbRgba8Count, nanoVdbRgba8Version, eNvFlowReflectHint_asset, "asset(nanovdb)")
NV_FLOW_REFLECT_VALUE(NvFlowUint64, nanoVdbRgba8FirstElement, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, applyPostPressure, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, allocateActiveLeaves, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowEmitterNanoVdbParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowEmitterNanoVdbPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
float deltaTime;
const NvFlowEmitterNanoVdbParams*const* velocityParams;
NvFlowUint64 velocityParamCount;
const NvFlowEmitterNanoVdbParams*const* densityParams;
NvFlowUint64 densityParamCount;
NvFlowSparseTexture value;
NvFlowSparseTexture valueTemp;
NvFlowBool32 isPostPressure;
}NvFlowEmitterNanoVdbPinsIn;
typedef struct NvFlowEmitterNanoVdbPinsOut
{
NvFlowSparseTexture value;
}NvFlowEmitterNanoVdbPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterNanoVdbPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterNanoVdbParams, velocityParams, velocityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterNanoVdbParams, densityParams, densityParamCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, valueTemp, eNvFlowReflectHint_pinEnabledMutable, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, isPostPressure, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterNanoVdbPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterNanoVdb)
/// ********************************* EmitterNanoVdbAllocate ***************************************
typedef struct NvFlowEmitterNanoVdbAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
float deltaTime;
const NvFlowEmitterNanoVdbParams*const* params;
NvFlowUint64 paramCount;
}NvFlowEmitterNanoVdbAllocatePinsIn;
typedef struct NvFlowEmitterNanoVdbAllocatePinsOut
{
NvFlowInt4* locations;
NvFlowUint64 locationCount;
}NvFlowEmitterNanoVdbAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterNanoVdbAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, deltaTime, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEmitterNanoVdbParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEmitterNanoVdbAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEmitterNanoVdbAllocate)
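// Illustrative sketch (compiled out, not part of the API): pointing the NanoVDB
// emitter at a caller-owned NanoVDB float grid passed as 32-bit words. Loading
// or building the grid (myNanoVdbWords/myNanoVdbWordCount are hypothetical) is
// assumed to happen elsewhere; only the parameter wiring is shown.
#if 0
static void exampleFillNanoVdbEmitter(NvFlowEmitterNanoVdbParams* params,
    NvFlowUint* myNanoVdbWords, NvFlowUint64 myNanoVdbWordCount)
{
    *params = NvFlowEmitterNanoVdbParams_default;
    params->nanoVdbDistances = myNanoVdbWords;       // signed distance grid
    params->nanoVdbDistanceCount = myNanoVdbWordCount;
    params->nanoVdbDistanceVersion = 1llu;           // bump on data change
    params->minDistance = -0.25f;                    // emit within this band
    params->maxDistance = 0.25f;
}
#endif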
/// ********************************* Ellipsoid Raster ***************************************
typedef struct NvFlowEllipsoidRasterFeedback
{
void* data;
}NvFlowEllipsoidRasterFeedback;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowEllipsoidRasterFeedback)
typedef struct NvFlowEllipsoidRasterParams
{
float scale;
float density;
NvFlowBool32 sdfMode;
const NvFlowFloat4* positions;
NvFlowUint64 positionCount;
const NvFlowFloat3* positionFloat3s;
NvFlowUint64 positionFloat3Count;
const NvFlowFloat4* anisotropyE1s;
NvFlowUint64 anisotropyE1Count;
const NvFlowFloat4* anisotropyE2s;
NvFlowUint64 anisotropyE2Count;
const NvFlowFloat4* anisotropyE3s;
NvFlowUint64 anisotropyE3Count;
NvFlowUint smoothIterations;
float allocationScale;
float allocationOffset;
}NvFlowEllipsoidRasterParams;
#define NvFlowEllipsoidRasterParams_default_init { \
1.f, /*scale*/ \
1.f, /*density*/ \
NV_FLOW_TRUE, /*sdfMode*/ \
0, /*positions*/ \
	0u, /*positionCount*/ \
	0, /*positionFloat3s*/ \
	0u, /*positionFloat3Count*/ \
0, /*anisotropyE1s*/ \
0u, /*anisotropyE1Count*/ \
0, /*anisotropyE2s*/ \
0u, /*anisotropyE2Count*/ \
0, /*anisotropyE3s*/ \
0u, /*anisotropyE3Count*/ \
3u, /*smoothIterations*/ \
1.f, /*allocationScale*/ \
0.f /*allocationOffset*/ \
}
static const NvFlowEllipsoidRasterParams NvFlowEllipsoidRasterParams_default = NvFlowEllipsoidRasterParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowEllipsoidRasterParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(float, scale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, density, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, sdfMode, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowFloat4, positions, positionCount, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY(NvFlowFloat3, positionFloat3s, positionFloat3Count, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY(NvFlowFloat4, anisotropyE1s, anisotropyE1Count, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY(NvFlowFloat4, anisotropyE2s, anisotropyE2Count, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_ARRAY(NvFlowFloat4, anisotropyE3s, anisotropyE3Count, eNvFlowReflectHint_asset, "asset(array)")
NV_FLOW_REFLECT_VALUE(NvFlowUint, smoothIterations, 0, 0)
NV_FLOW_REFLECT_VALUE(float, allocationScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, allocationOffset, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowEllipsoidRasterParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowEllipsoidRasterPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowEllipsoidRasterFeedback feedback;
const NvFlowEllipsoidRasterParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture layout;
}NvFlowEllipsoidRasterPinsIn;
typedef struct NvFlowEllipsoidRasterPinsOut
{
NvFlowSparseTexture value;
}NvFlowEllipsoidRasterPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEllipsoidRasterPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowEllipsoidRasterFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEllipsoidRasterParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, layout, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEllipsoidRasterPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, value, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEllipsoidRaster)
typedef struct NvFlowEllipsoidRasterAllocatePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowSparseParams sparseParams;
const NvFlowEllipsoidRasterParams** params;
NvFlowUint64 paramCount;
}NvFlowEllipsoidRasterAllocatePinsIn;
typedef struct NvFlowEllipsoidRasterAllocatePinsOut
{
NvFlowEllipsoidRasterFeedback feedback;
NvFlowInt4* locations;
NvFlowUint64 locationCount;
}NvFlowEllipsoidRasterAllocatePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowEllipsoidRasterAllocatePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseParams, sparseParams, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowEllipsoidRasterParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowEllipsoidRasterAllocatePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowEllipsoidRasterFeedback, feedback, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowInt4, locations, locationCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowEllipsoidRasterAllocate)
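// Illustrative sketch (compiled out, not part of the API): rasterizing
// particles with positions only. The anisotropy arrays are left empty, which
// this sketch assumes falls back to isotropic splats; the meaning of the w
// component of each position is engine-defined and not asserted here.
#if 0
static void exampleFillEllipsoidRaster(NvFlowEllipsoidRasterParams* params,
    const NvFlowFloat4* particlePositions, NvFlowUint64 particleCount)
{
    *params = NvFlowEllipsoidRasterParams_default;
    params->positions = particlePositions;
    params->positionCount = particleCount;
    params->scale = 1.5f;                    // enlarge splats slightly
    params->smoothIterations = 3u;           // keep the default smoothing
}
#endif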
/// ********************************* Shadow ***************************************
typedef struct NvFlowShadowParams
{
NvFlowBool32 enabled;
NvFlowFloat3 lightDirection;
NvFlowFloat3 lightPosition;
NvFlowBool32 isPointLight;
float attenuation;
float stepSizeScale;
float stepOffsetScale;
float minIntensity;
NvFlowUint numSteps;
NvFlowBool32 coarsePropagate;
}NvFlowShadowParams;
#define NvFlowShadowParams_default_init { \
NV_FLOW_TRUE, /*enabled*/ \
{1.f, 1.f, 1.f}, /*lightDirection*/ \
{0.f, 0.f, 0.f}, /*lightPosition*/ \
NV_FLOW_FALSE, /*isPointLight*/ \
0.045f, /*attenuation*/ \
0.75f, /*stepSizeScale*/ \
1.f, /*stepOffsetScale*/ \
0.125f, /*minIntensity*/ \
16u, /*numSteps*/ \
NV_FLOW_TRUE /*coarsePropagate*/ \
}
static const NvFlowShadowParams NvFlowShadowParams_default = NvFlowShadowParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowShadowParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enabled, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, lightDirection, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, lightPosition, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, isPointLight, 0, 0)
NV_FLOW_REFLECT_VALUE(float, attenuation, 0, 0)
NV_FLOW_REFLECT_VALUE(float, stepSizeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, stepOffsetScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, minIntensity, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, numSteps, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, coarsePropagate, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowShadowParams_default)
#undef NV_FLOW_REFLECT_TYPE
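// Illustrative sketch (compiled out, not part of the API): switching
// self-shadowing from the default directional light to a point light at a
// chosen world position. attenuation, numSteps, and the other fields keep
// their defaults from NvFlowShadowParams_default above.
#if 0
static void exampleConfigurePointLightShadow(NvFlowShadowParams* params)
{
    *params = NvFlowShadowParams_default;
    params->isPointLight = NV_FLOW_TRUE;
    params->lightPosition.x = 0.f;
    params->lightPosition.y = 0.f;
    params->lightPosition.z = 50.f;   // light 50 units up the z axis
}
#endif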
typedef struct NvFlowShadowPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
const NvFlowShadowParams** params;
NvFlowUint64 paramCount;
NvFlowTextureTransient* colormap;
NvFlowSparseTexture density;
NvFlowSparseTexture coarseDensity;
}NvFlowShadowPinsIn;
typedef struct NvFlowShadowPinsOut
{
NvFlowSparseTexture densityShadow;
}NvFlowShadowPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowShadowPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowShadowParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowTextureTransient, colormap, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, density, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, coarseDensity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowShadowPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, densityShadow, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowShadow)
/// ********************************* DebugVolume ***************************************
typedef struct NvFlowDebugVolumeParams
{
NvFlowBool32 enableSpeedAsTemperature;
NvFlowBool32 enableVelocityAsDensity;
NvFlowFloat3 velocityScale;
}NvFlowDebugVolumeParams;
#define NvFlowDebugVolumeParams_default_init { \
NV_FLOW_FALSE, /*enableSpeedAsTemperature*/ \
NV_FLOW_FALSE, /*enableVelocityAsDensity*/ \
{0.01f, 0.01f, 0.01f} /*velocityScale*/ \
}
static const NvFlowDebugVolumeParams NvFlowDebugVolumeParams_default = NvFlowDebugVolumeParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowDebugVolumeParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableSpeedAsTemperature, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableVelocityAsDensity, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, velocityScale, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowDebugVolumeParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowDebugVolumePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
const NvFlowDebugVolumeParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture velocity;
NvFlowSparseTexture densityShadow;
}NvFlowDebugVolumePinsIn;
typedef struct NvFlowDebugVolumePinsOut
{
NvFlowSparseTexture densityShadow;
}NvFlowDebugVolumePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowDebugVolumePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowDebugVolumeParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, densityShadow, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowDebugVolumePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, densityShadow, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowDebugVolume)
/// ********************************* RayMarch ***************************************
typedef struct NvFlowRayMarchCloudParams
{
NvFlowBool32 enableCloudMode;
NvFlowFloat3 sunDirection;
NvFlowFloat3 ambientColor;
float ambientMultiplier;
float densityMultiplier;
NvFlowFloat3 volumeBaseColor;
float volumeColorMultiplier;
float shadowStepMultiplier;
int numShadowSteps;
NvFlowFloat3 attenuationMultiplier;
}NvFlowRayMarchCloudParams;
#define NvFlowRayMarchCloudParams_default_init { \
NV_FLOW_FALSE, /*enableCloudMode*/ \
{1.f, 1.f, 1.f}, /*sunDirection*/ \
{0.4f, 0.55f, 0.9f}, /*ambientColor*/ \
1.0f, /*ambientMultiplier*/ \
0.5f, /*densityMultiplier*/ \
{1.1f, 1.f, 0.95f}, /*volumeBaseColor*/ \
1.0f, /*volumeColorMultiplier*/ \
1.0f, /*shadowStepMultiplier*/ \
	10, /*numShadowSteps*/ \
{1.f, 1.f, 1.f} /*attenuationMultiplier*/ \
}
static const NvFlowRayMarchCloudParams NvFlowRayMarchCloudParams_default = NvFlowRayMarchCloudParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchCloudParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableCloudMode, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, sunDirection, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, ambientColor, 0, 0)
NV_FLOW_REFLECT_VALUE(float, ambientMultiplier, 0, 0)
NV_FLOW_REFLECT_VALUE(float, densityMultiplier, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, volumeBaseColor, 0, 0)
NV_FLOW_REFLECT_VALUE(float, volumeColorMultiplier, 0, 0)
NV_FLOW_REFLECT_VALUE(float, shadowStepMultiplier, 0, 0)
NV_FLOW_REFLECT_VALUE(int, numShadowSteps, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, attenuationMultiplier, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowRayMarchCloudParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowRayMarchParams
{
NvFlowBool32 enableBlockWireframe;
NvFlowBool32 enableRawMode;
float colorScale;
float attenuation;
float stepSizeScale;
float shadowFactor;
NvFlowRayMarchCloudParams cloud;
}NvFlowRayMarchParams;
#define NvFlowRayMarchParams_default_init { \
NV_FLOW_FALSE, /*enableBlockWireframe*/ \
NV_FLOW_FALSE, /*enableRawMode*/ \
1.f, /*colorScale*/ \
0.05f, /*attenuation*/ \
0.75f, /*stepSizeScale*/ \
1.f, /*shadowFactor*/ \
NvFlowRayMarchCloudParams_default_init /*cloud*/ \
}
static const NvFlowRayMarchParams NvFlowRayMarchParams_default = NvFlowRayMarchParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableBlockWireframe, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableRawMode, 0, 0)
NV_FLOW_REFLECT_VALUE(float, colorScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, attenuation, 0, 0)
NV_FLOW_REFLECT_VALUE(float, stepSizeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, shadowFactor, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowRayMarchCloudParams, cloud, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowRayMarchParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowRayMarchIsosurfaceParams
{
NvFlowBool32 enableBlockWireframe;
float stepSizeScale;
float densityThreshold;
NvFlowBool32 refractionMode;
NvFlowBool32 visualizeNormals;
float fluidIoR;
NvFlowFloat3 fluidColor;
float fluidAbsorptionCoefficient;
NvFlowFloat3 fluidSpecularReflectance;
NvFlowFloat3 fluidDiffuseReflectance;
NvFlowFloat3 fluidRadiance;
}NvFlowRayMarchIsosurfaceParams;
#define NvFlowRayMarchIsosurfaceParams_default_init { \
NV_FLOW_FALSE, /*enableBlockWireframe*/ \
0.75f, /*stepSizeScale*/ \
0.5f, /*densityThreshold*/ \
NV_FLOW_FALSE, /*refractionMode*/ \
NV_FLOW_FALSE, /*visualizeNormals*/ \
1.333f, /*fluidIoR*/ \
{0.9f, 0.9f, 1.f}, /*fluidColor*/ \
0.0035f, /*fluidAbsorptionCoefficient*/ \
{0.1f, 0.1f, 0.1f}, /*fluidSpecularReflectance*/ \
{0.1f, 0.1f, 0.1f}, /*fluidDiffuseReflectance*/ \
{0.f, 0.f, 0.f} /*fluidRadiance*/ \
}
static const NvFlowRayMarchIsosurfaceParams NvFlowRayMarchIsosurfaceParams_default = NvFlowRayMarchIsosurfaceParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchIsosurfaceParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableBlockWireframe, 0, 0)
NV_FLOW_REFLECT_VALUE(float, stepSizeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(float, densityThreshold, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, refractionMode, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, visualizeNormals, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fluidIoR, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, fluidColor, 0, 0)
NV_FLOW_REFLECT_VALUE(float, fluidAbsorptionCoefficient, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, fluidSpecularReflectance, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, fluidDiffuseReflectance, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowFloat3, fluidRadiance, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowRayMarchIsosurfaceParams_default)
#undef NV_FLOW_REFLECT_TYPE
#define NvFlowRayMarchColormapParams_default_pointCount_init 6u
static const NvFlowUint64 NvFlowRayMarchColormapParams_default_pointCount = NvFlowRayMarchColormapParams_default_pointCount_init;
#define NvFlowRayMarchColormapParams_default_xPoints_init { \
0.000000f, \
0.050000f, \
	0.150000f, \
0.600000f, \
0.850000f, \
1.000000f \
}
static const float NvFlowRayMarchColormapParams_default_xPoints[NvFlowRayMarchColormapParams_default_pointCount_init] = NvFlowRayMarchColormapParams_default_xPoints_init;
#define NvFlowRayMarchColormapParams_default_colorScalePoints_init { \
1.000000f, \
1.000000f, \
1.000000f, \
1.000000f, \
1.000000f, \
1.000000f \
}
static const float NvFlowRayMarchColormapParams_default_colorScalePoints[NvFlowRayMarchColormapParams_default_pointCount_init] = NvFlowRayMarchColormapParams_default_colorScalePoints_init;
#define NvFlowRayMarchColormapParams_default_rgbaPoints_smoke_init { \
{ 0.9f, 0.9f, 0.9f, 0.004902f }, \
{ 0.9f, 0.9f, 0.9f, 0.904902f }, \
{ 0.9f, 0.9f, 0.9f, 0.904902f }, \
{ 0.9f, 0.9f, 0.9f, 0.904902f }, \
{ 0.9f, 0.9f, 0.9f, 0.904902f }, \
{ 0.9f, 0.9f, 0.9f, 0.904902f }, \
}
static const NvFlowFloat4 NvFlowRayMarchColormapParams_default_rgbaPoints_smoke[NvFlowRayMarchColormapParams_default_pointCount_init] = NvFlowRayMarchColormapParams_default_rgbaPoints_smoke_init;
#define NvFlowRayMarchColormapParams_default_rgbaPoints_fire_init { \
{ 0.015400f, 0.017700f, 0.015400f, 0.004902f }, \
{ 0.035750f, 0.035750f, 0.035750f, 0.504902f }, \
{ 0.035750f, 0.035750f, 0.035750f, 0.504902f }, \
{ 1.f, 0.1594125f, 0.0135315f, 0.800000f }, \
{ 13.534992f, 2.986956f, 0.125991f, 0.800000f }, \
{ 78.08f, 39.04f, 6.1f, 0.700000f }, \
}
static const NvFlowFloat4 NvFlowRayMarchColormapParams_default_rgbaPoints_fire[NvFlowRayMarchColormapParams_default_pointCount_init] = NvFlowRayMarchColormapParams_default_rgbaPoints_fire_init;
typedef struct NvFlowRayMarchColormapParams
{
NvFlowUint resolution;
const float* xPoints;
NvFlowUint64 xPointCount;
const NvFlowFloat4* rgbaPoints;
NvFlowUint64 rgbaPointCount;
const float* colorScalePoints;
NvFlowUint64 colorScalePointCount;
float colorScale;
}NvFlowRayMarchColormapParams;
#define NvFlowRayMarchColormapParams_default_init { \
32u, /*resolution*/ \
NvFlowRayMarchColormapParams_default_xPoints, /*xPoints*/ \
NvFlowRayMarchColormapParams_default_pointCount_init, /*xPointCount*/ \
NvFlowRayMarchColormapParams_default_rgbaPoints_smoke, /*rgbaPoints*/ \
NvFlowRayMarchColormapParams_default_pointCount_init, /*rgbaPointCount*/ \
NvFlowRayMarchColormapParams_default_colorScalePoints, /*colorScalePoints*/ \
	NvFlowRayMarchColormapParams_default_pointCount_init, /*colorScalePointCount*/ \
2.5f /*colorScale*/ \
}
static const NvFlowRayMarchColormapParams NvFlowRayMarchColormapParams_default = NvFlowRayMarchColormapParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchColormapParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint, resolution, 0, 0)
NV_FLOW_REFLECT_ARRAY(float, xPoints, xPointCount, 0, 0)
NV_FLOW_REFLECT_ARRAY(NvFlowFloat4, rgbaPoints, rgbaPointCount, 0, 0)
NV_FLOW_REFLECT_ARRAY(float, colorScalePoints, colorScalePointCount, 0, 0)
NV_FLOW_REFLECT_VALUE(float, colorScale, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowRayMarchColormapParams_default)
#undef NV_FLOW_REFLECT_TYPE
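// Illustrative sketch (compiled out, not part of the API): replacing the
// default smoke colormap with a two-point gradient from transparent black to
// opaque white. The default xPoints above span [0,1], so this sketch does the
// same; all three arrays share the same control points, so their counts match.
#if 0
static const float exampleXPoints[2] = { 0.f, 1.f };
static const NvFlowFloat4 exampleRgbaPoints[2] = {
    { 0.f, 0.f, 0.f, 0.f },
    { 1.f, 1.f, 1.f, 1.f }
};
static const float exampleColorScalePoints[2] = { 1.f, 1.f };

static void exampleFillColormap(NvFlowRayMarchColormapParams* params)
{
    *params = NvFlowRayMarchColormapParams_default;
    params->xPoints = exampleXPoints;
    params->xPointCount = 2u;
    params->rgbaPoints = exampleRgbaPoints;
    params->rgbaPointCount = 2u;
    params->colorScalePoints = exampleColorScalePoints;
    params->colorScalePointCount = 2u;
}
#endif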
typedef struct NvFlowRayMarchTargetTexture
{
const NvFlowFloat4x4* view;
const NvFlowFloat4x4* projection;
const NvFlowFloat4x4* projectionJittered;
NvFlowUint textureWidth;
NvFlowUint textureHeight;
NvFlowUint sceneDepthWidth;
NvFlowUint sceneDepthHeight;
NvFlowTextureTransient* sceneDepthIn;
NvFlowFormat sceneColorFormat;
NvFlowTextureTransient* sceneColorIn;
NvFlowTextureTransient** pSceneColorOut;
}NvFlowRayMarchTargetTexture;
NV_FLOW_REFLECT_STRUCT_OPAQUE_IMPL(NvFlowRayMarchTargetTexture)
typedef struct NvFlowRayMarchPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
const NvFlowRayMarchParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture velocity;
NvFlowSparseTexture density;
NvFlowTextureTransient* colormap;
const NvFlowRayMarchTargetTexture* target;
float compositeColorScale;
}NvFlowRayMarchPinsIn;
typedef struct NvFlowRayMarchPinsOut
{
NvFlowUint unused;
}NvFlowRayMarchPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowRayMarchParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, velocity, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, density, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowTextureTransient, colormap, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowRayMarchTargetTexture, target, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, compositeColorScale, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint, unused, 0, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowRayMarch)
typedef struct NvFlowRayMarchUpdateColormapPinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
const NvFlowRayMarchColormapParams** params;
NvFlowUint64 paramCount;
}NvFlowRayMarchUpdateColormapPinsIn;
typedef struct NvFlowRayMarchUpdateColormapPinsOut
{
NvFlowTextureTransient* colormap;
}NvFlowRayMarchUpdateColormapPinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchUpdateColormapPinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowRayMarchColormapParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchUpdateColormapPinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowTextureTransient, colormap, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowRayMarchUpdateColormap)
typedef struct NvFlowRayMarchIsosurfacePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
const NvFlowRayMarchIsosurfaceParams** params;
NvFlowUint64 paramCount;
NvFlowSparseTexture density;
const NvFlowRayMarchTargetTexture* target;
float compositeColorScale;
}NvFlowRayMarchIsosurfacePinsIn;
typedef struct NvFlowRayMarchIsosurfacePinsOut
{
NvFlowUint unused;
}NvFlowRayMarchIsosurfacePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchIsosurfacePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER_ARRAY(NvFlowRayMarchIsosurfaceParams, params, paramCount, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseTexture, density, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_POINTER(NvFlowRayMarchTargetTexture, target, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(float, compositeColorScale, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchIsosurfacePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint, unused, 0, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowRayMarchIsosurface)
typedef struct NvFlowRayMarchCopyTexturePinsIn
{
NvFlowContextInterface* contextInterface;
NvFlowContext* context;
NvFlowTextureTransient* texture;
NvFlowUint width;
NvFlowUint height;
NvFlowFormat format;
}NvFlowRayMarchCopyTexturePinsIn;
typedef struct NvFlowRayMarchCopyTexturePinsOut
{
NvFlowTextureTransient* texture;
}NvFlowRayMarchCopyTexturePinsOut;
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchCopyTexturePinsIn
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowContextInterface, contextInterface, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowContext, context, eNvFlowReflectHint_pinEnabledGlobal, 0)
NV_FLOW_REFLECT_POINTER(NvFlowTextureTransient, texture, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, width, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, height, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_ENUM(format, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
#define NV_FLOW_REFLECT_TYPE NvFlowRayMarchCopyTexturePinsOut
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_POINTER(NvFlowTextureTransient, texture, eNvFlowReflectHint_pinEnabled, 0)
NV_FLOW_REFLECT_END(0)
#undef NV_FLOW_REFLECT_TYPE
NV_FLOW_OP_TYPED(NvFlowRayMarchCopyTexture)
/// ********************************* NvFlowExtOpList ***************************************
typedef struct NvFlowExtOpList
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterSphere)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterSphereAllocate)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterBox)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterBoxAllocate)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterPoint)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterPointAllocate)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterMesh)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterMeshAllocate)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterTexture)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterTextureAllocate)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterNanoVdb)();
NvFlowOpInterface* (NV_FLOW_ABI* pEmitterNanoVdbAllocate)();
NvFlowOpInterface* (NV_FLOW_ABI* pEllipsoidRaster)();
NvFlowOpInterface* (NV_FLOW_ABI* pEllipsoidRasterAllocate)();
NvFlowOpInterface* (NV_FLOW_ABI* pShadow)();
NvFlowOpInterface* (NV_FLOW_ABI* pDebugVolume)();
NvFlowOpInterface* (NV_FLOW_ABI* pRayMarch)();
NvFlowOpInterface* (NV_FLOW_ABI* pRayMarchUpdateColormap)();
NvFlowOpInterface* (NV_FLOW_ABI* pRayMarchIsosurface)();
NvFlowOpInterface* (NV_FLOW_ABI* pRayMarchCopyTexture)();
}NvFlowExtOpList;
#define NV_FLOW_REFLECT_TYPE NvFlowExtOpList
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterSphere, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterSphereAllocate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterBox, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterBoxAllocate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterPoint, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterPointAllocate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterMesh, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterMeshAllocate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterTextureAllocate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterNanoVdb, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEmitterNanoVdbAllocate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEllipsoidRaster, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pEllipsoidRasterAllocate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pShadow, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pDebugVolume, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pRayMarch, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pRayMarchUpdateColormap, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pRayMarchIsosurface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(pRayMarchCopyTexture, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowExtOpList* (NV_FLOW_ABI* PFN_NvFlowGetExtOpList)();
NV_FLOW_API NvFlowExtOpList* NvFlowGetExtOpList();
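// Illustrative sketch (compiled out): fetching the op list and grabbing one op
// interface, using only the declarations above. Error handling is omitted; in
// a dynamic-library setup, PFN_NvFlowGetExtOpList would typically be resolved
// from the loaded module rather than linked directly.
#if 0
static NvFlowOpInterface* exampleGetRayMarchOp(void)
{
    NvFlowExtOpList* opList = NvFlowGetExtOpList();
    return opList ? opList->pRayMarch() : 0;
}
#endif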
/// ********************************* Grid ***************************************
struct NvFlowGrid;
typedef struct NvFlowGrid NvFlowGrid;
typedef struct NvFlowGridDesc
{
NvFlowUint maxLocations;
NvFlowUint maxLocationsIsosurface;
}NvFlowGridDesc;
#define NvFlowGridDesc_default_init { \
4096u, /*maxLocations*/ \
4096u /*maxLocationsIsosurface*/ \
}
static const NvFlowGridDesc NvFlowGridDesc_default = NvFlowGridDesc_default_init;
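// Illustrative sketch (compiled out, not part of the API): raising the block
// budget for a larger simulation domain. A higher maxLocations increases
// memory use, so this is a per-scene tradeoff the caller tunes.
#if 0
static void exampleFillGridDesc(NvFlowGridDesc* desc)
{
    *desc = NvFlowGridDesc_default;
    desc->maxLocations = 16384u;   // 4x the default block budget
}
#endif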
NV_FLOW_REFLECT_TYPE_ALIAS(NvFlowEmitterSphereParams, NvFlowGridEmitterSphereParams)
NV_FLOW_REFLECT_TYPE_ALIAS(NvFlowEmitterBoxParams, NvFlowGridEmitterBoxParams)
NV_FLOW_REFLECT_TYPE_ALIAS(NvFlowEmitterPointParams, NvFlowGridEmitterPointParams)
NV_FLOW_REFLECT_TYPE_ALIAS(NvFlowEmitterMeshParams, NvFlowGridEmitterMeshParams)
NV_FLOW_REFLECT_TYPE_ALIAS(NvFlowEmitterTextureParams, NvFlowGridEmitterTextureParams)
NV_FLOW_REFLECT_TYPE_ALIAS(NvFlowEmitterNanoVdbParams, NvFlowGridEmitterNanoVdbParams)
typedef struct NvFlowGridSimulateLayerParams
{
NvFlowUint64 luid;
int layer;
float densityCellSize;
NvFlowBool32 enableSmallBlocks;
NvFlowBool32 enableLowPrecisionVelocity;
NvFlowBool32 enableLowPrecisionDensity;
NvFlowBool32 forceClear;
NvFlowBool32 forceDisableEmitters;
NvFlowBool32 forceDisableCoreSimulation;
NvFlowBool32 simulateWhenPaused;
NvFlowUint blockMinLifetime;
float stepsPerSecond;
float timeScale;
NvFlowUint maxStepsPerSimulate;
NvFlowBool32 enableVariableTimeStep;
NvFlowBool32 interpolateTimeSteps;
NvFlowUint velocitySubSteps;
NvFlowAdvectionCombustionParams advection;
NvFlowVorticityParams vorticity;
NvFlowPressureParams pressure;
NvFlowSummaryAllocateParams summaryAllocate;
NvFlowSparseNanoVdbExportParams nanoVdbExport;
}NvFlowGridSimulateLayerParams;
#define NvFlowGridSimulateLayerParams_default_init { \
0llu, /*luid*/ \
0, /*layer*/ \
0.5f, /*densityCellSize*/ \
NV_FLOW_FALSE, /*enableSmallBlocks*/ \
NV_FLOW_FALSE, /*enableLowPrecisionVelocity*/ \
NV_FLOW_FALSE, /*enableLowPrecisionDensity*/ \
NV_FLOW_FALSE, /*forceClear*/ \
NV_FLOW_FALSE, /*forceDisableEmitters*/ \
NV_FLOW_FALSE, /*forceDisableCoreSimulation*/ \
NV_FLOW_FALSE, /*simulateWhenPaused*/ \
4u, /*blockMinLifetime*/ \
60.f, /*stepsPerSecond*/ \
1.f, /*timeScale*/ \
1u, /*maxStepsPerSimulate*/ \
NV_FLOW_FALSE, /*enableVariableTimeStep*/ \
NV_FLOW_FALSE, /*interpolateTimeSteps*/ \
1u, /*velocitySubSteps*/ \
NvFlowAdvectionCombustionParams_default_init, /*advection*/ \
NvFlowVorticityParams_default_init, /*vorticity*/ \
NvFlowPressureParams_default_init, /*pressure*/ \
NvFlowSummaryAllocateParams_default_init, /*summaryAllocate*/ \
NvFlowSparseNanoVdbExportParams_default_init /*nanoVdbExport*/ \
}
static const NvFlowGridSimulateLayerParams NvFlowGridSimulateLayerParams_default = NvFlowGridSimulateLayerParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowGridSimulateLayerParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(float, densityCellSize, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableSmallBlocks, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableLowPrecisionVelocity, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableLowPrecisionDensity, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, forceClear, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, forceDisableEmitters, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, forceDisableCoreSimulation, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, simulateWhenPaused, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, blockMinLifetime, 0, 0)
NV_FLOW_REFLECT_VALUE(float, stepsPerSecond, 0, 0)
NV_FLOW_REFLECT_VALUE(float, timeScale, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, maxStepsPerSimulate, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, enableVariableTimeStep, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowBool32, interpolateTimeSteps, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowUint, velocitySubSteps, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowAdvectionCombustionParams, advection, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowVorticityParams, vorticity, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowPressureParams, pressure, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSummaryAllocateParams, summaryAllocate, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowSparseNanoVdbExportParams, nanoVdbExport, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowGridSimulateLayerParams_default)
#undef NV_FLOW_REFLECT_TYPE
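// Illustrative usage sketch: start from the compile-time default and override a
// few fields; the chosen values here are arbitrary examples.
#if 0
NvFlowGridSimulateLayerParams simParams = NvFlowGridSimulateLayerParams_default_init;
simParams.layer = 1;                // place this volume on layer 1
simParams.densityCellSize = 0.25f;  // finer cells than the 0.5f default
simParams.stepsPerSecond = 120.f;   // simulate at 120 Hz instead of 60 Hz
#endif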
typedef struct NvFlowGridOffscreenLayerParams
{
NvFlowUint64 luid;
int layer;
NvFlowShadowParams shadow;
NvFlowRayMarchColormapParams colormap;
NvFlowDebugVolumeParams debugVolume;
}NvFlowGridOffscreenLayerParams;
#define NvFlowGridOffscreenLayerParams_default_init { \
0llu, /*luid*/ \
0, /*layer*/ \
NvFlowShadowParams_default_init, /*shadow*/ \
NvFlowRayMarchColormapParams_default_init, /*colormap*/ \
NvFlowDebugVolumeParams_default_init /*debugVolume*/ \
}
static const NvFlowGridOffscreenLayerParams NvFlowGridOffscreenLayerParams_default = NvFlowGridOffscreenLayerParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowGridOffscreenLayerParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowShadowParams, shadow, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowRayMarchColormapParams, colormap, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowDebugVolumeParams, debugVolume, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowGridOffscreenLayerParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowGridRenderLayerParams
{
NvFlowUint64 luid;
int layer;
NvFlowRayMarchParams rayMarch;
}NvFlowGridRenderLayerParams;
#define NvFlowGridRenderLayerParams_default_init { \
0llu, /*luid*/ \
0, /*layer*/ \
NvFlowRayMarchParams_default_init /*rayMarch*/ \
}
static const NvFlowGridRenderLayerParams NvFlowGridRenderLayerParams_default = NvFlowGridRenderLayerParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowGridRenderLayerParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowRayMarchParams, rayMarch, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowGridRenderLayerParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowGridIsosurfaceLayerParams
{
NvFlowUint64 luid;
int layer;
float densityCellSize;
NvFlowEllipsoidRasterParams ellipsoidRaster;
NvFlowRayMarchIsosurfaceParams rayMarchIsosurface;
}NvFlowGridIsosurfaceLayerParams;
#define NvFlowGridIsosurfaceLayerParams_default_init { \
0llu, /*luid*/ \
0, /*layer*/ \
2.f, /*densityCellSize*/ \
NvFlowEllipsoidRasterParams_default_init, /*ellipsoidRaster*/ \
NvFlowRayMarchIsosurfaceParams_default_init /*rayMarchIsosurface*/ \
}
static const NvFlowGridIsosurfaceLayerParams NvFlowGridIsosurfaceLayerParams_default = NvFlowGridIsosurfaceLayerParams_default_init;
#define NV_FLOW_REFLECT_TYPE NvFlowGridIsosurfaceLayerParams
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_VALUE(NvFlowUint64, luid, eNvFlowReflectHint_transientNoEdit, 0)
NV_FLOW_REFLECT_VALUE(int, layer, 0, 0)
NV_FLOW_REFLECT_VALUE(float, densityCellSize, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowEllipsoidRasterParams, ellipsoidRaster, 0, 0)
NV_FLOW_REFLECT_VALUE(NvFlowRayMarchIsosurfaceParams, rayMarchIsosurface, 0, 0)
NV_FLOW_REFLECT_END(&NvFlowGridIsosurfaceLayerParams_default)
#undef NV_FLOW_REFLECT_TYPE
typedef struct NvFlowGridParamsDescSnapshot
{
NvFlowDatabaseSnapshot snapshot;
double absoluteSimTime;
float deltaTime;
NvFlowBool32 globalForceClear;
const NvFlowUint8* userdata;
NvFlowUint64 userdataSizeInBytes;
}NvFlowGridParamsDescSnapshot;
typedef struct NvFlowGridParamsDesc
{
NvFlowGridParamsDescSnapshot* snapshots;
NvFlowUint64 snapshotCount;
}NvFlowGridParamsDesc;
typedef NvFlowSparseNanoVdbExportPinsOut NvFlowGridRenderDataNanoVdb;
typedef struct NvFlowGridRenderData
{
NvFlowBufferTransient* sparseBuffer;
NvFlowTextureTransient* densityTexture;
NvFlowTextureTransient* velocityTexture;
NvFlowTextureTransient* colormap;
NvFlowSparseParams sparseParams;
NvFlowGridRenderDataNanoVdb nanoVdb;
}NvFlowGridRenderData;
typedef struct NvFlowGridIsosurfaceData
{
NvFlowBufferTransient* sparseBuffer;
NvFlowTextureTransient* densityTexture;
NvFlowSparseParams sparseParams;
}NvFlowGridIsosurfaceData;
typedef struct NvFlowGridInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowGrid*(NV_FLOW_ABI* createGrid)(
NvFlowContextInterface* contextInterface,
NvFlowContext* context,
NvFlowOpList* opList,
NvFlowExtOpList* extOpList,
const NvFlowGridDesc* desc
);
void(NV_FLOW_ABI* destroyGrid)(
NvFlowContext* context,
NvFlowGrid* grid
);
void(NV_FLOW_ABI* resetGrid)(
NvFlowContext* context,
NvFlowGrid* grid,
const NvFlowGridDesc* desc
);
void(NV_FLOW_ABI* simulate)(
NvFlowContext* context,
NvFlowGrid* grid,
const NvFlowGridParamsDesc* params,
NvFlowBool32 globalForceClear
);
void(NV_FLOW_ABI* offscreen)(
NvFlowContext* context,
NvFlowGrid* grid,
const NvFlowGridParamsDesc* params
);
void(NV_FLOW_ABI* getRenderData)(
NvFlowContext* context,
NvFlowGrid* grid,
NvFlowGridRenderData* renderData
);
void(NV_FLOW_ABI* render)(
NvFlowContext* context,
NvFlowGrid* grid,
const NvFlowGridParamsDesc* params,
const NvFlowFloat4x4* view,
const NvFlowFloat4x4* projection,
const NvFlowFloat4x4* projectionJittered,
NvFlowUint width,
NvFlowUint height,
NvFlowUint sceneDepthWidth,
NvFlowUint sceneDepthHeight,
float compositeColorScale,
NvFlowTextureTransient* sceneDepthIn,
NvFlowFormat sceneColorFormat,
NvFlowTextureTransient* sceneColorIn,
NvFlowTextureTransient** pSceneColorOut
);
void(NV_FLOW_ABI* updateIsosurface)(
NvFlowContext* context,
NvFlowGrid* grid,
const NvFlowGridParamsDesc* params
);
void(NV_FLOW_ABI* getIsosurfaceData)(
NvFlowContext* context,
NvFlowGrid* grid,
NvFlowGridIsosurfaceData* isosurfaceData
);
void(NV_FLOW_ABI* renderIsosurface)(
NvFlowContext* context,
NvFlowGrid* grid,
const NvFlowGridParamsDesc* params,
const NvFlowFloat4x4* view,
const NvFlowFloat4x4* projection,
const NvFlowFloat4x4* projectionJittered,
NvFlowUint width,
NvFlowUint height,
NvFlowUint sceneDepthWidth,
NvFlowUint sceneDepthHeight,
float compositeColorScale,
NvFlowTextureTransient* sceneDepthIn,
NvFlowFormat sceneColorFormat,
NvFlowTextureTransient* sceneColorIn,
NvFlowTextureTransient** pSceneColorOut
);
void(NV_FLOW_ABI* copyTexture)(
NvFlowContext* context,
NvFlowGrid* grid,
NvFlowUint width,
NvFlowUint height,
NvFlowFormat sceneColorFormat,
NvFlowTextureTransient* sceneColorIn,
NvFlowTextureTransient** pSceneColorOut
);
NvFlowUint(NV_FLOW_ABI* getActiveBlockCount)(NvFlowGrid* grid);
NvFlowUint(NV_FLOW_ABI* getActiveBlockCountIsosurface)(NvFlowGrid* grid);
void(NV_FLOW_ABI* setResourceMinLifetime)(NvFlowContext* context, NvFlowGrid* grid, NvFlowUint64 minLifetime);
}NvFlowGridInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowGridInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(createGrid, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyGrid, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(resetGrid, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(simulate, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(offscreen, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getRenderData, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(render, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(updateIsosurface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getIsosurfaceData, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(renderIsosurface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(copyTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getActiveBlockCount, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getActiveBlockCountIsosurface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(setResourceMinLifetime, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowGridInterface* (NV_FLOW_ABI* PFN_NvFlowGetGridInterface)();
NV_FLOW_API NvFlowGridInterface* NvFlowGetGridInterface();
NV_FLOW_API NvFlowGridInterface* NvFlowGetGridInterfaceNoOpt();
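// Illustrative usage sketch: a minimal create/simulate/render/destroy flow through
// the grid interface. The contextInterface, context, opList, extOpList, paramsDesc,
// camera matrices, sizes, and scene textures are assumed to be provided by the host
// application; the composite scale of 1.f is an arbitrary choice.
#if 0
NvFlowGridInterface* gridInterface = NvFlowGetGridInterface();
NvFlowGrid* grid = gridInterface->createGrid(
    contextInterface, context, opList, extOpList, &NvFlowGridDesc_default);

// per frame: advance the simulation, run offscreen passes, then composite
gridInterface->simulate(context, grid, &paramsDesc, NV_FLOW_FALSE);
gridInterface->offscreen(context, grid, &paramsDesc);

NvFlowTextureTransient* sceneColorOut = 0;
gridInterface->render(
    context, grid, &paramsDesc,
    &view, &projection, &projectionJittered,
    width, height, sceneDepthWidth, sceneDepthHeight,
    1.f, sceneDepthIn, sceneColorFormat, sceneColorIn,
    &sceneColorOut);

// on shutdown
gridInterface->destroyGrid(context, grid);
#endif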
/// ********************************* Grid Params ***************************************
struct NvFlowGridParams;
typedef struct NvFlowGridParams NvFlowGridParams;
struct NvFlowGridParamsSnapshot;
typedef struct NvFlowGridParamsSnapshot NvFlowGridParamsSnapshot;
struct NvFlowGridParamsNamed;
typedef struct NvFlowGridParamsNamed NvFlowGridParamsNamed;
typedef struct NvFlowGridParamsInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowGridParams*(NV_FLOW_ABI* createGridParams)();
void(NV_FLOW_ABI* destroyGridParams)(NvFlowGridParams* gridParams);
void(NV_FLOW_ABI* enumerateParamTypes)(
NvFlowGridParams* gridParams,
const char** pTypenames,
const char** pDisplayTypenames,
const NvFlowReflectDataType** pDataTypes,
NvFlowUint64* pCount
);
void(NV_FLOW_ABI* getVersion)(
NvFlowGridParams* gridParams,
NvFlowUint64* pStagingVersion,
NvFlowUint64* pMinActiveVersion
);
void(NV_FLOW_ABI* commitParams)(
NvFlowGridParams* gridParams,
const NvFlowGridParamsDescSnapshot* snapshot
);
NvFlowBool32(NV_FLOW_ABI* resetParams)(NvFlowGridParams* gridParams);
NvFlowGridParamsSnapshot*(NV_FLOW_ABI* getParamsSnapshot)(
NvFlowGridParams* gridParams,
double absoluteSimTime,
NvFlowUint64 pullId
);
NvFlowBool32(NV_FLOW_ABI* mapParamsDesc)(
NvFlowGridParams* gridParams,
NvFlowGridParamsSnapshot* snapshot,
NvFlowGridParamsDesc* pParamsDesc
);
void(NV_FLOW_ABI* unmapParamsDesc)(
NvFlowGridParams* gridParams,
NvFlowGridParamsSnapshot* snapshot
);
NvFlowGridParamsNamed*(NV_FLOW_ABI* createGridParamsNamed)(const char* name);
int(NV_FLOW_ABI* destroyGridParamsNamed)(NvFlowGridParamsNamed* gridParamsNamed);
NvFlowGridParams*(NV_FLOW_ABI* mapGridParamsNamed)(NvFlowGridParamsNamed* gridParamsNamed);
}NvFlowGridParamsInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowGridParamsInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(createGridParams, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyGridParams, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(enumerateParamTypes, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getVersion, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(commitParams, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(resetParams, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getParamsSnapshot, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(mapParamsDesc, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(unmapParamsDesc, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createGridParamsNamed, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyGridParamsNamed, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(mapGridParamsNamed, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowGridParamsInterface* (NV_FLOW_ABI* PFN_NvFlowGetGridParamsInterface)();
NV_FLOW_API NvFlowGridParamsInterface* NvFlowGetGridParamsInterface();
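// Illustrative usage sketch of the producer/consumer parameter flow. The
// databaseSnapshot, absoluteSimTime, and deltaTime variables are assumed to be
// provided by the host; a pullId of 0 is an arbitrary choice.
#if 0
NvFlowGridParamsInterface* paramsInterface = NvFlowGetGridParamsInterface();
NvFlowGridParams* gridParams = paramsInterface->createGridParams();

// producer: publish a new snapshot of authored parameters
NvFlowGridParamsDescSnapshot snapshotDesc = {
    databaseSnapshot, absoluteSimTime, deltaTime, NV_FLOW_FALSE, 0, 0llu };
paramsInterface->commitParams(gridParams, &snapshotDesc);

// consumer: pull the latest snapshot and map it for simulate()/render()
NvFlowGridParamsSnapshot* snapshot =
    paramsInterface->getParamsSnapshot(gridParams, absoluteSimTime, 0llu);
NvFlowGridParamsDesc paramsDesc = { 0 };
if (paramsInterface->mapParamsDesc(gridParams, snapshot, &paramsDesc))
{
    // paramsDesc.snapshots/snapshotCount remain valid until unmapParamsDesc()
    paramsInterface->unmapParamsDesc(gridParams, snapshot);
}

paramsInterface->destroyGridParams(gridParams);
#endif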
/// ********************************* Thread Pool ***************************************
struct NvFlowThreadPool;
typedef struct NvFlowThreadPool NvFlowThreadPool;
typedef void(*NvFlowThreadPoolTask_t)(NvFlowUint taskIdx, NvFlowUint threadIdx, void* sharedMem, void* userdata);
typedef struct NvFlowThreadPoolInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowUint(NV_FLOW_ABI* getDefaultThreadCount)();
NvFlowThreadPool*(NV_FLOW_ABI* create)(NvFlowUint threadCount, NvFlowUint64 sharedMemorySizeInBytes);
void(NV_FLOW_ABI* destroy)(NvFlowThreadPool* pool);
NvFlowUint(NV_FLOW_ABI* getThreadCount)(NvFlowThreadPool* pool);
void(NV_FLOW_ABI* execute)(NvFlowThreadPool* pool, NvFlowUint taskCount, NvFlowUint taskGranularity, NvFlowThreadPoolTask_t task, void* userdata);
}NvFlowThreadPoolInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowThreadPoolInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(getDefaultThreadCount, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(create, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroy, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getThreadCount, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(execute, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowThreadPoolInterface* (NV_FLOW_ABI* PFN_NvFlowThreadPoolInterface)();
NV_FLOW_API NvFlowThreadPoolInterface* NvFlowGetThreadPoolInterface();
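// Illustrative usage sketch: dispatch a data-parallel task over the pool. The
// myTask function and myData pointer are hypothetical names; shared memory is
// unused here, so the pool is created with 0 bytes of it.
#if 0
static void myTask(NvFlowUint taskIdx, NvFlowUint threadIdx, void* sharedMem, void* userdata)
{
    float* values = (float*)userdata;
    values[taskIdx] *= 2.f; // each task owns exactly one element
}

NvFlowThreadPoolInterface* poolInterface = NvFlowGetThreadPoolInterface();
NvFlowThreadPool* pool = poolInterface->create(poolInterface->getDefaultThreadCount(), 0llu);
poolInterface->execute(pool, 1024u, 32u, myTask, myData); // 1024 tasks; granularity 32 is an arbitrary batching hint
poolInterface->destroy(pool);
#endif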
/// ********************************* Optimization Layer ***************************************
struct NvFlowContextOpt;
typedef struct NvFlowContextOpt NvFlowContextOpt;
typedef struct NvFlowContextOptInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowContextOpt*(NV_FLOW_ABI* create)(NvFlowContextInterface* backendContextInterface, NvFlowContext* backendContext);
void(NV_FLOW_ABI* destroy)(NvFlowContextOpt* contextOpt);
void(NV_FLOW_ABI* getContext)(NvFlowContextOpt* contextOpt, NvFlowContextInterface** pContextInterface, NvFlowContext** pContext);
void(NV_FLOW_ABI* flush)(NvFlowContextOpt* contextOpt);
NvFlowBufferTransient*(NV_FLOW_ABI* importBackendBufferTransient)(NvFlowContextOpt* contextOpt, NvFlowBufferTransient* backendBufferTransient);
NvFlowTextureTransient*(NV_FLOW_ABI* importBackendTextureTransient)(NvFlowContextOpt* contextOpt, NvFlowTextureTransient* backendTextureTransient);
void(NV_FLOW_ABI* exportBufferTransient)(NvFlowContextOpt* contextOpt, NvFlowBufferTransient* bufferTransient, NvFlowBufferTransient** pBackendBufferTransient);
void(NV_FLOW_ABI* exportTextureTransient)(NvFlowContextOpt* contextOpt, NvFlowTextureTransient* textureTransient, NvFlowTextureTransient** pBackendTextureTransient);
void(NV_FLOW_ABI* setResourceMinLifetime)(NvFlowContextOpt* contextOpt, NvFlowUint64 minLifetime);
}NvFlowContextOptInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowContextOptInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(create, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroy, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getContext, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(flush, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(importBackendBufferTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(importBackendTextureTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(exportBufferTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(exportTextureTransient, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(setResourceMinLifetime, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowContextOptInterface*(NV_FLOW_ABI* PFN_NvFlowGetContextOptInterface)();
NV_FLOW_API NvFlowContextOptInterface* NvFlowGetContextOptInterface();
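// Illustrative usage sketch: wrap a backend context so Flow work records through
// the optimization layer and is submitted on flush(). backendContextInterface and
// backendContext are assumed to come from the device layer below.
#if 0
NvFlowContextOptInterface* optInterface = NvFlowGetContextOptInterface();
NvFlowContextOpt* contextOpt = optInterface->create(backendContextInterface, backendContext);

NvFlowContextInterface* contextInterface = 0;
NvFlowContext* context = 0;
optInterface->getContext(contextOpt, &contextInterface, &context);

// ... record Flow operations against (contextInterface, context) ...

optInterface->flush(contextOpt); // submit the recorded work to the backend
optInterface->destroy(contextOpt);
#endif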
/// ********************************* Reference Device ***************************************
struct NvFlowDeviceManager;
typedef struct NvFlowDeviceManager NvFlowDeviceManager;
typedef struct NvFlowPhysicalDeviceDesc
{
NvFlowUint8 deviceUUID[16u];
NvFlowUint8 deviceLUID[8u];
NvFlowUint deviceNodeMask;
NvFlowBool32 deviceLUIDValid;
}NvFlowPhysicalDeviceDesc;
typedef struct NvFlowDeviceDesc
{
NvFlowUint deviceIndex;
NvFlowBool32 enableExternalUsage;
NvFlowLogPrint_t logPrint;
}NvFlowDeviceDesc;
struct NvFlowSwapchainDesc;
typedef struct NvFlowSwapchainDesc NvFlowSwapchainDesc;
#if defined(NV_FLOW_SWAPCHAIN_DESC)
struct NvFlowSwapchainDesc
{
#if defined(_WIN32)
HINSTANCE hinstance;
HWND hwnd;
#else
Display* dpy;
Window window;
#endif
NvFlowFormat format;
};
#endif
struct NvFlowDevice;
typedef struct NvFlowDevice NvFlowDevice;
struct NvFlowDeviceQueue;
typedef struct NvFlowDeviceQueue NvFlowDeviceQueue;
struct NvFlowDeviceSemaphore;
typedef struct NvFlowDeviceSemaphore NvFlowDeviceSemaphore;
struct NvFlowSwapchain;
typedef struct NvFlowSwapchain NvFlowSwapchain;
typedef struct NvFlowProfilerEntry
{
const char* label;
float cpuDeltaTime;
float gpuDeltaTime;
}NvFlowProfilerEntry;
typedef struct NvFlowDeviceInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowDeviceManager*(NV_FLOW_ABI* createDeviceManager)(NvFlowBool32 enableValidationOnDebugBuild, NvFlowThreadPoolInterface* threadPoolInterface, NvFlowUint threadCount);
void(NV_FLOW_ABI* destroyDeviceManager)(NvFlowDeviceManager* manager);
NvFlowBool32(NV_FLOW_ABI* enumerateDevices)(NvFlowDeviceManager* manager, NvFlowUint deviceIndex, NvFlowPhysicalDeviceDesc* pDesc);
NvFlowDevice*(NV_FLOW_ABI* createDevice)(NvFlowDeviceManager* manager, const NvFlowDeviceDesc* desc);
void(NV_FLOW_ABI* destroyDevice)(NvFlowDeviceManager* manager, NvFlowDevice* device);
NvFlowDeviceSemaphore*(NV_FLOW_ABI* createSemaphore)(NvFlowDevice* device);
void(NV_FLOW_ABI* destroySemaphore)(NvFlowDeviceSemaphore* semaphore);
void(NV_FLOW_ABI* getSemaphoreExternalHandle)(NvFlowDeviceSemaphore* semaphore, void* dstHandle, NvFlowUint64 dstHandleSize);
void(NV_FLOW_ABI* closeSemaphoreExternalHandle)(NvFlowDeviceSemaphore* semaphore, const void* srcHandle, NvFlowUint64 srcHandleSize);
NvFlowDeviceQueue*(NV_FLOW_ABI* getDeviceQueue)(NvFlowDevice* device);
int(NV_FLOW_ABI* flush)(NvFlowDeviceQueue* queue, NvFlowUint64* flushedFrameID, NvFlowDeviceSemaphore* waitSemaphore, NvFlowDeviceSemaphore* signalSemaphore);
NvFlowUint64(NV_FLOW_ABI* getLastFrameCompleted)(NvFlowDeviceQueue* queue);
void(NV_FLOW_ABI* waitForFrame)(NvFlowDeviceQueue* queue, NvFlowUint64 frameID);
void(NV_FLOW_ABI* waitIdle)(NvFlowDeviceQueue* queue);
NvFlowContextInterface*(NV_FLOW_ABI* getContextInterface)(NvFlowDeviceQueue* queue);
NvFlowContext*(NV_FLOW_ABI* getContext)(NvFlowDeviceQueue* queue);
NvFlowSwapchain*(NV_FLOW_ABI* createSwapchain)(NvFlowDeviceQueue* queue, const NvFlowSwapchainDesc* desc);
void(NV_FLOW_ABI* destroySwapchain)(NvFlowSwapchain* swapchain);
void(NV_FLOW_ABI* resizeSwapchain)(NvFlowSwapchain* swapchain, NvFlowUint width, NvFlowUint height);
int(NV_FLOW_ABI* presentSwapchain)(NvFlowSwapchain* swapchain, NvFlowBool32 vsync, NvFlowUint64* flushedFrameID);
NvFlowTexture*(NV_FLOW_ABI* getSwapchainFrontTexture)(NvFlowSwapchain* swapchain);
void(NV_FLOW_ABI* enableProfiler)(NvFlowContext* context, void* userdata, void(NV_FLOW_ABI* reportEntries)(void* userdata, NvFlowUint64 captureID, NvFlowUint numEntries, NvFlowProfilerEntry* entries));
void(NV_FLOW_ABI* disableProfiler)(NvFlowContext* context);
NvFlowUint64(NV_FLOW_ABI* registerBufferId)(NvFlowContext* context, NvFlowBuffer* buffer);
NvFlowUint64(NV_FLOW_ABI* registerTextureId)(NvFlowContext* context, NvFlowTexture* texture);
void(NV_FLOW_ABI* unregisterBufferId)(NvFlowContext* context, NvFlowUint64 bufferId);
void(NV_FLOW_ABI* unregisterTextureId)(NvFlowContext* context, NvFlowUint64 textureId);
void(NV_FLOW_ABI* setResourceMinLifetime)(NvFlowContext* context, NvFlowUint64 minLifetime);
void(NV_FLOW_ABI* getBufferExternalHandle)(NvFlowContext* context, NvFlowBuffer* buffer, void* dstHandle, NvFlowUint64 dstHandleSize, NvFlowUint64* pBufferSizeInBytes);
void(NV_FLOW_ABI* closeBufferExternalHandle)(NvFlowContext* context, NvFlowBuffer* buffer, const void* srcHandle, NvFlowUint64 srcHandleSize);
}NvFlowDeviceInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowDeviceInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(createDeviceManager, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyDeviceManager, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(enumerateDevices, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createDevice, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroyDevice, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createSemaphore, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroySemaphore, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getSemaphoreExternalHandle, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(closeSemaphoreExternalHandle, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getDeviceQueue, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(flush, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getLastFrameCompleted, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(waitForFrame, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(waitIdle, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getContextInterface, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getContext, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(createSwapchain, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroySwapchain, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(resizeSwapchain, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(presentSwapchain, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getSwapchainFrontTexture, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(enableProfiler, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(disableProfiler, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(registerBufferId, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(registerTextureId, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(unregisterBufferId, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(unregisterTextureId, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(setResourceMinLifetime, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getBufferExternalHandle, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(closeBufferExternalHandle, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowDeviceInterface* (NV_FLOW_ABI* PFN_NvFlowGetDeviceInterface)(NvFlowContextApi api);
NV_FLOW_API NvFlowDeviceInterface* NvFlowGetDeviceInterface(NvFlowContextApi api);
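// Illustrative usage sketch of device bring-up. eNvFlowContextApi_vulkan is
// assumed to be declared by the context headers, and passing a null thread pool
// interface is an assumption; deviceDesc selects adapter 0 with no logging.
#if 0
NvFlowDeviceInterface* deviceInterface = NvFlowGetDeviceInterface(eNvFlowContextApi_vulkan);
NvFlowDeviceManager* manager = deviceInterface->createDeviceManager(NV_FLOW_TRUE, 0, 0u);

NvFlowDeviceDesc deviceDesc = { 0u, NV_FLOW_FALSE, 0 };
NvFlowDevice* device = deviceInterface->createDevice(manager, &deviceDesc);
NvFlowDeviceQueue* queue = deviceInterface->getDeviceQueue(device);

NvFlowContextInterface* contextInterface = deviceInterface->getContextInterface(queue);
NvFlowContext* context = deviceInterface->getContext(queue);

// ... record Flow work against context, then submit it ...
NvFlowUint64 flushedFrameID = 0llu;
deviceInterface->flush(queue, &flushedFrameID, 0, 0);

deviceInterface->waitIdle(queue);
deviceInterface->destroyDevice(manager, device);
deviceInterface->destroyDeviceManager(manager);
#endif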
/// ********************************* RadixSort ***************************************
struct NvFlowRadixSort;
typedef struct NvFlowRadixSort NvFlowRadixSort;
typedef struct NvFlowRadixSortInterface
{
NV_FLOW_REFLECT_INTERFACE();
NvFlowRadixSort*(NV_FLOW_ABI* create)(NvFlowContextInterface* contextInterface, NvFlowContext* context);
void(NV_FLOW_ABI* destroy)(NvFlowContext* context, NvFlowRadixSort* radixSort);
void(NV_FLOW_ABI* reserve)(NvFlowContext* context, NvFlowRadixSort* radixSort, NvFlowUint numKeys);
void(NV_FLOW_ABI* getInputBuffers)(NvFlowContext* context, NvFlowRadixSort* radixSort, NvFlowBufferTransient** pKeyBuffer, NvFlowBufferTransient** pValBuffer);
void(NV_FLOW_ABI* sort)(NvFlowContext* context, NvFlowRadixSort* radixSort, NvFlowUint numKeys, NvFlowUint numKeyBits);
void(NV_FLOW_ABI* getOutputBuffers)(NvFlowContext* context, NvFlowRadixSort* radixSort, NvFlowBufferTransient** pKeyBuffer, NvFlowBufferTransient** pValBuffer);
}NvFlowRadixSortInterface;
#define NV_FLOW_REFLECT_TYPE NvFlowRadixSortInterface
NV_FLOW_REFLECT_BEGIN()
NV_FLOW_REFLECT_FUNCTION_POINTER(create, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(destroy, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(reserve, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getInputBuffers, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(sort, 0, 0)
NV_FLOW_REFLECT_FUNCTION_POINTER(getOutputBuffers, 0, 0)
NV_FLOW_REFLECT_END(0)
NV_FLOW_REFLECT_INTERFACE_IMPL()
#undef NV_FLOW_REFLECT_TYPE
typedef NvFlowRadixSortInterface* (NV_FLOW_ABI* PFN_NvFlowGetRadixSortInterface)();
NV_FLOW_API NvFlowRadixSortInterface* NvFlowGetRadixSortInterface();
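// Illustrative usage sketch of the expected reserve -> fill -> sort -> fetch
// sequence. contextInterface, context, and numKeys are assumed to be provided by
// the caller; sorting on 16 key bits is an arbitrary example.
#if 0
NvFlowRadixSortInterface* sortInterface = NvFlowGetRadixSortInterface();
NvFlowRadixSort* radixSort = sortInterface->create(contextInterface, context);

sortInterface->reserve(context, radixSort, numKeys);
NvFlowBufferTransient* keyBuffer = 0;
NvFlowBufferTransient* valBuffer = 0;
sortInterface->getInputBuffers(context, radixSort, &keyBuffer, &valBuffer);
// ... GPU work writes numKeys key/value pairs into keyBuffer/valBuffer ...
sortInterface->sort(context, radixSort, numKeys, 16u);
sortInterface->getOutputBuffers(context, radixSort, &keyBuffer, &valBuffer);
// keyBuffer/valBuffer now reference the sorted results

sortInterface->destroy(context, radixSort);
#endif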
#endif | 122,952 | C | 37.652311 | 202 | 0.789804 |
NVIDIA-Omniverse/PhysX/flow/include/nvflowext/shaders/NvFlowRayMarchUtils.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_RAY_MARCH_UTILS_H
#define NV_FLOW_RAY_MARCH_UTILS_H
#if defined(__cplusplus)
#include "NvFlowExt.h"
#include "NvFlowRayMarchParams.h"
#include "NvFlowMath.h"
// Fills one layer's ray march shader constants from the sparse grid layout and the
// user-facing ray march parameters.
NV_FLOW_INLINE void NvFlowRayMarchLayerShaderParams_populate(
NvFlowRayMarchLayerShaderParams* dst,
NvFlowUint velocityLevelIdx,
NvFlowUint densityLevelIdx,
NvFlowUint layerParamIdx,
const NvFlowSparseParams* sparseParams,
const NvFlowRayMarchParams* rayMarchParams
)
{
const NvFlowSparseLevelParams* levelParamsVelocity = &sparseParams->levels[velocityLevelIdx];
const NvFlowSparseLevelParams* levelParamsDensity = &sparseParams->levels[densityLevelIdx];
const NvFlowSparseLayerParams* layerParams = &sparseParams->layers[layerParamIdx];
NvFlowFloat3 blockSizeWorld = layerParams->blockSizeWorld;
NvFlowFloat3 blockSizeWorldInv = layerParams->blockSizeWorldInv;
float minBlockSizeWorld = fminf(blockSizeWorld.x, fminf(blockSizeWorld.y, blockSizeWorld.z));
float maxBlockSizeWorld = fmaxf(blockSizeWorld.x, fmaxf(blockSizeWorld.y, blockSizeWorld.z));
NvFlowFloat3 cellSize = NvFlowFloat3{
blockSizeWorld.x / float(levelParamsDensity->blockDimLessOne.x + 1u),
blockSizeWorld.y / float(levelParamsDensity->blockDimLessOne.y + 1u),
blockSizeWorld.z / float(levelParamsDensity->blockDimLessOne.z + 1u)
};
NvFlowFloat3 cellSizeInv = { 1.f / cellSize.x, 1.f / cellSize.y, 1.f / cellSize.z };
float cellSizeMin = fminf(fminf(cellSize.x, cellSize.y), cellSize.z);
NvFlowFloat3 velocityCellSizeInv = NvFlowFloat3{
float(levelParamsVelocity->blockDimLessOne.x + 1u) / blockSizeWorld.x,
float(levelParamsVelocity->blockDimLessOne.y + 1u) / blockSizeWorld.y,
float(levelParamsVelocity->blockDimLessOne.z + 1u) / blockSizeWorld.z
};
float stepSize = rayMarchParams->stepSizeScale * cellSizeMin;
float stepSizeInv = 1.f / stepSize;
// normalize alphaScale based on stepSize
float alphaScale = 1.f - expf(-rayMarchParams->attenuation * stepSize);
float layerColormapV = (float(layerParamIdx) + 0.5f) / float(sparseParams->layerCount);
dst->blockSizeWorld = blockSizeWorld;
dst->minBlockSizeWorld = minBlockSizeWorld;
dst->blockSizeWorldInv = blockSizeWorldInv;
dst->maxBlockSizeWorld = maxBlockSizeWorld;
dst->cellSize = cellSize;
dst->stepSize = stepSize;
dst->cellSizeInv = cellSizeInv;
dst->stepSizeInv = stepSizeInv;
dst->locationMin = layerParams->locationMin;
dst->locationMax = layerParams->locationMax;
// world-space bounds derived from the min/max block locations, offset by half a block
dst->worldMin.x = (layerParams->locationMin.x - 0.5f) * layerParams->blockSizeWorld.x;
dst->worldMin.y = (layerParams->locationMin.y - 0.5f) * layerParams->blockSizeWorld.y;
dst->worldMin.z = (layerParams->locationMin.z - 0.5f) * layerParams->blockSizeWorld.z;
dst->enableBlockWireframe = rayMarchParams->enableBlockWireframe;
dst->worldMax.x = (layerParams->locationMax.x + 0.5f) * layerParams->blockSizeWorld.x;
dst->worldMax.y = (layerParams->locationMax.y + 0.5f) * layerParams->blockSizeWorld.y;
dst->worldMax.z = (layerParams->locationMax.z + 0.5f) * layerParams->blockSizeWorld.z;
dst->enableRawMode = rayMarchParams->enableRawMode;
dst->velocityCellSizeInv = velocityCellSizeInv;
dst->deltaTime = layerParams->deltaTime;
dst->layer = layerParams->layer;
dst->layerColormapV = layerColormapV;
dst->alphaScale = alphaScale;
dst->colorScale = rayMarchParams->colorScale;
dst->shadowFactor = rayMarchParams->shadowFactor;
dst->pad1 = 0.f;
dst->pad2 = 0.f;
dst->pad3 = 0.f;
dst->cloud.densityMultiplier = rayMarchParams->cloud.densityMultiplier;
dst->cloud.enableCloudMode = rayMarchParams->cloud.enableCloudMode;
dst->cloud.pad1 = 0.f;
dst->cloud.pad2 = 0.f;
dst->cloud.ambientColor = rayMarchParams->cloud.ambientColor;
dst->cloud.ambientMultiplier = rayMarchParams->cloud.ambientMultiplier;
dst->cloud.volumeBaseColor = rayMarchParams->cloud.volumeBaseColor;
dst->cloud.volumeColorMultiplier = rayMarchParams->cloud.volumeColorMultiplier;
dst->cloud.sunDirection = rayMarchParams->cloud.sunDirection;
dst->cloud.shadowStepMultiplier = rayMarchParams->cloud.shadowStepMultiplier;
dst->cloud.attenuationMultiplier = rayMarchParams->cloud.attenuationMultiplier;
dst->cloud.numShadowSteps = rayMarchParams->cloud.numShadowSteps;
}
// Fills one layer's isosurface ray march constants.
NV_FLOW_INLINE void NvFlowRayMarchIsosurfaceLayerShaderParams_populate(
NvFlowRayMarchIsosurfaceLayerShaderParams* dst,
NvFlowUint densityLevelIdx,
NvFlowUint layerParamIdx,
const NvFlowSparseParams* sparseParams,
const NvFlowRayMarchIsosurfaceParams* rayMarchParams
)
{
const NvFlowSparseLevelParams* levelParams = &sparseParams->levels[densityLevelIdx];
const NvFlowSparseLayerParams* layerParams = &sparseParams->layers[layerParamIdx];
NvFlowFloat3 blockSizeWorld = layerParams->blockSizeWorld;
NvFlowFloat3 blockSizeWorldInv = layerParams->blockSizeWorldInv;
float minBlockSizeWorld = fminf(blockSizeWorld.x, fminf(blockSizeWorld.y, blockSizeWorld.z));
float maxBlockSizeWorld = fmaxf(blockSizeWorld.x, fmaxf(blockSizeWorld.y, blockSizeWorld.z));
NvFlowFloat3 cellSize = NvFlowFloat3{
blockSizeWorld.x / float(levelParams->blockDimLessOne.x + 1u),
blockSizeWorld.y / float(levelParams->blockDimLessOne.y + 1u),
blockSizeWorld.z / float(levelParams->blockDimLessOne.z + 1u)
};
NvFlowFloat3 cellSizeInv = { 1.f / cellSize.x, 1.f / cellSize.y, 1.f / cellSize.z };
float cellSizeMin = fminf(fminf(cellSize.x, cellSize.y), cellSize.z);
float stepSize = rayMarchParams->stepSizeScale * cellSizeMin;
float stepSizeInv = 1.f / stepSize;
dst->blockSizeWorld = blockSizeWorld;
dst->minBlockSizeWorld = minBlockSizeWorld;
dst->blockSizeWorldInv = blockSizeWorldInv;
dst->maxBlockSizeWorld = maxBlockSizeWorld;
dst->cellSize = cellSize;
dst->stepSize = stepSize;
dst->cellSizeInv = cellSizeInv;
dst->stepSizeInv = stepSizeInv;
dst->locationMin = layerParams->locationMin;
dst->locationMax = layerParams->locationMax;
dst->worldMin.x = (layerParams->locationMin.x - 0.5f) * layerParams->blockSizeWorld.x;
dst->worldMin.y = (layerParams->locationMin.y - 0.5f) * layerParams->blockSizeWorld.y;
dst->worldMin.z = (layerParams->locationMin.z - 0.5f) * layerParams->blockSizeWorld.z;
dst->enableBlockWireframe = rayMarchParams->enableBlockWireframe;
dst->worldMax.x = (layerParams->locationMax.x + 0.5f) * layerParams->blockSizeWorld.x;
dst->worldMax.y = (layerParams->locationMax.y + 0.5f) * layerParams->blockSizeWorld.y;
dst->worldMax.z = (layerParams->locationMax.z + 0.5f) * layerParams->blockSizeWorld.z;
dst->visualizeNormals = rayMarchParams->visualizeNormals;
dst->layer = layerParams->layer;
dst->densityThreshold = rayMarchParams->densityThreshold;
dst->refractionMode = rayMarchParams->refractionMode;
dst->pad2 = 0u;
dst->fluidColor = rayMarchParams->fluidColor;
dst->fluidIoR = rayMarchParams->fluidIoR;
dst->fluidSpecularReflectance = rayMarchParams->fluidSpecularReflectance;
dst->fluidAbsorptionCoefficient = rayMarchParams->fluidAbsorptionCoefficient;
dst->fluidDiffuseReflectance = rayMarchParams->fluidDiffuseReflectance;
dst->pad3 = 0.f;
dst->fluidRadiance = rayMarchParams->fluidRadiance;
dst->pad4 = 0.f;
}
// Fills the camera/frustum-level constants shared by all layers during ray marching.
NV_FLOW_INLINE void NvFlowRayMarchShaderParams_populate(
NvFlowRayMarchShaderParams* dst,
NvFlowUint velocityLevelIdx,
NvFlowUint densityLevelIdx,
const NvFlowSparseParams* sparseParams,
const NvFlowFloat4x4* view,
const NvFlowFloat4x4* projection,
const NvFlowFloat4x4* projectionJittered,
NvFlowUint textureWidth,
NvFlowUint textureHeight,
NvFlowUint sceneDepthWidth,
NvFlowUint sceneDepthHeight,
float compositeColorScale
)
{
using namespace NvFlowMath;
NvFlowFloat4x4 projectionInv = matrixInverse(*projection);
NvFlowFloat4x4 projectionJitteredInv = matrixInverse(*projectionJittered);
NvFlowFloat4x4 viewInv = matrixInverse(*view);
FrustumRays frustumRays = {};
computeFrustumRays(&frustumRays, viewInv, projectionInv);
const NvFlowSparseLevelParams* levelParamsVelocity = &sparseParams->levels[velocityLevelIdx];
const NvFlowSparseLevelParams* levelParamsDensity = &sparseParams->levels[densityLevelIdx];
dst->levelParamsVelocity = *levelParamsVelocity;
dst->levelParamsDensity = *levelParamsDensity;
// matrices are transposed here to match the layout expected on the shader side
dst->projection = NvFlowMath::matrixTranspose(*projection);
dst->view = NvFlowMath::matrixTranspose(*view);
dst->projectionJitteredInv = NvFlowMath::matrixTranspose(projectionJitteredInv);
dst->viewInv = NvFlowMath::matrixTranspose(viewInv);
dst->rayDir00 = frustumRays.rayDir00;
dst->rayDir10 = frustumRays.rayDir10;
dst->rayDir01 = frustumRays.rayDir01;
dst->rayDir11 = frustumRays.rayDir11;
dst->rayOrigin00 = frustumRays.rayOrigin00;
dst->rayOrigin10 = frustumRays.rayOrigin10;
dst->rayOrigin01 = frustumRays.rayOrigin01;
dst->rayOrigin11 = frustumRays.rayOrigin11;
dst->width = float(textureWidth);
dst->height = float(textureHeight);
dst->widthInv = 1.f / float(textureWidth);
dst->heightInv = 1.f / float(textureHeight);
dst->depthWidth = float(sceneDepthWidth);
dst->depthHeight = float(sceneDepthHeight);
dst->depthWidthInv = 1.f / float(sceneDepthWidth);
dst->depthHeightInv = 1.f / float(sceneDepthHeight);
dst->numLayers = sparseParams->layerCount;
dst->maxWorldDistance = INFINITY;
dst->isReverseZ = frustumRays.isReverseZ;
dst->compositeColorScale = compositeColorScale;
}
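// Illustrative usage sketch: populate the shared frustum constants and one
// layer's constants before uploading both to the ray march shader. All of the
// index, size, and parameter variables are assumed to be supplied by the caller.
#if 0
NvFlowRayMarchShaderParams globals = {};
NvFlowRayMarchShaderParams_populate(
    &globals, velocityLevelIdx, densityLevelIdx, sparseParams,
    &view, &projection, &projectionJittered,
    textureWidth, textureHeight, sceneDepthWidth, sceneDepthHeight,
    1.f); // compositeColorScale

NvFlowRayMarchLayerShaderParams layerConstants = {};
NvFlowRayMarchLayerShaderParams_populate(
    &layerConstants, velocityLevelIdx, densityLevelIdx, layerParamIdx,
    sparseParams, rayMarchParams);
#endif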
// Fills one layer's self-shadow constants; the embedded cloud parameters are set
// to defaults (see below) rather than taken from user input.
NV_FLOW_INLINE void NvFlowSelfShadowLayerShaderParams_populate(
NvFlowSelfShadowLayerShaderParams* dst,
NvFlowUint coarseLevelIdx,
NvFlowUint fineLevelIdx,
NvFlowUint layerParamIdx,
const NvFlowSparseParams* sparseParams,
const NvFlowShadowParams* shadowParams
)
{
bool isCoarse = coarseLevelIdx != fineLevelIdx;
const NvFlowSparseLevelParams* coarseDensityLevelParams = &sparseParams->levels[coarseLevelIdx];
const NvFlowSparseLayerParams* layerParams = &sparseParams->layers[layerParamIdx];
int layer = layerParams->layer;
NvFlowFloat3 blockSizeWorld = layerParams->blockSizeWorld;
NvFlowFloat3 blockSizeWorldInv = layerParams->blockSizeWorldInv;
float minBlockSizeWorld = fminf(blockSizeWorld.x, fminf(blockSizeWorld.y, blockSizeWorld.z));
float maxBlockSizeWorld = fmaxf(blockSizeWorld.x, fmaxf(blockSizeWorld.y, blockSizeWorld.z));
NvFlowFloat3 cellSize = NvFlowFloat3{
blockSizeWorld.x / float(coarseDensityLevelParams->blockDimLessOne.x + 1u),
blockSizeWorld.y / float(coarseDensityLevelParams->blockDimLessOne.y + 1u),
blockSizeWorld.z / float(coarseDensityLevelParams->blockDimLessOne.z + 1u)
};
NvFlowFloat3 cellSizeInv = { 1.f / cellSize.x, 1.f / cellSize.y, 1.f / cellSize.z };
float cellSizeMin = fminf(fminf(cellSize.x, cellSize.y), cellSize.z);
float stepSize = shadowParams->stepSizeScale * cellSizeMin;
float stepSizeInv = 1.f / stepSize;
float stepOffset = shadowParams->stepOffsetScale * cellSizeMin;
// normalize alphaScale based on stepSize
float alphaScale = 1.f - expf(-shadowParams->attenuation * stepSize);
float layerColormapV = (float(layerParamIdx) + 0.5f) / float(sparseParams->layerCount);
dst->base.blockSizeWorld = blockSizeWorld;
dst->base.minBlockSizeWorld = minBlockSizeWorld;
dst->base.blockSizeWorldInv = blockSizeWorldInv;
dst->base.maxBlockSizeWorld = maxBlockSizeWorld;
dst->base.cellSize = cellSize;
dst->base.stepSize = stepSize;
dst->base.cellSizeInv = cellSizeInv;
dst->base.stepSizeInv = stepSizeInv;
dst->base.locationMin = layerParams->locationMin;
dst->base.locationMax = layerParams->locationMax;
dst->base.worldMin.x = (layerParams->locationMin.x - 0.5f) * layerParams->blockSizeWorld.x;
dst->base.worldMin.y = (layerParams->locationMin.y - 0.5f) * layerParams->blockSizeWorld.y;
dst->base.worldMin.z = (layerParams->locationMin.z - 0.5f) * layerParams->blockSizeWorld.z;
dst->base.enableBlockWireframe = NV_FLOW_FALSE;
dst->base.worldMax.x = (layerParams->locationMax.x + 0.5f) * layerParams->blockSizeWorld.x;
dst->base.worldMax.y = (layerParams->locationMax.y + 0.5f) * layerParams->blockSizeWorld.y;
dst->base.worldMax.z = (layerParams->locationMax.z + 0.5f) * layerParams->blockSizeWorld.z;
dst->base.enableRawMode = NV_FLOW_FALSE;
dst->base.velocityCellSizeInv = cellSizeInv;
dst->base.deltaTime = layerParams->deltaTime;
dst->base.layer = layer;
dst->base.layerColormapV = layerColormapV;
dst->base.alphaScale = alphaScale;
dst->base.colorScale = 1.f;
dst->base.shadowFactor = 0.f;
dst->base.pad1 = 0.f;
dst->base.pad2 = 0.f;
dst->base.pad3 = 0.f;
// Set cloud mode to default
dst->base.cloud.densityMultiplier = 0.5f;
dst->base.cloud.enableCloudMode = NV_FLOW_FALSE;
dst->base.cloud.pad1 = 0.f;
dst->base.cloud.pad2 = 0.f;
dst->base.cloud.ambientColor.x = 0.4f;
dst->base.cloud.ambientColor.y = 0.55f;
dst->base.cloud.ambientColor.z = 0.9f;
dst->base.cloud.ambientMultiplier = 1.f;
dst->base.cloud.volumeBaseColor.x = 1.1f;
dst->base.cloud.volumeBaseColor.y = 1.f;
dst->base.cloud.volumeBaseColor.z = 0.9f;
dst->base.cloud.volumeColorMultiplier = 1.f;
dst->base.cloud.sunDirection.x = 1.f;
dst->base.cloud.sunDirection.y = 1.f;
dst->base.cloud.sunDirection.z = 1.f;
dst->base.cloud.shadowStepMultiplier = 1.f;
dst->base.cloud.attenuationMultiplier.x = 1.f;
dst->base.cloud.attenuationMultiplier.y = 1.f;
dst->base.cloud.attenuationMultiplier.z = 1.f;
dst->base.cloud.numShadowSteps = 10u;
dst->minIntensity = shadowParams->minIntensity;
dst->numSteps = isCoarse ? (shadowParams->numSteps / 2u) : shadowParams->numSteps;
dst->isPointLight = shadowParams->isPointLight;
dst->stepOffset = stepOffset;
dst->lightDirection = shadowParams->lightDirection;
dst->enabled = shadowParams->enabled;
dst->lightPosition = shadowParams->lightPosition;
dst->pad3 = 0.f;
}
// Fills the block-level self-shadow constants referencing the coarse and fine density tables.
NV_FLOW_INLINE void NvFlowSelfShadowShaderParams_populate(
NvFlowSelfShadowShaderParams* dst,
NvFlowUint coarseLevelIdx,
NvFlowUint fineLevelIdx,
const NvFlowSparseParams* sparseParams,
NvFlowUint blockIdxOffset
)
{
dst->blockIdxOffset = blockIdxOffset;
dst->pad1 = 0u;
dst->pad2 = 0u;
dst->pad3 = 0u;
dst->coarseDensityTable = sparseParams->levels[coarseLevelIdx];
dst->densityTable = sparseParams->levels[fineLevelIdx];
}
#endif
#endif | 15,554 | C | 41.969613 | 97 | 0.777356 |
NVIDIA-Omniverse/PhysX/flow/include/nvflowext/shaders/NvFlowRayMarchParams.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#ifndef NV_FLOW_RAY_MARCH_PARAMS_H
#define NV_FLOW_RAY_MARCH_PARAMS_H
#include "NvFlowShaderTypes.h"
struct NvFlowRayMarchCloudLayerShaderParams
{
float densityMultiplier;
NvFlowUint enableCloudMode;
float pad1;
float pad2;
NvFlowFloat3 ambientColor;
float ambientMultiplier;
NvFlowFloat3 volumeBaseColor;
float volumeColorMultiplier;
NvFlowFloat3 sunDirection;
float shadowStepMultiplier;
NvFlowFloat3 attenuationMultiplier;
int numShadowSteps;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowRayMarchCloudLayerShaderParams NvFlowRayMarchCloudLayerShaderParams;
#endif
struct NvFlowRayMarchLayerShaderParams
{
NvFlowFloat3 blockSizeWorld;
float minBlockSizeWorld;
NvFlowFloat3 blockSizeWorldInv;
float maxBlockSizeWorld;
NvFlowFloat3 cellSize;
float stepSize;
NvFlowFloat3 cellSizeInv;
float stepSizeInv;
NvFlowInt4 locationMin;
NvFlowInt4 locationMax;
NvFlowFloat3 worldMin;
NvFlowUint enableBlockWireframe;
NvFlowFloat3 worldMax;
NvFlowUint enableRawMode;
NvFlowFloat3 velocityCellSizeInv;
float deltaTime;
int layer;
float layerColormapV;
float alphaScale;
float colorScale;
float shadowFactor;
float pad1;
float pad2;
float pad3;
NvFlowRayMarchCloudLayerShaderParams cloud;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowRayMarchLayerShaderParams NvFlowRayMarchLayerShaderParams;
#endif
struct NvFlowRayMarchIsosurfaceLayerShaderParams
{
NvFlowFloat3 blockSizeWorld;
float minBlockSizeWorld;
NvFlowFloat3 blockSizeWorldInv;
float maxBlockSizeWorld;
NvFlowFloat3 cellSize;
float stepSize;
NvFlowFloat3 cellSizeInv;
float stepSizeInv;
NvFlowInt4 locationMin;
NvFlowInt4 locationMax;
NvFlowFloat3 worldMin;
NvFlowUint enableBlockWireframe;
NvFlowFloat3 worldMax;
NvFlowUint visualizeNormals;
int layer;
float densityThreshold;
NvFlowUint refractionMode;
NvFlowUint pad2;
NvFlowFloat3 fluidColor;
float fluidIoR;
NvFlowFloat3 fluidSpecularReflectance;
float fluidAbsorptionCoefficient;
NvFlowFloat3 fluidDiffuseReflectance;
float pad3;
NvFlowFloat3 fluidRadiance;
float pad4;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowRayMarchIsosurfaceLayerShaderParams NvFlowRayMarchIsosurfaceLayerShaderParams;
#endif
struct NvFlowRayMarchShaderParams
{
NvFlowSparseLevelParams levelParamsVelocity;
NvFlowSparseLevelParams levelParamsDensity;
NvFlowFloat4x4 projection;
NvFlowFloat4x4 view;
NvFlowFloat4x4 projectionJitteredInv;
NvFlowFloat4x4 viewInv;
NvFlowFloat4 rayDir00;
NvFlowFloat4 rayDir10;
NvFlowFloat4 rayDir01;
NvFlowFloat4 rayDir11;
NvFlowFloat4 rayOrigin00;
NvFlowFloat4 rayOrigin10;
NvFlowFloat4 rayOrigin01;
NvFlowFloat4 rayOrigin11;
float width;
float height;
float widthInv;
float heightInv;
float depthWidth;
float depthHeight;
float depthWidthInv;
float depthHeightInv;
NvFlowUint numLayers;
float maxWorldDistance;
NvFlowUint isReverseZ;
float compositeColorScale;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowRayMarchShaderParams NvFlowRayMarchShaderParams;
#endif
struct NvFlowSelfShadowLayerShaderParams
{
NvFlowRayMarchLayerShaderParams base;
float minIntensity;
NvFlowUint numSteps;
NvFlowUint isPointLight;
float stepOffset;
NvFlowFloat3 lightDirection;
NvFlowUint enabled;
NvFlowFloat3 lightPosition;
float pad3;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowSelfShadowLayerShaderParams NvFlowSelfShadowLayerShaderParams;
#endif
struct NvFlowSelfShadowShaderParams
{
NvFlowUint blockIdxOffset;
NvFlowUint pad1;
NvFlowUint pad2;
NvFlowUint pad3;
NvFlowSparseLevelParams coarseDensityTable;
NvFlowSparseLevelParams densityTable;
};
#ifdef NV_FLOW_CPU
typedef struct NvFlowSelfShadowShaderParams NvFlowSelfShadowShaderParams;
#endif
#endif | 5,226 | C | 26.366492 | 99 | 0.820896 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowStringHash.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowTypes.h"
#include "NvFlowArray.h"
NV_FLOW_INLINE NvFlowUint NvFlowStringHashFNV(const char* a)
{
// FNV-1a
NvFlowUint hash = 2166136261u;
NvFlowUint idx = 0u;
if (a)
{
while (a[idx])
{
hash = 16777619u * (hash ^ (NvFlowUint)(a[idx]));
idx++;
}
}
return hash;
}
template<class T, NvFlowUint64 staticCapacity = 0u>
struct NvFlowStringHashTable
{
NvFlowArray<NvFlowUint, staticCapacity> hashs;
NvFlowArray<NvFlowArray<char>, staticCapacity> keys;
NvFlowArray<T, staticCapacity> values;
NvFlowUint64 keyCount = 0llu;
    // Open-addressed linear probe; capacity stays a power of two, so
    // (hash & (size - 1u)) selects the home slot. Returns ~0llu if not found.
    NvFlowUint64 find(const char* path, NvFlowUint hash)
{
path = path ? path : "";
NvFlowUint64 beginIdx = hash & (hashs.size - 1u);
for (NvFlowUint64 iterIdx = 0u; iterIdx < hashs.size; iterIdx++)
{
NvFlowUint64 idx = (iterIdx + beginIdx) & (hashs.size - 1u);
if (hashs[idx] == hash &&
keys[idx].size > 0u &&
strcmp(keys[idx].data, path) == 0)
{
return idx;
}
}
return ~0llu;
}
    // Inserts without growing; the caller must guarantee a free slot exists.
    // Returns the slot index of the new or already-present key.
    NvFlowUint64 insertNoResize(const char* path, NvFlowUint hash, const T& value, NvFlowBool32* pSuccess = nullptr)
{
path = path ? path : "";
if (pSuccess)
{
*pSuccess = NV_FLOW_FALSE;
}
NvFlowUint64 beginIdx = hash & (hashs.size - 1u);
for (NvFlowUint64 iterIdx = 0u; iterIdx < hashs.size; iterIdx++)
{
NvFlowUint64 idx = (iterIdx + beginIdx) & (hashs.size - 1u);
if (keys[idx].size == 0u)
{
keyCount++;
hashs[idx] = hash;
for (NvFlowUint64 strIdx = 0u; path[strIdx]; strIdx++)
{
keys[idx].pushBack(path[strIdx]);
}
keys[idx].pushBack('\0');
values[idx] = value;
if (pSuccess)
{
*pSuccess = NV_FLOW_TRUE;
}
return idx;
}
else if (hashs[idx] == hash &&
keys[idx].size > 0u &&
strcmp(keys[idx].data, path) == 0)
{
return idx;
}
}
return ~0llu;
}
NvFlowUint64 insert(const char* path, NvFlowUint hash, const T& value, NvFlowBool32* pSuccess = nullptr)
{
// resize if adding key would make 50+% full
if (2u * (keyCount + 1u) >= hashs.size)
{
NvFlowArray<NvFlowUint, staticCapacity> hashs_old(std::move(hashs));
NvFlowArray<NvFlowArray<char>, staticCapacity> keys_old(std::move(keys));
NvFlowArray<T, staticCapacity> values_old(std::move(values));
NvFlowUint64 newSize = 1u;
while (newSize <= hashs_old.size)
{
newSize *= 2u;
}
hashs.reserve(newSize);
keys.reserve(newSize);
values.reserve(newSize);
hashs.size = newSize;
keys.size = newSize;
values.size = newSize;
keyCount = 0u; // reset key count, because insert counts it again
for (NvFlowUint64 idx = 0u; idx < hashs_old.size; idx++)
{
if (keys_old[idx].size > 0u)
{
insertNoResize(keys_old[idx].data, hashs_old[idx], values_old[idx], nullptr);
}
}
}
return insertNoResize(path, hash, value, pSuccess);
}
NvFlowBool32 erase(const char* path, NvFlowUint hash)
{
NvFlowUint64 findIdx = find(path, hash);
if (findIdx != ~0llu)
{
keyCount--;
hashs[findIdx] = 0u;
keys[findIdx].size = 0u;
values[findIdx] = T();
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
};
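// Illustrative usage sketch: insert, look up, and erase one key. The key string
// and the int payload are arbitrary examples.
#if 0
NvFlowStringHashTable<int> table;

const char* key = "assets/fire.nvdb";
NvFlowUint hash = NvFlowStringHashFNV(key);

NvFlowBool32 success = NV_FLOW_FALSE;
table.insert(key, hash, 42, &success);

NvFlowUint64 findIdx = table.find(key, hash);
if (findIdx != ~0llu)
{
    int value = table.values[findIdx]; // 42
}

table.erase(key, hash);
#endif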
| 5,539 | C | 34.741935 | 116 | 0.575194 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowString.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "NvFlowString.h"
#include "NvFlowArray.h"
#include <stdio.h>
struct NvFlowStringPool
{
NvFlowArray<NvFlowArray<char>, 16u> heaps;
};
NvFlowStringPool* NvFlowStringPoolCreate()
{
return new NvFlowStringPool();
}
void NvFlowStringPoolAllocate_newHeap(NvFlowStringPool* ptr, NvFlowUint64 allocSize)
{
auto& currentHeap = ptr->heaps[ptr->heaps.allocateBack()];
NvFlowUint64 heapSize = 4096u; // default heap size
while (heapSize < allocSize)
{
heapSize *= 2u;
}
currentHeap.reserve(heapSize);
}
NvFlowUint64 NvFlowStringPool_alignment(NvFlowUint64 size)
{
    // round up to the next multiple of 8 bytes
    return 8u * ((size + 7u) / 8u);
}
// Carves an aligned block from the tail of the current heap, adding a new heap when full.
char* NvFlowStringPoolAllocate_internal(NvFlowStringPool* ptr, NvFlowUint64 size)
{
NvFlowUint64 allocSize = NvFlowStringPool_alignment(size);
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
if (currentHeap.size + allocSize <= currentHeap.capacity)
{
char* ret = currentHeap.data + currentHeap.size;
ret[size - 1] = 0;
currentHeap.size += allocSize;
return ret;
}
}
NvFlowStringPoolAllocate_newHeap(ptr, allocSize);
return NvFlowStringPoolAllocate_internal(ptr, size);
}
char* NvFlowStringPoolAllocate(NvFlowStringPool* ptr, NvFlowUint64 size)
{
    // +1 reserves room for the null terminator
    return NvFlowStringPoolAllocate_internal(ptr, size + 1);
}
// Exposes the unused tail of the current heap for speculative writes; commit the
// bytes actually used via NvFlowStringPoolTempAllocateCommit().
void NvFlowStringPoolTempAllocate(NvFlowStringPool* ptr, char** p_str_data, NvFlowUint64* p_str_size)
{
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
char* str_data = currentHeap.data + currentHeap.size;
NvFlowUint64 str_size = currentHeap.capacity - currentHeap.size;
if (str_size > 0)
{
str_data[str_size - 1] = 0;
str_size--;
*p_str_size = str_size;
*p_str_data = str_data;
return;
}
}
NvFlowStringPoolAllocate_newHeap(ptr, 8u);
NvFlowStringPoolTempAllocate(ptr, p_str_data, p_str_size);
}
void NvFlowStringPoolTempAllocateCommit(NvFlowStringPool* ptr, char* str_data, NvFlowUint64 str_size)
{
// to reverse the str_size-- in NvFlowStringPoolTempAllocate()
str_size++;
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
char* compStr_data = currentHeap.data + currentHeap.size;
NvFlowUint64 compStr_size = currentHeap.capacity - currentHeap.size;
if (str_data == compStr_data && str_size <= compStr_size)
{
NvFlowUint64 allocSize = NvFlowStringPool_alignment(str_size);
currentHeap.size += allocSize;
}
}
}
void NvFlowStringPoolDestroy(NvFlowStringPool* ptr)
{
delete ptr;
}
void NvFlowStringPoolReset(NvFlowStringPool* ptr)
{
    // rewind every heap without releasing its memory, so allocations can be reused
for (NvFlowUint64 heapIdx = 0u; heapIdx < ptr->heaps.size; heapIdx++)
{
ptr->heaps[heapIdx].size = 0u;
}
ptr->heaps.size = 0u;
}
char* NvFlowStringPrintV(NvFlowStringPool* pool, const char* format, va_list args)
{
va_list argsCopy;
va_copy(argsCopy, args);
NvFlowUint64 str_size = ~0llu;
char* str_data = nullptr;
// first attempt: format directly into the unused tail of the current heap
NvFlowStringPoolTempAllocate(pool, &str_data, &str_size);
NvFlowUint64 count = (NvFlowUint64)vsnprintf(str_data, str_size + 1, format, args);
if (count <= str_size)
{
str_size = count;
NvFlowStringPoolTempAllocateCommit(pool, str_data, str_size);
}
else
{
    // the formatted string did not fit: allocate the exact size and format again
    str_data = NvFlowStringPoolAllocate(pool, count);
str_size = count;
count = vsnprintf(str_data, str_size + 1, format, argsCopy);
}
va_end(argsCopy);
return str_data;
}
char* NvFlowStringPrint(NvFlowStringPool* pool, const char* format, ...)
{
va_list args;
va_start(args, format);
char* str = NvFlowStringPrintV(pool, format, args);
va_end(args);
return str;
}
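// Illustrative usage sketch: format strings into a pool and bulk-release them.
// The format arguments are arbitrary examples.
#if 0
NvFlowStringPool* pool = NvFlowStringPoolCreate();

char* label = NvFlowStringPrint(pool, "block_%d_of_%d", 3, 16);
// ... use label; the pool owns the allocation ...

NvFlowStringPoolReset(pool);   // invalidates label but keeps heap memory
NvFlowStringPoolDestroy(pool);
#endif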
/// ************************** File Utils *********************************************
const char* NvFlowTextFileLoad(NvFlowStringPool* pool, const char* filename)
{
FILE* file = nullptr;
#if defined(_WIN32)
fopen_s(&file, filename, "r");
#else
file = fopen(filename, "r");
#endif
if (file == nullptr)
{
return nullptr;
}
NvFlowUint64 chunkSize = 4096u;
NvFlowArray<const char*, 8u> chunks;
size_t readBytes = 0u;
do
{
chunkSize *= 2u;
char* chunkStr = NvFlowStringPoolAllocate(pool, chunkSize);
chunkStr[0] = '\0';
readBytes = fread(chunkStr, 1u, chunkSize, file);
chunkStr[readBytes] = '\0';
chunks.pushBack(chunkStr);
} while(readBytes == chunkSize);
fclose(file);
const char* text_data = (chunks.size == 1u) ? chunks[0u] : NvFlowStringConcatN(pool, chunks.data, chunks.size);
//NvFlowUint64 strLength = NvFlowStringLength(text_data);
//printf("NvFlowTextureFileLoad(%s) %llu bytes in %llu chunks\n", filename, strLength, chunks.size);
return text_data;
}
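// Illustrative usage sketch: load a text file into pool-owned memory and write it
// back out. The file names are arbitrary examples.
#if 0
NvFlowStringPool* pool = NvFlowStringPoolCreate();
const char* text = NvFlowTextFileLoad(pool, "shader.hlsl");
if (text)
{
    NvFlowTextFileStore(text, "shader_copy.hlsl");
}
NvFlowStringPoolDestroy(pool);
#endif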
void NvFlowTextFileStore(const char* text_data, const char* filename)
{
FILE* file = nullptr;
#if defined(_WIN32)
fopen_s(&file, filename, "w");
#else
file = fopen(filename, "w");
#endif
if (file == nullptr)
{
return;
}
NvFlowUint64 text_size = NvFlowStringLength(text_data);
fwrite(text_data, 1u, text_size, file);
fclose(file);
}
NvFlowBool32 NvFlowTextFileTestOpen(const char* filename)
{
FILE* file = nullptr;
#if defined(_WIN32)
fopen_s(&file, filename, "r");
#else
file = fopen(filename, "r");
#endif
if (file)
{
fclose(file);
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
void NvFlowTextFileRemove(const char* name)
{
remove(name);
}
void NvFlowTextFileRename(const char* oldName, const char* newName)
{
rename(oldName, newName);
}
NvFlowBool32 NvFlowTextFileDiffAndWriteIfModified(const char* filenameDst, const char* filenameTmp)
{
    // byte-compare the temp file against the destination and overwrite the
    // destination only when the contents differ; the temp file is always removed
FILE* fileTmp = nullptr;
FILE* fileDst = nullptr;
bool match = true;
#if defined(_WIN32)
fopen_s(&fileDst, filenameDst, "r");
#else
fileDst = fopen(filenameDst, "r");
#endif
if (fileDst)
{
#if defined(_WIN32)
fopen_s(&fileTmp, filenameTmp, "r");
#else
fileTmp = fopen(filenameTmp, "r");
#endif
if (fileTmp)
{
while (1)
{
int a = fgetc(fileTmp);
int b = fgetc(fileDst);
if (a == EOF && b == EOF)
{
break;
}
else if (a != b)
{
match = false;
break;
}
}
fclose(fileTmp);
}
else
{
match = false;
}
fclose(fileDst);
}
else
{
match = false;
}
if (!match)
{
remove(filenameDst);
rename(filenameTmp, filenameDst);
}
// always clean up the temp file
remove(filenameTmp);
return !match;
}
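// Typical use is code generation (a sketch, hypothetical filenames): write new
// output to a temp file, then replace the destination only when contents
// changed, so unchanged files keep their timestamps and skip rebuilds:
// NvFlowTextFileStore(generatedText, "GeneratedShader.h.tmp");
// if (NvFlowTextFileDiffAndWriteIfModified("GeneratedShader.h", "GeneratedShader.h.tmp")) { /* changed */ }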
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowArrayBuffer.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowUploadBuffer.h"
#include <string.h>
struct NvFlowArrayBufferData
{
const void* data;
NvFlowUint64 elementCount;
NvFlowUint64 version;
};
struct NvFlowArrayBufferState
{
NvFlowBool32 isDirty;
NvFlowUint64 elementCount;
NvFlowUint64 version;
NvFlowUint64 firstElement;
};
struct NvFlowArrayBuffer
{
NvFlowUploadBuffer uploadBuffer = {};
NvFlowArray<NvFlowArrayBufferState> state;
NvFlowArray<NvFlowUploadBufferCopyRange> copyRanges;
NvFlowUint64 totalSizeInBytes = 0llu;
};
NV_FLOW_INLINE void NvFlowArrayBuffer_init_custom(
NvFlowContextInterface* contextInterface,
NvFlowContext* context,
NvFlowArrayBuffer* ptr,
NvFlowBufferUsageFlags flags,
NvFlowFormat format,
NvFlowUint structureStride,
NvFlowBuffer*(NV_FLOW_ABI* createBuffer)(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata),
void(NV_FLOW_ABI* addPassCopyBuffer)(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata),
void* userdata
)
{
NvFlowUploadBuffer_init_custom(contextInterface, context, &ptr->uploadBuffer, flags, format, structureStride, createBuffer, addPassCopyBuffer, userdata);
}
NV_FLOW_INLINE NvFlowBuffer* NvFlowArrayBuffer_createBuffer(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata)
{
NvFlowArrayBuffer* ptr = (NvFlowArrayBuffer*)userdata;
return ptr->uploadBuffer.contextInterface->createBuffer(context, memoryType, desc);
}
NV_FLOW_INLINE void NvFlowArrayBuffer_addPassCopyBuffer(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata)
{
NvFlowArrayBuffer* ptr = (NvFlowArrayBuffer*)userdata;
ptr->uploadBuffer.contextInterface->addPassCopyBuffer(context, params);
}
NV_FLOW_INLINE void NvFlowArrayBuffer_init(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowArrayBuffer* ptr, NvFlowBufferUsageFlags flags, NvFlowFormat format, NvFlowUint structureStride)
{
NvFlowArrayBuffer_init_custom(contextInterface, context, ptr, flags, format, structureStride, NvFlowArrayBuffer_createBuffer, NvFlowArrayBuffer_addPassCopyBuffer, ptr);
}
NV_FLOW_INLINE void NvFlowArrayBuffer_destroy(NvFlowContext* context, NvFlowArrayBuffer* ptr)
{
NvFlowUploadBuffer_destroy(context, &ptr->uploadBuffer);
ptr->state.size = 0u;
ptr->copyRanges.size = 0u;
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowArrayBuffer_update(
NvFlowContext* context,
NvFlowArrayBuffer* ptr,
const NvFlowArrayBufferData* arrayDatas,
NvFlowUint64* outFirstElements,
NvFlowUint64 arrayCount,
NvFlowUint64* outTotalSizeInBytes,
const char* debugName
)
{
// if arrayCount changes, reset all state
bool shouldResetState = false;
if (ptr->state.size != arrayCount)
{
shouldResetState = true;
}
// if any array size changes, reset all state, since buffer resize might occur
if (!shouldResetState)
{
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (ptr->state[arrayIdx].elementCount != arrayDatas[arrayIdx].elementCount)
{
shouldResetState = true;
}
}
}
if (shouldResetState)
{
ptr->state.reserve(arrayCount);
ptr->state.size = arrayCount;
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
ptr->state[arrayIdx].isDirty = NV_FLOW_TRUE;
ptr->state[arrayIdx].elementCount = 0llu;
ptr->state[arrayIdx].version = 0llu;
ptr->state[arrayIdx].firstElement = 0llu;
}
}
// mark any array dirty if version changes
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (arrayDatas[arrayIdx].elementCount != 0u || ptr->state[arrayIdx].elementCount != 0u)
{
if (arrayDatas[arrayIdx].version == 0llu || arrayDatas[arrayIdx].version != ptr->state[arrayIdx].version)
{
ptr->state[arrayIdx].isDirty = NV_FLOW_TRUE;
}
}
}
NvFlowBool32 anyDirty = NV_FLOW_FALSE;
for (NvFlowUint64 arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (ptr->state[arrayIdx].isDirty)
{
anyDirty = NV_FLOW_TRUE;
}
}
// compute total size
NvFlowUint64 totalSizeInBytes = 0llu;
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
totalSizeInBytes += ptr->uploadBuffer.structureStride * arrayDatas[arrayIdx].elementCount;
}
NvFlowUint8* mapped = nullptr;
if (anyDirty)
{
mapped = (NvFlowUint8*)NvFlowUploadBuffer_map(context, &ptr->uploadBuffer, totalSizeInBytes);
}
// update state
NvFlowUint64 globalFirstElement = 0llu;
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
ptr->state[arrayIdx].elementCount = arrayDatas[arrayIdx].elementCount;
ptr->state[arrayIdx].version = arrayDatas[arrayIdx].version;
ptr->state[arrayIdx].firstElement = globalFirstElement;
globalFirstElement += ptr->state[arrayIdx].elementCount;
}
ptr->copyRanges.size = 0u;
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
if (ptr->state[arrayIdx].isDirty)
{
NvFlowUint64 offsetInBytes = ptr->uploadBuffer.structureStride * ptr->state[arrayIdx].firstElement;
NvFlowUint64 sizeInBytes = ptr->uploadBuffer.structureStride * ptr->state[arrayIdx].elementCount;
// copy to host memory
memcpy(mapped + offsetInBytes, arrayDatas[arrayIdx].data, sizeInBytes);
// add copy range
NvFlowUploadBufferCopyRange copyRange = { offsetInBytes, sizeInBytes };
ptr->copyRanges.pushBack(copyRange);
}
}
NvFlowBufferTransient* bufferTransient = nullptr;
if (anyDirty)
{
bufferTransient = NvFlowUploadBuffer_unmapDeviceN(context, &ptr->uploadBuffer, ptr->copyRanges.data, ptr->copyRanges.size, debugName);
}
else
{
bufferTransient = NvFlowUploadBuffer_getDevice(context, &ptr->uploadBuffer, totalSizeInBytes);
}
// mark all arrays as clean
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
ptr->state[arrayIdx].isDirty = NV_FLOW_FALSE;
}
if (outFirstElements)
{
for (NvFlowUint arrayIdx = 0u; arrayIdx < arrayCount; arrayIdx++)
{
outFirstElements[arrayIdx] = ptr->state[arrayIdx].firstElement;
}
}
ptr->totalSizeInBytes = totalSizeInBytes;
if (outTotalSizeInBytes)
{
*outTotalSizeInBytes = totalSizeInBytes;
}
return bufferTransient;
}
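// Usage sketch (hypothetical data): pack several CPU arrays into one device
// buffer, re-uploading only arrays whose version changed since the last call.
// NvFlowArrayBufferData datas[2] = {
//     { positions, positionCount, positionsVersion },
//     { colors, colorCount, colorsVersion }
// };
// NvFlowUint64 firstElements[2] = {};
// NvFlowBufferTransient* buf = NvFlowArrayBuffer_update(
//     context, &arrayBuffer, datas, firstElements, 2u, nullptr, "exampleArrays");
// firstElements[i] gives each array's base element within the shared buffer.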
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowArray.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#define NV_FLOW_ARRAY_CACHE_ENABLED 1
#include <new>
#include <utility>
template<class T, NvFlowUint64 staticCapacity = 0u, void(prerelease)(void* data, NvFlowUint64 size) = nullptr>
struct NvFlowArray
{
#if NV_FLOW_ARRAY_CACHE_ENABLED
static const NvFlowUint64 s_staticCapacity = staticCapacity;
#else
static const NvFlowUint64 s_staticCapacity = 0u;
#endif
T* data = nullptr;
NvFlowUint64 capacity = 0u;
NvFlowUint64 size = 0u;
unsigned char cache[s_staticCapacity * sizeof(T) + 8u];
void release()
{
for (NvFlowUint64 i = 0; i < capacity; i++)
{
data[i].~T();
}
if (data != nullptr && (T*)cache != data)
{
operator delete[](data);
}
data = nullptr;
capacity = 0u;
size = 0u;
}
void move(NvFlowArray& rhs)
{
data = rhs.data;
capacity = rhs.capacity;
size = rhs.size;
if (rhs.data == (T*)rhs.cache)
{
data = (T*)cache;
for (NvFlowUint64 idx = 0u; idx < capacity; idx++)
{
new(data + idx) T(std::move(rhs.data[idx]));
}
}
// to match destructed state
rhs.data = nullptr;
rhs.capacity = 0u;
rhs.size = 0u;
}
void reserve(NvFlowUint64 requestedCapacity)
{
if (requestedCapacity <= capacity)
{
return;
}
NvFlowUint64 newSize = size;
NvFlowUint64 newCapacity = capacity;
if (newCapacity < s_staticCapacity)
{
newCapacity = s_staticCapacity;
}
if (newCapacity == 0u)
{
newCapacity = 1u;
}
while (newCapacity < requestedCapacity)
{
newCapacity *= 2u;
}
T* newData = (T*)(newCapacity <= s_staticCapacity ? (void*)cache : operator new[](newCapacity * sizeof(T)));
// copy to new
for (NvFlowUint64 i = 0; i < newSize; i++)
{
new(newData + i) T(std::move(data[i]));
}
for (NvFlowUint64 i = newSize; i < newCapacity; i++)
{
new(newData + i) T();
}
if (prerelease)
{
prerelease(data + size, capacity - size);
}
// cleanup old
release();
// commit new
data = newData;
capacity = newCapacity;
size = newSize;
}
NvFlowArray()
{
reserve(s_staticCapacity);
}
NvFlowArray(NvFlowArray&& rhs)
{
move(rhs);
}
~NvFlowArray()
{
if (prerelease)
{
prerelease(data, capacity);
}
release();
}
T& operator[](NvFlowUint64 idx)
{
return data[idx];
}
const T& operator[](NvFlowUint64 idx) const
{
return data[idx];
}
NvFlowUint64 allocateBack()
{
reserve(size + 1);
size++;
return size - 1;
}
void pushBack(const T& v)
{
operator[](allocateBack()) = v;
}
T& back()
{
return operator[](size - 1);
}
void popBack()
{
size--;
}
};
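// Note: elements remain default-constructed out to capacity; changing size
// never runs element constructors or destructors, so popBack() leaves a
// reusable object in place. Minimal sketch:
// NvFlowArray<float, 16u> values; // first 16 elements live in the inline cache
// values.pushBack(1.f);
// values.size = 0u;               // "clear" without destroying storage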
/// Copy utility
template <class T, NvFlowUint64 staticCapacity = 0u, void(prerelease)(void* data, NvFlowUint64 size) = nullptr>
NV_FLOW_INLINE void NvFlowArray_copy(NvFlowArray<T, staticCapacity, prerelease>& dst, const NvFlowArray<T, staticCapacity, prerelease>& src)
{
dst.size = 0u;
dst.reserve(src.size);
dst.size = src.size;
for (NvFlowUint64 idx = 0u; idx < dst.size; idx++)
{
dst[idx] = src[idx];
}
}
template<class T>
NV_FLOW_INLINE void NvFlowArrayPointer_prerelease(void* dataIn, NvFlowUint64 size)
{
T* data = (T*)dataIn;
for (NvFlowUint64 idx = 0u; idx < size; idx++)
{
if (data[idx])
{
delete data[idx];
data[idx] = nullptr;
}
}
}
template<class T>
NV_FLOW_INLINE void NvFlowArrayPointer_allocate(T*& ptr)
{
ptr = new T();
}
template<class T, NvFlowUint64 staticCapacity = 0u>
struct NvFlowArrayPointer : public NvFlowArray<T, staticCapacity, NvFlowArrayPointer_prerelease<T>>
{
NvFlowArrayPointer() : NvFlowArray<T, staticCapacity, NvFlowArrayPointer_prerelease<T>>()
{
}
NvFlowArrayPointer(NvFlowArrayPointer&& rhs) : NvFlowArray<T, staticCapacity, NvFlowArrayPointer_prerelease<T>>(std::move(rhs))
{
}
~NvFlowArrayPointer()
{
}
T allocateBackPointer()
{
NvFlowUint64 allocIdx = this->allocateBack();
if (!(*this)[allocIdx])
{
NvFlowArrayPointer_allocate((*this)[allocIdx]);
}
return (*this)[allocIdx];
}
void pushBackPointer(const T& v)
{
NvFlowUint64 allocIdx = this->allocateBack();
deletePointerAtIndex(allocIdx);
(*this)[allocIdx] = v;
}
void swapPointers(NvFlowUint64 idxA, NvFlowUint64 idxB)
{
T temp = (*this)[idxA];
(*this)[idxA] = (*this)[idxB];
(*this)[idxB] = temp;
}
void removeSwapPointerAtIndex(NvFlowUint64 idx)
{
swapPointers(idx, this->size - 1u);
this->size--;
}
void removeSwapPointer(T ptr)
{
for (NvFlowUint64 idx = 0u; idx < this->size; idx++)
{
if ((*this)[idx] == ptr)
{
removeSwapPointerAtIndex(idx);
break;
}
}
}
void deletePointerAtIndex(NvFlowUint64 idx)
{
if ((*this)[idx])
{
delete (*this)[idx];
(*this)[idx] = nullptr;
}
}
void deletePointers()
{
this->size = this->capacity;
for (NvFlowUint64 idx = 0u; idx < this->size; idx++)
{
deletePointerAtIndex(idx);
}
this->size = 0u;
}
};
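// NvFlowArrayPointer owns its heap objects: when the array grows or is
// destroyed, the prerelease hook deletes pointers left in slots beyond the
// live range, and allocateBackPointer() reuses an object already present in a
// recycled slot instead of allocating a new one.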
template<class T, NvFlowUint64 staticCapacity = 0u>
struct NvFlowRingBufferPointer
{
NvFlowArrayPointer<T, staticCapacity> arr;
NvFlowUint64 freeIdx = 0u;
NvFlowUint64 frontIdx = 0u;
NvFlowUint64 backIdx = 0u;
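// arr.size is kept at a power of two, so the "& (arr.size - 1)" masking below
// is a cheap modulo. Entries in [frontIdx, backIdx) are active; entries in
// [freeIdx, frontIdx) are retired and available for reuse.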
NvFlowRingBufferPointer() : arr()
{
}
NvFlowRingBufferPointer(NvFlowRingBufferPointer&& rhs) :
arr(std::move(rhs.arr)),
freeIdx(rhs.freeIdx),
frontIdx(rhs.frontIdx),
backIdx(rhs.backIdx)
{
}
~NvFlowRingBufferPointer()
{
}
T& front()
{
return arr[frontIdx];
}
T& back()
{
return arr[(backIdx - 1u) & (arr.size - 1)];
}
NvFlowUint64 activeCount()
{
return (backIdx - frontIdx) & (arr.size - 1);
}
NvFlowUint64 freeCount()
{
return (frontIdx - freeIdx) & (arr.size - 1);
}
void popFront()
{
frontIdx = (frontIdx + 1u) & (arr.size - 1);
}
void popFree()
{
freeIdx = (freeIdx + 1u) & (arr.size - 1);
}
T& operator[](NvFlowUint64 idx)
{
return arr[(frontIdx + idx) & (arr.size - 1)];
}
const T& operator[](NvFlowUint64 idx) const
{
return arr[(frontIdx + idx) & (arr.size - 1)];
}
NvFlowUint64 allocateBack()
{
if (arr.size == 0u)
{
arr.allocateBack();
}
if (freeCount() > 0u)
{
auto tmp = arr[freeIdx];
arr[freeIdx] = arr[backIdx];
arr[backIdx] = tmp;
popFree();
}
else if ((activeCount() + 1u) > (arr.size - 1))
{
NvFlowUint64 oldSize = arr.size;
arr.reserve(2u * oldSize);
arr.size = 2u * oldSize;
if (backIdx < frontIdx)
{
for (NvFlowUint64 idx = 0u; idx < backIdx; idx++)
{
auto tmp = arr[idx + oldSize];
arr[idx + oldSize] = arr[idx];
arr[idx] = tmp;
}
backIdx += oldSize;
}
}
NvFlowUint64 allocIdx = backIdx;
backIdx = (backIdx + 1u) & (arr.size - 1);
return allocIdx;
}
void pushBack(const T& v)
{
NvFlowUint64 allocIdx = allocateBack();
arr.deletePointerAtIndex(allocIdx);
arr[allocIdx] = v;
}
T allocateBackPointer()
{
NvFlowUint64 allocIdx = allocateBack();
if (!arr[allocIdx])
{
NvFlowArrayPointer_allocate(arr[allocIdx]);
}
return arr[allocIdx];
}
void deletePointers()
{
arr.deletePointers();
}
};
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowResourceCPU.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
// Workaround to scope includes per shader
#ifdef NV_FLOW_CPU_SHADER
#undef NV_FLOW_SHADER_TYPES_H
#undef NV_FLOW_SHADER_HLSLI
#undef NV_FLOW_RAY_MARCH_PARAMS_H
#undef NV_FLOW_RAY_MARCH_HLSLI
#undef NV_FLOW_RAY_MARCH_COMMON_HLSLI
#endif
// Disabled by default, to save build time
#define NV_FLOW_CPU_SHADER_DISABLE
#ifndef NV_FLOW_RESOURCE_CPU_H
#define NV_FLOW_RESOURCE_CPU_H
#include "NvFlowContext.h"
#include <math.h>
#include <atomic>
#include <string.h>
typedef NvFlowUint NvFlowCPU_Uint;
struct NvFlowCPU_Float2;
struct NvFlowCPU_Float3;
struct NvFlowCPU_Float4;
struct NvFlowCPU_Float4x4;
struct NvFlowCPU_Int2;
struct NvFlowCPU_Int3;
struct NvFlowCPU_Int4;
struct NvFlowCPU_Uint2;
struct NvFlowCPU_Uint3;
struct NvFlowCPU_Uint4;
NV_FLOW_INLINE int NvFlowCPU_max(int a, int b)
{
return a > b ? a : b;
}
NV_FLOW_INLINE int NvFlowCPU_min(int a, int b)
{
return a < b ? a : b;
}
NV_FLOW_INLINE float NvFlowCPU_round(float v)
{
return roundf(v);
}
NV_FLOW_INLINE float NvFlowCPU_abs(float v)
{
return fabsf(v);
}
NV_FLOW_INLINE float NvFlowCPU_floor(float v)
{
return floorf(v);
}
NV_FLOW_INLINE int NvFlowCPU_abs(int v)
{
return v < 0 ? -v : v;
}
NV_FLOW_INLINE float NvFlowCPU_sqrt(float v)
{
return sqrtf(v);
}
NV_FLOW_INLINE float NvFlowCPU_exp(float v)
{
return expf(v);
}
NV_FLOW_INLINE float NvFlowCPU_pow(float a, float b)
{
return powf(a, b);
}
NV_FLOW_INLINE float NvFlowCPU_log2(float v)
{
return log2f(v);
}
NV_FLOW_INLINE float NvFlowCPU_min(float a, float b)
{
//return fminf(a, b);
return a < b ? a : b;
}
NV_FLOW_INLINE float NvFlowCPU_max(float a, float b)
{
//return fmaxf(a, b);
return a > b ? a : b;
}
NV_FLOW_INLINE float NvFlowCPU_clamp(float v, float min, float max)
{
return NvFlowCPU_max(min, NvFlowCPU_min(v, max));
}
struct NvFlowCPU_Float2
{
float x, y;
NvFlowCPU_Float2() {}
NvFlowCPU_Float2(float x, float y) : x(x), y(y) {}
NV_FLOW_INLINE NvFlowCPU_Float2(const NvFlowCPU_Int2& rhs);
NvFlowCPU_Float2 operator+(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x + rhs.x, y + rhs.y); }
NvFlowCPU_Float2 operator-(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x - rhs.x, y - rhs.y); }
NvFlowCPU_Float2 operator*(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x * rhs.x, y * rhs.y); }
NvFlowCPU_Float2 operator/(const NvFlowCPU_Float2& rhs) const { return NvFlowCPU_Float2(x / rhs.x, y / rhs.y); }
NvFlowCPU_Float2 operator+(const float& rhs) const { return NvFlowCPU_Float2(x + rhs, y + rhs); }
NvFlowCPU_Float2 operator-(const float& rhs) const { return NvFlowCPU_Float2(x - rhs, y - rhs); }
NvFlowCPU_Float2 operator*(const float& rhs) const { return NvFlowCPU_Float2(x * rhs, y * rhs); }
NvFlowCPU_Float2 operator/(const float& rhs) const { return NvFlowCPU_Float2(x / rhs, y / rhs); }
NvFlowCPU_Float2& operator+=(const NvFlowCPU_Float2& rhs) { x += rhs.x; y += rhs.y; return *this; }
NvFlowCPU_Float2& operator-=(const NvFlowCPU_Float2& rhs) { x -= rhs.x; y -= rhs.y; return *this; }
NvFlowCPU_Float2& operator*=(const NvFlowCPU_Float2& rhs) { x *= rhs.x; y *= rhs.y; return *this; }
NvFlowCPU_Float2& operator/=(const NvFlowCPU_Float2& rhs) { x /= rhs.x; y /= rhs.y; return *this; }
NvFlowCPU_Float2& operator+=(const float& rhs) { x += rhs; y += rhs; return *this; }
NvFlowCPU_Float2& operator-=(const float& rhs) { x -= rhs; y -= rhs; return *this; }
NvFlowCPU_Float2& operator*=(const float& rhs) { x *= rhs; y *= rhs; return *this; }
NvFlowCPU_Float2& operator/=(const float& rhs) { x /= rhs; y /= rhs; return *this; }
NvFlowCPU_Float2 operator+() const { return NvFlowCPU_Float2(+x, +y); }
NvFlowCPU_Float2 operator-() const { return NvFlowCPU_Float2(-x, -y); }
};
NV_FLOW_INLINE NvFlowCPU_Float2 operator+(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs + rhs.x, lhs + rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 operator-(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs - rhs.x, lhs - rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 operator*(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs * rhs.x, lhs * rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 operator/(const float& lhs, const NvFlowCPU_Float2& rhs) { return NvFlowCPU_Float2(lhs / rhs.x, lhs / rhs.y); }
NV_FLOW_INLINE NvFlowCPU_Float2 NvFlowCPU_floor(NvFlowCPU_Float2 v)
{
return NvFlowCPU_Float2(floorf(v.x), floorf(v.y));
}
struct NvFlowCPU_Float3
{
float x, y, z;
NvFlowCPU_Float3() {}
NvFlowCPU_Float3(float x, float y, float z) : x(x), y(y), z(z) {}
NV_FLOW_INLINE NvFlowCPU_Float3(const NvFlowCPU_Int3& v);
NvFlowCPU_Float3 operator+(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x + rhs.x, y + rhs.y, z + rhs.z); }
NvFlowCPU_Float3 operator-(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x - rhs.x, y - rhs.y, z - rhs.z); }
NvFlowCPU_Float3 operator*(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x * rhs.x, y * rhs.y, z * rhs.z); }
NvFlowCPU_Float3 operator/(const NvFlowCPU_Float3& rhs) const { return NvFlowCPU_Float3(x / rhs.x, y / rhs.y, z / rhs.z); }
NvFlowCPU_Float3 operator+(const float& rhs) const { return NvFlowCPU_Float3(x + rhs, y + rhs, z + rhs); }
NvFlowCPU_Float3 operator-(const float& rhs) const { return NvFlowCPU_Float3(x - rhs, y - rhs, z - rhs); }
NvFlowCPU_Float3 operator*(const float& rhs) const { return NvFlowCPU_Float3(x * rhs, y * rhs, z * rhs); }
NvFlowCPU_Float3 operator/(const float& rhs) const { return NvFlowCPU_Float3(x / rhs, y / rhs, z / rhs); }
NvFlowCPU_Float3& operator+=(const NvFlowCPU_Float3& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; return *this; }
NvFlowCPU_Float3& operator-=(const NvFlowCPU_Float3& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; return *this; }
NvFlowCPU_Float3& operator*=(const NvFlowCPU_Float3& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; return *this; }
NvFlowCPU_Float3& operator/=(const NvFlowCPU_Float3& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; return *this; }
NvFlowCPU_Float3& operator+=(const float& rhs) { x += rhs; y += rhs; z += rhs; return *this; }
NvFlowCPU_Float3& operator-=(const float& rhs) { x -= rhs; y -= rhs; z -= rhs; return *this; }
NvFlowCPU_Float3& operator*=(const float& rhs) { x *= rhs; y *= rhs; z *= rhs; return *this; }
NvFlowCPU_Float3& operator/=(const float& rhs) { x /= rhs; y /= rhs; z /= rhs; return *this; }
NvFlowCPU_Float3 operator+() const { return NvFlowCPU_Float3(+x, +y, +z); }
NvFlowCPU_Float3 operator-() const { return NvFlowCPU_Float3(-x, -y, -z); }
};
NV_FLOW_INLINE NvFlowCPU_Float3 operator*(const float& lhs, const NvFlowCPU_Float3& rhs) { return NvFlowCPU_Float3(lhs * rhs.x, lhs * rhs.y, lhs * rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_abs(NvFlowCPU_Float3 v)
{
return NvFlowCPU_Float3(fabsf(v.x), fabsf(v.y), fabsf(v.z));
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_floor(NvFlowCPU_Float3 v)
{
return NvFlowCPU_Float3(floorf(v.x), floorf(v.y), floorf(v.z));
}
NV_FLOW_INLINE float NvFlowCPU_length(NvFlowCPU_Float3 v)
{
return sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_max(NvFlowCPU_Float3 a, NvFlowCPU_Float3 b)
{
return NvFlowCPU_Float3(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y), NvFlowCPU_max(a.z, b.z));
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_min(NvFlowCPU_Float3 a, NvFlowCPU_Float3 b)
{
return NvFlowCPU_Float3(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y), NvFlowCPU_min(a.z, b.z));
}
NV_FLOW_INLINE float NvFlowCPU_dot(NvFlowCPU_Float3 a, NvFlowCPU_Float3 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_normalize(NvFlowCPU_Float3 v)
{
float length = NvFlowCPU_length(v);
if (length > 0.f)
{
v /= length;
}
return v;
}
struct NvFlowCPU_Float4
{
float x, y, z, w;
NvFlowCPU_Float4() {}
NvFlowCPU_Float4(float x, float y, float z, float w) : x(x), y(y), z(z), w(w) {}
NvFlowCPU_Float4(const NvFlowCPU_Float3& rhs, float w) : x(rhs.x), y(rhs.y), z(rhs.z), w(w) {}
NvFlowCPU_Float3& rgb() { return *((NvFlowCPU_Float3*)this); }
NvFlowCPU_Float2& rg() { return *((NvFlowCPU_Float2*)this); }
float& r() { return *((float*)this); }
NvFlowCPU_Float2& ba() { return *((NvFlowCPU_Float2*)&z); }
const NvFlowCPU_Float3& rgb() const { return *((const NvFlowCPU_Float3*)this); }
const NvFlowCPU_Float2& rg() const { return *((const NvFlowCPU_Float2*)this); }
const float& r() const { return *((const float*)this); }
const NvFlowCPU_Float2& ba() const { return *((const NvFlowCPU_Float2*)&z); }
NvFlowCPU_Float4 operator+(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w); }
NvFlowCPU_Float4 operator-(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x - rhs.x, y - rhs.y, z - rhs.z, w - rhs.w); }
NvFlowCPU_Float4 operator*(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x * rhs.x, y * rhs.y, z * rhs.z, w * rhs.w); }
NvFlowCPU_Float4 operator/(const NvFlowCPU_Float4& rhs) const { return NvFlowCPU_Float4(x / rhs.x, y / rhs.y, z / rhs.z, w / rhs.w); }
NvFlowCPU_Float4 operator+(const float& rhs) const { return NvFlowCPU_Float4(x + rhs, y + rhs, z + rhs, w + rhs); }
NvFlowCPU_Float4 operator-(const float& rhs) const { return NvFlowCPU_Float4(x - rhs, y - rhs, z - rhs, w - rhs); }
NvFlowCPU_Float4 operator*(const float& rhs) const { return NvFlowCPU_Float4(x * rhs, y * rhs, z * rhs, w * rhs); }
NvFlowCPU_Float4 operator/(const float& rhs) const { return NvFlowCPU_Float4(x / rhs, y / rhs, z / rhs, w / rhs); }
NvFlowCPU_Float4& operator+=(const NvFlowCPU_Float4& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; w += rhs.w; return *this; }
NvFlowCPU_Float4& operator-=(const NvFlowCPU_Float4& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; w -= rhs.w; return *this; }
NvFlowCPU_Float4& operator*=(const NvFlowCPU_Float4& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; w *= rhs.w; return *this; }
NvFlowCPU_Float4& operator/=(const NvFlowCPU_Float4& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; w /= rhs.w; return *this; }
NvFlowCPU_Float4& operator*=(const float& rhs) { x *= rhs; y *= rhs; z *= rhs; w *= rhs; return *this; }
};
NV_FLOW_INLINE NvFlowCPU_Float4 operator*(const float& lhs, const NvFlowCPU_Float4& rhs) { return NvFlowCPU_Float4(lhs * rhs.x, lhs * rhs.y, lhs * rhs.z, lhs * rhs.w); }
NV_FLOW_INLINE float NvFlowCPU_dot(NvFlowCPU_Float4 a, NvFlowCPU_Float4 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_max(NvFlowCPU_Float4 a, NvFlowCPU_Float4 b)
{
return NvFlowCPU_Float4(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y), NvFlowCPU_max(a.z, b.z), NvFlowCPU_max(a.w, b.w));
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_min(NvFlowCPU_Float4 a, NvFlowCPU_Float4 b)
{
return NvFlowCPU_Float4(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y), NvFlowCPU_min(a.z, b.z), NvFlowCPU_min(a.w, b.w));
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_sign(NvFlowCPU_Float4 v)
{
return NvFlowCPU_Float4(
v.x == 0.f ? 0.f : (v.x < 0.f ? -1.f : +1.f),
v.y == 0.f ? 0.f : (v.y < 0.f ? -1.f : +1.f),
v.z == 0.f ? 0.f : (v.z < 0.f ? -1.f : +1.f),
v.w == 0.f ? 0.f : (v.w < 0.f ? -1.f : +1.f)
);
}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_abs(NvFlowCPU_Float4 v)
{
return NvFlowCPU_Float4(fabsf(v.x), fabsf(v.y), fabsf(v.z), fabsf(v.w));
}
struct NvFlowCPU_Float4x4
{
NvFlowCPU_Float4 x, y, z, w;
NvFlowCPU_Float4x4() {}
NvFlowCPU_Float4x4(const NvFlowCPU_Float4& x, const NvFlowCPU_Float4& y, const NvFlowCPU_Float4& z, const NvFlowCPU_Float4& w) : x(x), y(y), z(z), w(w) {}
};
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_mul(const NvFlowCPU_Float4& x, const NvFlowCPU_Float4x4 A)
{
return NvFlowCPU_Float4(
{ A.x.x * x.x + A.x.y * x.y + A.x.z * x.z + A.x.w * x.w },
{ A.y.x * x.x + A.y.y * x.y + A.y.z * x.z + A.y.w * x.w },
{ A.z.x * x.x + A.z.y * x.y + A.z.z * x.z + A.z.w * x.w },
{ A.w.x * x.x + A.w.y * x.y + A.w.z * x.z + A.w.w * x.w }
);
}
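// Each component of the result is the dot product of the corresponding row of
// A (A.x .. A.w) with x; this serves as the CPU stand-in for the HLSL mul()
// used by the generated CPU shaders.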
struct NvFlowCPU_Int2
{
int x, y;
NvFlowCPU_Int2() {}
NvFlowCPU_Int2(int x, int y) : x(x), y(y) {}
NvFlowCPU_Int2(const NvFlowCPU_Float2& rhs) : x(int(rhs.x)), y(int(rhs.y)) {}
NV_FLOW_INLINE NvFlowCPU_Int2(const NvFlowCPU_Uint2& rhs);
NvFlowCPU_Int2 operator+(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x + rhs.x, y + rhs.y); }
NvFlowCPU_Int2 operator-(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x - rhs.x, y - rhs.y); }
NvFlowCPU_Int2 operator*(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x * rhs.x, y * rhs.y); }
NvFlowCPU_Int2 operator/(const NvFlowCPU_Int2& rhs) const { return NvFlowCPU_Int2(x / rhs.x, y / rhs.y); }
NvFlowCPU_Int2 operator+(const int& rhs) const { return NvFlowCPU_Int2(x + rhs, y + rhs); }
NvFlowCPU_Int2 operator-(const int& rhs) const { return NvFlowCPU_Int2(x - rhs, y - rhs); }
NvFlowCPU_Int2 operator*(const int& rhs) const { return NvFlowCPU_Int2(x * rhs, y * rhs); }
NvFlowCPU_Int2 operator/(const int& rhs) const { return NvFlowCPU_Int2(x / rhs, y / rhs); }
};
NV_FLOW_INLINE NvFlowCPU_Float2::NvFlowCPU_Float2(const NvFlowCPU_Int2& rhs) : x(float(rhs.x)), y(float(rhs.y)) {}
NV_FLOW_INLINE NvFlowCPU_Int2 NvFlowCPU_max(NvFlowCPU_Int2 a, NvFlowCPU_Int2 b)
{
return NvFlowCPU_Int2(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y));
}
NV_FLOW_INLINE NvFlowCPU_Int2 NvFlowCPU_min(NvFlowCPU_Int2 a, NvFlowCPU_Int2 b)
{
return NvFlowCPU_Int2(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y));
}
struct NvFlowCPU_Int3
{
int x, y, z;
NvFlowCPU_Int3() {}
NvFlowCPU_Int3(int x, int y, int z) : x(x), y(y), z(z) {}
NV_FLOW_INLINE NvFlowCPU_Int3(const NvFlowCPU_Uint3& v);
NV_FLOW_INLINE NvFlowCPU_Int3(const NvFlowCPU_Float3& v);
NvFlowCPU_Int2& rg() { return *((NvFlowCPU_Int2*)this); }
int& r() { return *((int*)this); }
NvFlowCPU_Int3 operator+(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x + rhs.x, y + rhs.y, z + rhs.z); }
NvFlowCPU_Int3 operator-(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x - rhs.x, y - rhs.y, z - rhs.z); }
NvFlowCPU_Int3 operator*(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x * rhs.x, y * rhs.y, z * rhs.z); }
NvFlowCPU_Int3 operator/(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x / rhs.x, y / rhs.y, z / rhs.z); }
NvFlowCPU_Int3& operator+=(const NvFlowCPU_Int3& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; return *this; }
NvFlowCPU_Int3& operator-=(const NvFlowCPU_Int3& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; return *this; }
NvFlowCPU_Int3& operator*=(const NvFlowCPU_Int3& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; return *this; }
NvFlowCPU_Int3& operator/=(const NvFlowCPU_Int3& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; return *this; }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x << rhs.x, y << rhs.y, z << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator&(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x & rhs.x, y & rhs.y, z & rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator|(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Int3(x | rhs.x, y | rhs.y, z | rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const int& rhs) const { return NvFlowCPU_Int3(x >> rhs, y >> rhs, z >> rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const int& rhs) const { return NvFlowCPU_Int3(x << rhs, y << rhs, z << rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const NvFlowCPU_Uint& rhs) const { return NvFlowCPU_Int3(x >> rhs, y >> rhs, z >> rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const NvFlowCPU_Uint& rhs) const { return NvFlowCPU_Int3(x << rhs, y << rhs, z << rhs); }
NV_FLOW_INLINE NvFlowCPU_Int3 operator>>(const NvFlowCPU_Uint3& rhs) const;
NV_FLOW_INLINE NvFlowCPU_Int3 operator<<(const NvFlowCPU_Uint3& rhs) const;
};
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_max(NvFlowCPU_Int3 a, NvFlowCPU_Int3 b)
{
return NvFlowCPU_Int3(NvFlowCPU_max(a.x, b.x), NvFlowCPU_max(a.y, b.y), NvFlowCPU_max(a.z, b.z));
}
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_min(NvFlowCPU_Int3 a, NvFlowCPU_Int3 b)
{
return NvFlowCPU_Int3(NvFlowCPU_min(a.x, b.x), NvFlowCPU_min(a.y, b.y), NvFlowCPU_min(a.z, b.z));
}
struct NvFlowCPU_Int4
{
int x, y, z, w;
NvFlowCPU_Int4() {}
NvFlowCPU_Int4(int x, int y, int z, int w) : x(x), y(y), z(z), w(w) {}
NvFlowCPU_Int4(const NvFlowCPU_Int2& a, const NvFlowCPU_Int2& b) : x(a.x), y(a.y), z(b.x), w(b.y) {}
NvFlowCPU_Int4(const NvFlowCPU_Int3& rhs, int w) : x(rhs.x), y(rhs.y), z(rhs.z), w(w) {}
NvFlowCPU_Int4(const NvFlowCPU_Uint4& rhs);
NvFlowCPU_Int3& rgb() { return *((NvFlowCPU_Int3*)this); }
NvFlowCPU_Int2& rg() { return *((NvFlowCPU_Int2*)this); }
int& r() { return *((int*)this); }
NvFlowCPU_Int2& ba() { return *((NvFlowCPU_Int2*)&z); }
const NvFlowCPU_Int3& rgb()const { return *((const NvFlowCPU_Int3*)this); }
const NvFlowCPU_Int2& rg()const { return *((const NvFlowCPU_Int2*)this); }
const int& r()const { return *((const int*)this); }
const NvFlowCPU_Int2& ba()const { return *((const NvFlowCPU_Int2*)&z); }
NvFlowCPU_Int4 operator+(const NvFlowCPU_Int4& rhs) const { return NvFlowCPU_Int4(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w); }
};
struct NvFlowCPU_Uint2
{
NvFlowUint x, y;
NvFlowCPU_Uint2() {}
NvFlowCPU_Uint2(NvFlowUint x, NvFlowUint y) : x(x), y(y) {}
NvFlowCPU_Uint2(const NvFlowCPU_Int2& rhs) : x(rhs.x), y(rhs.y) {}
};
NV_FLOW_INLINE NvFlowCPU_Int2::NvFlowCPU_Int2(const NvFlowCPU_Uint2& rhs) : x(rhs.x), y(rhs.y) {}
struct NvFlowCPU_Uint3
{
NvFlowUint x, y, z;
NvFlowCPU_Uint3() {}
NvFlowCPU_Uint3(NvFlowUint x, NvFlowUint y, NvFlowUint z) : x(x), y(y), z(z) {}
NV_FLOW_INLINE NvFlowCPU_Uint3(const NvFlowCPU_Int3& v);
NvFlowCPU_Uint2& rg() { return *((NvFlowCPU_Uint2*)this); }
NvFlowCPU_Uint& r() { return *((NvFlowCPU_Uint*)this); }
NvFlowCPU_Uint3 operator+(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x + rhs.x, y + rhs.y, z + rhs.z); }
NvFlowCPU_Uint3 operator-(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x - rhs.x, y - rhs.y, z - rhs.z); }
NvFlowCPU_Uint3 operator*(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x * rhs.x, y * rhs.y, z * rhs.z); }
NvFlowCPU_Uint3 operator/(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x / rhs.x, y / rhs.y, z / rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator&(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x & rhs.x, y & rhs.y, z & rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator|(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x | rhs.x, y | rhs.y, z | rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Uint3(x << rhs.x, y << rhs.y, z << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Uint3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Int3& rhs) const { return NvFlowCPU_Uint3(x << rhs.x, y << rhs.y, z << rhs.z); }
};
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs >> rhs.x, lhs >> rhs.y, lhs >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator>>(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x >> rhs, lhs.y >> rhs, lhs.z >> rhs); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs << rhs.x, lhs << rhs.y, lhs << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator<<(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x << rhs, lhs.y << rhs, lhs.z << rhs); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator+(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs + rhs.x, lhs + rhs.y, lhs + rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator+(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x + rhs, lhs.y + rhs, lhs.z + rhs); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator-(const NvFlowCPU_Uint& lhs, const NvFlowCPU_Uint3& rhs) { return NvFlowCPU_Uint3(lhs - rhs.x, lhs - rhs.y, lhs - rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Uint3 operator-(const NvFlowCPU_Uint3& lhs, const NvFlowCPU_Uint& rhs) { return NvFlowCPU_Uint3(lhs.x - rhs, lhs.y - rhs, lhs.z - rhs); }
struct NvFlowCPU_Uint4
{
NvFlowUint x, y, z, w;
NvFlowCPU_Uint4() {}
NvFlowCPU_Uint4(NvFlowUint x, NvFlowUint y, NvFlowUint z, NvFlowUint w) : x(x), y(y), z(z), w(w) {}
NvFlowCPU_Uint4 operator+(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w); }
NvFlowCPU_Uint4 operator-(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x - rhs.x, y - rhs.y, z - rhs.z, w - rhs.w); }
NvFlowCPU_Uint4 operator*(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x * rhs.x, y * rhs.y, z * rhs.z, w * rhs.w); }
NvFlowCPU_Uint4 operator/(const NvFlowCPU_Uint4& rhs) const { return NvFlowCPU_Uint4(x / rhs.x, y / rhs.y, z / rhs.z, w / rhs.w); }
NvFlowCPU_Uint4& operator+=(const NvFlowCPU_Uint4& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; w += rhs.w; return *this; }
NvFlowCPU_Uint4& operator-=(const NvFlowCPU_Uint4& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; w -= rhs.w; return *this; }
NvFlowCPU_Uint4& operator*=(const NvFlowCPU_Uint4& rhs) { x *= rhs.x; y *= rhs.y; z *= rhs.z; w *= rhs.w; return *this; }
NvFlowCPU_Uint4& operator/=(const NvFlowCPU_Uint4& rhs) { x /= rhs.x; y /= rhs.y; z /= rhs.z; w /= rhs.w; return *this; }
};
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_Int3::operator>>(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Int3(x >> rhs.x, y >> rhs.y, z >> rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_Int3::operator<<(const NvFlowCPU_Uint3& rhs) const { return NvFlowCPU_Int3(x << rhs.x, y << rhs.y, z << rhs.z); }
NV_FLOW_INLINE NvFlowCPU_Float3::NvFlowCPU_Float3(const NvFlowCPU_Int3& v) : x(float(v.x)), y(float(v.y)), z(float(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Int3::NvFlowCPU_Int3(const NvFlowCPU_Uint3& v) : x(int(v.x)), y(int(v.y)), z(int(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Int3::NvFlowCPU_Int3(const NvFlowCPU_Float3& v) : x(int(v.x)), y(int(v.y)), z(int(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Uint3::NvFlowCPU_Uint3(const NvFlowCPU_Int3& v) : x(NvFlowUint(v.x)), y(NvFlowUint(v.y)), z(NvFlowUint(v.z)) {}
NV_FLOW_INLINE NvFlowCPU_Int4::NvFlowCPU_Int4(const NvFlowCPU_Uint4& rhs) : x(int(rhs.x)), y(int(rhs.y)), z(int(rhs.z)), w(int(rhs.w)) {}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_asfloat(NvFlowCPU_Uint4 v) {return *((NvFlowCPU_Float4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_asfloat(NvFlowCPU_Uint3 v) {return *((NvFlowCPU_Float3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float2 NvFlowCPU_asfloat(NvFlowCPU_Uint2 v) {return *((NvFlowCPU_Float2*)&v);}
NV_FLOW_INLINE float NvFlowCPU_asfloat(NvFlowUint v) {return *((float*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float4 NvFlowCPU_asfloat(NvFlowCPU_Int4 v) {return *((NvFlowCPU_Float4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float3 NvFlowCPU_asfloat(NvFlowCPU_Int3 v) {return *((NvFlowCPU_Float3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Float2 NvFlowCPU_asfloat(NvFlowCPU_Int2 v) {return *((NvFlowCPU_Float2*)&v);}
NV_FLOW_INLINE float NvFlowCPU_asfloat(int v) {return *((float*)&v);}
NV_FLOW_INLINE NvFlowCPU_Uint4 NvFlowCPU_asuint(NvFlowCPU_Float4 v) {return *((NvFlowCPU_Uint4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Uint3 NvFlowCPU_asuint(NvFlowCPU_Float3 v) {return *((NvFlowCPU_Uint3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Uint2 NvFlowCPU_asuint(NvFlowCPU_Float2 v) {return *((NvFlowCPU_Uint2*)&v);}
NV_FLOW_INLINE NvFlowUint NvFlowCPU_asuint(float v) {return *((NvFlowUint*)&v);}
NV_FLOW_INLINE NvFlowCPU_Int4 NvFlowCPU_asint(NvFlowCPU_Float4 v) {return *((NvFlowCPU_Int4*)&v);}
NV_FLOW_INLINE NvFlowCPU_Int3 NvFlowCPU_asint(NvFlowCPU_Float3 v) {return *((NvFlowCPU_Int3*)&v);}
NV_FLOW_INLINE NvFlowCPU_Int2 NvFlowCPU_asint(NvFlowCPU_Float2 v) {return *((NvFlowCPU_Int2*)&v);}
NV_FLOW_INLINE int NvFlowCPU_asint(float v) {return *((int*)&v);}
struct NvFlowCPU_Resource
{
void* data;
NvFlowUint64 sizeInBytes;
NvFlowUint elementSizeInBytes;
NvFlowUint elementCount;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NvFlowUint depth;
NvFlowSamplerDesc samplerDesc;
};
template <typename T>
struct NvFlowCPU_ConstantBuffer
{
const T* data;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
}
};
template <typename T>
struct NvFlowCPU_StructuredBuffer
{
const T* data;
NvFlowUint count;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
count = resource->elementCount;
}
const T& operator[](int index) {
if (index < 0 || index >= int(count)) index = 0;
return data[index];
}
};
template <typename T>
struct NvFlowCPU_RWStructuredBuffer
{
T* data;
NvFlowUint count;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
count = resource->elementCount;
}
T& operator[](int index) {
if (index < 0 || index >= int(count)) index = 0;
return data[index];
}
};
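// Out-of-range indices on both structured buffer wrappers redirect to element
// 0 rather than faulting, approximating the robust buffer access behavior a
// GPU would provide.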
struct NvFlowCPU_SamplerState
{
NvFlowSamplerDesc desc;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
desc = resource->samplerDesc;
}
};
template <typename T>
struct NvFlowCPU_Texture1D
{
const T* data;
NvFlowFormat format;
NvFlowUint width;
T out_of_bounds = {};
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
format = resource->format;
width = resource->width;
memset(&out_of_bounds, 0, sizeof(out_of_bounds));
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_Texture1D<T>& tex, int index)
{
if (index < 0 || index >= int(tex.width))
{
return tex.out_of_bounds;
}
return tex.data[index];
}
template <typename T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_textureSampleLevel(NvFlowCPU_Texture1D<T>& tex, NvFlowCPU_SamplerState state, const float pos, float lod)
{
float posf(float(tex.width) * pos);
// clamp sampler
if (posf < 0.5f) posf = 0.5f;
if (posf > float(tex.width) - 0.5f) posf = float(tex.width) - 0.5f;
int pos0 = int(NvFlowCPU_floor(posf - 0.5f));
float f = posf - 0.5f - float(pos0);
float of = 1.f - f;
T sum = of * NvFlowCPU_textureRead(tex, pos0 + 0);
sum += f * NvFlowCPU_textureRead(tex, pos0 + 1);
return sum;
}
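// Note: the sampleLevel helpers here and below always apply linear filtering
// with clamp-to-edge addressing; the NvFlowCPU_SamplerState and lod arguments
// are accepted for HLSL signature parity but are currently ignored.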
template <typename T>
struct NvFlowCPU_RWTexture1D
{
T* data;
NvFlowFormat format;
NvFlowUint width;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
format = resource->format;
width = resource->width;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_RWTexture1D<T>& tex, int index)
{
return tex.data[index];
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(NvFlowCPU_RWTexture1D<T>& tex, int index, const T value)
{
tex.data[index] = value;
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(bool pred, NvFlowCPU_RWTexture1D<T>& tex, int index, const T value)
{
if (pred)
{
NvFlowCPU_textureWrite(tex, index, value);
}
}
template <typename T>
struct NvFlowCPU_Texture2D
{
const T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
T out_of_bounds;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
memset(&out_of_bounds, 0, sizeof(out_of_bounds));
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_Texture2D<T>& tex, NvFlowCPU_Int2 index)
{
if (index.x < 0 || index.x >= int(tex.width) ||
index.y < 0 || index.y >= int(tex.height))
{
return tex.out_of_bounds;
}
return tex.data[index.y * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_textureSampleLevel(NvFlowCPU_Texture2D<T>& tex, NvFlowCPU_SamplerState state, const NvFlowCPU_Float2 pos, float lod)
{
NvFlowCPU_Float2 posf(NvFlowCPU_Float2(float(tex.width), float(tex.height)) * pos);
// clamp sampler
if (posf.x < 0.5f) posf.x = 0.5f;
if (posf.x > float(tex.width) - 0.5f) posf.x = float(tex.width) - 0.5f;
if (posf.y < 0.5f) posf.y = 0.5f;
if (posf.y > float(tex.height) - 0.5f) posf.y = float(tex.height) - 0.5f;
NvFlowCPU_Int2 pos00 = NvFlowCPU_Int2(NvFlowCPU_floor(posf - NvFlowCPU_Float2(0.5f, 0.5f)));
NvFlowCPU_Float2 f = posf - NvFlowCPU_Float2(0.5f, 0.5f) - NvFlowCPU_Float2(pos00);
NvFlowCPU_Float2 of = NvFlowCPU_Float2(1.f, 1.f) - f;
T sum = of.x * of.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(0, 0));
sum += f.x * of.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(1, 0));
sum += of.x * f.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(0, 1));
sum += f.x * f.y * NvFlowCPU_textureRead(tex, pos00 + NvFlowCPU_Int2(1, 1));
return sum;
}
template <typename T>
struct NvFlowCPU_RWTexture2D
{
T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_RWTexture2D<T>& tex, NvFlowCPU_Int2 index)
{
return tex.data[index.y * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(NvFlowCPU_RWTexture2D<T>& tex, NvFlowCPU_Int2 index, const T value)
{
tex.data[index.y * tex.width + index.x] = value;
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(bool pred, NvFlowCPU_RWTexture2D<T>& tex, NvFlowCPU_Int2 index, const T value)
{
if (pred)
{
NvFlowCPU_textureWrite(tex, index, value);
}
}
template <typename T>
struct NvFlowCPU_Texture3D
{
const T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NvFlowUint depth;
T out_of_bounds;
NvFlowUint wh;
NvFlowUint whd;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (const T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
depth = resource->depth;
memset(&out_of_bounds, 0, sizeof(out_of_bounds));
wh = width * height;
whd = wh * depth;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_Texture3D<T>& tex, NvFlowCPU_Int3 index)
{
if (index.x < 0 || index.x >= int(tex.width) ||
index.y < 0 || index.y >= int(tex.height) ||
index.z < 0 || index.z >= int(tex.depth))
{
return tex.out_of_bounds;
}
return tex.data[(index.z * tex.height + index.y) * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_textureSampleLevel(NvFlowCPU_Texture3D<T>& tex, NvFlowCPU_SamplerState state, const NvFlowCPU_Float3 pos, float lod)
{
NvFlowCPU_Float3 posf(NvFlowCPU_Float3(float(tex.width), float(tex.height), float(tex.depth)) * pos);
// clamp sampler
if (posf.x < 0.5f) posf.x = 0.5f;
if (posf.x > float(tex.width) - 0.5f) posf.x = float(tex.width) - 0.5f;
if (posf.y < 0.5f) posf.y = 0.5f;
if (posf.y > float(tex.height) - 0.5f) posf.y = float(tex.height) - 0.5f;
if (posf.z < 0.5f) posf.z = 0.5f;
if (posf.z > float(tex.depth) - 0.5f) posf.z = float(tex.depth) - 0.5f;
NvFlowCPU_Int4 pos000 = NvFlowCPU_Int4(NvFlowCPU_floor(posf - NvFlowCPU_Float3(0.5f, 0.5f, 0.5f)), 0);
NvFlowCPU_Float3 f = posf - NvFlowCPU_Float3(0.5f, 0.5f, 0.5f) - NvFlowCPU_Float3(float(pos000.x), float(pos000.y), float(pos000.z));
NvFlowCPU_Float3 of = NvFlowCPU_Float3(1.f, 1.f, 1.f) - f;
NvFlowCPU_Float4 wl(
of.x * of.y * of.z,
f.x * of.y * of.z,
of.x * f.y * of.z,
f.x * f.y * of.z
);
NvFlowCPU_Float4 wh(
of.x * of.y * f.z,
f.x * of.y * f.z,
of.x * f.y * f.z,
f.x * f.y * f.z
);
T sum;
if (pos000.x >= 0 && pos000.y >= 0 && pos000.z >= 0 &&
pos000.x <= int(tex.width - 2) && pos000.y <= int(tex.height - 2) && pos000.z <= int(tex.depth - 2))
{
NvFlowUint idx000 = pos000.z * tex.wh + pos000.y * tex.width + pos000.x;
sum = wl.x * tex.data[idx000];
sum += wl.y * tex.data[idx000 + 1u];
sum += wl.z * tex.data[idx000 + tex.width];
sum += wl.w * tex.data[idx000 + 1u + tex.width];
sum += wh.x * tex.data[idx000 + tex.wh];
sum += wh.y * tex.data[idx000 + 1u + tex.wh];
sum += wh.z * tex.data[idx000 + tex.width + tex.wh];
sum += wh.w * tex.data[idx000 + 1u + tex.width + tex.wh];
}
else
{
sum = wl.x * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 0, 0));
sum += wl.y * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 0, 0));
sum += wl.z * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 1, 0));
sum += wl.w * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 1, 0));
sum += wh.x * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 0, 1));
sum += wh.y * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 0, 1));
sum += wh.z * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(0, 1, 1));
sum += wh.w * NvFlowCPU_textureRead(tex, pos000.rgb() + NvFlowCPU_Int3(1, 1, 1));
}
return sum;
}
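// The interior fast path above computes a single flattened base index and
// offsets it for all eight taps; only samples within one texel of the texture
// boundary take the slower per-tap clamped NvFlowCPU_textureRead() path.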
template <typename T>
struct NvFlowCPU_RWTexture3D
{
T* data;
NvFlowFormat format;
NvFlowUint width;
NvFlowUint height;
NvFlowUint depth;
NV_FLOW_INLINE void bind(NvFlowCPU_Resource* resource)
{
data = (T*)resource->data;
format = resource->format;
width = resource->width;
height = resource->height;
depth = resource->depth;
}
};
template <typename T>
NV_FLOW_FORCE_INLINE const T NvFlowCPU_textureRead(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index)
{
return tex.data[(index.z * tex.height + index.y) * tex.width + index.x];
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, const T value)
{
tex.data[(index.z * tex.height + index.y) * tex.width + index.x] = value;
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_textureWrite(bool pred, NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, const T value)
{
if (pred)
{
NvFlowCPU_textureWrite(tex, index, value);
}
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedAdd(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
((std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x])->fetch_add(value);
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedMin(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
// std::atomic has no fetch_min() before C++26; emulate it with a CAS loop
// (compare_exchange_weak reloads oldValue on failure)
std::atomic<T>* atomicValue = (std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x];
T oldValue = atomicValue->load();
while (value < oldValue && !atomicValue->compare_exchange_weak(oldValue, value)) {}
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedOr(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
((std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x])->fetch_or(value);
}
template <typename T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_InterlockedAnd(NvFlowCPU_RWTexture3D<T>& tex, NvFlowCPU_Int3 index, T value)
{
((std::atomic<T>*)&tex.data[(index.z * tex.height + index.y) * tex.width + index.x])->fetch_and(value);
}
template <class T>
struct NvFlowCPU_Groupshared
{
T data;
};
template <class T>
NV_FLOW_FORCE_INLINE void NvFlowCPU_swrite(int _groupshared_pass, int _groupshared_sync_count, NvFlowCPU_Groupshared<T>& g, const T& value)
{
if (_groupshared_pass == _groupshared_sync_count)
{
g.data = value;
}
}
template <class T>
NV_FLOW_FORCE_INLINE T NvFlowCPU_sread(NvFlowCPU_Groupshared<T>& g)
{
return g.data;
}
template <class T, unsigned int arraySize>
struct NvFlowCPU_GroupsharedArray
{
T data[arraySize];
};
template <class T, unsigned int arraySize>
NV_FLOW_FORCE_INLINE void NvFlowCPU_swrite(int _groupshared_pass, int _groupshared_sync_count, NvFlowCPU_GroupsharedArray<T, arraySize>& g, int index, const T& value)
{
if (_groupshared_pass == _groupshared_sync_count)
{
g.data[index] = value;
}
}
template <class T, unsigned int arraySize>
NV_FLOW_FORCE_INLINE T NvFlowCPU_sread(NvFlowCPU_GroupsharedArray<T, arraySize>& g, int index)
{
return g.data[index];
}
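// The groupshared emulation above supports running compute shaders on the
// CPU: the dispatcher appears to execute the shader body once per barrier
// region, and swrite() only commits when _groupshared_pass matches
// _groupshared_sync_count, so reads in later passes observe a consistent
// snapshot, mimicking GroupMemoryBarrierWithGroupSync().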
#endif
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowBufferVariable.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
struct NvFlowBufferVariable
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowBufferTransient* transientBuffer = nullptr;
NvFlowUint64 transientFrame = ~0llu;
NvFlowBuffer* buffer = nullptr;
NvFlowArray<NvFlowBufferAcquire*, 4u> acquires;
};
NV_FLOW_INLINE void NvFlowBufferVariable_init(NvFlowContextInterface* contextInterface, NvFlowBufferVariable* ptr)
{
ptr->contextInterface = contextInterface;
}
NV_FLOW_INLINE void NvFlowBufferVariable_flush(NvFlowContext* context, NvFlowBufferVariable* ptr)
{
// process acquire queue
NvFlowUint acquireWriteIdx = 0u;
for (NvFlowUint acquireReadIdx = 0u; acquireReadIdx < ptr->acquires.size; acquireReadIdx++)
{
NvFlowBuffer* acquiredBuffer = nullptr;
if (ptr->contextInterface->getAcquiredBuffer(context, ptr->acquires[acquireReadIdx], &acquiredBuffer))
{
if (ptr->buffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffer);
ptr->buffer = nullptr;
}
ptr->buffer = acquiredBuffer;
}
else
{
ptr->acquires[acquireWriteIdx++] = ptr->acquires[acquireReadIdx];
}
}
ptr->acquires.size = acquireWriteIdx;
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowBufferVariable_get(NvFlowContext* context, NvFlowBufferVariable* ptr)
{
if (ptr->transientFrame == ptr->contextInterface->getCurrentFrame(context))
{
return ptr->transientBuffer;
}
NvFlowBufferVariable_flush(context, ptr);
if (ptr->buffer)
{
ptr->transientBuffer = ptr->contextInterface->registerBufferAsTransient(context, ptr->buffer);
ptr->transientFrame = ptr->contextInterface->getCurrentFrame(context);
}
else
{
ptr->transientBuffer = nullptr;
ptr->transientFrame = ~0llu;
}
return ptr->transientBuffer;
}
NV_FLOW_INLINE void NvFlowBufferVariable_set(NvFlowContext* context, NvFlowBufferVariable* ptr, NvFlowBufferTransient* transientBuffer)
{
NvFlowBufferVariable_flush(context, ptr);
if (ptr->buffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffer);
ptr->buffer = nullptr;
}
ptr->transientBuffer = nullptr;
ptr->transientFrame = ~0llu;
if (transientBuffer)
{
ptr->transientBuffer = transientBuffer;
ptr->transientFrame = ptr->contextInterface->getCurrentFrame(context);
// push acquire
ptr->acquires.pushBack(ptr->contextInterface->enqueueAcquireBuffer(context, transientBuffer));
}
}
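// Illustrative lifecycle sketch, not part of the API: assumes a valid
// NvFlowContext* context, an initialized NvFlowContextInterface* contextInterface,
// and a hypothetical produceTransient() that records GPU work for this frame.
//
//   NvFlowBufferVariable var = {};
//   NvFlowBufferVariable_init(contextInterface, &var);
//   // frame N: retain a transient result for reuse on later frames
//   NvFlowBufferVariable_set(context, &var, produceTransient(context));
//   // frame N+1: the retained buffer is re-registered as a transient
//   NvFlowBufferTransient* reused = NvFlowBufferVariable_get(context, &var);
//   // shutdown
//   NvFlowBufferVariable_destroy(context, &var);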
NV_FLOW_INLINE void NvFlowBufferVariable_destroy(NvFlowContext* context, NvFlowBufferVariable* ptr)
{
NvFlowBufferVariable_set(context, ptr, nullptr);
} | 4,068 | C | 35.008849 | 135 | 0.76647 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowReadbackBuffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
#include "NvFlowArray.h"
struct NvFlowReadbackBufferInstance
{
NvFlowBuffer* buffer = nullptr;
NvFlowUint64 bufferSizeInBytes = 0llu;
NvFlowBool32 isActive = NV_FLOW_FALSE;
NvFlowUint64 completedFrame = ~0llu;
NvFlowUint64 completedGlobalFrame = ~0llu;
NvFlowUint64 version = ~0llu;
NvFlowUint64 validNumBytes = 0llu;
};
struct NvFlowReadbackBuffer
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowUint64 versionCounter = 0llu;
NvFlowArray<NvFlowReadbackBufferInstance, 8u> buffers;
NvFlowArray<NvFlowUint64, 8u> activeBuffers;
};
NV_FLOW_INLINE void NvFlowReadbackBuffer_init(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
ptr->contextInterface = contextInterface;
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_destroy(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
for (NvFlowUint idx = 0u; idx < ptr->buffers.size; idx++)
{
if (ptr->buffers[idx].buffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffers[idx].buffer);
ptr->buffers[idx].buffer = nullptr;
}
}
ptr->buffers.size = 0u;
}
struct NvFlowReadbackBufferCopyRange
{
NvFlowUint64 offset;
NvFlowUint64 numBytes;
};
NV_FLOW_INLINE void NvFlowReadbackBuffer_copyN(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint64 numBytes, NvFlowBufferTransient* src, const NvFlowReadbackBufferCopyRange* copyRanges, NvFlowUint copyRangeCount, NvFlowUint64* pOutVersion)
{
// find inactive buffer, create as needed
NvFlowUint64 bufferIdx = 0u;
for (; bufferIdx < ptr->buffers.size; bufferIdx++)
{
if (!ptr->buffers[bufferIdx].isActive)
{
break;
}
}
if (bufferIdx == ptr->buffers.size)
{
bufferIdx = ptr->buffers.allocateBack();
}
NvFlowReadbackBufferInstance* inst = &ptr->buffers[bufferIdx];
// resize buffer as needed
if (inst->buffer && inst->bufferSizeInBytes < numBytes)
{
ptr->contextInterface->destroyBuffer(context, inst->buffer);
inst->buffer = nullptr;
inst->bufferSizeInBytes = 0llu;
}
if (!inst->buffer)
{
NvFlowBufferDesc bufDesc = {};
bufDesc.usageFlags = eNvFlowBufferUsage_bufferCopyDst;
bufDesc.format = eNvFlowFormat_unknown;
bufDesc.structureStride = 0u;
bufDesc.sizeInBytes = 65536u;
while (bufDesc.sizeInBytes < numBytes)
{
bufDesc.sizeInBytes *= 2u;
}
inst->buffer = ptr->contextInterface->createBuffer(context, eNvFlowMemoryType_readback, &bufDesc);
inst->bufferSizeInBytes = bufDesc.sizeInBytes;
}
// set active state
ptr->versionCounter++;
inst->isActive = NV_FLOW_TRUE;
inst->completedFrame = ptr->contextInterface->getCurrentFrame(context);
inst->completedGlobalFrame = ptr->contextInterface->getCurrentGlobalFrame(context);
inst->version = ptr->versionCounter;
inst->validNumBytes = numBytes;
if (pOutVersion)
{
*pOutVersion = inst->version;
}
// copy
NvFlowBufferTransient* dst = ptr->contextInterface->registerBufferAsTransient(context, inst->buffer);
for (NvFlowUint copyRangeIdx = 0u; copyRangeIdx < copyRangeCount; copyRangeIdx++)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = copyRanges[copyRangeIdx].offset;
copyParams.dstOffset = copyRanges[copyRangeIdx].offset;
copyParams.numBytes = copyRanges[copyRangeIdx].numBytes;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = "ReadbackBufferCopy";
ptr->contextInterface->addPassCopyBuffer(context, &copyParams);
}
if (copyRangeCount == 0u)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = 0llu;
copyParams.dstOffset = 0llu;
copyParams.numBytes = 0llu;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = "ReadbackBufferCopy";
ptr->contextInterface->addPassCopyBuffer(context, &copyParams);
}
// push on active queue
ptr->activeBuffers.pushBack(bufferIdx);
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_copy(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint64 numBytes, NvFlowBufferTransient* src, NvFlowUint64* pOutVersion)
{
NvFlowReadbackBufferCopyRange copyRange = { 0llu, numBytes };
NvFlowReadbackBuffer_copyN(context, ptr, numBytes, src, &copyRange, 1u, pOutVersion);
}
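// Illustrative round-trip sketch, not part of the API: assumes readback was
// initialized with NvFlowReadbackBuffer_init and srcTransient holds numBytes
// of GPU data produced earlier in the frame.
//
//   NvFlowUint64 version = 0llu;
//   NvFlowReadbackBuffer_copy(context, &readback, numBytes, srcTransient, &version);
//   // some frames later, once the GPU has completed the copy:
//   NvFlowUint64 mappedVersion = 0llu;
//   NvFlowUint64 mappedNumBytes = 0llu;
//   void* mapped = NvFlowReadbackBuffer_mapLatest(context, &readback, &mappedVersion, &mappedNumBytes);
//   if (mapped)
//   {
//       // consume mappedNumBytes of data, then release the mapping
//       NvFlowReadbackBuffer_unmapLatest(context, &readback);
//   }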
NV_FLOW_INLINE void NvFlowReadbackBuffer_flush(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
// flush queue
NvFlowUint completedCount = 0u;
NvFlowUint64 lastFenceCompleted = ptr->contextInterface->getLastFrameCompleted(context);
for (NvFlowUint activeBufferIdx = 0u; activeBufferIdx < ptr->activeBuffers.size; activeBufferIdx++)
{
if (ptr->buffers[ptr->activeBuffers[activeBufferIdx]].completedFrame > lastFenceCompleted)
{
break;
}
completedCount++;
}
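// pop all but the newest completed buffer so the most recent result stays mappable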
NvFlowUint popCount = completedCount >= 2u ? completedCount - 1u : 0u;
if (popCount > 0u)
{
for (NvFlowUint activeBufferIdx = 0u; activeBufferIdx < popCount; activeBufferIdx++)
{
ptr->buffers[ptr->activeBuffers[activeBufferIdx]].isActive = NV_FLOW_FALSE;
}
// compact
for (NvFlowUint activeBufferIdx = popCount; activeBufferIdx < ptr->activeBuffers.size; activeBufferIdx++)
{
ptr->activeBuffers[activeBufferIdx - popCount] = ptr->activeBuffers[activeBufferIdx];
}
ptr->activeBuffers.size = ptr->activeBuffers.size - popCount;
}
}
NV_FLOW_INLINE NvFlowUint NvFlowReadbackBuffer_getActiveCount(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
return (NvFlowUint)ptr->activeBuffers.size;
}
NV_FLOW_INLINE NvFlowUint64 NvFlowReadbackBuffer_getCompletedGlobalFrame(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint activeIdx)
{
if (activeIdx < ptr->activeBuffers.size)
{
return ptr->buffers[ptr->activeBuffers[activeIdx]].completedGlobalFrame;
}
return ~0llu;
}
NV_FLOW_INLINE void* NvFlowReadbackBuffer_map(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint activeIdx, NvFlowUint64* pOutVersion, NvFlowUint64* pNumBytes)
{
if (activeIdx >= ptr->activeBuffers.size)
{
if (pOutVersion)
{
*pOutVersion = 0llu;
}
if (pNumBytes)
{
*pNumBytes = 0llu;
}
return nullptr;
}
NvFlowReadbackBufferInstance* inst = &ptr->buffers[ptr->activeBuffers[activeIdx]];
if (pOutVersion)
{
*pOutVersion = inst->version;
}
if (pNumBytes)
{
*pNumBytes = inst->validNumBytes;
}
return ptr->contextInterface->mapBuffer(context, inst->buffer);
}
NV_FLOW_INLINE void* NvFlowReadbackBuffer_mapLatest(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint64* pOutVersion, NvFlowUint64* pNumBytes)
{
NvFlowReadbackBuffer_flush(context, ptr);
NvFlowUint64 lastFenceCompleted = ptr->contextInterface->getLastFrameCompleted(context);
bool shouldMap = true;
if (ptr->activeBuffers.size == 0u)
{
// no active readback buffers, so there is nothing to map yet
shouldMap = false;
}
else if (ptr->buffers[ptr->activeBuffers[0u]].completedFrame > lastFenceCompleted)
{
shouldMap = false;
}
if (!shouldMap)
{
if (pOutVersion)
{
*pOutVersion = 0llu;
}
if (pNumBytes)
{
*pNumBytes = 0llu;
}
return nullptr;
}
return NvFlowReadbackBuffer_map(context, ptr, 0u, pOutVersion, pNumBytes);
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_unmap(NvFlowContext* context, NvFlowReadbackBuffer* ptr, NvFlowUint activeIdx)
{
if (activeIdx < ptr->activeBuffers.size)
{
NvFlowReadbackBufferInstance* inst = &ptr->buffers[ptr->activeBuffers[activeIdx]];
ptr->contextInterface->unmapBuffer(context, inst->buffer);
}
}
NV_FLOW_INLINE void NvFlowReadbackBuffer_unmapLatest(NvFlowContext* context, NvFlowReadbackBuffer* ptr)
{
NvFlowReadbackBuffer_unmap(context, ptr, 0u);
} | 9,093 | C | 31.248227 | 251 | 0.759155 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowDeepCopy.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowReflect.h"
#include "NvFlowArray.h"
#include <string.h>
struct NvFlowReflectDeepCopyInfo
{
const char* debugname;
NvFlowUint64 size;
};
struct NvFlowReflectDeepCopyCached;
struct NvFlowReflectDeepCopy
{
NvFlowArray<NvFlowArray<NvFlowUint8>, 16u> heaps;
NvFlowArray<NvFlowReflectDeepCopyInfo> infos;
NvFlowArray<const char*> pathStack;
NvFlowArrayPointer<NvFlowReflectDeepCopyCached*> cached;
};
struct NvFlowReflectDeepCopyCached
{
NvFlowUint64 luid = 0llu;
NvFlowArray<const char*> pathStack;
NvFlowUint64 version = 0llu;
NvFlowReflectDeepCopy* deepCopy = nullptr;
NvFlowUint8* deepCopyData;
NvFlowUint64 lastUse = 0llu;
};
NV_FLOW_INLINE NvFlowReflectDeepCopy* NvFlowReflectDeepCopy_create()
{
auto ptr = new NvFlowReflectDeepCopy();
return ptr;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopyCached_destroy(NvFlowReflectDeepCopyCached* ptr);
NV_FLOW_INLINE void NvFlowReflectDeepCopy_destroy(NvFlowReflectDeepCopy* ptr)
{
for (NvFlowUint64 cachedIdx = 0u; cachedIdx < ptr->cached.size; cachedIdx++)
{
NvFlowReflectDeepCopyCached_destroy(ptr->cached[cachedIdx]);
ptr->cached[cachedIdx] = nullptr;
}
ptr->cached.size = 0u;
delete ptr;
}
NV_FLOW_INLINE NvFlowReflectDeepCopyCached* NvFlowReflectDeepCopyCached_create(NvFlowUint64 luid, const char** pathStacks, NvFlowUint64 pathStackCount)
{
auto ptr = new NvFlowReflectDeepCopyCached();
ptr->luid = luid;
ptr->pathStack.size = 0u;
for (NvFlowUint64 pathStackIdx = 0u; pathStackIdx < pathStackCount; pathStackIdx++)
{
ptr->pathStack.pushBack(pathStacks[pathStackIdx]);
}
ptr->version = 0llu;
ptr->deepCopy = NvFlowReflectDeepCopy_create();
return ptr;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopyCached_destroy(NvFlowReflectDeepCopyCached* ptr)
{
NvFlowReflectDeepCopy_destroy(ptr->deepCopy);
ptr->deepCopy = nullptr;
delete ptr;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopy_newHeap(NvFlowReflectDeepCopy* ptr, NvFlowUint64 allocSize)
{
auto& currentHeap = ptr->heaps[ptr->heaps.allocateBack()];
NvFlowUint64 heapSize = 4096u; // default heap size
while (heapSize < allocSize)
{
heapSize *= 2u;
}
currentHeap.reserve(heapSize);
}
NV_FLOW_INLINE NvFlowUint64 NvFlowReflectDeepCopy_alignment(NvFlowUint64 size)
{
return 8u * ((size + 7u) / 8u);
}
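// e.g. alignment(1) == 8, alignment(8) == 8, alignment(13) == 16; sizes round up to the next multiple of 8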
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_allocate(NvFlowReflectDeepCopy* ptr, NvFlowUint64 size, const char* debugName)
{
NvFlowUint64 allocSize = NvFlowReflectDeepCopy_alignment(size);
if (ptr->heaps.size > 0u)
{
auto& currentHeap = ptr->heaps[ptr->heaps.size - 1u];
if (currentHeap.size + allocSize <= currentHeap.capacity)
{
NvFlowUint8* ret = currentHeap.data + currentHeap.size;
if (size > 0)
{
ret[size - 1] = 0; // zero the final byte; the guard avoids an underflow on zero-size allocations
}
currentHeap.size += allocSize;
NvFlowReflectDeepCopyInfo info = { debugName, size };
ptr->infos.pushBack(info);
return ret;
}
}
NvFlowReflectDeepCopy_newHeap(ptr, allocSize);
return NvFlowReflectDeepCopy_allocate(ptr, size, debugName);
}
NV_FLOW_INLINE void NvFlowReflectDeepCopy_reset(NvFlowReflectDeepCopy* ptr)
{
for (NvFlowUint64 heapIdx = 0u; heapIdx < ptr->heaps.size; heapIdx++)
{
ptr->heaps[heapIdx].size = 0u;
}
ptr->heaps.size = 0u;
ptr->infos.size = 0u;
ptr->pathStack.size = 0u;
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_recursive(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, const NvFlowUint8* src, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowBool32 isPointerArray);
NV_FLOW_INLINE void NvFlowReflectDeepCopy_cleanCache(NvFlowReflectDeepCopy* ptr)
{
static const NvFlowUint64 cacheFreeThreshold = 8u;
NvFlowUint cachedIdx = 0u;
while (cachedIdx < ptr->cached.size)
{
ptr->cached[cachedIdx]->lastUse++;
if (ptr->cached[cachedIdx]->lastUse > cacheFreeThreshold)
{
NvFlowReflectDeepCopyCached_destroy(ptr->cached[cachedIdx]);
ptr->cached[cachedIdx] = nullptr;
ptr->cached.removeSwapPointerAtIndex(cachedIdx);
}
else
{
cachedIdx++;
}
}
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_cached(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, const NvFlowUint8* src, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowUint64 version, NvFlowBool32 isPointerArray)
{
NvFlowUint64 cachedIdx = 0u;
for (; cachedIdx < ptr->cached.size; cachedIdx++)
{
auto& cached = ptr->cached[cachedIdx];
if (cached->luid == luid && ptr->pathStack.size == cached->pathStack.size)
{
// check path stack
bool pathStackMatches = true;
for (NvFlowUint64 pathStackIdx = 0u; pathStackIdx < ptr->pathStack.size; pathStackIdx++)
{
if (NvFlowReflectStringCompare(ptr->pathStack[pathStackIdx], cached->pathStack[pathStackIdx]) != 0)
{
pathStackMatches = false;
break;
}
}
if (pathStackMatches)
{
break;
}
}
}
if (cachedIdx == ptr->cached.size)
{
cachedIdx = ptr->cached.allocateBack();
ptr->cached[cachedIdx] = NvFlowReflectDeepCopyCached_create(luid, ptr->pathStack.data, ptr->pathStack.size);
}
auto cached = ptr->cached[cachedIdx];
if (ptr->cached[cachedIdx]->version != version)
{
NvFlowReflectDeepCopy_reset(cached->deepCopy);
NvFlowReflectDeepCopy_cleanCache(cached->deepCopy);
cached->deepCopyData = NvFlowReflectDeepCopy_recursive(cached->deepCopy, luid, src, type, elementCount, isPointerArray);
cached->version = version;
}
cached->lastUse = 0u;
return cached->deepCopyData;
}
NV_FLOW_INLINE void NvFlowReflectDeepCopy_structRecursive(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, NvFlowUint8* dst, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowBool32 isPointerArray)
{
if (type->dataType == eNvFlowType_struct)
{
for (NvFlowUint64 elementIdx = 0u; elementIdx < elementCount; elementIdx++)
{
NvFlowUint8* dstArray = dst + type->elementSize * elementIdx;
// attempt to find luid
for (NvFlowUint childIdx = 0u; childIdx < type->childReflectDataCount; childIdx++)
{
const auto& childReflectData = type->childReflectDatas[childIdx];
if (childReflectData.reflectMode == eNvFlowReflectMode_value &&
childReflectData.dataType->dataType == eNvFlowType_uint64)
{
if (NvFlowReflectStringCompare(childReflectData.name, "luid") == 0)
{
luid = *((NvFlowUint64*)(dst + childReflectData.dataOffset));
break;
}
}
}
// traverse all elements, searching for pointers/arrays
for (NvFlowUint64 childIdx = 0u; childIdx < type->childReflectDataCount; childIdx++)
{
const auto& childReflectData = type->childReflectDatas[childIdx];
ptr->pathStack.pushBack(childReflectData.name);
if (childReflectData.dataType->dataType == eNvFlowType_struct &&
(childReflectData.reflectMode == eNvFlowReflectMode_value ||
childReflectData.reflectMode == eNvFlowReflectMode_valueVersioned))
{
NvFlowReflectDeepCopy_structRecursive(ptr, luid, dstArray + childReflectData.dataOffset, childReflectData.dataType, 1u, NV_FLOW_FALSE);
}
if (childReflectData.reflectMode & eNvFlowReflectMode_pointerArray)
{
// get pointer to pointer
NvFlowUint8** childPtr = (NvFlowUint8**)(dstArray + childReflectData.dataOffset);
NvFlowUint64 childElementCount = 1u;
NvFlowUint64 childVersion = 0u;
if ((*childPtr))
{
if (childReflectData.reflectMode & eNvFlowReflectMode_array)
{
childElementCount = *(NvFlowUint64*)(dstArray + childReflectData.arraySizeOffset);
}
if (childReflectData.reflectMode & eNvFlowReflectMode_valueVersioned)
{
childVersion = *(NvFlowUint64*)(dstArray + childReflectData.versionOffset);
}
NvFlowBool32 childIsPointerArray = (childReflectData.reflectMode & eNvFlowReflectMode_pointerArray) == eNvFlowReflectMode_pointerArray;
// conditionally attempt cached array
if (luid > 0u && childElementCount > 0u && childVersion > 0u && childReflectData.dataType->dataType != eNvFlowType_struct)
{
*childPtr = NvFlowReflectDeepCopy_cached(ptr, luid, *childPtr, childReflectData.dataType, childElementCount, childVersion, childIsPointerArray);
}
else
{
// recurse
*childPtr = NvFlowReflectDeepCopy_recursive(ptr, luid, *childPtr, childReflectData.dataType, childElementCount, childIsPointerArray);
}
}
}
ptr->pathStack.size--;
}
}
}
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_recursive(NvFlowReflectDeepCopy* ptr, NvFlowUint64 luid, const NvFlowUint8* src, const NvFlowReflectDataType* type, NvFlowUint64 elementCount, NvFlowBool32 isPointerArray)
{
const char* debugName = "root";
if (ptr->pathStack.size > 0u)
{
debugName = ptr->pathStack[ptr->pathStack.size - 1u];
}
if (isPointerArray)
{
NvFlowUint8* dstData = NvFlowReflectDeepCopy_allocate(ptr, sizeof(void*) * elementCount, debugName);
memcpy(dstData, src, sizeof(void*) * elementCount);
// for each non-null pointer, recurse
NvFlowUint8** dstArray = (NvFlowUint8**)dstData;
for (NvFlowUint64 elementIdx = 0u; elementIdx < elementCount; elementIdx++)
{
if (dstArray[elementIdx])
{
dstArray[elementIdx] = NvFlowReflectDeepCopy_recursive(ptr, luid, dstArray[elementIdx], type, 1u, NV_FLOW_FALSE);
}
}
return dstData;
}
NvFlowUint8* dstData = NvFlowReflectDeepCopy_allocate(ptr, type->elementSize * elementCount, debugName);
memcpy(dstData, src, type->elementSize * elementCount);
NvFlowReflectDeepCopy_structRecursive(ptr, luid, dstData, type, elementCount, isPointerArray);
return dstData;
}
NV_FLOW_INLINE NvFlowUint8* NvFlowReflectDeepCopy_update(NvFlowReflectDeepCopy* ptr, const void* srcVoid, const NvFlowReflectDataType* type)
{
const NvFlowUint8* src = (const NvFlowUint8*)srcVoid;
NvFlowReflectDeepCopy_reset(ptr);
NvFlowReflectDeepCopy_cleanCache(ptr);
return NvFlowReflectDeepCopy_recursive(ptr, 0llu, src, type, 1u, NV_FLOW_FALSE);
} | 11,378 | C | 33.586626 | 240 | 0.745737 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowUploadBuffer.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowContext.h"
#include "NvFlowArray.h"
#define NV_FLOW_DISPATCH_BATCH_SIZE 32768u
//#define NV_FLOW_DISPATCH_BATCH_SIZE 256
struct NvFlowDispatchBatch
{
NvFlowBufferTransient* globalTransient = nullptr;
NvFlowUint blockIdxOffset = 0u;
NvFlowUint blockCount = 0u;
};
typedef NvFlowArray<NvFlowDispatchBatch, 8u> NvFlowDispatchBatches;
NV_FLOW_INLINE void NvFlowDispatchBatches_init_custom(NvFlowDispatchBatches* ptr, NvFlowUint totalBlockCount, NvFlowUint batchSize)
{
ptr->size = 0u;
for (NvFlowUint blockIdxOffset = 0u; blockIdxOffset < totalBlockCount; blockIdxOffset += batchSize)
{
NvFlowDispatchBatch batch = {};
batch.globalTransient = nullptr;
batch.blockIdxOffset = blockIdxOffset;
batch.blockCount = totalBlockCount - blockIdxOffset;
if (batch.blockCount > batchSize)
{
batch.blockCount = batchSize;
}
ptr->pushBack(batch);
}
}
NV_FLOW_INLINE void NvFlowDispatchBatches_init(NvFlowDispatchBatches* ptr, NvFlowUint totalBlockCount)
{
NvFlowDispatchBatches_init_custom(ptr, totalBlockCount, NV_FLOW_DISPATCH_BATCH_SIZE);
}
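// Illustrative sketch, not part of the API: splitting work over batches, where
// totalBlockCount and the per-batch dispatch are assumed to come from the caller.
//
//   NvFlowDispatchBatches batches;
//   NvFlowDispatchBatches_init(&batches, totalBlockCount);
//   for (NvFlowUint64 batchIdx = 0u; batchIdx < batches.size; batchIdx++)
//   {
//       // dispatch batches[batchIdx].blockCount blocks starting at batches[batchIdx].blockIdxOffset
//   }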
struct NvFlowBufferVersioning
{
NvFlowUint64 mappedIdx = ~0llu;
NvFlowUint64 frontIdx = ~0llu;
NvFlowArray<NvFlowUint64, 16u> recycleFenceValues;
};
NV_FLOW_INLINE NvFlowUint64 NvFlowBufferVersioning_map(NvFlowBufferVersioning* ptr, NvFlowUint64 lastFenceCompleted)
{
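// scan for a recyclable slot, starting just past the front and wrapping to the beginning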
NvFlowUint64 index = ptr->frontIdx + 1u;
for (; index < ptr->recycleFenceValues.size; index++)
{
if (ptr->recycleFenceValues[index] <= lastFenceCompleted)
{
break;
}
}
if (index == ptr->recycleFenceValues.size)
{
for (index = 0u; index < ptr->frontIdx && index < ptr->recycleFenceValues.size; index++)
{
if (ptr->recycleFenceValues[index] <= lastFenceCompleted)
{
break;
}
}
}
if (!(index < ptr->recycleFenceValues.size && ptr->recycleFenceValues[index] <= lastFenceCompleted))
{
index = ptr->recycleFenceValues.allocateBack();
}
ptr->recycleFenceValues[index] = ~0llu;
ptr->mappedIdx = index;
return ptr->mappedIdx;
}
NV_FLOW_INLINE void NvFlowBufferVersioning_unmap(NvFlowBufferVersioning* ptr, NvFlowUint64 nextFenceValue)
{
if (ptr->frontIdx < ptr->recycleFenceValues.size)
{
ptr->recycleFenceValues[ptr->frontIdx] = nextFenceValue;
}
ptr->frontIdx = ptr->mappedIdx;
}
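// Typical per-frame pattern, sketched under the assumption that the caller
// tracks lastFrameCompleted and currentFrame via the context interface:
//
//   NvFlowUint64 slotIdx = NvFlowBufferVersioning_map(&versioning, lastFrameCompleted);
//   // ... write to the resource associated with slotIdx ...
//   NvFlowBufferVersioning_unmap(&versioning, currentFrame);
//   // versioning.frontIdx now names the slot to read; the previous front
//   // becomes recyclable once currentFrame completes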
struct NvFlowUploadBuffer
{
NvFlowContextInterface* contextInterface = nullptr;
NvFlowBuffer*(NV_FLOW_ABI* createBuffer)(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata) = nullptr;
void(NV_FLOW_ABI* addPassCopyBuffer)(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata) = nullptr;
void* userdata = nullptr;
NvFlowBufferUsageFlags flags = 0u;
NvFlowFormat format = eNvFlowFormat_unknown;
NvFlowUint structureStride = 0u;
NvFlowBufferVersioning versioning;
NvFlowArray<NvFlowBuffer*, 8u> buffers;
NvFlowArray<NvFlowUint64, 8u> bufferSizes;
NvFlowBuffer* deviceBuffer = nullptr;
NvFlowUint64 deviceNumBytes = 0llu;
};
NV_FLOW_INLINE void NvFlowUploadBuffer_init_custom(
NvFlowContextInterface* contextInterface,
NvFlowContext* context, NvFlowUploadBuffer* ptr,
NvFlowBufferUsageFlags flags, NvFlowFormat format, NvFlowUint structureStride,
NvFlowBuffer*(NV_FLOW_ABI* createBuffer)(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata),
void(NV_FLOW_ABI* addPassCopyBuffer)(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata),
void* userdata
)
{
ptr->contextInterface = contextInterface;
ptr->createBuffer = createBuffer;
ptr->addPassCopyBuffer = addPassCopyBuffer;
ptr->userdata = userdata;
ptr->flags = flags;
ptr->format = format;
ptr->structureStride = structureStride;
}
NV_FLOW_INLINE NvFlowBuffer* NvFlowUploadBuffer_createBuffer(NvFlowContext* context, NvFlowMemoryType memoryType, const NvFlowBufferDesc* desc, void* userdata)
{
NvFlowUploadBuffer* ptr = (NvFlowUploadBuffer*)userdata;
return ptr->contextInterface->createBuffer(context, memoryType, desc);
}
NV_FLOW_INLINE void NvFlowUploadBuffer_addPassCopyBuffer(NvFlowContext* context, const NvFlowPassCopyBufferParams* params, void* userdata)
{
NvFlowUploadBuffer* ptr = (NvFlowUploadBuffer*)userdata;
ptr->contextInterface->addPassCopyBuffer(context, params);
}
NV_FLOW_INLINE void NvFlowUploadBuffer_init(NvFlowContextInterface* contextInterface, NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowBufferUsageFlags flags, NvFlowFormat format, NvFlowUint structureStride)
{
NvFlowUploadBuffer_init_custom(contextInterface, context, ptr, flags, format, structureStride, NvFlowUploadBuffer_createBuffer, NvFlowUploadBuffer_addPassCopyBuffer, ptr);
}
NV_FLOW_INLINE void NvFlowUploadBuffer_destroy(NvFlowContext* context, NvFlowUploadBuffer* ptr)
{
for (NvFlowUint64 idx = 0u; idx < ptr->buffers.size; idx++)
{
if (ptr->buffers[idx])
{
ptr->contextInterface->destroyBuffer(context, ptr->buffers[idx]);
ptr->buffers[idx] = nullptr;
}
}
ptr->buffers.size = 0u;
ptr->bufferSizes.size = 0u;
if (ptr->deviceBuffer)
{
ptr->contextInterface->destroyBuffer(context, ptr->deviceBuffer);
ptr->deviceBuffer = nullptr;
}
}
NV_FLOW_INLINE NvFlowUint64 NvFlowUploadBuffer_computeBufferSize(NvFlowUint64 requested)
{
NvFlowUint64 bufferSize = 65536u;
while (bufferSize < requested)
{
bufferSize *= 2u;
}
return bufferSize;
}
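// e.g. computeBufferSize(100000) == 131072; sizes start at 64 KiB and grow in powers of two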
NV_FLOW_INLINE void* NvFlowUploadBuffer_map(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUint64 numBytes)
{
NvFlowUint64 instanceIdx = NvFlowBufferVersioning_map(&ptr->versioning, ptr->contextInterface->getLastFrameCompleted(context));
while (instanceIdx >= ptr->buffers.size)
{
ptr->buffers.pushBack(nullptr);
ptr->bufferSizes.pushBack(0llu);
}
if (ptr->buffers[instanceIdx] && ptr->bufferSizes[instanceIdx] < numBytes)
{
ptr->contextInterface->destroyBuffer(context, ptr->buffers[instanceIdx]);
ptr->buffers[instanceIdx] = nullptr;
}
if (!ptr->buffers[instanceIdx])
{
NvFlowBufferDesc bufDesc = {};
bufDesc.format = ptr->format;
bufDesc.usageFlags = ptr->flags;
bufDesc.structureStride = ptr->structureStride;
bufDesc.sizeInBytes = NvFlowUploadBuffer_computeBufferSize(numBytes);
ptr->bufferSizes[instanceIdx] = bufDesc.sizeInBytes;
ptr->buffers[instanceIdx] = ptr->contextInterface->createBuffer(context, eNvFlowMemoryType_upload, &bufDesc);
}
return ptr->contextInterface->mapBuffer(context, ptr->buffers[instanceIdx]);
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_unmap(NvFlowContext* context, NvFlowUploadBuffer* ptr)
{
ptr->contextInterface->unmapBuffer(context, ptr->buffers[ptr->versioning.mappedIdx]);
NvFlowBufferVersioning_unmap(&ptr->versioning, ptr->contextInterface->getCurrentFrame(context));
return ptr->contextInterface->registerBufferAsTransient(context, ptr->buffers[ptr->versioning.frontIdx]);
}
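// Illustrative upload sketch, not part of the API: assumes the buffer was set up
// with NvFlowUploadBuffer_init and that srcData holds numBytes of CPU data.
//
//   void* mapped = NvFlowUploadBuffer_map(context, &upload, numBytes);
//   memcpy(mapped, srcData, numBytes);
//   // either bind the upload-heap buffer directly:
//   NvFlowBufferTransient* transient = NvFlowUploadBuffer_unmap(context, &upload);
//   // or, instead, unmap and copy to a device-local buffer in one call:
//   // NvFlowBufferTransient* deviceTransient = NvFlowUploadBuffer_unmapDevice(context, &upload, 0llu, numBytes, "MyUpload");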
struct NvFlowUploadBufferCopyRange
{
NvFlowUint64 offset;
NvFlowUint64 numBytes;
};
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_getDevice(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUint64 numBytes)
{
NvFlowUint64 srcNumBytes = NvFlowUploadBuffer_computeBufferSize(numBytes);
if (ptr->deviceBuffer && ptr->deviceNumBytes < srcNumBytes)
{
ptr->contextInterface->destroyBuffer(context, ptr->deviceBuffer);
ptr->deviceBuffer = nullptr;
ptr->deviceNumBytes = 0llu;
}
if (!ptr->deviceBuffer)
{
NvFlowBufferDesc bufDesc = {};
bufDesc.format = ptr->format;
bufDesc.usageFlags = ptr->flags | eNvFlowBufferUsage_bufferCopyDst;
bufDesc.structureStride = ptr->structureStride;
bufDesc.sizeInBytes = srcNumBytes;
ptr->deviceBuffer = ptr->createBuffer(context, eNvFlowMemoryType_device, &bufDesc, ptr->userdata);
ptr->deviceNumBytes = srcNumBytes;
}
return ptr->contextInterface->registerBufferAsTransient(context, ptr->deviceBuffer);
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_unmapDeviceN(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUploadBufferCopyRange* copyRanges, NvFlowUint64 copyRangeCount, const char* debugName)
{
NvFlowBufferTransient* src = NvFlowUploadBuffer_unmap(context, ptr);
NvFlowUint64 srcNumBytes = ptr->bufferSizes[ptr->versioning.frontIdx];
NvFlowBufferTransient* dst = NvFlowUploadBuffer_getDevice(context, ptr, srcNumBytes);
NvFlowUint activeCopyCount = 0u;
for (NvFlowUint64 copyRangeIdx = 0u; copyRangeIdx < copyRangeCount; copyRangeIdx++)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = copyRanges[copyRangeIdx].offset;
copyParams.dstOffset = copyRanges[copyRangeIdx].offset;
copyParams.numBytes = copyRanges[copyRangeIdx].numBytes;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = debugName ? debugName : "UploadBufferUnmapDevice";
if (copyParams.numBytes > 0u)
{
ptr->addPassCopyBuffer(context, &copyParams, ptr->userdata);
activeCopyCount++;
}
}
// this ensures proper barriers
if (activeCopyCount == 0u)
{
NvFlowPassCopyBufferParams copyParams = {};
copyParams.srcOffset = 0llu;
copyParams.dstOffset = 0llu;
copyParams.numBytes = 0llu;
copyParams.src = src;
copyParams.dst = dst;
copyParams.debugLabel = debugName ? debugName : "UploadBufferUnmapDevice";
ptr->addPassCopyBuffer(context, &copyParams, ptr->userdata);
}
return dst;
}
NV_FLOW_INLINE NvFlowBufferTransient* NvFlowUploadBuffer_unmapDevice(NvFlowContext* context, NvFlowUploadBuffer* ptr, NvFlowUint64 offset, NvFlowUint64 numBytes, const char* debugName)
{
NvFlowUploadBufferCopyRange copyRange = { offset, numBytes };
return NvFlowUploadBuffer_unmapDeviceN(context, ptr, &copyRange, 1u, debugName);
} | 11,167 | C | 34.680511 | 213 | 0.776305 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowPreprocessor.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowString.h"
struct NvFlowPreprocessor;
struct NvFlowPreprocessorRange
{
NvFlowUint64 begin;
NvFlowUint64 end;
};
enum NvFlowPreprocessorTokenType
{
eNvFlowPreprocessorTokenType_unknown = 0, // unclassified
eNvFlowPreprocessorTokenType_whitespace, //
eNvFlowPreprocessorTokenType_newline, // \n
eNvFlowPreprocessorTokenType_comment, // // comment
eNvFlowPreprocessorTokenType_name, // alpha_1234
eNvFlowPreprocessorTokenType_number, // 1234
eNvFlowPreprocessorTokenType_string, // "string"
eNvFlowPreprocessorTokenType_char, // 's'
eNvFlowPreprocessorTokenType_pound, // #
eNvFlowPreprocessorTokenType_comma, // ,
eNvFlowPreprocessorTokenType_period, // .
eNvFlowPreprocessorTokenType_semicolon, // ;
eNvFlowPreprocessorTokenType_colon, // :
eNvFlowPreprocessorTokenType_equals, // =
eNvFlowPreprocessorTokenType_asterisk, // *
eNvFlowPreprocessorTokenType_leftParenthesis, // (
eNvFlowPreprocessorTokenType_rightParenthesis, // )
eNvFlowPreprocessorTokenType_leftBracket, // [
eNvFlowPreprocessorTokenType_rightBracket, // ]
eNvFlowPreprocessorTokenType_leftCurlyBrace, // {
eNvFlowPreprocessorTokenType_rightCurlyBrace, // }
eNvFlowPreprocessorTokenType_lessThan, // <
eNvFlowPreprocessorTokenType_greaterThan, // >
eNvFlowPreprocessorTokenType_anyWhitespace, // For delimiter usage, aligns with NvFlowPreprocessorTokenIsWhitespace()
eNvFlowPreprocessorTokenType_count,
eNvFlowPreprocessorTokenType_maxEnum = 0x7FFFFFFF
};
struct NvFlowPreprocessorToken
{
NvFlowPreprocessorTokenType type;
const char* str;
};
NV_FLOW_INLINE NvFlowBool32 NvFlowPreprocessorTokenIsWhitespace(const NvFlowPreprocessorToken token)
{
return token.type == eNvFlowPreprocessorTokenType_whitespace ||
token.type == eNvFlowPreprocessorTokenType_newline ||
token.type == eNvFlowPreprocessorTokenType_comment;
}
NV_FLOW_INLINE void NvFlowPreprocessorSkipWhitespaceTokens(NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens)
{
while ((*pTokenIdx) < numTokens && NvFlowPreprocessorTokenIsWhitespace(tokens[(*pTokenIdx)]))
{
(*pTokenIdx)++;
}
}
enum NvFlowPreprocessorType
{
eNvFlowPreprocessorType_constant = 0, // name
eNvFlowPreprocessorType_statement = 1, // name arg0 arg1;
eNvFlowPreprocessorType_function = 2, // name(arg0, arg1, arg2)
eNvFlowPreprocessorType_index = 3, // name[arg0] or name[arg0]= arg1 arg2 arg3;
eNvFlowPreprocessorType_attribute = 4, // [name(arg0, arg1, arg2)]
eNvFlowPreprocessorType_line = 5, // #name arg0 \n
eNvFlowPreprocessorType_body = 6, // name <arg0, arg1> arg2 arg3(arg4, arg5) { arg6; arg7; }
eNvFlowPreprocessorType_templateInstance = 7, // name<arg0, arg1>
eNvFlowPreprocessorType_statementComma = 8, // "name arg0," or "name arg0)"
eNvFlowPreprocessorType_maxEnum = 0x7FFFFFFF
};
struct NvFlowPreprocessorConstant
{
const char* name;
const char* value;
};
struct NvFlowPreprocessorFunction
{
const char* name;
NvFlowPreprocessorType type;
void* userdata;
char*(*substitute)(NvFlowPreprocessor* ptr, void* userdata, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens);
NvFlowBool32 allowRecursion;
};
enum NvFlowPreprocessorMode
{
eNvFlowPreprocessorMode_default = 0, // Input string evaluated and substitution evaluated, no recursion
eNvFlowPreprocessorMode_singlePass = 1, // Input string evaluated once, no substitution evaluation
eNvFlowPreprocessorMode_disable_passthrough = 2, // Do not pass unhandled strings through to the output
eNvFlowPreprocessorMode_maxEnum = 0x7FFFFFFF
};
NvFlowPreprocessor* NvFlowPreprocessorCreate(NvFlowStringPool* pool);
void NvFlowPreprocessorDestroy(NvFlowPreprocessor* ptr);
void NvFlowPreprocessorReset(NvFlowPreprocessor* ptr);
void NvFlowPreprocessorSetMode(NvFlowPreprocessor* ptr, NvFlowPreprocessorMode mode);
NvFlowPreprocessorMode NvFlowPreprocessorGetMode(NvFlowPreprocessor* ptr);
NvFlowStringPool* NvFlowPreprocessorStringPool(NvFlowPreprocessor* ptr);
void NvFlowPreprocessorAddConstants(NvFlowPreprocessor* ptr, NvFlowUint64 numConstants, const NvFlowPreprocessorConstant* constants);
void NvFlowPreprocessorAddFunctions(NvFlowPreprocessor* ptr, NvFlowUint64 numFunctions, const NvFlowPreprocessorFunction* functions);
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters);
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter);
const char* NvFlowPreprocessorExtractDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter);
const char* NvFlowPreprocessorExtractDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters);
const char* NvFlowPreprocessorExtractDelimitedPreserve(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter);
const char* NvFlowPreprocessorExtractIfType(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType type);
const char* NvFlowPreprocessorConcatTokens(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numTokens);
NvFlowBool32 NvFlowPreprocessorFindKeyInSource(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* keyTokens, NvFlowUint64 keyTokenCount, const NvFlowPreprocessorToken* sourceTokens, NvFlowUint64 sourceTokenCount, NvFlowUint64* pSourceIndex);
char* NvFlowPreprocessorExecute(NvFlowPreprocessor* ptr, const char* input);
void NvFlowPreprocessorTokenize(NvFlowPreprocessor* ptr, const char* input, NvFlowUint64* pTotalTokens, NvFlowPreprocessorToken** pTokens);
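// Illustrative sketch, not part of the API: a simple constant substitution pass.
// NvFlowStringPoolCreate is assumed to come from NvFlowString.h.
//
//   NvFlowStringPool* pool = NvFlowStringPoolCreate();
//   NvFlowPreprocessor* preproc = NvFlowPreprocessorCreate(pool);
//   NvFlowPreprocessorConstant constants[1] = { { "BLOCK_DIM", "128" } };
//   NvFlowPreprocessorAddConstants(preproc, 1u, constants);
//   char* output = NvFlowPreprocessorExecute(preproc, "groupshared float sdata[BLOCK_DIM];");
//   // output has BLOCK_DIM replaced with 128; storage is owned by pool
//   NvFlowPreprocessorDestroy(preproc);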
enum NvFlowPreprocessorGlobalType
{
eNvFlowPreprocessorGlobalType_unknown = 0, // Unknown global type
eNvFlowPreprocessorGlobalType_statement = 1, // ConstantBuffer<Params> gParams;
eNvFlowPreprocessorGlobalType_function = 2, // returnType functionName(arg1, arg2, arg3) { [functionbody] }
eNvFlowPreprocessorGlobalType_attribute = 3, // [name(arg0, arg1, arg2)]
eNvFlowPreprocessorGlobalType_line = 4, // #define CONSTANT \n
eNvFlowPreprocessorGlobalType_maxEnum = 0x7FFFFFFF
};
char* NvFlowPreprocessorExecuteGlobal(NvFlowPreprocessor* ptr, const char* input, void* userdata, char*(*substitute)(NvFlowPreprocessor* ptr, void* userdata, NvFlowPreprocessorGlobalType globalType, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens)); | 8,509 | C | 45.758242 | 263 | 0.811141 |
NVIDIA-Omniverse/PhysX/flow/shared/NvFlowDatabase.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowReflect.h"
#include "NvFlowArray.h"
#include "NvFlowDeepCopy.h"
#include <string.h>
struct NvFlowDatabaseContext;
struct NvFlowDatabasePrim;
struct NvFlowDatabaseAttr;
struct NvFlowDatabaseInterface
{
NvFlowDatabasePrim*(NV_FLOW_ABI *createPrim)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowDatabasePrim* parent,
const char* displayTypename,
const char* path,
const char* name);
void(NV_FLOW_ABI* updatePrim)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowUint64 minActiveVersion,
NvFlowDatabasePrim* prim);
void(NV_FLOW_ABI* markDestroyedPrim)(NvFlowDatabaseContext* context, NvFlowDatabasePrim* prim);
void(NV_FLOW_ABI* destroyPrim)(NvFlowDatabaseContext* context, NvFlowDatabasePrim* prim);
NvFlowDatabaseAttr*(NV_FLOW_ABI* createAttr)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowDatabasePrim* prim,
const NvFlowReflectData* reflectData,
NvFlowUint8* mappedData);
void(NV_FLOW_ABI* updateAttr)(
NvFlowDatabaseContext* context,
NvFlowUint64 version,
NvFlowUint64 minActiveVersion,
NvFlowDatabaseAttr* attr,
const NvFlowReflectData* reflectData,
NvFlowUint8* mappedData);
void(NV_FLOW_ABI* markDestroyedAttr)(NvFlowDatabaseContext* context, NvFlowDatabaseAttr* attr);
void(NV_FLOW_ABI* destroyAttr)(NvFlowDatabaseContext* context, NvFlowDatabaseAttr* attr);
};
struct NvFlowDatabaseString
{
NvFlowArray<char> data;
void append(const char* str)
{
if (data.size > 0u)
{
data.size--;
}
if (str)
{
NvFlowUint64 idx = 0u;
while (str[idx])
{
data.pushBack(str[idx]);
idx++;
}
data.pushBack('\0');
}
}
void set(const char* str)
{
data.size = 0u;
append(str);
}
const char* get()
{
return data.data;
}
};
struct NvFlowDatabaseInstance
{
struct Prim
{
NvFlowDatabasePrim* prim;
NvFlowArrayPointer<Prim*> childPrims;
NvFlowArray<NvFlowDatabaseAttr*> attrs;
NvFlowDatabaseString path;
};
struct Data
{
NvFlowArray<NvFlowUint8> data;
NvFlowUint64 version;
NvFlowReflectDeepCopy* deepCopy = nullptr;
~Data()
{
if (deepCopy)
{
NvFlowReflectDeepCopy_destroy(deepCopy);
deepCopy = nullptr;
}
}
};
const NvFlowReflectDataType* dataType = nullptr;
NvFlowDatabaseString displayTypename;
NvFlowDatabaseString name;
NvFlowUint64 luid = 0llu;
NvFlowUint64 luidByteOffset = ~0llu;
Prim rootPrim;
NvFlowRingBufferPointer<Data*> datas;
NvFlowBool32 markedForDestroy = NV_FLOW_FALSE;
NvFlowArray<NvFlowUint8> defaultData;
struct StackState
{
NvFlowUint64 childIdx;
const NvFlowReflectDataType* reflectDataType;
NvFlowUint8* data;
Prim* prim;
};
NvFlowUint8* mapDataVersionAndType(NvFlowUint64 version, const NvFlowReflectDataType** pReflectDataType)
{
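// return the snapshot already mapped for this version if one exists; otherwise
// clone the newest snapshot (or the default value) into a new versioned copy
// (the reverse loop below relies on unsigned wraparound to terminate at zero)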
for (NvFlowUint64 idx = datas.activeCount() - 1u; idx < datas.activeCount(); idx--)
{
if (datas[idx]->version == version)
{
if (pReflectDataType)
{
*pReflectDataType = dataType;
}
return datas[idx]->data.data;
}
}
NvFlowDatabaseInstance::Data* data = datas.allocateBackPointer();
data->version = version;
data->data.reserve(dataType->elementSize);
data->data.size = dataType->elementSize;
if (datas.activeCount() >= 2u)
{
NvFlowDatabaseInstance::Data* oldData = datas[datas.activeCount() - 2u];
memcpy(data->data.data, oldData->data.data, data->data.size);
}
else if (dataType->defaultValue)
{
memcpy(data->data.data, dataType->defaultValue, data->data.size);
}
else
{
memset(data->data.data, 0, data->data.size);
}
// enforce luid
if (luidByteOffset < dataType->elementSize)
{
*((NvFlowUint64*)(data->data.data + luidByteOffset)) = luid;
}
if (pReflectDataType)
{
*pReflectDataType = dataType;
}
return data->data.data;
}
NvFlowUint8* mapDataVersion(NvFlowUint64 version)
{
const NvFlowReflectDataType* reflectDataType = nullptr;
return mapDataVersionAndType(version, &reflectDataType);
}
void deepCopyDataVersion(NvFlowUint64 version)
{
Data* data = nullptr;
for (NvFlowUint64 idx = datas.activeCount() - 1u; idx < datas.activeCount(); idx--)
{
if (datas[idx]->version == version)
{
data = datas[idx];
}
}
if (data)
{
if (!data->deepCopy)
{
data->deepCopy = NvFlowReflectDeepCopy_create();
}
NvFlowUint8* copyData = NvFlowReflectDeepCopy_update(data->deepCopy, data->data.data, dataType);
// copy the deep-copied root struct back over the mapped data so its pointer fields reference the deep copy's stable storage
memcpy(data->data.data, copyData, data->data.size);
}
}
NvFlowUint8* mapDataVersionReadOnly(NvFlowUint64 version)
{
// TODO: Simply update version to avoid copy
return mapDataVersion(version);
}
template<const NvFlowDatabaseInterface* iface>
void init(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 luidIn, const NvFlowReflectDataType* dataTypeIn, const char* displayTypenameIn, const char* pathIn, const char* nameIn)
{
dataType = dataTypeIn;
displayTypename.set(displayTypenameIn);
name.set(nameIn);
luid = luidIn;
luidByteOffset = ~0llu;
// try to find luid offset in root
for (NvFlowUint64 childIdx = 0u; childIdx < dataType->childReflectDataCount; childIdx++)
{
if (strcmp(dataType->childReflectDatas[childIdx].name, "luid") == 0)
{
luidByteOffset = dataType->childReflectDatas[childIdx].dataOffset;
break;
}
}
rootPrim.path.set(pathIn);
rootPrim.prim = nullptr;
if (iface->createPrim)
{
rootPrim.prim = iface->createPrim(
context,
version,
nullptr,
displayTypename.get(),
rootPrim.path.get(),
name.get());
}
NvFlowUint8* mappedData = mapDataVersion(version);
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
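// iterative depth-first traversal of the reflection tree: struct children push
// onto stateStack, leaf children become attributes, and completed levels pop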
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims.allocateBackPointer();
state.prim->attrs.pushBack(nullptr);
// form path
childPrim->path.set(state.prim->path.get());
childPrim->path.append("/");
childPrim->path.append(childReflectData->name);
childPrim->prim = nullptr;
if (iface->createPrim)
{
childPrim->prim = iface->createPrim(
context,
version,
state.prim->prim,
NvFlowReflectTrimPrefix(childReflectData->dataType->structTypename),
childPrim->path.get(),
childReflectData->name);
}
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
NvFlowDatabaseAttr* attr = nullptr;
if (iface->createAttr)
{
attr = iface->createAttr(context, version, state.prim->prim, childReflectData, state.data);
}
state.prim->attrs.pushBack(attr);
state.prim->childPrims.pushBack(nullptr);
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
state = stateStack.back();
stateStack.popBack();
}
}
}
void process(NvFlowUint64 version, NvFlowReflectProcess_t processReflect, void* userdata)
{
NvFlowUint8* mappedData = mapDataVersion(version);
processReflect(mappedData, dataType, userdata);
}
template<const NvFlowDatabaseInterface* iface>
void update(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
if (!markedForDestroy)
{
NvFlowUint8* mappedData = mapDataVersion(version);
if (rootPrim.prim)
{
iface->updatePrim(context, version, minActiveVersion, rootPrim.prim);
}
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims[state.childIdx];
if (childPrim->prim)
{
iface->updatePrim(context, version, minActiveVersion, childPrim->prim);
}
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto attr = state.prim->attrs[state.childIdx];
if (attr)
{
iface->updateAttr(context, version, minActiveVersion, attr, childReflectData, state.data);
}
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
state = stateStack.back();
stateStack.popBack();
}
}
}
NvFlowUint freeThreshold = markedForDestroy ? 0u : 1u;
while (datas.activeCount() > freeThreshold && datas.front()->version < minActiveVersion)
{
datas.popFront();
}
}
template<const NvFlowDatabaseInterface* iface>
void markForDestroy(NvFlowDatabaseContext* context)
{
NvFlowUint8* mappedData = nullptr;
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims[state.childIdx];
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
auto attr = state.prim->attrs[state.childIdx];
if (attr)
{
iface->markDestroyedAttr(context, attr);
}
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
if (state.prim->prim)
{
iface->markDestroyedPrim(context, state.prim->prim);
}
state = stateStack.back();
stateStack.popBack();
}
}
if (rootPrim.prim)
{
iface->markDestroyedPrim(context, rootPrim.prim);
}
markedForDestroy = NV_FLOW_TRUE;
}
template<const NvFlowDatabaseInterface* iface>
void destroy(NvFlowDatabaseContext* context)
{
NvFlowUint8* mappedData = nullptr;
StackState state = { 0llu, dataType, mappedData, &rootPrim };
NvFlowArray<StackState, 8u> stateStack;
for (; state.childIdx < state.reflectDataType->childReflectDataCount; state.childIdx++)
{
// push prims
while (state.reflectDataType->childReflectDatas[state.childIdx].dataType->dataType == eNvFlowType_struct)
{
const NvFlowReflectData* childReflectData = state.reflectDataType->childReflectDatas + state.childIdx;
auto childPrim = state.prim->childPrims[state.childIdx];
stateStack.pushBack(state);
state.childIdx = 0u;
state.reflectDataType = childReflectData->dataType;
state.data += childReflectData->dataOffset;
state.prim = childPrim;
}
// attributes
if (state.childIdx < state.reflectDataType->childReflectDataCount)
{
auto attr = state.prim->attrs[state.childIdx];
if (attr)
{
iface->destroyAttr(context, attr);
attr = nullptr;
}
}
// pop prims
while (state.childIdx + 1u >= state.reflectDataType->childReflectDataCount && stateStack.size > 0u)
{
if (state.prim->prim)
{
iface->destroyPrim(context, state.prim->prim);
state.prim->prim = nullptr;
}
state = stateStack.back();
stateStack.popBack();
}
}
if (rootPrim.prim)
{
iface->destroyPrim(context, rootPrim.prim);
rootPrim.prim = nullptr;
}
}
};
struct NvFlowDatabaseType
{
const NvFlowReflectDataType* dataType = nullptr;
NvFlowDatabaseString displayTypeName;
NvFlowArrayPointer<NvFlowDatabaseInstance*> instances;
struct TypeSnapshot
{
NvFlowDatabaseTypeSnapshot snapshot;
NvFlowArray<NvFlowUint8*> instanceDatas;
};
NvFlowRingBufferPointer<TypeSnapshot*> snapshots;
void init(const NvFlowReflectDataType* dataTypeIn, const char* displayTypeNameIn)
{
dataType = dataTypeIn;
displayTypeName.set(displayTypeNameIn);
}
template<const NvFlowDatabaseInterface* iface>
void update(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
instances[instanceIdx]->update<iface>(context, version, minActiveVersion);
}
// release instances
{
NvFlowUint64 keepCount = 0llu;
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
if (instances[instanceIdx]->markedForDestroy && instances[instanceIdx]->datas.activeCount() == 0u)
{
instances[instanceIdx]->destroy<iface>(context);
instances.deletePointerAtIndex(instanceIdx);
}
else
{
instances.swapPointers(keepCount, instanceIdx);
keepCount++;
}
}
instances.size = keepCount;
}
// release snapshots
while (snapshots.activeCount() > 0u && snapshots.front()->snapshot.version < minActiveVersion)
{
snapshots.popFront();
}
}
template<const NvFlowDatabaseInterface* iface>
void destroy(NvFlowDatabaseContext* context)
{
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
instances[instanceIdx]->destroy<iface>(context);
}
instances.deletePointers();
}
void getSnapshot(NvFlowDatabaseTypeSnapshot* snapshot, NvFlowUint64 version)
{
auto ptr = snapshots.allocateBackPointer();
ptr->snapshot.version = version;
ptr->snapshot.dataType = dataType;
ptr->instanceDatas.size = 0u;
for (NvFlowUint instanceIdx = 0u; instanceIdx < instances.size; instanceIdx++)
{
if (!instances[instanceIdx]->markedForDestroy)
{
NvFlowUint8* data = instances[instanceIdx]->mapDataVersionReadOnly(version);
ptr->instanceDatas.pushBack(data);
}
}
ptr->snapshot.instanceDatas = ptr->instanceDatas.data;
ptr->snapshot.instanceCount = ptr->instanceDatas.size;
if (snapshot)
{
*snapshot = ptr->snapshot;
}
}
};
struct NvFlowDatabase
{
NvFlowArrayPointer<NvFlowDatabaseType*> types;
struct Snapshot
{
NvFlowDatabaseSnapshot snapshot;
NvFlowArray<NvFlowDatabaseTypeSnapshot> typeSnapshots;
};
NvFlowRingBufferPointer<Snapshot*> snapshots;
NvFlowUint64 luidCounter = 0llu;
NvFlowDatabaseType* createType(const NvFlowReflectDataType* dataTypeIn, const char* displayTypeName)
{
auto ptr = types.allocateBackPointer();
ptr->init(dataTypeIn, displayTypeName);
return ptr;
}
template<const NvFlowDatabaseInterface* iface>
NvFlowDatabaseInstance* createInstance(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowDatabaseType* type, const char* pathIn, const char* name)
{
auto ptr = type->instances.allocateBackPointer();
luidCounter++;
ptr->init<iface>(context, version, luidCounter, type->dataType, type->displayTypeName.get(), pathIn, name);
return ptr;
}
template<const NvFlowDatabaseInterface* iface>
void update(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
types[typeIdx]->update<iface>(context, version, minActiveVersion);
}
// release snapshots
while (snapshots.activeCount() > 0u && snapshots.front()->snapshot.version < minActiveVersion)
{
snapshots.popFront();
}
}
template<const NvFlowDatabaseInterface* iface>
void markInstanceForDestroy(NvFlowDatabaseContext* context, NvFlowDatabaseInstance* ptr)
{
ptr->markForDestroy<iface>(context);
}
template<const NvFlowDatabaseInterface* iface>
void markAllInstancesForDestroy(NvFlowDatabaseContext* context)
{
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseType* type = types[typeIdx];
for (NvFlowUint instanceIdx = 0u; instanceIdx < type->instances.size; instanceIdx++)
{
type->instances[instanceIdx]->markForDestroy<iface>(context);
}
}
}
template<const NvFlowDatabaseInterface* iface>
NvFlowBool32 snapshotPending(NvFlowDatabaseContext* context, NvFlowUint64 version, NvFlowUint64 minActiveVersion)
{
update<iface>(context, version, minActiveVersion);
NvFlowBool32 anySnapshotPending = NV_FLOW_FALSE;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
if (types[typeIdx]->snapshots.activeCount() > 0u)
{
anySnapshotPending = NV_FLOW_TRUE;
}
}
if (snapshots.activeCount() > 0u)
{
anySnapshotPending = NV_FLOW_TRUE;
}
return anySnapshotPending;
}
template<const NvFlowDatabaseInterface* iface>
void destroy(NvFlowDatabaseContext* context)
{
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
types[typeIdx]->destroy<iface>(context);
}
}
void getSnapshot(NvFlowDatabaseSnapshot* snapshot, NvFlowUint64 version)
{
auto ptr = snapshots.allocateBackPointer();
ptr->snapshot.version = version;
ptr->typeSnapshots.size = 0u;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseTypeSnapshot typeSnapshot = {};
types[typeIdx]->getSnapshot(&typeSnapshot, version);
ptr->typeSnapshots.pushBack(typeSnapshot);
}
ptr->snapshot.typeSnapshots = ptr->typeSnapshots.data;
ptr->snapshot.typeSnapshotCount = ptr->typeSnapshots.size;
if (snapshot)
{
*snapshot = ptr->snapshot;
}
}
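    // Two-call pattern (inferred from the body below, not original
    // documentation): pass a null pInstances to query the active count, then
    // call again with a buffer of that size; on the fill pass *pInstanceCount
    // is clamped to the number of entries actually written.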
void enumerateActiveInstances(NvFlowDatabaseInstance** pInstances, NvFlowUint64* pInstanceCount)
{
if (!pInstances && pInstanceCount)
{
NvFlowUint64 activeCount = 0llu;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseType* type = types[typeIdx];
for (NvFlowUint instanceIdx = 0u; instanceIdx < type->instances.size; instanceIdx++)
{
if (!type->instances[instanceIdx]->markedForDestroy)
{
activeCount++;
}
}
}
*pInstanceCount = activeCount;
}
if (pInstances && pInstanceCount)
{
NvFlowUint64 activeCount = 0llu;
for (NvFlowUint64 typeIdx = 0u; typeIdx < types.size; typeIdx++)
{
NvFlowDatabaseType* type = types[typeIdx];
for (NvFlowUint instanceIdx = 0u; instanceIdx < type->instances.size; instanceIdx++)
{
if (!type->instances[instanceIdx]->markedForDestroy)
{
if (activeCount < (*pInstanceCount))
{
pInstances[activeCount] = type->instances[instanceIdx];
activeCount++;
}
}
}
}
*pInstanceCount = activeCount;
}
}
};
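// Minimal usage sketch for NvFlowDatabase (hypothetical driver code; 'iface',
// 'ctx', 'dataType', and the version counters are assumptions, not part of
// this file):
//
//   NvFlowDatabase db = {};
//   NvFlowDatabaseType* type = db.createType(dataType, "emitter");
//   NvFlowDatabaseInstance* inst =
//       db.createInstance<iface>(ctx, version, type, "/root/emitter0", "emitter0");
//   db.update<iface>(ctx, version, minActiveVersion);
//   NvFlowDatabaseSnapshot snapshot = {};
//   db.getSnapshot(&snapshot, version);
//   // ...consume the snapshot, advance version/minActiveVersion...
//   db.markAllInstancesForDestroy<iface>(ctx);
//   while (db.snapshotPending<iface>(ctx, version, minActiveVersion)) { /* pump */ }
//   db.destroy<iface>(ctx);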
// NVIDIA-Omniverse/PhysX/flow/shared/NvFlowLocationHashTable.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowTypes.h"
#include "NvFlowArray.h"
struct NvFlowLocationHashTableRange
{
NvFlowUint64 beginIdx;
NvFlowUint64 endIdx;
};
struct NvFlowLocationHashTable
{
    NvFlowUint tableDimBits = 0u;
    NvFlowUint tableDimLessOne = 0u;
NvFlowUint tableDim3 = 1u;
NvFlowArray<NvFlowLocationHashTableRange> ranges;
NvFlowArray<NvFlowUint64> nextIndices;
NvFlowArray<NvFlowInt4> locations;
NvFlowArray<NvFlowUint> masks;
NvFlowInt4 locationMin = { 0, 0, 0, 0 };
NvFlowInt4 locationMax = { 0, 0, 0, 0 };
NvFlowArray<NvFlowInt4> tmpLocations;
NvFlowArray<NvFlowUint> tmpMasks;
void reset()
{
        tableDimBits = 0u;
        tableDimLessOne = 0u;
tableDim3 = 1u;
ranges.size = 0u;
nextIndices.size = 0u;
NvFlowLocationHashTableRange nullRange = { ~0llu, ~0llu };
ranges.pushBack(nullRange);
locations.size = 0u;
masks.size = 0u;
}
NvFlowLocationHashTable()
{
reset();
}
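    // Layout note (inferred from the code): locations hash into tableDim3
    // buckets by their xyz bits. Each bucket owns a contiguous
    // [beginIdx, endIdx) run in 'locations'; entries that land in a bucket
    // non-contiguously are appended to a singly linked chain through
    // 'nextIndices', rooted at nextIndices[endIdx - 1]. rebuildTable()
    // reconstructs both from 'locations' after growth, compaction, or sort.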
void rebuildTable()
{
ranges.size = 0u;
ranges.reserve(tableDim3);
ranges.size = tableDim3;
nextIndices.size = 0u;
nextIndices.reserve(locations.size);
nextIndices.size = locations.size;
// invalidate ranges
NvFlowLocationHashTableRange nullRange = { ~0llu, ~0llu };
for (NvFlowUint64 rangeIdx = 0u; rangeIdx < ranges.size; rangeIdx++)
{
ranges[rangeIdx] = nullRange;
}
for (NvFlowUint64 locationIdx = 0u; locationIdx < locations.size; locationIdx++)
{
NvFlowInt4 location = locations[locationIdx];
NvFlowUint64 baseRangeIdx = (location.x & tableDimLessOne) |
((location.y & tableDimLessOne) << tableDimBits) |
((location.z & tableDimLessOne) << (tableDimBits + tableDimBits));
// reset next for this location
nextIndices[locationIdx] = ~0llu;
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
if (beginIdx >= endIdx)
{
ranges[baseRangeIdx].beginIdx = locationIdx;
ranges[baseRangeIdx].endIdx = locationIdx + 1u;
}
else if (endIdx == locationIdx)
{
ranges[baseRangeIdx].endIdx = locationIdx + 1u;
nextIndices[endIdx - 1u] = locationIdx;
}
else
{
NvFlowUint64 prevIdx = endIdx - 1u;
NvFlowUint64 currentIdx = nextIndices[prevIdx];
while (currentIdx < nextIndices.size)
{
prevIdx = currentIdx;
currentIdx = nextIndices[currentIdx];
}
nextIndices[prevIdx] = locationIdx;
}
}
}
void compactNonZeroWithLimit(NvFlowUint64 maxLocations)
{
NvFlowUint64 dstIdx = 0u;
for (NvFlowUint64 srcIdx = 0u; srcIdx < locations.size && dstIdx < maxLocations; srcIdx++)
{
if (masks[srcIdx])
{
locations[dstIdx] = locations[srcIdx];
masks[dstIdx] = masks[srcIdx];
dstIdx++;
}
}
locations.size = dstIdx;
masks.size = dstIdx;
// optimize compacted table dim
        tableDimBits = 0u;
        tableDimLessOne = 0u;
tableDim3 = 1u;
while (locations.size > tableDim3)
{
tableDimBits++;
tableDimLessOne = (1u << tableDimBits) - 1u;
            tableDim3 = (1u << (tableDimBits + tableDimBits + tableDimBits));
}
rebuildTable();
}
void sort()
{
NvFlowArray_copy(tmpLocations, locations);
NvFlowArray_copy(tmpMasks, masks);
NvFlowUint64 globalOffset = 0u;
for (NvFlowUint64 baseRangeIdx = 0u; baseRangeIdx < ranges.size; baseRangeIdx++)
{
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
for (NvFlowUint64 currentIdx = beginIdx; currentIdx < endIdx; currentIdx++)
{
locations[globalOffset] = tmpLocations[currentIdx];
masks[globalOffset] = tmpMasks[currentIdx];
globalOffset++;
}
if (beginIdx < endIdx)
{
NvFlowUint64 currentIdx = nextIndices[endIdx - 1u];
while (currentIdx < nextIndices.size)
{
locations[globalOffset] = tmpLocations[currentIdx];
masks[globalOffset] = tmpMasks[currentIdx];
globalOffset++;
currentIdx = nextIndices[currentIdx];
}
}
}
rebuildTable();
}
NvFlowUint64 find(NvFlowInt4 location)
{
NvFlowUint64 baseRangeIdx = (location.x & tableDimLessOne) |
((location.y & tableDimLessOne) << tableDimBits) |
((location.z & tableDimLessOne) << (tableDimBits + tableDimBits));
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
for (NvFlowUint64 currentIdx = beginIdx; currentIdx < endIdx; currentIdx++)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
return currentIdx;
}
}
if (beginIdx < endIdx)
{
NvFlowUint64 currentIdx = nextIndices[endIdx - 1u];
while (currentIdx < nextIndices.size)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
return currentIdx;
}
currentIdx = nextIndices[currentIdx];
}
}
return ~0llu;
}
void pushNoResize(NvFlowInt4 location, NvFlowUint mask)
{
NvFlowUint64 baseRangeIdx = (location.x & tableDimLessOne) |
((location.y & tableDimLessOne) << tableDimBits) |
((location.z & tableDimLessOne) << (tableDimBits + tableDimBits));
NvFlowUint64 beginIdx = ranges[baseRangeIdx].beginIdx;
NvFlowUint64 endIdx = ranges[baseRangeIdx].endIdx;
for (NvFlowUint64 currentIdx = beginIdx; currentIdx < endIdx; currentIdx++)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
masks[currentIdx] |= mask;
return;
}
}
if (beginIdx >= endIdx)
{
locations.pushBack(location);
masks.pushBack(mask);
nextIndices.pushBack(~0llu);
ranges[baseRangeIdx].beginIdx = locations.size - 1u;
ranges[baseRangeIdx].endIdx = locations.size;
}
else if (endIdx == locations.size)
{
locations.pushBack(location);
masks.pushBack(mask);
nextIndices.pushBack(~0llu);
ranges[baseRangeIdx].endIdx = locations.size;
nextIndices[endIdx - 1u] = locations.size - 1u;
}
else
{
NvFlowUint64 prevIdx = endIdx - 1u;
NvFlowUint64 currentIdx = nextIndices[prevIdx];
while (currentIdx < nextIndices.size)
{
if (location.x == locations[currentIdx].x &&
location.y == locations[currentIdx].y &&
location.z == locations[currentIdx].z &&
location.w == locations[currentIdx].w)
{
masks[currentIdx] |= mask;
return;
}
prevIdx = currentIdx;
currentIdx = nextIndices[currentIdx];
}
locations.pushBack(location);
masks.pushBack(mask);
nextIndices.pushBack(~0llu);
nextIndices[prevIdx] = locations.size - 1u;
}
}
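    // Growth policy: once the entry count exceeds tableDim3 (an average load
    // factor of one entry per bucket), double the table dimension and rehash.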
void conditionalGrowTable()
{
if (locations.size > tableDim3)
{
tableDimBits++;
tableDimLessOne = (1u << tableDimBits) - 1u;
            tableDim3 = (1u << (tableDimBits + tableDimBits + tableDimBits));
rebuildTable();
}
}
void push(NvFlowInt4 location, NvFlowUint mask)
{
pushNoResize(location, mask);
conditionalGrowTable();
}
void computeStats()
{
locationMin = NvFlowInt4{ 0, 0, 0, 0 };
locationMax = NvFlowInt4{ 0, 0, 0, 0 };
if (locations.size > 0)
{
locationMin = locations[0];
locationMax.x = locations[0].x + 1;
locationMax.y = locations[0].y + 1;
locationMax.z = locations[0].z + 1;
locationMax.w = locations[0].w + 1;
}
for (NvFlowUint64 locationIdx = 1u; locationIdx < locations.size; locationIdx++)
{
NvFlowInt4 location = locations[locationIdx];
if (location.x < locationMin.x)
{
locationMin.x = location.x;
}
if (location.y < locationMin.y)
{
locationMin.y = location.y;
}
if (location.z < locationMin.z)
{
locationMin.z = location.z;
}
if (location.w < locationMin.w)
{
locationMin.w = location.w;
}
// plus one, since max is exclusive
if (location.x + 1 > locationMax.x)
{
locationMax.x = location.x + 1;
}
if (location.y + 1 > locationMax.y)
{
locationMax.y = location.y + 1;
}
if (location.z + 1 > locationMax.z)
{
locationMax.z = location.z + 1;
}
if (location.w + 1 > locationMax.w)
{
locationMax.w = location.w + 1;
}
}
}
};
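// Minimal usage sketch (hypothetical, not part of the original header):
//
//   NvFlowLocationHashTable table;
//   table.push(NvFlowInt4{ 1, 2, 3, 0 }, 1u);      // insert, or OR in the mask
//   NvFlowUint64 idx = table.find(NvFlowInt4{ 1, 2, 3, 0 });
//   if (idx != ~0llu) { NvFlowUint mask = table.masks[idx]; }
//   table.sort();          // regroup entries bucket by bucket
//   table.computeStats();  // fills locationMin / locationMax (max exclusive)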
// NVIDIA-Omniverse/PhysX/flow/shared/NvFlowString.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include <stdarg.h>
#include "NvFlowTypes.h"
/// ************************** String Pool *********************************************
struct NvFlowStringPool;
NvFlowStringPool* NvFlowStringPoolCreate();
char* NvFlowStringPoolAllocate(NvFlowStringPool* pool, NvFlowUint64 size);
void NvFlowStringPoolTempAllocate(NvFlowStringPool* ptr, char** p_str_data, NvFlowUint64* p_str_size);
void NvFlowStringPoolTempAllocateCommit(NvFlowStringPool* ptr, char* str_data, NvFlowUint64 str_size);
void NvFlowStringPoolDestroy(NvFlowStringPool* pool);
void NvFlowStringPoolReset(NvFlowStringPool* pool);
char* NvFlowStringPrint(NvFlowStringPool* pool, const char* format, ...);
char* NvFlowStringPrintV(NvFlowStringPool* pool, const char* format, va_list args);
/// ************************** Macro utils *********************************
#define NV_FLOW_CSTR(X) NvFlowStringView{X, sizeof(X) - 1}
#define NvFlowStringToInteger(input) atoi(input)
#define NvFlowStringMakeView(input) NvFlowStringView{ input, (int)strlen(input) }
/// ************************** Char Utils *********************************************
NV_FLOW_INLINE int NvFlowCharIsWhiteSpace(char c)
{
return c == ' ' || c == '\n' || c == '\r' || c == '\t' || c == '\f' || c == '\v';
}
NV_FLOW_INLINE int NvFlowCharIsWhiteSpaceButNotNewline(char c)
{
return c == ' ' || c == '\r' || c == '\t' || c == '\f' || c == '\v';
}
NV_FLOW_INLINE int NvFlowCharIsAlphaUnderscore(char c)
{
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c == '_');
}
NV_FLOW_INLINE int NvFlowCharIsAlphaNum(char c)
{
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '_');
}
NV_FLOW_INLINE int NvFlowCharIsNum(char c)
{
return (c >= '0' && c <= '9');
}
/// ************************** String Utils *********************************************
NV_FLOW_INLINE NvFlowUint64 NvFlowStringLength(const char* a)
{
if (!a)
{
return 0;
}
    NvFlowUint64 idx = 0;
while (a[idx])
{
idx++;
}
return idx;
}
NV_FLOW_INLINE int NvFlowStringCompare(const char* a, const char* b)
{
a = a ? a : "\0";
b = b ? b : "\0";
    NvFlowUint64 idx = 0;
while (a[idx] || b[idx])
{
if (a[idx] != b[idx])
{
return a[idx] < b[idx] ? -1 : +1;
}
idx++;
}
return 0;
}
NV_FLOW_INLINE char* NvFlowStringFromView(NvFlowStringPool* pool, const char* data, NvFlowUint64 size)
{
char* str = NvFlowStringPoolAllocate(pool, size);
for (NvFlowUint64 i = 0; i < size; i++)
{
str[i] = data[i];
}
return str;
}
NV_FLOW_INLINE void NvFlowStringSplitDelimFirst(NvFlowStringPool* pool, char** pFirst, char** pSecond, const char* input_data, char delim)
{
    NvFlowUint64 input_size = NvFlowStringLength(input_data);
    NvFlowUint64 slashIdx = 0;
    while (slashIdx < input_size)
    {
        if (input_data[slashIdx] == delim)
        {
            break;
        }
        slashIdx++;
    }
    // if delim is never found, slashIdx == input_size; clamp here so the
    // second size does not underflow to a huge unsigned value
    if (slashIdx >= input_size)
    {
        *pFirst = NvFlowStringFromView(pool, input_data, input_size);
        *pSecond = NvFlowStringFromView(pool, input_data + input_size, 0);
        return;
    }
    // the first string intentionally includes the delimiter character
    *pFirst = NvFlowStringFromView(pool, input_data, slashIdx + 1);
    *pSecond = NvFlowStringFromView(pool, input_data + slashIdx + 1, input_size - slashIdx - 1);
}
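// As above, the first output keeps the trailing delimiter. If delim is never
// found, the unsigned wrap of slashIdx leaves *pFirst empty and *pSecond equal
// to the whole input (an observation about the code below, not a documented
// guarantee).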
NV_FLOW_INLINE void NvFlowStringSplitDelimLast(NvFlowStringPool* pool, char** pFirst, char** pSecond, const char* input_data, char delim)
{
NvFlowUint64 input_size = NvFlowStringLength(input_data);
NvFlowUint64 slashIdx = input_size - 1;
while (slashIdx < input_size)
{
if (input_data[slashIdx] == delim)
{
break;
}
slashIdx--;
}
*pFirst = NvFlowStringFromView(pool, input_data, slashIdx + 1);
*pSecond = NvFlowStringFromView(pool, input_data + slashIdx + 1, input_size - slashIdx - 1);
}
NV_FLOW_INLINE char* NvFlowStringDup(NvFlowStringPool* pool, const char* name)
{
NvFlowUint64 name_size = NvFlowStringLength(name);
return NvFlowStringFromView(pool, name, name_size);
}
NV_FLOW_INLINE char* NvFlowStringConcat(NvFlowStringPool* pool, const char* dir_data, const char* filename_data)
{
NvFlowUint64 dir_size = NvFlowStringLength(dir_data);
NvFlowUint64 filename_size = NvFlowStringLength(filename_data);
char* s_data = NvFlowStringPoolAllocate(pool, dir_size + filename_size);
for (NvFlowUint64 i = 0; i < dir_size; i++)
{
s_data[i] = dir_data[i];
}
for (NvFlowUint64 i = 0; i < filename_size; i++)
{
s_data[i + dir_size] = filename_data[i];
}
return s_data;
}
NV_FLOW_INLINE char* NvFlowStringConcatN(NvFlowStringPool* pool, const char** views, NvFlowUint64 numViews)
{
NvFlowUint64 totalSize = 0;
for (NvFlowUint64 viewIdx = 0; viewIdx < numViews; viewIdx++)
{
totalSize += NvFlowStringLength(views[viewIdx]);
}
char* s_data = NvFlowStringPoolAllocate(pool, totalSize);
NvFlowUint64 dstOffset = 0;
for (NvFlowUint64 viewIdx = 0; viewIdx < numViews; viewIdx++)
{
const char* view_data = views[viewIdx];
NvFlowUint64 view_size = NvFlowStringLength(view_data);
for (NvFlowUint64 i = 0; i < view_size; i++)
{
s_data[i + dstOffset] = view_data[i];
}
dstOffset += view_size;
}
return s_data;
}
NV_FLOW_INLINE char* NvFlowStringConcat3(NvFlowStringPool* pool, const char* a, const char* b, const char* c)
{
const char* list[3u] = { a, b, c };
return NvFlowStringConcatN(pool, list, 3u);
}
NV_FLOW_INLINE char* NvFlowStringConcat4(NvFlowStringPool* pool, const char* a, const char* b, const char* c, const char* d)
{
const char* list[4u] = { a, b, c, d };
return NvFlowStringConcatN(pool, list, 4u);
}
NV_FLOW_INLINE char* NvFlowStringTrimEnd(NvFlowStringPool* pool, const char* a_data, char trimChar)
{
NvFlowUint64 a_size = NvFlowStringLength(a_data);
while (a_size > 0 && a_data[a_size - 1] == trimChar)
{
a_size--;
}
return NvFlowStringFromView(pool, a_data, a_size);
}
NV_FLOW_INLINE char* NvFlowStringTrimBeginAndEnd(NvFlowStringPool* pool, const char* a_data, char trimChar)
{
NvFlowUint64 a_size = NvFlowStringLength(a_data);
while (a_size > 0 && a_data[0] == trimChar)
{
a_data++;
a_size--;
}
while (a_size > 0 && a_data[a_size - 1] == trimChar)
{
a_size--;
}
return NvFlowStringFromView(pool, a_data, a_size);
}
/// ************************** File Utils *********************************************
const char* NvFlowTextFileLoad(NvFlowStringPool* pool, const char* filename);
void NvFlowTextFileStore(const char* text, const char* filename);
NvFlowBool32 NvFlowTextFileTestOpen(const char* filename);
void NvFlowTextFileRemove(const char* name);
void NvFlowTextFileRename(const char* oldName, const char* newName);
NvFlowBool32 NvFlowTextFileDiffAndWriteIfModified(const char* filenameDst, const char* filenameTmp);
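// Minimal usage sketch (hypothetical, not part of the original header):
//
//   NvFlowStringPool* pool = NvFlowStringPoolCreate();
//   char* path = NvFlowStringConcat3(pool, "shaders", "/", "emitter.hlsl");
//   char* label = NvFlowStringPrint(pool, "pass_%d", 3);
//   // ...the pool owns every allocation; release them all at once...
//   NvFlowStringPoolDestroy(pool);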
// NVIDIA-Omniverse/PhysX/flow/shared/NvFlowMath.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#pragma once
#include "NvFlowTypes.h"
#include <math.h>
namespace NvFlowMath
{
static const float pi = 3.141592654f;
NV_FLOW_INLINE NvFlowFloat4 operator+(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x + rhs.x;
ret.y = lhs.y + rhs.y;
ret.z = lhs.z + rhs.z;
ret.w = lhs.w + rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator-(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x - rhs.x;
ret.y = lhs.y - rhs.y;
ret.z = lhs.z - rhs.z;
ret.w = lhs.w - rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator*(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x * rhs.x;
ret.y = lhs.y * rhs.y;
ret.z = lhs.z * rhs.z;
ret.w = lhs.w * rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator/(const NvFlowFloat4& lhs, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = lhs.x / rhs.x;
ret.y = lhs.y / rhs.y;
ret.z = lhs.z / rhs.z;
ret.w = lhs.w / rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator*(float v, const NvFlowFloat4& rhs)
{
NvFlowFloat4 ret;
ret.x = v * rhs.x;
ret.y = v * rhs.y;
ret.z = v * rhs.z;
ret.w = v * rhs.w;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 operator*(const NvFlowFloat4& lhs, float v)
{
NvFlowFloat4 ret;
ret.x = lhs.x * v;
ret.y = lhs.y * v;
ret.z = lhs.z * v;
ret.w = lhs.w * v;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatX(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.x, a.x, a.x, a.x };
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatY(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.y, a.y, a.y, a.y };
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatZ(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.z, a.z, a.z, a.z };
}
NV_FLOW_INLINE NvFlowFloat4 vectorSplatW(const NvFlowFloat4& a)
{
return NvFlowFloat4{ a.w, a.w, a.w, a.w };
}
NV_FLOW_INLINE NvFlowFloat4 vector3Normalize(const NvFlowFloat4& v)
{
float magn = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
if (magn > 0.f)
{
magn = 1.f / magn;
}
return NvFlowFloat4{ v.x * magn, v.y * magn, v.z * magn, v.w * magn };
}
NV_FLOW_INLINE NvFlowFloat4 vectorPerspectiveDivide(const NvFlowFloat4& v)
{
return v / vectorSplatW(v);
}
NV_FLOW_INLINE NvFlowFloat4 matrixMultiplyRow(const NvFlowFloat4x4& b, const NvFlowFloat4& r)
{
NvFlowFloat4 result;
result.x = b.x.x * r.x + b.y.x * r.y + b.z.x * r.z + b.w.x * r.w;
result.y = b.x.y * r.x + b.y.y * r.y + b.z.y * r.z + b.w.y * r.w;
result.z = b.x.z * r.x + b.y.z * r.y + b.z.z * r.z + b.w.z * r.w;
result.w = b.x.w * r.x + b.y.w * r.y + b.z.w * r.z + b.w.w * r.w;
return result;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixMultiply(const NvFlowFloat4x4& a, const NvFlowFloat4x4& b)
{
NvFlowFloat4x4 result;
result.x = matrixMultiplyRow(b, a.x);
result.y = matrixMultiplyRow(b, a.y);
result.z = matrixMultiplyRow(b, a.z);
result.w = matrixMultiplyRow(b, a.w);
return result;
}
NV_FLOW_INLINE NvFlowFloat4 matrixTransposeRow(const NvFlowFloat4x4& a, unsigned int offset)
{
NvFlowFloat4 result;
result.x = *((&a.x.x) + offset);
result.y = *((&a.y.x) + offset);
result.z = *((&a.z.x) + offset);
result.w = *((&a.w.x) + offset);
return result;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixTranspose(const NvFlowFloat4x4& a)
{
NvFlowFloat4x4 result;
result.x = matrixTransposeRow(a, 0u);
result.y = matrixTransposeRow(a, 1u);
result.z = matrixTransposeRow(a, 2u);
result.w = matrixTransposeRow(a, 3u);
return result;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixInverse(const NvFlowFloat4x4& a)
{
const NvFlowFloat4x4& m = a;
float f = (float(1.0) /
(m.x.x * m.y.y * m.z.z * m.w.w +
m.x.x * m.y.z * m.z.w * m.w.y +
m.x.x * m.y.w * m.z.y * m.w.z +
m.x.y * m.y.x * m.z.w * m.w.z +
m.x.y * m.y.z * m.z.x * m.w.w +
m.x.y * m.y.w * m.z.z * m.w.x +
m.x.z * m.y.x * m.z.y * m.w.w +
m.x.z * m.y.y * m.z.w * m.w.x +
m.x.z * m.y.w * m.z.x * m.w.y +
m.x.w * m.y.x * m.z.z * m.w.y +
m.x.w * m.y.y * m.z.x * m.w.z +
m.x.w * m.y.z * m.z.y * m.w.x +
-m.x.x * m.y.y * m.z.w * m.w.z +
-m.x.x * m.y.z * m.z.y * m.w.w +
-m.x.x * m.y.w * m.z.z * m.w.y +
-m.x.y * m.y.x * m.z.z * m.w.w +
-m.x.y * m.y.z * m.z.w * m.w.x +
-m.x.y * m.y.w * m.z.x * m.w.z +
-m.x.z * m.y.x * m.z.w * m.w.y +
-m.x.z * m.y.y * m.z.x * m.w.w +
-m.x.z * m.y.w * m.z.y * m.w.x +
-m.x.w * m.y.x * m.z.y * m.w.z +
-m.x.w * m.y.y * m.z.z * m.w.x +
-m.x.w * m.y.z * m.z.x * m.w.y));
float a00 = (m.y.y * m.z.z * m.w.w +
m.y.z * m.z.w * m.w.y +
m.y.w * m.z.y * m.w.z +
-m.y.y * m.z.w * m.w.z +
-m.y.z * m.z.y * m.w.w +
-m.y.w * m.z.z * m.w.y);
float a10 = (m.x.y * m.z.w * m.w.z +
m.x.z * m.z.y * m.w.w +
m.x.w * m.z.z * m.w.y +
-m.x.y * m.z.z * m.w.w +
-m.x.z * m.z.w * m.w.y +
-m.x.w * m.z.y * m.w.z);
float a20 = (m.x.y * m.y.z * m.w.w +
m.x.z * m.y.w * m.w.y +
m.x.w * m.y.y * m.w.z +
-m.x.y * m.y.w * m.w.z +
-m.x.z * m.y.y * m.w.w +
-m.x.w * m.y.z * m.w.y);
float a30 = (m.x.y * m.y.w * m.z.z +
m.x.z * m.y.y * m.z.w +
m.x.w * m.y.z * m.z.y +
-m.x.y * m.y.z * m.z.w +
-m.x.z * m.y.w * m.z.y +
-m.x.w * m.y.y * m.z.z);
float a01 = (m.y.x * m.z.w * m.w.z +
m.y.z * m.z.x * m.w.w +
m.y.w * m.z.z * m.w.x +
-m.y.x * m.z.z * m.w.w +
-m.y.z * m.z.w * m.w.x +
-m.y.w * m.z.x * m.w.z);
float a11 = (m.x.x * m.z.z * m.w.w +
m.x.z * m.z.w * m.w.x +
m.x.w * m.z.x * m.w.z +
-m.x.x * m.z.w * m.w.z +
-m.x.z * m.z.x * m.w.w +
-m.x.w * m.z.z * m.w.x);
float a21 = (m.x.x * m.y.w * m.w.z +
m.x.z * m.y.x * m.w.w +
m.x.w * m.y.z * m.w.x +
-m.x.x * m.y.z * m.w.w +
-m.x.z * m.y.w * m.w.x +
-m.x.w * m.y.x * m.w.z);
float a31 = (m.x.x * m.y.z * m.z.w +
m.x.z * m.y.w * m.z.x +
m.x.w * m.y.x * m.z.z +
-m.x.x * m.y.w * m.z.z +
-m.x.z * m.y.x * m.z.w +
-m.x.w * m.y.z * m.z.x);
float a02 = (m.y.x * m.z.y * m.w.w +
m.y.y * m.z.w * m.w.x +
m.y.w * m.z.x * m.w.y +
-m.y.x * m.z.w * m.w.y +
-m.y.y * m.z.x * m.w.w +
-m.y.w * m.z.y * m.w.x);
float a12 = (-m.x.x * m.z.y * m.w.w +
-m.x.y * m.z.w * m.w.x +
-m.x.w * m.z.x * m.w.y +
m.x.x * m.z.w * m.w.y +
m.x.y * m.z.x * m.w.w +
m.x.w * m.z.y * m.w.x);
float a22 = (m.x.x * m.y.y * m.w.w +
m.x.y * m.y.w * m.w.x +
m.x.w * m.y.x * m.w.y +
-m.x.x * m.y.w * m.w.y +
-m.x.y * m.y.x * m.w.w +
-m.x.w * m.y.y * m.w.x);
float a32 = (m.x.x * m.y.w * m.z.y +
m.x.y * m.y.x * m.z.w +
m.x.w * m.y.y * m.z.x +
-m.x.y * m.y.w * m.z.x +
-m.x.w * m.y.x * m.z.y +
-m.x.x * m.y.y * m.z.w);
float a03 = (m.y.x * m.z.z * m.w.y +
m.y.y * m.z.x * m.w.z +
m.y.z * m.z.y * m.w.x +
-m.y.x * m.z.y * m.w.z +
-m.y.y * m.z.z * m.w.x +
-m.y.z * m.z.x * m.w.y);
float a13 = (m.x.x * m.z.y * m.w.z +
m.x.y * m.z.z * m.w.x +
m.x.z * m.z.x * m.w.y +
-m.x.x * m.z.z * m.w.y +
-m.x.y * m.z.x * m.w.z +
-m.x.z * m.z.y * m.w.x);
float a23 = (m.x.x * m.y.z * m.w.y +
m.x.y * m.y.x * m.w.z +
m.x.z * m.y.y * m.w.x +
-m.x.x * m.y.y * m.w.z +
-m.x.y * m.y.z * m.w.x +
-m.x.z * m.y.x * m.w.y);
float a33 = (m.x.x * m.y.y * m.z.z +
m.x.y * m.y.z * m.z.x +
m.x.z * m.y.x * m.z.y +
-m.x.x * m.y.z * m.z.y +
-m.x.y * m.y.x * m.z.z +
-m.x.z * m.y.y * m.z.x);
return NvFlowFloat4x4{
a00*f, a10*f, a20*f, a30*f,
a01*f, a11*f, a21*f, a31*f,
a02*f, a12*f, a22*f, a32*f,
a03*f, a13*f, a23*f, a33*f };
}
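// Note: the inverse above expands the adjugate and determinant directly
// (Cramer's rule) with no singularity guard; a non-invertible input divides
// by a zero determinant.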
NV_FLOW_INLINE NvFlowFloat4x4 matrixIdentity()
{
return NvFlowFloat4x4{
{ 1.f, 0.f, 0.f, 0.f },
{ 0.f, 1.f, 0.f, 0.f },
{ 0.f, 0.f, 1.f, 0.f },
{ 0.f, 0.f, 0.f, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixScaling(float x, float y, float z)
{
return NvFlowFloat4x4{
x, 0.f, 0.f, 0.f,
0.f, y, 0.f, 0.f,
0.f, 0.f, z, 0.f,
0.f, 0.f, 0.f, 1.f
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixTranslation(float x, float y, float z)
{
return NvFlowFloat4x4{
1.f, 0.f, 0.f, 0.f,
0.f, 1.f, 0.f, 0.f,
0.f, 0.f, 1.f, 0.f,
x, y, z, 1.f
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixPerspectiveFovRH(float fovAngleY, float aspectRatio, float nearZ, float farZ)
{
float sinfov = sinf(0.5f * fovAngleY);
float cosfov = cosf(0.5f * fovAngleY);
float height = cosfov / sinfov;
float width = height / aspectRatio;
float frange = farZ / (nearZ - farZ);
if (nearZ == INFINITY)
{
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, -1.f },
{ 0.f, 0.f, farZ, 0.f }
};
}
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, -1.f },
{ 0.f, 0.f, frange * nearZ, 0.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixPerspectiveFovLH(float fovAngleY, float aspectRatio, float nearZ, float farZ)
{
float sinfov = sinf(0.5f * fovAngleY);
float cosfov = cosf(0.5f * fovAngleY);
float height = cosfov / sinfov;
float width = height / aspectRatio;
float frange = farZ / (farZ - nearZ);
if (nearZ == INFINITY)
{
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, 1.f },
{ 0.f, 0.f, farZ, 0.f }
};
}
return NvFlowFloat4x4{
{ width, 0.f, 0.f, 0.f },
{ 0.f, height, 0.f, 0.f },
{ 0.f, 0.f, frange, 1.f },
{ 0.f, 0.f, -frange * nearZ, 0.f }
};
}
NV_FLOW_INLINE NvFlowBool32 matrixPerspectiveIsRH(const NvFlowFloat4x4& m)
{
return m.z.w < 0.f ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}
NV_FLOW_INLINE NvFlowBool32 matrixPerspectiveIsReverseZ(const NvFlowFloat4x4& m)
{
    // recover the near, far, and singular (w == 0) view depths from the
    // projection; with reverse-Z, the far plane lies closer to the singularity
    float nearZ = -m.w.z / m.z.z;
    float farZ = (m.w.w - m.w.z) / (m.z.z - m.z.w);
    float singZ = -m.w.w / m.z.w;
    return fabsf(farZ - singZ) < fabsf(nearZ - singZ) ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixOrthographicLH(float width, float height, float nearZ, float farZ)
{
float frange = 1.f / (farZ - nearZ);
return NvFlowFloat4x4{
{ 2.f / width, 0.f, 0.f, 0.f },
{ 0.f, 2.f / height, 0.f, 0.f },
{ 0.f, 0.f, frange, 0.f },
{ 0.f, 0.f, -frange * nearZ, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixOrthographicRH(float width, float height, float nearZ, float farZ)
{
float frange = 1.f / (nearZ - farZ);
return NvFlowFloat4x4{
{ 2.f / width, 0.f, 0.f, 0.f },
{ 0.f, 2.f / height, 0.f, 0.f },
{ 0.f, 0.f, frange, 0.f },
{ 0.f, 0.f, frange * nearZ, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationNormal(NvFlowFloat4 normal, float angle)
{
float sinAngle = sinf(angle);
float cosAngle = cosf(angle);
NvFlowFloat4 a = { sinAngle, cosAngle, 1.f - cosAngle, 0.f };
NvFlowFloat4 c2 = vectorSplatZ(a);
NvFlowFloat4 c1 = vectorSplatY(a);
NvFlowFloat4 c0 = vectorSplatX(a);
NvFlowFloat4 n0 = { normal.y, normal.z, normal.x, normal.w };
NvFlowFloat4 n1 = { normal.z, normal.x, normal.y, normal.w };
NvFlowFloat4 v0 = c2 * n0;
v0 = v0 * n1;
NvFlowFloat4 r0 = c2 * normal;
r0 = (r0 * normal) + c1;
NvFlowFloat4 r1 = (c0 * normal) + v0;
NvFlowFloat4 r2 = v0 - (c0 * normal);
v0 = NvFlowFloat4{ r0.x, r0.y, r0.z, a.w };
NvFlowFloat4 v1 = { r1.z, r2.y, r2.z, r1.x };
NvFlowFloat4 v2 = { r1.y, r2.x, r1.y, r2.x };
return NvFlowFloat4x4{
{ v0.x, v1.x, v1.y, v0.w },
{ v1.z, v0.y, v1.w, v0.w },
{ v2.x, v2.y, v0.z, v0.w },
{ 0.f, 0.f, 0.f, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationAxis(NvFlowFloat4 axis, float angle)
{
NvFlowFloat4 normal = vector3Normalize(axis);
return matrixRotationNormal(normal, angle);
}
NV_FLOW_INLINE NvFlowFloat4 quaterionRotationRollPitchYawFromVector(NvFlowFloat4 angles)
{
NvFlowFloat4 sign = { 1.f, -1.f, -1.f, 1.f };
NvFlowFloat4 halfAngles = angles * NvFlowFloat4{ 0.5f, 0.5f, 0.5f, 0.5f };
NvFlowFloat4 sinAngle = NvFlowFloat4{ sinf(halfAngles.x), sinf(halfAngles.y), sinf(halfAngles.z), sinf(halfAngles.w) };
NvFlowFloat4 cosAngle = NvFlowFloat4{ cosf(halfAngles.x), cosf(halfAngles.y), cosf(halfAngles.z), cosf(halfAngles.w) };
NvFlowFloat4 p0 = { sinAngle.x, cosAngle.x, cosAngle.x, cosAngle.x };
NvFlowFloat4 y0 = { cosAngle.y, sinAngle.y, cosAngle.y, cosAngle.y };
NvFlowFloat4 r0 = { cosAngle.z, cosAngle.z, sinAngle.z, cosAngle.z };
NvFlowFloat4 p1 = { cosAngle.x, sinAngle.x, sinAngle.x, sinAngle.x };
NvFlowFloat4 y1 = { sinAngle.y, cosAngle.y, sinAngle.y, sinAngle.y };
NvFlowFloat4 r1 = { sinAngle.z, sinAngle.z, cosAngle.z, sinAngle.z };
NvFlowFloat4 q1 = p1 * sign;
NvFlowFloat4 q0 = p0 * y0;
q1 = q1 * y1;
q0 = q0 * r0;
NvFlowFloat4 q = (q1 * r1) + q0;
return q;
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationQuaternion(NvFlowFloat4 quaternion)
{
NvFlowFloat4 constant1110 = { 1.f, 1.f, 1.f, 0.f };
NvFlowFloat4 q0 = quaternion + quaternion;
NvFlowFloat4 q1 = quaternion * q0;
NvFlowFloat4 v0 = { q1.y, q1.x, q1.x, constant1110.w };
NvFlowFloat4 v1 = { q1.z, q1.z, q1.y, constant1110.w };
NvFlowFloat4 r0 = constant1110 - v0;
r0 = r0 - v1;
v0 = NvFlowFloat4{ quaternion.x, quaternion.x, quaternion.y, quaternion.w };
v1 = NvFlowFloat4{ q0.z, q0.y, q0.z, q0.w };
v0 = v0 * v1;
v1 = vectorSplatW(quaternion);
NvFlowFloat4 v2 = { q0.y, q0.z, q0.x, q0.w };
v1 = v1 * v2;
NvFlowFloat4 r1 = v0 + v1;
NvFlowFloat4 r2 = v0 - v1;
v0 = NvFlowFloat4{ r1.y, r2.x, r2.y, r1.z };
v1 = NvFlowFloat4{ r1.x, r2.z, r1.x, r2.z };
return NvFlowFloat4x4{
{ r0.x, v0.x, v0.y, r0.w },
{ v0.z, r0.y, v0.w, r0.w },
{ v1.x, v1.y, r0.z, r0.w },
{ 0.f, 0.f, 0.f, 1.f }
};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixRotationRollPitchYaw(float pitch, float yaw, float roll)
{
NvFlowFloat4 angles = { pitch, yaw, roll, 0.f };
NvFlowFloat4 q = quaterionRotationRollPitchYawFromVector(angles);
return matrixRotationQuaternion(q);
}
NV_FLOW_INLINE NvFlowFloat4 vectorLerp(NvFlowFloat4 a, NvFlowFloat4 b, float t)
{
return NvFlowFloat4{
(1.f - t) * a.x + t * b.x,
(1.f - t) * a.y + t * b.y,
(1.f - t) * a.z + t * b.z,
(1.f - t) * a.w + t * b.w
};
}
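// Note: only the translation row is truly interpolated below; the upper 3x3
// (rotation/scale) snaps from a to b at t == 0.5.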
NV_FLOW_INLINE NvFlowFloat4x4 matrixInterpolateTranslation(const NvFlowFloat4x4& a, const NvFlowFloat4x4& b, float t)
{
NvFlowFloat4x4 ret;
if (t < 0.5f)
{
ret = a;
}
else
{
ret = b;
}
ret.w.x = (1.f - t) * a.w.x + t * b.w.x;
ret.w.y = (1.f - t) * a.w.y + t * b.w.y;
ret.w.z = (1.f - t) * a.w.z + t * b.w.z;
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 vector4Normalize(const NvFlowFloat4& v)
{
float magn = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z + v.w * v.w);
if (magn > 0.f)
{
magn = 1.f / magn;
}
return NvFlowFloat4{v.x * magn, v.y * magn, v.z * magn, v.w * magn};
}
NV_FLOW_INLINE NvFlowFloat4x4 matrixNormalize(const NvFlowFloat4x4& a)
{
NvFlowFloat4x4 temp = a;
temp.x.w = 0.f;
temp.y.w = 0.f;
temp.z.w = 0.f;
temp.w.w = 1.f;
temp.w.x = 0.f;
temp.w.y = 0.f;
temp.w.z = 0.f;
NvFlowFloat4x4 ret = temp;
ret.x = vector4Normalize(ret.x);
ret.y = vector4Normalize(ret.y);
ret.z = vector4Normalize(ret.z);
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 vector4Transform(const NvFlowFloat4& x, const NvFlowFloat4x4& A)
{
return NvFlowFloat4{
A.x.x * x.x + A.y.x * x.y + A.z.x * x.z + A.w.x * x.w,
A.x.y * x.x + A.y.y * x.y + A.z.y * x.z + A.w.y * x.w,
A.x.z * x.x + A.y.z * x.y + A.z.z * x.z + A.w.z * x.w,
A.x.w * x.x + A.y.w * x.y + A.z.w * x.z + A.w.w * x.w
};
}
NV_FLOW_INLINE NvFlowFloat4 vectorMin(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{
a.x < b.x ? a.x : b.x,
a.y < b.y ? a.y : b.y,
a.z < b.z ? a.z : b.z,
a.w < b.w ? a.w : b.w
};
}
NV_FLOW_INLINE NvFlowFloat4 vectorMax(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{
a.x > b.x ? a.x : b.x,
a.y > b.y ? a.y : b.y,
a.z > b.z ? a.z : b.z,
a.w > b.w ? a.w : b.w
};
}
NV_FLOW_INLINE NvFlowFloat4 vectorMultiply(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w};
}
NV_FLOW_INLINE NvFlowFloat4 vectorFloor(const NvFlowFloat4& a)
{
return NvFlowFloat4{ floorf(a.x), floorf(a.y), floorf(a.z), floorf(a.w) };
}
NV_FLOW_INLINE NvFlowFloat4 vectorCeiling(const NvFlowFloat4& a)
{
return NvFlowFloat4{ceilf(a.x), ceilf(a.y), ceilf(a.z), ceilf(a.w)};
}
NV_FLOW_INLINE NvFlowFloat4 vector3Dot(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
float magn = a.x * b.x + a.y * b.y + a.z * b.z;
return NvFlowFloat4{ magn, magn, magn, magn };
}
NV_FLOW_INLINE NvFlowFloat4 vector4Dot(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
float magn = a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
return NvFlowFloat4{ magn, magn, magn, magn };
}
NV_FLOW_INLINE NvFlowFloat4 vector3Cross(const NvFlowFloat4& a, const NvFlowFloat4& b)
{
return NvFlowFloat4{
a.y * b.z - a.z * b.y,
a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x,
0.f
};
}
NV_FLOW_INLINE NvFlowFloat4 vector3Length(const NvFlowFloat4& a)
{
float magn = sqrtf(a.x * a.x + a.y * a.y + a.z * a.z);
return NvFlowFloat4{ magn, magn, magn, magn };
}
NV_FLOW_INLINE NvFlowFloat4 make_float4(NvFlowFloat3 a, float b)
{
return NvFlowFloat4{ a.x, a.y, a.z, b };
}
NV_FLOW_INLINE NvFlowFloat3 float4_to_float3(NvFlowFloat4 a)
{
return NvFlowFloat3{ a.x, a.y, a.z };
}
NV_FLOW_INLINE NvFlowUint log2ui(NvFlowUint val)
{
NvFlowUint ret = 0;
for (NvFlowUint i = 0; i < 32; i++)
{
if ((1u << i) >= val)
{
ret = i;
break;
}
}
return ret;
}
NV_FLOW_INLINE NvFlowFloat4 computeRayOrigin(const NvFlowFloat4x4& viewInv, const NvFlowFloat4x4& projectionInv, NvFlowFloat2 ndc, float nearZ)
{
NvFlowFloat4 viewPos = vector4Transform(NvFlowFloat4{ ndc.x, ndc.y, nearZ, 1.f }, projectionInv);
return vectorPerspectiveDivide(vector4Transform(viewPos, viewInv));
}
NV_FLOW_INLINE NvFlowFloat4 computeRayDir(const NvFlowFloat4x4& viewInv, const NvFlowFloat4x4& projectionInv, NvFlowFloat2 ndc, float nearZ)
{
NvFlowFloat4x4 projectionInvT = matrixTranspose(projectionInv);
NvFlowFloat4 ndc_ext = NvFlowFloat4{ ndc.x, ndc.y, 0.f, 1.f };
NvFlowFloat4 dir = {
-projectionInvT.w.z * vector4Dot(projectionInvT.x, ndc_ext).x,
-projectionInvT.w.z * vector4Dot(projectionInvT.y, ndc_ext).x,
-projectionInvT.w.z * vector4Dot(projectionInvT.z, ndc_ext).x +
projectionInvT.z.z * vector4Dot(projectionInvT.w, ndc_ext).x,
0.f
};
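    // when the near plane maps to NDC z == 1 (reverse-Z convention), the
    // expression above points backwards, so flip it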
if (nearZ > 0.5f)
{
dir = NvFlowFloat4{ 0.f, 0.f, 0.f, 0.f } - dir;
}
return vector4Transform(dir, viewInv);
}
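// (A reading of the code above: the direction is a closed-form expansion of
// unproject(far) - unproject(near) at the given NDC point, up to positive
// scale, built from rows of the transposed inverse projection.)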
struct FrustumRays
{
NvFlowFloat4 rayOrigin00;
NvFlowFloat4 rayOrigin10;
NvFlowFloat4 rayOrigin01;
NvFlowFloat4 rayOrigin11;
NvFlowFloat4 rayDir00;
NvFlowFloat4 rayDir10;
NvFlowFloat4 rayDir01;
NvFlowFloat4 rayDir11;
float nearZ;
NvFlowBool32 isReverseZ;
};
NV_FLOW_INLINE void computeFrustumRays(FrustumRays* ptr, const NvFlowFloat4x4& viewInv, const NvFlowFloat4x4& projectionInv)
{
NvFlowFloat4 nearPoint = vector4Transform(NvFlowFloat4{ 0.f, 0.f, 0.f, 1.f }, projectionInv);
NvFlowFloat4 farPoint = vector4Transform(NvFlowFloat4{ 0.f, 0.f, 1.f, 1.f }, projectionInv);
nearPoint = nearPoint / vectorSplatW(nearPoint);
farPoint = farPoint / vectorSplatW(farPoint);
float nearZ = fabsf(nearPoint.z) < fabsf(farPoint.z) ? 0.f : 1.f;
ptr->rayOrigin00 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ -1.f, +1.f }, nearZ);
ptr->rayOrigin10 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ +1.f, +1.f }, nearZ);
ptr->rayOrigin01 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ -1.f, -1.f }, nearZ);
ptr->rayOrigin11 = computeRayOrigin(viewInv, projectionInv, NvFlowFloat2{ +1.f, -1.f }, nearZ);
ptr->rayDir00 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ -1.f, +1.f }, nearZ);
ptr->rayDir10 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ +1.f, +1.f }, nearZ);
ptr->rayDir01 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ -1.f, -1.f }, nearZ);
ptr->rayDir11 = computeRayDir(viewInv, projectionInv, NvFlowFloat2{ +1.f, -1.f }, nearZ);
ptr->nearZ = nearZ;
ptr->isReverseZ = fabsf(nearPoint.z) >= fabsf(farPoint.z);
}
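// Minimal usage sketch (hypothetical):
//
//   FrustumRays rays = {};
//   computeFrustumRays(&rays, viewInv, projectionInv);
//   // per-pixel rays typically come from bilinearly blending the four corner
//   // origins and directions at the pixel's normalized screen coordinate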
}
// NVIDIA-Omniverse/PhysX/flow/shared/NvFlowPreprocessor.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2014-2022 NVIDIA Corporation. All rights reserved.
#include "NvFlowPreprocessor.h"
#include "NvFlowArray.h"
#include <stdlib.h>
#include <stdio.h>
struct NvFlowPreprocessorItem
{
NvFlowPreprocessorFunction function;
NvFlowPreprocessorToken* tokens_data;
NvFlowUint64 tokens_size;
NvFlowBool32 isEnabled;
};
struct NvFlowPreprocessor
{
NvFlowStringPool* stringPool = nullptr;
int currentLevel = 0;
NvFlowPreprocessorMode mode = eNvFlowPreprocessorMode_default;
NvFlowArray<NvFlowPreprocessorItem, 16u> items;
NvFlowArray<const char*> stringStack;
NvFlowArray<const char*, 8u> tempStrViews;
NvFlowArray<NvFlowPreprocessorToken> tempTokens;
};
NvFlowPreprocessor* NvFlowPreprocessorCreate(NvFlowStringPool* pool)
{
auto ptr = new NvFlowPreprocessor();
ptr->stringPool = pool;
ptr->currentLevel = 0;
return ptr;
}
void NvFlowPreprocessorDestroy(NvFlowPreprocessor* ptr)
{
delete ptr;
}
void NvFlowPreprocessorReset(NvFlowPreprocessor* ptr)
{
ptr->currentLevel = 0;
ptr->mode = eNvFlowPreprocessorMode_default;
ptr->items.size = 0u;
ptr->stringStack.size = 0u;
ptr->tempStrViews.size = 0u;
ptr->tempTokens.size = 0u;
}
void NvFlowPreprocessorSetMode(NvFlowPreprocessor* ptr, NvFlowPreprocessorMode mode)
{
ptr->mode = mode;
}
NvFlowPreprocessorMode NvFlowPreprocessorGetMode(NvFlowPreprocessor* ptr)
{
return ptr->mode;
}
NvFlowStringPool* NvFlowPreprocessorStringPool(NvFlowPreprocessor* ptr)
{
return ptr->stringPool;
}
void NvFlowPreprocessor_addItem(NvFlowPreprocessor* ptr, const NvFlowPreprocessorFunction* pFunction)
{
NvFlowPreprocessorItem item = {};
item.function = *pFunction;
item.isEnabled = NV_FLOW_TRUE;
const char* tokenStr = item.function.name;
if (item.function.type == eNvFlowPreprocessorType_function)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, tokenStr, "(");
}
else if (item.function.type == eNvFlowPreprocessorType_index)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, tokenStr, "[");
}
else if (item.function.type == eNvFlowPreprocessorType_attribute)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, "[", tokenStr);
}
else if (item.function.type == eNvFlowPreprocessorType_line)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, "#", tokenStr);
}
else if (item.function.type == eNvFlowPreprocessorType_templateInstance)
{
tokenStr = NvFlowStringConcat(ptr->stringPool, tokenStr, "<");
}
else
{
tokenStr = NvFlowStringDup(ptr->stringPool, tokenStr);
}
NvFlowPreprocessorTokenize(ptr, tokenStr, &item.tokens_size, &item.tokens_data);
ptr->items.pushBack(item);
}
char* NvFlowPreprocessor_substituteConstant(NvFlowPreprocessor* ptr, void* userdata, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens)
{
const char* value = (const char*)userdata;
return NvFlowStringDup(ptr->stringPool, value);
}
void NvFlowPreprocessorAddConstants(NvFlowPreprocessor* ptr, NvFlowUint64 numConstants, const NvFlowPreprocessorConstant* constants)
{
for (NvFlowUint64 idx = 0u; idx < numConstants; idx++)
{
char* valueStr = NvFlowStringDup(ptr->stringPool, constants[idx].value);
NvFlowPreprocessorFunction function = {};
function.name = constants[idx].name;
function.type = eNvFlowPreprocessorType_constant;
function.userdata = valueStr;
function.substitute = NvFlowPreprocessor_substituteConstant;
NvFlowPreprocessor_addItem(ptr, &function);
}
}
void NvFlowPreprocessorAddFunctions(NvFlowPreprocessor* ptr, NvFlowUint64 numFunctions, const NvFlowPreprocessorFunction* functions)
{
for (NvFlowUint64 idx = 0u; idx < numFunctions; idx++)
{
NvFlowPreprocessor_addItem(ptr, &functions[idx]);
}
}
char NvFlowPreprocessor_peekChar(const char* input, NvFlowUint64 inputIdx, NvFlowUint64 input_size)
{
char ret = '\0';
if (inputIdx < input_size)
{
ret = input[inputIdx];
}
return ret;
}
NvFlowBool32 NvFlowPreprocessor_whitespaceButNotNewline(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (NvFlowCharIsWhiteSpaceButNotNewline(c0))
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (!NvFlowCharIsWhiteSpaceButNotNewline(c0))
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_continuation(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\\' && c1 == '\n')
{
*pOutput_size = 2;
*pOutput = input + inputIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_whitespace(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
if (NvFlowPreprocessor_whitespaceButNotNewline(pOutput, pOutput_size, input, input_size, inputIdx))
{
return NV_FLOW_TRUE;
}
return NvFlowPreprocessor_continuation(pOutput, pOutput_size, input, input_size, inputIdx);
}
NvFlowBool32 NvFlowPreprocessor_newline(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (c0 == '\n')
{
*pOutput_size = 1;
*pOutput = input + inputIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_commentLine(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '/' && c1 == '/')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (c0 == '\n')
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_commentMultiLine(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '/' && c1 == '*')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '*' && c1 == '/')
{
inputIdx += 2;
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_comment(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
if (NvFlowPreprocessor_commentLine(pOutput, pOutput_size, input, input_size, inputIdx))
{
return NV_FLOW_TRUE;
}
return NvFlowPreprocessor_commentMultiLine(pOutput, pOutput_size, input, input_size, inputIdx);
}
NvFlowBool32 NvFlowPreprocessor_name(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (NvFlowCharIsAlphaUnderscore(c0))
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (!NvFlowCharIsAlphaNum(c0))
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_number(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (NvFlowCharIsNum(c0))
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (!(NvFlowCharIsAlphaNum(c0) || (c0 == '.')))
{
break;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_string(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\"')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\"')
{
inputIdx++;
break;
}
else if (c0 == '\\' && c1 == '\"')
{
inputIdx++;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_char(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
char c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\'')
{
NvFlowUint64 beginIdx = inputIdx;
inputIdx++;
for (; inputIdx < input_size; inputIdx++)
{
c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
c1 = NvFlowPreprocessor_peekChar(input, inputIdx + 1, input_size);
if (c0 == '\'')
{
inputIdx++;
break;
}
else if (c0 == '\\' && c1 == '\'')
{
inputIdx++;
}
}
*pOutput_size = inputIdx - beginIdx;
*pOutput = input + beginIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
NvFlowBool32 NvFlowPreprocessor_specialChar(const char** pOutput, NvFlowUint64* pOutput_size, const char* input, NvFlowUint64 input_size, NvFlowUint64 inputIdx, char specialChar)
{
char c0 = NvFlowPreprocessor_peekChar(input, inputIdx + 0, input_size);
if (c0 == specialChar)
{
*pOutput_size = 1;
*pOutput = input + inputIdx;
return NV_FLOW_TRUE;
}
return NV_FLOW_FALSE;
}
void NvFlowPreprocessorTokenize(NvFlowPreprocessor* ptr, const char* input, NvFlowUint64* pTotalTokens, NvFlowPreprocessorToken** pTokens)
{
NvFlowStringPool* pool = ptr->stringPool;
ptr->tempTokens.size = 0u;
NvFlowUint64 input_size = NvFlowStringLength(input);
NvFlowUint64 inputIdx = 0u;
while (inputIdx < input_size)
{
// default to single char token
NvFlowPreprocessorToken token = { eNvFlowPreprocessorTokenType_unknown, "InvalidToken" };
NvFlowUint64 output_size = 1;
const char* output = input + inputIdx;
if (NvFlowPreprocessor_whitespace(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_whitespace;
}
else if (NvFlowPreprocessor_newline(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_newline;
}
else if (NvFlowPreprocessor_comment(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_comment;
}
else if (NvFlowPreprocessor_name(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_name;
}
else if (NvFlowPreprocessor_number(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_number;
}
else if (NvFlowPreprocessor_string(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_string;
}
else if (NvFlowPreprocessor_char(&output, &output_size, input, input_size, inputIdx))
{
token.type = eNvFlowPreprocessorTokenType_char;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '#'))
{
token.type = eNvFlowPreprocessorTokenType_pound;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ','))
{
token.type = eNvFlowPreprocessorTokenType_comma;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '.'))
{
token.type = eNvFlowPreprocessorTokenType_period;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ';'))
{
token.type = eNvFlowPreprocessorTokenType_semicolon;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ':'))
{
token.type = eNvFlowPreprocessorTokenType_colon;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '='))
{
token.type = eNvFlowPreprocessorTokenType_equals;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '*'))
{
token.type = eNvFlowPreprocessorTokenType_asterisk;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '('))
{
token.type = eNvFlowPreprocessorTokenType_leftParenthesis;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ')'))
{
token.type = eNvFlowPreprocessorTokenType_rightParenthesis;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '['))
{
token.type = eNvFlowPreprocessorTokenType_leftBracket;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, ']'))
{
token.type = eNvFlowPreprocessorTokenType_rightBracket;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '{'))
{
token.type = eNvFlowPreprocessorTokenType_leftCurlyBrace;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '}'))
{
token.type = eNvFlowPreprocessorTokenType_rightCurlyBrace;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '<'))
{
token.type = eNvFlowPreprocessorTokenType_lessThan;
}
else if (NvFlowPreprocessor_specialChar(&output, &output_size, input, input_size, inputIdx, '>'))
{
token.type = eNvFlowPreprocessorTokenType_greaterThan;
}
// duplicate output to null terminated string
token.str = NvFlowStringFromView(pool, output, output_size);
ptr->tempTokens.pushBack(token);
// advance past token
inputIdx += output_size;
}
auto tokenData = (NvFlowPreprocessorToken*)NvFlowStringPoolAllocate(pool, ptr->tempTokens.size * sizeof(NvFlowPreprocessorToken));
for (NvFlowUint64 idx = 0u; idx < ptr->tempTokens.size; idx++)
{
tokenData[idx] = ptr->tempTokens[idx];
}
*pTokens = tokenData;
*pTotalTokens = ptr->tempTokens.size;
ptr->tempTokens.size = 0u;
}
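// Minimal usage sketch (hypothetical):
//
//   NvFlowUint64 numTokens = 0llu;
//   NvFlowPreprocessorToken* tokens = nullptr;
//   NvFlowPreprocessorTokenize(ptr, "float4 a = b * c;", &numTokens, &tokens);
//   for (NvFlowUint64 i = 0u; i < numTokens; i++)
//   {
//       // inspect tokens[i].type / tokens[i].str
//   }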
NvFlowBool32 NvFlowPreprocessorFindKeyInSource(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* keyTokens, NvFlowUint64 keyTokenCount, const NvFlowPreprocessorToken* sourceTokens, NvFlowUint64 sourceTokenCount, NvFlowUint64* pSourceIndex)
{
NvFlowUint64 keyTokenIdx = 0u;
NvFlowUint64 sourceTokenIdx = 0u;
NvFlowUint64 matches = 0u;
NvFlowUint64 keyTestCount = 0u;
while (keyTokenIdx < keyTokenCount && sourceTokenIdx < sourceTokenCount)
{
NvFlowPreprocessorSkipWhitespaceTokens(&keyTokenIdx, keyTokenCount, keyTokens);
NvFlowPreprocessorSkipWhitespaceTokens(&sourceTokenIdx, sourceTokenCount, sourceTokens);
if (keyTokenIdx < keyTokenCount)
{
keyTestCount++;
}
if (keyTokenIdx < keyTokenCount && sourceTokenIdx < sourceTokenCount)
{
if (keyTokens[keyTokenIdx].type == sourceTokens[sourceTokenIdx].type)
{
if (keyTokens[keyTokenIdx].type == eNvFlowPreprocessorTokenType_name)
{
if (NvFlowStringCompare(keyTokens[keyTokenIdx].str, sourceTokens[sourceTokenIdx].str) == 0)
{
matches++;
}
}
else
{
matches++;
}
}
}
keyTokenIdx++;
sourceTokenIdx++;
}
if (pSourceIndex)
{
*pSourceIndex += sourceTokenIdx;
}
return (matches > 0 && matches == keyTestCount) ? NV_FLOW_TRUE : NV_FLOW_FALSE;
}
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters)
{
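    // Run the single-delimiter extraction once per candidate delimiter from the same
    // start index and keep whichever delimiter terminates first (the smallest range).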
NvFlowUint64 beginTokenIdx = (*pTokenIdx);
NvFlowPreprocessorRange range = { beginTokenIdx, beginTokenIdx };
if (numDelimiters > 0u)
{
NvFlowUint64 localTokenIdx = beginTokenIdx;
range = NvFlowPreprocessorExtractTokensDelimited(ptr, &localTokenIdx, numTokens, tokens, delimiters[0u]);
(*pTokenIdx) = localTokenIdx;
}
for (NvFlowUint64 delimiterIdx = 1u; delimiterIdx < numDelimiters; delimiterIdx++)
{
NvFlowUint64 localTokenIdx = beginTokenIdx;
NvFlowPreprocessorRange localRange = NvFlowPreprocessorExtractTokensDelimited(ptr, &localTokenIdx, numTokens, tokens, delimiters[delimiterIdx]);
if (localRange.end < range.end)
{
range = localRange;
(*pTokenIdx) = localTokenIdx;
}
}
return range;
}
NvFlowPreprocessorRange NvFlowPreprocessorExtractTokensDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter)
{
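    // Collect tokens until the delimiter is found at scope depth zero. Parentheses,
    // brackets, and curly braces (plus angle brackets when the delimiter is '>') open
    // and close scopes; when the delimiter is itself a scope-ending symbol, the scan
    // starts at depth one so the matching close terminates it.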
NvFlowPreprocessorRange range = { (*pTokenIdx), (*pTokenIdx) };
NvFlowPreprocessorTokenType rightType = eNvFlowPreprocessorTokenType_rightParenthesis;
NvFlowPreprocessorTokenType leftType = eNvFlowPreprocessorTokenType_leftParenthesis;
if (delimiter == eNvFlowPreprocessorTokenType_greaterThan)
{
rightType = eNvFlowPreprocessorTokenType_greaterThan;
leftType = eNvFlowPreprocessorTokenType_lessThan;
}
bool delimiterIsScopeEnd = (
delimiter == eNvFlowPreprocessorTokenType_rightParenthesis ||
delimiter == eNvFlowPreprocessorTokenType_rightBracket ||
delimiter == eNvFlowPreprocessorTokenType_rightCurlyBrace ||
delimiter == eNvFlowPreprocessorTokenType_greaterThan
);
int scopeIdx = delimiterIsScopeEnd ? 1 : 0;
for (; (*pTokenIdx) < numTokens; (*pTokenIdx)++)
{
// scope end is 'before' the end symbol
if (tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_rightParenthesis ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_rightBracket ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_rightCurlyBrace ||
tokens[(*pTokenIdx)].type == rightType)
{
scopeIdx--;
}
if (scopeIdx == 0 && tokens[(*pTokenIdx)].type == delimiter)
{
(*pTokenIdx)++;
break;
}
else if (scopeIdx == 0 && delimiter == eNvFlowPreprocessorTokenType_anyWhitespace && NvFlowPreprocessorTokenIsWhitespace(tokens[(*pTokenIdx)]))
{
(*pTokenIdx)++;
break;
}
else
{
range.end++;
}
// scope begin is 'after' the start symbol
if (tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_leftParenthesis ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_leftBracket ||
tokens[(*pTokenIdx)].type == eNvFlowPreprocessorTokenType_leftCurlyBrace ||
tokens[(*pTokenIdx)].type == leftType)
{
scopeIdx++;
}
}
return range;
}
const char* NvFlowPreprocessorExtractDelimitedN(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numDelimiters, const NvFlowPreprocessorTokenType* delimiters)
{
ptr->tempStrViews.size = 0u;
NvFlowPreprocessorSkipWhitespaceTokens(pTokenIdx, numTokens, tokens);
NvFlowPreprocessorRange range = NvFlowPreprocessorExtractTokensDelimitedN(ptr, pTokenIdx, numTokens, tokens, numDelimiters, delimiters);
NvFlowPreprocessorToken prevPushedToken = {};
for (NvFlowUint64 idx = range.begin; idx < range.end; idx++)
{
if (NvFlowPreprocessorTokenIsWhitespace(tokens[idx]))
{
continue;
}
else
{
if (tokens[idx].type == eNvFlowPreprocessorTokenType_name ||
tokens[idx].type == eNvFlowPreprocessorTokenType_number)
{
if (prevPushedToken.type == eNvFlowPreprocessorTokenType_name ||
prevPushedToken.type == eNvFlowPreprocessorTokenType_number)
{
ptr->tempStrViews.pushBack(" ");
}
}
ptr->tempStrViews.pushBack(tokens[idx].str);
prevPushedToken = tokens[idx];
}
}
char* output = NvFlowStringConcatN(ptr->stringPool, ptr->tempStrViews.data, ptr->tempStrViews.size);
ptr->tempStrViews.size = 0u;
return output;
}
const char* NvFlowPreprocessorExtractDelimited(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter)
{
return NvFlowPreprocessorExtractDelimitedN(ptr, pTokenIdx, numTokens, tokens, 1u, &delimiter);
}
const char* NvFlowPreprocessorExtractDelimitedPreserve(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType delimiter)
{
NvFlowPreprocessorRange range = NvFlowPreprocessorExtractTokensDelimited(ptr, pTokenIdx, numTokens, tokens, delimiter);
return NvFlowPreprocessorConcatTokens(ptr, tokens + range.begin, range.end - range.begin);
}
const char* NvFlowPreprocessorExtractIfType(NvFlowPreprocessor* ptr, NvFlowUint64* pTokenIdx, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens, NvFlowPreprocessorTokenType type)
{
const char* ret = nullptr;
NvFlowPreprocessorSkipWhitespaceTokens(pTokenIdx, numTokens, tokens);
if ((*pTokenIdx) < numTokens && tokens[(*pTokenIdx)].type == type)
{
ret = tokens[(*pTokenIdx)].str;
(*pTokenIdx)++;
}
return ret;
}
const char* NvFlowPreprocessorConcatTokens(NvFlowPreprocessor* ptr, const NvFlowPreprocessorToken* tokens, NvFlowUint64 numTokens)
{
ptr->tempStrViews.size = 0u;
for (NvFlowUint64 idx = 0u; idx < numTokens; idx++)
{
ptr->tempStrViews.pushBack(tokens[idx].str);
}
char* output = NvFlowStringConcatN(ptr->stringPool, ptr->tempStrViews.data, ptr->tempStrViews.size);
ptr->tempStrViews.size = 0u;
return output;
}
char* NvFlowPreprocessorExecute(NvFlowPreprocessor* ptr, const char* input)
{
// increment level
ptr->currentLevel++;
NvFlowUint64 stringStackBegin = ptr->stringStack.size;
// tokenize
NvFlowPreprocessorToken* tokenStack_data = nullptr;
NvFlowUint64 tokenStack_size = 0u;
NvFlowPreprocessorTokenize(ptr, input, &tokenStack_size, &tokenStack_data);
// process tokens
for (NvFlowUint64 tokenIdx = 0u; tokenIdx < tokenStack_size; tokenIdx++)
{
NvFlowPreprocessorToken firstToken = tokenStack_data[tokenIdx];
if (NvFlowPreprocessorTokenIsWhitespace(firstToken))
{
if (ptr->mode == eNvFlowPreprocessorMode_disable_passthrough)
{
// NOP
}
else
{
ptr->stringStack.pushBack(firstToken.str);
}
}
else
{
NvFlowUint64 itemIdx = 0u;
for (; itemIdx < ptr->items.size; itemIdx++)
{
const NvFlowPreprocessorItem item = ptr->items[itemIdx];
NvFlowUint64 compareSourceIdx = tokenIdx;
if (item.isEnabled && NvFlowPreprocessorFindKeyInSource(ptr,
item.tokens_data, item.tokens_size,
tokenStack_data + tokenIdx, tokenStack_size - tokenIdx,
&compareSourceIdx))
{
NvFlowUint64 childTokenBegin = tokenIdx;
NvFlowUint64 childTokenEnd = tokenIdx;
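                        // Extend childTokenEnd to cover the full construct this item type
                        // consumes, e.g. through ';' for a statement or through the
                        // matching ')' for a function call.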
if (item.function.type == eNvFlowPreprocessorType_constant)
{
childTokenEnd = compareSourceIdx;
}
else if (item.function.type == eNvFlowPreprocessorType_statement)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_semicolon);
}
else if (item.function.type == eNvFlowPreprocessorType_statementComma)
{
NvFlowPreprocessorTokenType delimiters[2u] = { eNvFlowPreprocessorTokenType_comma, eNvFlowPreprocessorTokenType_rightParenthesis };
NvFlowPreprocessorExtractTokensDelimitedN(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, 2u, delimiters);
}
else if (item.function.type == eNvFlowPreprocessorType_function)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftParenthesis);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightParenthesis);
}
else if (item.function.type == eNvFlowPreprocessorType_attribute)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftBracket);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightBracket);
}
else if (item.function.type == eNvFlowPreprocessorType_body)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftCurlyBrace);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightCurlyBrace);
}
else if (item.function.type == eNvFlowPreprocessorType_templateInstance)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_lessThan);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_greaterThan);
}
else if (item.function.type == eNvFlowPreprocessorType_index)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftBracket);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightBracket);
NvFlowUint64 childTokenEndWithEquals = childTokenEnd;
NvFlowPreprocessorSkipWhitespaceTokens(&childTokenEndWithEquals, tokenStack_size, tokenStack_data);
// check for =
if (childTokenEndWithEquals < tokenStack_size)
{
const NvFlowPreprocessorToken token = tokenStack_data[childTokenEndWithEquals];
if (token.type == eNvFlowPreprocessorTokenType_equals)
{
childTokenEndWithEquals++;
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEndWithEquals, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_semicolon);
// commit
childTokenEnd = childTokenEndWithEquals;
}
}
}
else if (item.function.type == eNvFlowPreprocessorType_line)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_newline);
}
if (!ptr->items[itemIdx].function.allowRecursion)
{
ptr->items[itemIdx].isEnabled = NV_FLOW_FALSE; // disable recursion
}
if (item.function.substitute)
{
char* substituteStr = item.function.substitute(
ptr,
item.function.userdata,
childTokenEnd - childTokenBegin,
tokenStack_data + childTokenBegin
);
char* substituteOutput = nullptr;
if (ptr->mode == eNvFlowPreprocessorMode_singlePass)
{
substituteOutput = substituteStr;
}
                    else // eNvFlowPreprocessorMode_default or eNvFlowPreprocessorMode_disable_passthrough
{
substituteOutput = NvFlowPreprocessorExecute(ptr, substituteStr);
}
ptr->stringStack.pushBack(substituteOutput);
}
if (!ptr->items[itemIdx].function.allowRecursion)
{
ptr->items[itemIdx].isEnabled = NV_FLOW_TRUE;
}
// advance tokenIdx
if (childTokenEnd > childTokenBegin)
{
tokenIdx += childTokenEnd - childTokenBegin - 1u;
}
break;
}
}
// If no match found, pass through token
if (itemIdx == ptr->items.size)
{
if (ptr->mode == eNvFlowPreprocessorMode_disable_passthrough)
{
// NOP
}
else
{
ptr->stringStack.pushBack(firstToken.str);
}
}
}
}
// pop string stack
NvFlowUint64 stringStackEnd = ptr->stringStack.size;
char* ret = NvFlowStringConcatN(ptr->stringPool, ptr->stringStack.data + stringStackBegin, stringStackEnd - stringStackBegin);
ptr->stringStack.size = stringStackBegin;
// decrement level
ptr->currentLevel--;
return ret;
}
char* NvFlowPreprocessorExecuteGlobal(NvFlowPreprocessor* ptr, const char* input, void* userdata, char*(*substitute)(NvFlowPreprocessor* ptr, void* userdata, NvFlowPreprocessorGlobalType globalType, NvFlowUint64 numTokens, const NvFlowPreprocessorToken* tokens))
{
// increment level
ptr->currentLevel++;
NvFlowUint64 stringStackBegin = ptr->stringStack.size;
// tokenize
NvFlowPreprocessorToken* tokenStack_data = nullptr;
NvFlowUint64 tokenStack_size = 0u;
NvFlowPreprocessorTokenize(ptr, input, &tokenStack_size, &tokenStack_data);
// process tokens
NvFlowUint64 tokenIdx = 0u;
while (tokenIdx < tokenStack_size)
{
NvFlowPreprocessorToken firstToken = tokenStack_data[tokenIdx];
// skip whitespace, but include in output stream
if (NvFlowPreprocessorTokenIsWhitespace(firstToken))
{
ptr->stringStack.pushBack(firstToken.str);
tokenIdx++;
continue;
}
NvFlowUint64 childTokenBegin = tokenIdx;
NvFlowUint64 childTokenEnd = tokenIdx;
NvFlowPreprocessorGlobalType globalType = eNvFlowPreprocessorGlobalType_unknown;
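        // Classify the next construct: a '#' directive line, a '[...]' attribute,
        // a function declaration, a simple statement, or unknown (single-token advance).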
// check for # condition
if (firstToken.type == eNvFlowPreprocessorTokenType_pound)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_newline);
globalType = eNvFlowPreprocessorGlobalType_line;
}
// check for [ condition
if (firstToken.type == eNvFlowPreprocessorTokenType_leftBracket)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftBracket);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightBracket);
globalType = eNvFlowPreprocessorGlobalType_attribute;
}
        // attempt to detect a function declaration, unless a construct was already extracted
if (childTokenBegin == childTokenEnd)
{
// names and whitespace are acceptable up to initial (
while (childTokenEnd < tokenStack_size)
{
const NvFlowPreprocessorToken token = tokenStack_data[childTokenEnd];
if (!(token.type == eNvFlowPreprocessorTokenType_name || NvFlowPreprocessorTokenIsWhitespace(token)))
{
break;
}
childTokenEnd++;
}
if (childTokenBegin != childTokenEnd && tokenStack_data[childTokenEnd].type == eNvFlowPreprocessorTokenType_leftParenthesis)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftParenthesis);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightParenthesis);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_leftCurlyBrace);
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_rightCurlyBrace);
globalType = eNvFlowPreprocessorGlobalType_function;
}
else
{
// invalidate
childTokenEnd = childTokenBegin;
}
}
// attempt to extract a simple statement
if (childTokenBegin == childTokenEnd)
{
NvFlowPreprocessorExtractTokensDelimited(ptr, &childTokenEnd, tokenStack_size, tokenStack_data, eNvFlowPreprocessorTokenType_semicolon);
globalType = eNvFlowPreprocessorGlobalType_statement;
}
if (childTokenBegin == childTokenEnd)
{
            // not identified, force advance
childTokenEnd++;
}
if (globalType != eNvFlowPreprocessorGlobalType_unknown)
{
char* substituteOutput = nullptr;
if (substitute)
{
substituteOutput = substitute(ptr, userdata, globalType, childTokenEnd - childTokenBegin, tokenStack_data + childTokenBegin);
}
if (substituteOutput)
{
ptr->stringStack.pushBack(substituteOutput);
}
}
else
{
for (NvFlowUint64 localTokenIdx = childTokenBegin; localTokenIdx < childTokenEnd; localTokenIdx++)
{
ptr->stringStack.pushBack(tokenStack_data[localTokenIdx].str);
}
}
// advance tokenIdx
tokenIdx = childTokenEnd;
}
// pop string stack
NvFlowUint64 stringStackEnd = ptr->stringStack.size;
char* ret = NvFlowStringConcatN(ptr->stringPool, ptr->stringStack.data + stringStackBegin, stringStackEnd - stringStackBegin);
ptr->stringStack.size = stringStackBegin;
// decrement level
ptr->currentLevel--;
return ret;
}
| 34,949 | C++ | 32.254044 | 262 | 0.741881 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/pyproject.toml | [tool.poetry]
name = "openusd-code-samples"
version = "1.1.0"
description = "Universal Scene Description (OpenUSD) code samples in Python, C++, and USDA for common development features and tasks."
license = "Apache-2.0"
authors = []
readme = "README.md"
packages = [{include = "source"}]
[tool.poetry.dependencies]
python = ">=3.8, <3.11"
numpy = "1.24.1"
usd-core = "23.5"
types-usd = "~23.5.4"
[tool.poetry.group.docs.dependencies]
myst-parser = "0.18.0"
rstcloth = "0.5.4"
Sphinx = "4.5.0"
sphinx-design = "0.2.0"
sphinx-rtd-theme = "1.0.0"
toml = "0.10.2"
# Pinned for security patches
certifi = "2023.7.22"
markdown-it-py = "2.2.0"
pygments = "2.16.1"
requests = "2.31.0"
urllib3 = "1.26.18"
jinja2 = "3.1.3"
[tool.poetry.dev-dependencies]
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 834 | TOML | 21.567567 | 134 | 0.669065 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/CODE-SAMPLE-GUIDELINES.md | # Code Sample Guidelines
## Samples Format
This image shows the file structure that contains two Code Samples for cameras.
![File structure containing two camera code samples](images/image.png)
Our Code Samples are stored in the source directory, organized by categories. Each sample has its files, including the actual sample code, in its own directory.
In this example, we have two camera Code Samples. The paths to these two Code Samples folders are the following:
`source/cameras/create-orthographic-camera`
`source/cameras/create-perspective-camera`
**Within each Code Sample folder are the following files:**
| File(s) | Purpose |
| -----|----- |
| config.toml | Contains the title and metadata (description and SEO keywords) |
| header.md | The overview for this code sample |
| Code Sample "flavor" file(s) | See below |
| Markdown file for each "flavor" | See below |
The header file is an overview for all of the flavors. It can contain markdown formatting including URL's and markdown directives.
**Each Code Sample should have at least one "flavor":**
| Flavor Source File Name | Language and USD type |
| -----|----- |
| py_usd.py | Python using Pixar USD API |
| py_omni_usd.py | Python using omni.usd extension |
| py_kit_cmds.py | Python using Kit commands |
| cpp_usd.cpp | C++ using Pixar USD API |
| cpp_omni_usd.cpp | C++ using omni.usd extension |
| cpp_kit_cmds.cpp | C++ using Kit commands |
| usda.usda | USDA (text) file |
Each flavor can have more than one sample (variations). In this case, we append `_var<X>`, where X starts at 1 and increments for each additional sample variation.
Example: `py_usd.py`, `py_usd_var1.py`, `py_usd_var2.py`, etc.
**Markdown files:**
Every flavor that has a sample needs exactly one markdown file, no matter how many variations are included. It has the same name as the flavor, but with the `.md` extension.
For example, if you have `py_usd.py` samples, you'll need a `py_usd.md` file. In the markdown file, use the `literalinclude` directive.
Example:
````
**Convert to Numpy Array**
To convert a VtArray to a Numpy Array, simply pass the VtArray object to `numpy.array` constructor.
``` {literalinclude} py_usd.py
:language: py
```
**Convert from Numpy Array**
To convert a Numpy Array to a VtArray, you can use `FromNumpy()` from the VtArray class you want to convert to.
``` {literalinclude} py_usd_var1.py
:language: py
```
````
This example includes two samples, with a description for each one.
Use one of these language codes for the `:language:` option:

| Language code | File type |
| -----|----- |
| py | Python |
| c++ | C++/cpp |
| usd | USDA |
## Building the Samples
When all of your files are in place you should build and verify your samples are correctly setup by running the build script:
```
>poetry run python build_docs.py
```
If there are no errors, you can then view the result by opening the ``index.html`` file, found in the ``sphinx/_build`` folder, in a browser.
![The generated index.html file in the sphinx/_build folder](images/root_index_file.png)
There are two ways to do this. The first way:
1) Select the ``index.html`` file
2) Right click and select ``Copy Path``
3) Paste the path into address bar of your web browser
![The Copy Path option in the context menu](images/copy_path.png)
The second way:
1) Select the ``index.html`` file so it's showing in a VS Code window
2) Press ``Alt-B`` and it will be launched in your default web browser.
## Markdown Cheatsheet
### Links
Create links using typical markdown syntax.
Here's an external link:
[USD Data Types documentation](https://docs.omniverse.nvidia.com/dev-guide/latest/dev_usd/quick-start/usd-types.html)
You can also link to other code samples using relative paths. Here's a link to a code sample in the same category:
[Add a Payload](add-payload)
Use the folder name for the code sample. The folder name will be the final markdown/HTML file name.
Here's a link to a code sample in different category:
[Check if a Prim Exists](../prims/check-prim-exists)
### Admonitions
https://myst-parser.readthedocs.io/en/latest/syntax/admonitions.html
```{tip}
https://myst-parser.readthedocs.io/en/latest/syntax/admonitions.html
```
| 4,126 | Markdown | 30.030075 | 179 | 0.710131 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/CONTRIBUTING.md |
## OpenUSD Code Samples OSS Contribution Rules
#### Issue Tracking
* All enhancement, bugfix, or change requests must begin with the creation of a [OpenUSD Code Samples Issue Request](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples/issues).
* The issue request must be reviewed by OpenUSD Code Samples engineers and approved prior to code review.
#### Coding Guidelines
- All source code contributions must strictly adhere to the [OpenUSD Code Samples Guidelines](CODE-SAMPLE-GUIDELINES.md).
- In addition, please follow the existing conventions in the relevant file, submodule, module, and project when you add new code or when you extend/fix existing functionality.
- Avoid introducing unnecessary complexity into existing code so that maintainability and readability are preserved.
- All development should happen against the "main" branch of the repository. Please make sure the base branch of your pull request is set to the "main" branch when filing your pull request.
- Try to keep pull requests (PRs) as concise as possible:
- Avoid committing commented-out code.
- Wherever possible, each PR should address a single concern. If there are several otherwise-unrelated things that should be fixed to reach a desired endpoint, our recommendation is to open several PRs and indicate the dependencies in the description. The more complex the changes are in a single PR, the more time it will take to review those changes.
- Write commit titles using imperative mood and [these rules](https://chris.beams.io/posts/git-commit/), and reference the Issue number corresponding to the PR. Following is the recommended format for commit texts:
```
Issue #<Issue Number> - <Commit Title>
<Commit Body>
```
- Ensure that the Sphinx build log is clean, meaning no warnings or errors should be present.
- Ensure that all code blocks execute correctly prior to submitting your code.
- All OSS components must contain accompanying documentation (READMEs) describing the functionality, dependencies, and known issues.
- See `README.md` for existing samples and plugins for reference.
- All OSS components must have an accompanying test.
- If introducing a new component, such as a plugin, provide a test sample to verify the functionality.
- Make sure that you can contribute your work to open source (no license and/or patent conflict is introduced by your code). You will need to [`sign`](#signing-your-work) your commit.
- Thanks in advance for your patience as we review your contributions; we do appreciate them!
#### Pull Requests
Developer workflow for code contributions is as follows:
1. Developers must first [fork](https://help.github.com/en/articles/fork-a-repo) the [upstream](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples) OpenUSD Code Samples repository.
2. Git clone the forked repository.
```bash
git clone https://github.com/YOUR_USERNAME/YOUR_FORK.git OpenUSD-Code-Samples
```
3. Create a branch off of the "main" branch and commit changes. See [Coding Guidelines](#coding-guidelines) for commit formatting rules.
```bash
# Create a branch off of the "main" branch
git checkout -b <local-branch> <remote-branch>
git add <path-to-files>
# -s flag will "sign-off" on your commit, we require all contributors to sign-off on their commits. See below for more
git commit -s -m "Issue #<Issue Number> - <Commit Title>"
```
4. Push Changes to the personal fork.
```bash
# Push the commits to a branch on the fork (remote).
git push -u origin <local-branch>:<remote-branch>
```
5. Please make sure that your pull requests are clean. Use the rebase and squash git facilities as needed to ensure that the pull request is as clean as possible.
6. Once the code changes are staged on the fork and ready for review, a [Pull Request](https://help.github.com/en/articles/about-pull-requests) (PR) can be [requested](https://help.github.com/en/articles/creating-a-pull-request) to merge the changes from your branch to the upstream "main" branch.
* Exercise caution when selecting the source and target branches for the PR.
  * Creating a PR kicks off the code review process.
* At least one OpenUSD Code Samples engineer will be assigned for the review.
* While under review, mark your PRs as work-in-progress by prefixing the PR title with [WIP].
7. Since there is no CI/CD process in place yet, the PR will be accepted and the corresponding issue closed only after adequate testing has been completed, manually, by the developer and/or OpenUSD Code Samples engineer reviewing the code.
#### Signing Your Work
* We require that all contributors "sign-off" on their commits. This certifies that the contribution is your original work, or you have rights to submit it under the same license, or a compatible license.
* Any contribution which contains commits that are not Signed-Off will not be accepted.
* To sign off on a commit you simply use the `--signoff` (or `-s`) option when committing your changes:
```bash
$ git commit -s -m "Add cool feature."
```
This will append the following to your commit message:
```
Signed-off-by: Your Name <[email protected]>
```
* Full text of the DCO:
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
``` | 6,676 | Markdown | 48.095588 | 354 | 0.75 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/build_docs.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import os
from pathlib import Path
import shutil
from rstcloth import RstCloth
import sphinx.cmd.build
import toml
REPO_ROOT = Path(__file__).parent
SOURCE_DIR = REPO_ROOT / "source"
SPHINX_DIR = REPO_ROOT / "sphinx"
SPHINX_CODE_SAMPLES_DIR = SPHINX_DIR / "usd"
# 0 = normal toctree, 1 = :doc: tags
TOCTREE_STYLE = 0
REPLACE_USDA_EXT = True
STRIP_COPYRIGHTS = True
IMAGE_TYPES = {".jpg", ".gif"}
logger = logging.getLogger(__name__)
def main():
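    """Generate an rST page for every code sample found under SOURCE_DIR, copy the
    sample sources into the Sphinx tree, write the index, and run the Sphinx build."""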
# flush build dir
if os.path.exists(SPHINX_CODE_SAMPLES_DIR):
shutil.rmtree(SPHINX_CODE_SAMPLES_DIR)
SPHINX_CODE_SAMPLES_DIR.mkdir(exist_ok=False)
samples = {}
# each config.toml should be a sample
for config_file in SOURCE_DIR.rglob("config.toml"):
category_name = config_file.parent.parent.name
sample_name = config_file.parent.name
if category_name not in samples:
samples[category_name] = []
logger.info(f"processing: {sample_name}")
sample_source_dir = config_file.parent
sample_output_dir = SPHINX_CODE_SAMPLES_DIR / sample_source_dir.parent.relative_to(SOURCE_DIR) / f"{sample_name}"
# make sure category dir exists
category_output_dir = SPHINX_CODE_SAMPLES_DIR / sample_source_dir.parent.relative_to(SOURCE_DIR)
if not os.path.exists(category_output_dir):
category_output_dir.mkdir(exist_ok=False)
sample_rst_out = category_output_dir / f"{sample_name}.rst"
with open(config_file) as f:
content = f.read()
config = toml.loads(content)
title = config["core"]["title"]
samples[category_name].append([sample_name, title])
sample_output_dir.mkdir(exist_ok=True)
with open(sample_rst_out, "w") as f:
doc = RstCloth(f)
if TOCTREE_STYLE == 1:
doc._add(":orphan:")
doc.newline()
doc.directive("meta",
fields=[
('description', config["metadata"]["description"]),
('keywords', ", ".join(config["metadata"]["keywords"]))
])
doc.newline()
doc.title(config["core"]["title"], overline=False)
doc.newline()
md_file_path = sample_source_dir / "header.md"
new_md_name = sample_name + "_header.md"
out_md = category_output_dir / new_md_name
prepend_include_path(md_file_path, out_md, sample_name)
fields = [("parser" , "myst_parser.sphinx_")]
doc.directive( "include", new_md_name, fields)
doc.newline()
doc.newline()
doc.directive("tab-set")
doc.newline()
code_flavors = {"USD Python" : "py_usd.md",
"Python omni.usd" : "py_omni_usd.md",
"Python Kit Commands" : "py_kit_cmds.md",
"USD C++" : "cpp_usd.md",
"C++ omni.usd" : "cpp_omni_usd.md",
"C++ Kit Commands" : "cpp_kit_cmds.md",
"usdview": "py_usdview.md",
"USDA" : "usda.md",
}
for tab_name in code_flavors:
md_file_name = code_flavors[tab_name]
md_file_path = sample_source_dir / code_flavors[tab_name]
if md_file_path.exists():
doc.directive("tab-item", tab_name, None, None, 3)
doc.newline()
# make sure all md flavor names are unique
new_md_name = sample_name + "_" + md_file_name
out_md = category_output_dir / new_md_name
prepend_include_path(md_file_path, out_md, sample_name)
fields = [("parser" , "myst_parser.sphinx_")]
doc.directive( "include", new_md_name, fields, None, 6)
doc.newline()
# copy all samples
ignore=shutil.ignore_patterns('*.md', 'config.toml')
if REPLACE_USDA_EXT:
ignore=shutil.ignore_patterns('*.md', 'config.toml', '*.usda')
shutil.copytree(sample_source_dir, sample_output_dir, ignore=ignore, dirs_exist_ok=True )
# copy any usda's to .py
if REPLACE_USDA_EXT:
for filename in os.listdir(sample_source_dir):
base_file, ext = os.path.splitext(filename)
if ext == ".usda":
orig = str(sample_source_dir) + "/" + filename
newname = str(sample_output_dir) + "/" + str(base_file) + ".py"
shutil.copy(orig, newname)
# strip out copyright comments in output files
if STRIP_COPYRIGHTS:
for filename in os.listdir(sample_output_dir):
full_path = os.path.join(sample_output_dir, filename)
strip_copyrights(full_path)
doc.newline()
generate_sphinx_index(samples)
sphinx.cmd.build.main([str(SPHINX_DIR), str(SPHINX_DIR / "_build"), "-b", "html"])
def strip_copyrights(filename):
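    """Remove leading SPDX copyright comment lines from a generated sample file.

    Image files are skipped since they cannot be read as text.
    """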
base_file, ext = os.path.splitext(filename)
if ext in IMAGE_TYPES:
print(f"strip_copyrights, skip image :: {filename}")
return
with open(filename) as sample_file:
sample_lines = sample_file.readlines()
# strip copyrights
# .py
while sample_lines[0].startswith("# SPDX-"):
sample_lines.pop(0)
# .cpp
while sample_lines[0].startswith("// SPDX-"):
sample_lines.pop(0)
# get rid of empty spacer line
if len(sample_lines[0].strip()) < 1:
sample_lines.pop(0)
with open(filename, "w") as sample_file:
for line in sample_lines:
sample_file.write(line)
def prepend_include_path(in_file_path: str, out_file_path: str, dir_path: str):
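    """Rewrite the literalinclude paths in a flavor markdown file so they point into
    the sample's output directory, writing the result to out_file_path. When
    REPLACE_USDA_EXT is set, `.usda` include targets are rewritten to `.py`."""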
with open(in_file_path) as mdf:
md_data = mdf.read()
md_lines = md_data.split("\n")
lc = 0
for line in md_lines:
inc_str ="``` {literalinclude}"
sp = line.split(inc_str)
if len(sp) > 1:
filename = sp[1].strip()
if REPLACE_USDA_EXT:
sfn = filename.split(".")
if len(sfn) > 1 and sfn[1] == "usda":
filename = sfn[0] + ".py"
newl = inc_str + " " + dir_path + "/" + filename
md_lines[lc] = newl
lc += 1
with open(out_file_path,"w") as nmdf:
for line in md_lines:
nmdf.writelines(line + "\n")
def generate_sphinx_index(samples):
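    """Write the top-level usd.rst index, listing every category with its code
    samples using the display names from category-display-names.toml."""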
cat_names_path = SOURCE_DIR / "category-display-names.toml"
cat_names = toml.load(cat_names_path)["name_mappings"]
print(f"CAT_NAMES: {cat_names}")
ref_links = {"variant-sets" : "variant_sets_ref"}
index_rst = SPHINX_DIR / "usd.rst"
with open(index_rst, "w") as f:
doc = RstCloth(f)
doc.directive("include", "usd_header.rst")
doc.newline()
#doc.title("OpenUSD Code Samples")
for category, cat_samples in samples.items():
if category in ref_links:
doc.ref_target(ref_links[category])
doc.newline()
human_readable = readable_from_category_dir_name(category)
if category in cat_names.keys():
human_readable = cat_names[category]
doc.h2(human_readable)
fields = [
#("caption", human_readable),
("titlesonly", ""),
]
doc.newline()
if TOCTREE_STYLE == 0:
sample_paths = [f"usd/{category}/{sample[0]}" for sample in cat_samples]
doc.directive("toctree", None, fields, sample_paths)
doc.newline()
elif TOCTREE_STYLE == 1:
#doc.h2(human_readable)
doc.newline()
for sample, title in cat_samples:
doc._add("- :doc:`" + title + f" <usd/{category}/" + sample + ">`")
doc.newline()
doc.directive("include", "usd_footer.rst")
doc.newline()
def readable_from_category_dir_name(category):
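    """Convert a category directory name like 'references-payloads' into a
    human-readable title such as 'References Payloads'."""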
sub_strs = category.split("-")
readable = ""
for sub in sub_strs:
readable += sub.capitalize() + " "
return readable.strip()
if __name__ == "__main__":
# Create an argument parser
parser = argparse.ArgumentParser(description='Build rST documentation from code sample source.')
# Parse the arguments
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
main() | 9,545 | Python | 34.225092 | 122 | 0.503929 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/README.md | # OpenUSD Code Samples
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![See the Code Samples](https://img.shields.io/badge/OpenUSD-Code_Samples-green)](https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/usd.html)
This repository contains useful Universal Scene Description (OpenUSD) code samples in Python, C++, and USDA. If you want to browse the code samples to use them, you can see them fully rendered in the [OpenUSD Code Samples documentation](https://docs.omniverse.nvidia.com/dev-guide/latest/programmer_ref/usd.html) page.
## Configuration
This repository uses [Poetry](https://python-poetry.org/docs/) for dependency management. If you're new to Poetry, you don't need to know much more than the commands we use in the [build instructions](#how-to-build). To make it easier when authoring code samples and contributing, we recommend installing:
1. Install any version of Python between 3.8 and 3.10.
1. [Install Poetry](https://python-poetry.org/docs/#installation)
## How to Build
1. `poetry install`
1. `poetry run python build_docs.py`
1. In a web browser, open `sphinx/_build/index.html`
## Have an Idea for a New Code Sample?
Ideas for new code samples that could help other developers are always welcome. Please [create a new issue](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples/issues) requesting a new code sample and add the _new request_ label. Someone from the NVIDIA team or OpenUSD community will pick it up. If you can contribute it yourself, even better!
## Find a Typo or an Error?
Please let us know if you find any mistakes or non-working code samples. [File an issue](https://github.com/NVIDIA-Omniverse/OpenUSD-Code-Samples/issues) with a _bug_ label to let us know and so we can address it.
## Contributing
Contributions are welcome! If you would like to contribute, please read our [Contributing Guidelines](./CONTRIBUTING.md) to understand how to contribute. Also, check out the [Code Sample Guidelines](CODE-SAMPLE-GUIDELINES.md) to understand how code sample files and folders are structured in this repository and how to follow our code sample style.
## Disclosures
The goal of this repository is to help developers learn OpenUSD and be more productive. To that end, NVIDIA reserves the right to use the source code and documentation in this repository for the purpose of training and/or benchmarking of an AI code assistant for OpenUSD developers.
| 2,507 | Markdown | 88.571425 | 359 | 0.780614 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_kit_cmds.md | Here you can add any info specific to the code sample flavor and introduce the code sample.
You should include your code sample as a separate source code file like this:
``` {literalinclude} py_kit_cmds.py
:language: py
```
You should use these includes instead of putting code in markdown code blocks. The first source code file should be named the same as the markdown file. If you want to show any variations of the code sample or expand on it, you should then include source code files with the suffix `_var#`.
Variations are not required and you generally won't need them, but they're available if you find your code sample could benefit from showing variations.
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Add all the imports that you need for you snippets
from pxr import Usd, Sdf, UsdGeom
def descriptive_code_sample_name(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
"""Docstring is optional. Use Google style docstrings if you choose to add them.
    The code sample should be defined as a function. Use a descriptive name for the function.
Use function arguments to:
- Pass in any objects that your code sample expects to exist (e.g. a Stage)
- Pass in Paths rather than hard-coding them.
Use type-hinting to help learners understand what type every variable is. Don't assume they'll know.
Args:
stage (Usd.Stage): _description_
prim_path (str, optional): _description_. Defaults to "/World/MyPerspCam".
Returns:
UsdGeom.Camera: _description_
"""
camera_path = Sdf.Path(prim_path)
usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
return usd_camera
#############
# Full Usage
#############
# Here you will show your code sample in context. Add any additional imports
# that you may need for your "Full Usage" code
# You can create an in-memory stage and do any stage setup before calling
# your code sample.
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
# Call your code sample function
camera = descriptive_code_sample_name(stage, cam_path)
# print out the result
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Do some basic asserts to show learners how to interact with the results.
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.perspective
| 2,131 | Python | 35.75862 | 105 | 0.725481 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_omni_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Source code for code block in the py_omni_usd flavor. See the py_usd.py for a
full example of writing a code sample.
You should use omni.usd.get_stage() instead of creating an in-memory stage
for the Full Usage part since this is meant to run in Omniverse.
""" | 403 | Python | 39.399996 | 98 | 0.764268 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/usda.md | Here you can say something before showing the USDA example. You can use the usda string generated by the py_usd flavor.
``` {literalinclude} usda.usda
:language: c++
``` | 170 | Markdown | 41.74999 | 119 | 0.747059 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/config.toml | [core]
# The title for this code sample. Used to name the page.
title = "My Code Example Code Sample"
[metadata]
#A concise description of the code sample for SEO.
description = "Universal Scene Description (OpenUSD) code samples to show how to contribute."
# Put in SEO keywords relevant to this code sample.
keywords = ["OpenUSD", "USD", "code sample", "snippet", "Python", "C++", "example"] | 394 | TOML | 42.888884 | 93 | 0.725888 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_usd_var1.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Source code for another code block in the py_usd flavor. See the py_usd.py for a
full example of writing a code sample.
""" | 265 | Python | 36.999995 | 98 | 0.758491 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/category-display-names.toml | [name_mappings]
hierarchy-traversal = "Hierarchy & Traversal"
references-payloads = "References & Payloads"
| 108 | TOML | 26.249993 | 45 | 0.777778 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_kit_cmds.md | The `CreatePrimWithDefaultXform` command in Kit can create a Camera prim, and you can optionally set camera attribute values during creation. You must use the attribute token names as the keys for the `attributes` dictionary. In Omniverse applications, you can explore the names by hovering over a property label in the Property Window and reading it from the tooltip.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 423 | Markdown | 83.799983 | 368 | 0.799054 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Sdf, Usd, UsdGeom
def create_orthographic_camera(stage: Usd.Stage, prim_path: str="/World/MyOrthoCam") -> UsdGeom.Camera:
"""Create an orthographic camera
Args:
stage (Usd.Stage): A USD Stage to create the camera on.
prim_path (str, optional): The prim path for where to create the camera. Defaults to "/World/MyOrthoCam".
"""
camera_path = Sdf.Path(prim_path)
usd_camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.orthographic)
return usd_camera
#############
# Full Usage
#############
cam_path = "/World/MyOrthoCam"
stage: Usd.Stage = Usd.Stage.CreateInMemory()
root_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(root_prim.GetPrim())
camera = create_orthographic_camera(stage, cam_path)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check that the camera was created
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.orthographic
| 1,298 | Python | 31.474999 | 113 | 0.718028 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
```
| 50 | Markdown | 9.199998 | 30 | 0.62 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import UsdGeom
def create_orthographic_camera(prim_path: str="/World/MyOrthoCam"):
"""Create an orthographic camera
Args:
prim_path (str, optional): The prim path where the camera should be created. Defaults to "/World/MyOrthoCam".
"""
omni.kit.commands.execute("CreatePrimWithDefaultXform",
prim_type="Camera",
prim_path="/World/MyOrthoCam",
attributes={"projection": UsdGeom.Tokens.orthographic}
)
#############
# Full Usage
#############
import omni.usd
# Create an orthographic camera at /World/MyOrthoCam
path = "/World/MyOrthoCam"
create_orthographic_camera(path)
# Check that the camera was created
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(path)
assert prim.IsValid() == True
assert prim.GetTypeName() == "Camera"
projection = prim.GetAttribute("projection").Get()
assert projection == UsdGeom.Tokens.orthographic | 1,082 | Python | 28.27027 | 117 | 0.711645 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/usda.md | This is an example USDA result from creating a Camera and setting the `projection` to `orthographic`. All other Properties are using the default values from the `UsdGeomCamera` schema definition.
``` {literalinclude} usda.usda
:language: usd
``` | 246 | Markdown | 60.749985 | 195 | 0.780488 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/config.toml | [core]
title = "Create an Orthographic Camera"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for creating an orthographic camera prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "camera", "UsdGeom", "Orthographic"] | 278 | TOML | 45.499992 | 110 | 0.726619 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/header.md | You can define a new camera on a stage using `UsdGeom.Camera`. The Camera prim has a `projection` attribute that can be set to `orthographic`.
| 143 | Markdown | 70.999965 | 142 | 0.769231 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf, UsdGeom
def create_perspective_camera(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
camera_path = Sdf.Path(prim_path)
usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
return usd_camera
#############
# Full Usage
#############
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create the perspective camera at /World/MyPerspCam
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
camera = create_perspective_camera(stage, cam_path)
# Export the complete Stage as a string and print it.
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check that the camera was created
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.perspective
| 1,288 | Python | 33.837837 | 102 | 0.743789 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd.md | With the USD API, you can use `UsdGeom.Camera.CreateProjectionAttr()` to create the `projection` attribute and then set the value with `Usd.Attribute.Set()`.
``` {literalinclude} py_usd.py
:language: py
```
Here is how you can set some other common attributes on the camera:
``` {literalinclude} py_usd_var1.py
:language: py
``` | 335 | Markdown | 32.599997 | 157 | 0.728358 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import UsdGeom
def create_perspective_camera(prim_path: str="/World/MyPerspCam"):
"""Create a perspective camera
Args:
prim_path (str, optional): The prim path where the camera should be created. Defaults to "/World/MyPerspCam".
"""
omni.kit.commands.execute("CreatePrimWithDefaultXform",
prim_type="Camera",
prim_path=prim_path,
attributes={
"projection": UsdGeom.Tokens.perspective,
"focalLength": 35,
"horizontalAperture": 20.955,
"verticalAperture": 15.2908,
"clippingRange": (0.1, 100000)
}
)
#############
# Full Usage
#############
import omni.usd
# Create a perspective camera at /World/MyPerspCam
path = "/World/MyPerspCam"
create_perspective_camera(path)
# Check that the camera was created
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(path)
assert prim.IsValid() == True
assert prim.GetTypeName() == "Camera"
projection = prim.GetAttribute("projection").Get()
assert projection == UsdGeom.Tokens.perspective | 1,239 | Python | 27.181818 | 117 | 0.673123 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/config.toml | [core]
title = "Create a Perspective Camera"
[metadata]
description = "Universal Scene Description (OpenUSD) code sample to create a perspective camera."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "camera", "perspective"] | 253 | TOML | 41.333326 | 98 | 0.719368 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd_var1.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf, UsdGeom
def create_perspective_35mm_camera(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
camera_path = Sdf.Path(prim_path)
usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
usd_camera.CreateFocalLengthAttr().Set(35)
# Set a few other common attributes too.
usd_camera.CreateHorizontalApertureAttr().Set(20.955)
usd_camera.CreateVerticalApertureAttr().Set(15.2908)
usd_camera.CreateClippingRangeAttr().Set((0.1,100000))
return usd_camera
#############
# Full Usage
#############
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create the perspective camera at path /World/MyPerspCam with 35mm
# set for the focal length.
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
camera = create_perspective_35mm_camera(stage, cam_path)
# Export the complete Stage as a string and print it.
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check the camera attributes
focal_len = camera.GetFocalLengthAttr().Get()
assert focal_len == 35.0
clip_range = camera.GetClippingRangeAttr().Get()
assert clip_range == (0.1,100000)
| 1,533 | Python | 34.674418 | 107 | 0.740378 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_kit_cmds.md | You can use the `ChangeProperty` command from the `omni.kit.commands` extension to change the attribute of any prim. In Omniverse applications, you can discover the attribute name by hovering over the label in the Property Window and inspecting the tooltip.
You can find more information about the Kit command API at the [omni.kit.commands extension documentation](https://docs.omniverse.nvidia.com/kit/docs/omni.kit.commands/latest/API.html).
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 499 | Markdown | 70.428561 | 257 | 0.791583 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Union
from pxr import Sdf, Usd, UsdGeom
def get_visibility_attribute(
stage: Usd.Stage, prim_path: str
) -> Union[Usd.Attribute, None]:
"""Return the visibility attribute of a prim"""
path = Sdf.Path(prim_path)
prim = stage.GetPrimAtPath(path)
if not prim.IsValid():
return None
visibility_attribute = prim.GetAttribute("visibility")
return visibility_attribute
def hide_prim(stage: Usd.Stage, prim_path: str):
"""Hide a prim
Args:
stage (Usd.Stage, required): The USD Stage
prim_path (str, required): The prim path of the prim to hide
"""
visibility_attribute = get_visibility_attribute(stage, prim_path)
if visibility_attribute is None:
return
visibility_attribute.Set("invisible")
def show_prim(stage: Usd.Stage, prim_path: str):
"""Show a prim
Args:
stage (Usd.Stage, required): The USD Stage
prim_path (str, required): The prim path of the prim to show
"""
visibility_attribute = get_visibility_attribute(stage, prim_path)
if visibility_attribute is None:
return
visibility_attribute.Set("inherited")
#############
# Full Usage
#############
# Create a simple in-memory stage with a Cube
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim_path = Sdf.Path("/World")
default_prim = UsdGeom.Xform.Define(stage, default_prim_path)
stage.SetDefaultPrim(default_prim.GetPrim())
cube_path = default_prim_path.AppendPath("Cube")
cube = UsdGeom.Cube.Define(stage, cube_path)
# The prim is initially visible. Assert so and then demonstrate how to toggle
# it off and on
assert get_visibility_attribute(stage, cube_path).Get() == "inherited"
hide_prim(stage, cube_path)
assert get_visibility_attribute(stage, cube_path).Get() == "invisible"
show_prim(stage, cube_path)
assert get_visibility_attribute(stage, cube_path).Get() == "inherited"
# Print the USDA out
usda = stage.GetRootLayer().ExportToString()
print(usda)
| 2,246 | Python | 30.647887 | 98 | 0.702137 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_usd.md | You can use the USD API [Usd.Prim.GetAttribute()](https://openusd.org/release/api/class_usd_prim.html#a31225ac7165f58726f000ab1d67e9e61) to get an attribute of a prim and then use [Usd.Attribute.Set()](https://openusd.org/release/api/class_usd_attribute.html#a151e6fde58bbd911da8322911a3c0079) to change the value. The attribute name for visibility is `visibility` and you can set it to the value of `inherited` or `invisible`.
``` {literalinclude} py_usd.py
:language: py
``` | 477 | Markdown | 94.599981 | 427 | 0.779874 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
from pxr import Sdf
def hide_prim(prim_path: str):
"""Hide a prim
Args:
prim_path (str, required): The prim path of the prim to hide
"""
set_prim_visibility_attribute(prim_path, "invisible")
def show_prim(prim_path: str):
"""Show a prim
Args:
prim_path (str, required): The prim path of the prim to show
"""
set_prim_visibility_attribute(prim_path, "inherited")
def set_prim_visibility_attribute(prim_path: str, value: str):
"""Set the prim visibility attribute at prim_path to value
Args:
prim_path (str, required): The path of the prim to modify
value (str, required): The value of the visibility attribute
"""
# You can reference attributes using the path syntax by appending the
# attribute name with a leading `.`
prop_path = f"{prim_path}.visibility"
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path(prop_path), value=value, prev=None
)
"""
Full Usage
"""
# Path to a prim in the open stage
prim_path = "/World/Cube"
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(prim_path)
assert prim.IsValid()
# Manually confirm that the prim is not visible in the viewport after calling
# hide_prim. You should comment out the below show_prim call and assert.
hide_prim(prim_path)
assert prim.GetAttribute("visibility").Get() == "invisible"
# Manually confirm that the prim is visible in the viewport after calling
# show_prim
show_prim(prim_path)
assert prim.GetAttribute("visibility").Get() == "inherited"
| 1,738 | Python | 27.508196 | 98 | 0.698504 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/usda.md | This is an example USDA result from creating a Cube and setting the visibility property to `inherited`. You can edit the value to `invisible` to hide the prim.
``` {literalinclude} usda.usda
:language: usd
``` | 211 | Markdown | 41.399992 | 159 | 0.753554 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/config.toml | [core]
# The title for this code sample. Used to name the page.
title = "Show or Hide a Prim"
[metadata]
#A concise description of the code sample for SEO.
description = "Universal Scene Description (OpenUSD) code samples that demonstrates how to a show or hide a prim."
# Put in SEO keywords relevant to this code sample.
keywords = ["OpenUSD", "USD", "Python", "visibility", "show prim", "hide prim"] | 403 | TOML | 43.888884 | 114 | 0.73201 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/py_kit_cmds.md | ``` {literalinclude} py_kit_cmds.py
:language: py
``` | 53 | Markdown | 16.999994 | 35 | 0.660377 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Sdf
def add_sub_layer(sub_layer_path: str, root_layer) -> Sdf.Layer:
sub_layer: Sdf.Layer = Sdf.Layer.CreateNew(sub_layer_path)
    # append() adds the sublayer to the end of subLayerPaths; you can use
    # standard Python list.insert to place it at any other position in the list
root_layer.subLayerPaths.append(sub_layer.identifier)
return sub_layer
#############
# Full Usage
#############
from pxr import Usd
# Get the root layer
stage: Usd.Stage = Usd.Stage.CreateInMemory()
root_layer: Sdf.Layer = stage.GetRootLayer()
# Add the sub layer to the root layer
sub_layer = add_sub_layer(r"C:/path/to/sublayer.usd", root_layer)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check to see if the sublayer is loaded
loaded_layers = root_layer.GetLoadedLayers()
assert sub_layer in loaded_layers | 921 | Python | 28.741935 | 98 | 0.726384 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
# CreateSublayer needs an existing stage; use the stage currently open in Kit.
stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute("CreateSublayer",
layer_identifier=stage.GetRootLayer().identifier,
# This example prepends to the subLayers list
sublayer_position=0,
new_layer_path=r"C:/path/to/sublayer.usd",
transfer_root_content=False,
# When True, it will create the layer file for you too.
create_or_insert=True
)
| 506 | Python | 30.687498 | 98 | 0.741107 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/layers/add-sublayer/config.toml | [core]
title = "Add a SubLayer"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding an Inherit composition arc to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "layer", "SubLayer", "composition", "composition arc"] | 280 | TOML | 45.833326 | 120 | 0.717857 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/py_kit_cmds.md | The `CreatePayload` command is a convenient wrapper that creates an Xform prim and adds a Payload to it all at once. If you don't need the two steps batched together, you may want to [add a Payload](add-payload) to an existing prim via Kit Commands or USD Python API.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 322 | Markdown | 63.599987 | 267 | 0.757764 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
from pxr import Usd, Sdf
def create_payload(usd_context: omni.usd.UsdContext, path_to: Sdf.Path, asset_path: str, prim_path: Sdf.Path) -> Usd.Prim:
omni.kit.commands.execute("CreatePayload",
usd_context=usd_context,
path_to=path_to, # Prim path for where to create the prim with the payload
asset_path=asset_path, # The file path to the payload USD. Relative paths are accepted too.
prim_path=prim_path # OPTIONAL: Prim path to a prim in the payloaded USD, if not provided the default prim is used
)
return usd_context.get_stage().GetPrimAtPath(path_to)
#############
# Full Usage
#############
# Get the USD context from kit
context: omni.usd.UsdContext = omni.usd.get_context()
# Create a prim and add an external payload targeting a specific prim
payload_prim: Usd.Prim = create_payload(context, Sdf.Path("/World/payload_prim"), "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Get the existing USD stage from kit
stage: Usd.Stage = context.get_stage()
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check that the payload prims were created
assert payload_prim.IsValid()
assert payload_prim.GetPrimStack()[0].payloadList.prependedItems[0] == Sdf.Payload(assetPath="file:/C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target")) | 1,469 | Python | 39.833332 | 162 | 0.721579 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/usda.md | This is an example USDA result from creating a payload with the `CreatePayload` command.
``` {literalinclude} usda.usda
:language: usd
``` | 143 | Markdown | 34.999991 | 92 | 0.762238 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-payload/config.toml | [core]
title = "Create a Payload"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for creating an Xform prim and adding a Payload in Omniverse Kit in one step."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "payload", "CreatePayload"] | 292 | TOML | 47.833325 | 144 | 0.726027 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_kit_cmds.md | The `AddPayload` command in Kit can add payloads to a prim.
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 113 | Markdown | 27.499993 | 59 | 0.716814 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf
def add_payload(prim: Usd.Prim, payload_asset_path: str, payload_target_path: Sdf.Path) -> None:
payloads: Usd.Payloads = prim.GetPayloads()
payloads.AddPayload(
assetPath=payload_asset_path,
        primPath=payload_target_path # OPTIONAL: Payload a specific target prim. Otherwise, uses the payloaded layer's defaultPrim.
)
#############
# Full Usage
#############
from pxr import UsdGeom
# Create new USD stage for this sample
stage: Usd.Stage = Usd.Stage.CreateInMemory()
# Create and define default prim
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create an xform which should hold all payloads in this sample
payload_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World/payload_prim")).GetPrim()
# Add an external payload
add_payload(payload_prim, "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Add another external payload targeting the default prim
add_payload(payload_prim, "C:/path/to/other/file.usd", Sdf.Path.emptyPath)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Get a list of all prepended payloads
payloads = []
for prim_spec in payload_prim.GetPrimStack():
payloads.extend(prim_spec.payloadList.prependedItems)
# Check that the payload prim was created and that the payloads are correct
assert payload_prim.IsValid()
assert payloads[0] == Sdf.Payload(assetPath="C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target"))
assert payloads[1] == Sdf.Payload(assetPath="C:/path/to/other/file.usd")
| 1,698 | Python | 35.148935 | 130 | 0.73616 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_usd.md | With the USD API, you can use `Usd.Prim.GetPayloads()` to retrieve a prim's payloads and add a new one with `Usd.Payloads.AddPayload()`.
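As a minimal sketch (the in-memory stage and asset path below are illustrative):
```py
from pxr import Sdf, Usd, UsdGeom
stage = Usd.Stage.CreateInMemory()
prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
# Without a primPath argument, the payloaded layer's defaultPrim is used.
prim.GetPayloads().AddPayload("C:/path/to/file.usd")
```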
``` {literalinclude} py_usd.py
:language: py
``` | 179 | Markdown | 43.999989 | 130 | 0.72067 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
from pxr import Usd, Sdf
def add_payload(prim: Usd.Prim, payload_asset_path: str, payload_target_path: Sdf.Path) -> None:
omni.kit.commands.execute("AddPayload",
stage=prim.GetStage(),
prim_path = prim.GetPath(), # an existing prim to add the payload to.
payload=Sdf.Payload(
assetPath = payload_asset_path,
primPath = payload_target_path
)
)
#############
# Full Usage
#############
from pxr import UsdGeom
import omni.usd
# Create new USD stage for this sample in OV
context: omni.usd.UsdContext = omni.usd.get_context()
success: bool = context.new_stage()
stage: Usd.Stage = context.get_stage()
# Create and define the default prim, so this file can easily be added as a payload again
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create an xform which should hold all payloads in this sample
payload_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World/payload_prim")).GetPrim()
# Add a payload targeting a specific prim
add_payload(payload_prim, "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Add another payload targeting the default prim
add_payload(payload_prim, "C:/path/to/other/file.usd", Sdf.Path.emptyPath)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Get a list of all prepended payloads
payloads = []
for prim_spec in payload_prim.GetPrimStack():
payloads.extend(prim_spec.payloadList.prependedItems)
# Check that the payload prim was created and that the payloads are correct
assert payload_prim.IsValid()
assert payloads[0] == Sdf.Payload(assetPath="C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target"))
assert payloads[1] == Sdf.Payload(assetPath="C:/path/to/other/file.usd") | 1,908 | Python | 35.018867 | 107 | 0.719078 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/usda.md | This is an example USDA result from creating an `Xform` and adding two `Payloads` to it. The first payload target prim in this case is in the file `C:/path/to/file.usd` with the prim path `/World/some/target` and the second is the default prim in the file `C:/path/to/other/file.usd`.
``` {literalinclude} usda.usda
:language: usd
``` | 335 | Markdown | 82.999979 | 284 | 0.737313 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-payload/config.toml | [core]
title = "Add a Payload"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding a Payload to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "payload", "AddPayload"] | 240 | TOML | 39.16666 | 98 | 0.7 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-reference/py_kit_cmds.md | The `CreateReference` command is a convenient wrapper that creates an Xform prim and adds a Reference to it all at once. If you don't need the two steps batched together, you may want to [add a Reference](add-reference) to an existing prim via Kit Commands or USD API.
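A minimal sketch of invoking the command is below; the argument names mirror the `CreatePayload` command elsewhere in these samples and the paths are illustrative, so treat the exact signature as an assumption:
```py
import omni.kit.commands
import omni.usd
from pxr import Sdf
omni.kit.commands.execute("CreateReference",
    usd_context=omni.usd.get_context(),
    path_to=Sdf.Path("/World/ref_prim"),  # prim path for where to create the prim with the reference
    asset_path="C:/path/to/file.usd",     # the file path to the referenced USD
    prim_path=Sdf.Path.emptyPath          # OPTIONAL: target prim; empty uses the defaultPrim
)
```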
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 323 | Markdown | 63.799987 | 268 | 0.76161 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/create-reference/config.toml | [core]
title = "Create a Reference"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for creating an Xform prim and adding a Reference in Omniverse Kit in one step."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "reference", "CreateReference"] | 300 | TOML | 49.166658 | 146 | 0.733333 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/py_kit_cmds.md | The `AddReference` command in Kit can add internal and external references to a prim.
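A minimal sketch (assuming the command accepts an `Sdf.Reference`, mirroring the `AddPayload` command elsewhere in these samples; the paths are illustrative):
```py
import omni.kit.commands
import omni.usd
from pxr import Sdf
stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute("AddReference",
    stage=stage,
    prim_path=Sdf.Path("/World/ref_prim"),  # an existing prim to add the reference to
    reference=Sdf.Reference(assetPath="C:/path/to/file.usd")
)
```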
``` {literalinclude} py_kit_cmds.py
:language: py
``` | 139 | Markdown | 33.999992 | 85 | 0.748201 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd, Sdf
def add_int_reference(prim: Usd.Prim, ref_target_path: Sdf.Path) -> None:
references: Usd.References = prim.GetReferences()
references.AddInternalReference(ref_target_path)
def add_ext_reference(prim: Usd.Prim, ref_asset_path: str, ref_target_path: Sdf.Path) -> None:
references: Usd.References = prim.GetReferences()
references.AddReference(
assetPath=ref_asset_path,
primPath=ref_target_path # OPTIONAL: Reference a specific target prim. Otherwise, uses the referenced layer's defaultPrim.
)
#############
# Full Usage
#############
from pxr import UsdGeom
# Create new USD stage for this sample
stage: Usd.Stage = Usd.Stage.CreateInMemory()
# Create and define default prim, so this file can be easily referenced again
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())
# Create an xform which should hold all references in this sample
ref_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World/ref_prim")).GetPrim()
# Add an internal reference
intern_target_path: Sdf.Path = Sdf.Path("/World/intern_target")
target_prim: Usd.Prim = UsdGeom.Xform.Define(stage, intern_target_path).GetPrim()
add_int_reference(ref_prim, intern_target_path)
# Add an external reference to a specific prim
add_ext_reference(ref_prim, "C:/path/to/file.usd", Sdf.Path("/World/some/target"))
# Add another external reference targeting the default prim
add_ext_reference(ref_prim, "C:/path/to/other/file.usd", Sdf.Path.emptyPath)
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Get a list of all prepended references
references = []
for prim_spec in ref_prim.GetPrimStack():
references.extend(prim_spec.referenceList.prependedItems)
# Check that the reference prim was created and that the references are correct
assert ref_prim.IsValid()
assert references[0] == Sdf.Reference(primPath=intern_target_path)
assert references[1] == Sdf.Reference(assetPath="C:/path/to/file.usd", primPath=Sdf.Path("/World/some/target"))
assert references[2] == Sdf.Reference(assetPath="C:/path/to/other/file.usd")
| 2,250 | Python | 38.491227 | 130 | 0.744 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/py_usd.md | With the USD API, you can use `Usd.Prim.GetReferences()` to retrieve a prim's references and add a new one with `Usd.References.AddReference()`.
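As a minimal sketch (the in-memory stage and asset path below are illustrative):
```py
from pxr import Sdf, Usd, UsdGeom
stage = Usd.Stage.CreateInMemory()
prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
# Without a primPath argument, the referenced layer's defaultPrim is used.
prim.GetReferences().AddReference("C:/path/to/file.usd")
```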
``` {literalinclude} py_usd.py
:language: py
``` | 187 | Markdown | 45.999989 | 138 | 0.73262 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/references-payloads/add-reference/config.toml | [core]
title = "Add a Reference"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding a Reference to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "reference", "AddReference"] | 248 | TOML | 40.499993 | 102 | 0.709677 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/author-variant-data/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# NOTE: This snippet assumes `prim` is a Usd.Prim that already has a "shading"
# variant set and that `variant_name` names one of its existing variants.
shading_varset = prim.GetVariantSets().GetVariantSet("shading")
selected_variant = shading_varset.GetVariantSelection()
shading_varset.SetVariantSelection(variant_name)
with shading_varset.GetVariantEditContext():
# Specs authored within this context are authored just for the variant.
...
# Set the variant selection back to the previously selected variant.
# Alternatively, you can use Usd.VariantSet.ClearVariantSelection()
# if you know that there isn't a variant selection in the current EditTarget.
if selected_variant:
shading_varset.SetVariantSelection(selected_variant)
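#############
# Full Usage (a minimal, self-contained sketch; the in-memory stage, "shading"
# variant set, and variant names below are illustrative assumptions and are
# not part of the snippet above)
#############
from pxr import Sdf, Usd, UsdGeom
stage: Usd.Stage = Usd.Stage.CreateInMemory()
prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(prim)
variant_name = "realistic"
varset: Usd.VariantSet = prim.GetVariantSets().AddVariantSet("shading")
varset.AddVariant("cell_shading")
varset.AddVariant(variant_name)
previous_variant = varset.GetVariantSelection()
varset.SetVariantSelection(variant_name)
with varset.GetVariantEditContext():
    # This sphere is authored only inside the "realistic" variant.
    UsdGeom.Sphere.Define(stage, prim.GetPath().AppendPath("VariantSphere"))
if previous_variant:
    varset.SetVariantSelection(previous_variant)
print(stage.GetRootLayer().ExportToString())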
| 731 | Python | 42.058821 | 98 | 0.79617 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/author-variant-data/header.md | Opinions (i.e. data) for a particular variant can be authored on different layers. This shows how you can author opinions for an existing variant that
might have been authored on a different layer. | 197 | Markdown | 97.999951 | 150 | 0.807107 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/select-variant/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def select_variant_from_variant_set(prim: Usd.Prim, variant_set_name: str, variant_name: str) -> None:
variant_set = prim.GetVariantSets().GetVariantSet(variant_set_name)
variant_set.SetVariantSelection(variant_name)
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
# Create the Variant Set
shading_varset: Usd.VariantSet = default_prim.GetVariantSets().AddVariantSet("shading")
# Add Variants to the Variant Set
shading_varset.AddVariant("cell_shading")
shading_varset.AddVariant("realistic")
select_variant_from_variant_set(default_prim, "shading", "realistic")
usda = stage.GetRootLayer().ExportToString()
print(usda)
assert default_prim.GetVariantSets().GetVariantSet("shading").GetVariantSelection() == "realistic" | 1,150 | Python | 33.878787 | 102 | 0.753043 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/select-variant/usda.md | This is an example USDA result from creating a Variant Set, adding two Variants to it, and setting the variant selection to `realistic`.
``` {literalinclude} usda.usda
:language: usd
``` | 192 | Markdown | 47.249988 | 141 | 0.760417 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/create-variant-set/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def create_variant_set(prim: Usd.Prim, variant_set_name: str, variants: list) -> Usd.VariantSet:
variant_set = prim.GetVariantSets().AddVariantSet(variant_set_name)
for variant in variants:
variant_set.AddVariant(variant)
return variant_set
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
# Create the variant set and add your variants to it.
variants = ["red", "blue", "green"]
shading_varset: Usd.VariantSet = create_variant_set(default_prim, "shading", variants)
usda = stage.GetRootLayer().ExportToString()
print(usda)
assert default_prim.GetVariantSets().HasVariantSet("shading")
| 1,027 | Python | 33.266666 | 98 | 0.730282 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/create-variant-set/usda.md | This is an example USDA result from creating a Variant Set and adding Variants to the Set.
``` {literalinclude} usda.usda
:language: usd
``` | 141 | Markdown | 34.499991 | 90 | 0.751773 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/variant-sets/create-variant-set/config.toml | [core]
title = "Create a Variant Set"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples showing how to create a variant set."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "variant set", "composition", "create variant set", "variant"] | 282 | TOML | 46.166659 | 128 | 0.716312 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/specializes/add-specialize/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def add_specialize_to(base_prim: Usd.Prim, specializes: Usd.Specializes) -> bool:
return specializes.AddSpecialize(base_prim.GetPath())
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
prim: Usd.Prim = UsdGeom.Xform.Define(stage, default_prim.GetPath().AppendPath("prim")).GetPrim()
base: Usd.Prim = UsdGeom.Xform.Define(stage, default_prim.GetPath().AppendPath("base")).GetPrim()
specializes: Usd.Specializes = prim.GetSpecializes()
added_successfully = add_specialize_to(base, specializes)
usda = stage.GetRootLayer().ExportToString()
print(usda)
assert added_successfully | 999 | Python | 34.714284 | 98 | 0.746747 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/specializes/add-specialize/usda.md | This is an example USDA result from adding a Specialize arc to a prim.
``` {literalinclude} usda.usda
:language: usd
``` | 117 | Markdown | 28.499993 | 66 | 0.735043 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/specializes/add-specialize/config.toml | [core]
title = "Add a Specialize"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for adding a Specialize composition arc to a prim."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "specialize", "composition", "composition arc"] | 277 | TOML | 45.333326 | 117 | 0.729242 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/py_omni_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.usd
prim_path = "/World/My/Prim"
ctx = omni.usd.get_context()
# The second arg is unused. Any boolean can be used.
ctx.get_selection().set_selected_prim_paths([prim_path], True) | 328 | Python | 35.555552 | 98 | 0.75 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/py_kit_cmds.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import omni.kit.commands
import omni.usd
prim_path = "/World/My/Prim"
ctx = omni.usd.get_context()
old_selection = ctx.get_selection().get_selected_prim_paths()
omni.kit.commands.execute('SelectPrimsCommand',
old_selected_paths=old_selection,
new_selected_paths=[prim_path],
expand_in_stage=True) #DEPRECATED: Used only for backwards compatibility. | 500 | Python | 34.785712 | 98 | 0.76 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/config.toml | [core]
title = "Select a Prim by Prim Path"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples showing how to select a prim using its prim path."
keywords = ["OpenUSD", "USD", "Python", "code sample", "prim", "selection", "by path", "path", "prim path"] | 281 | TOML | 45.999992 | 116 | 0.697509 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/select-prim-by-path/py_omni_usd.md | ``` {literalinclude} py_omni_usd.py
:language: py
``` | 53 | Markdown | 16.999994 | 35 | 0.660377 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/check-prim-exists/py_usd.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from pxr import Usd
def check_prim_exists(prim: Usd.Prim) -> bool:
    return prim.IsValid()
#############
# Full Usage
#############
from pxr import Sdf, UsdGeom
# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim: Usd.Prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World")).GetPrim()
stage.SetDefaultPrim(default_prim)
# Create one valid prim and one empty (invalid) prim
cube: Usd.Prim = UsdGeom.Cube.Define(stage, Sdf.Path("/World/Cube")).GetPrim()
empty_prim = Usd.Prim()
usda = stage.GetRootLayer().ExportToString()
print(usda)
# Check if prims exist
assert check_prim_exists(default_prim)
assert check_prim_exists(cube)
assert not check_prim_exists(empty_prim) | 893 | Python | 26.937499 | 98 | 0.718925 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/check-prim-exists/py_usd.md | ``` {literalinclude} py_usd.py
:language: py
```
Alternatively, `Usd.Object` overrides the boolean operator so you can check with a simple boolean expression.
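For example, a minimal sketch (`Usd.Prim()` here constructs an empty, invalid prim handle purely for illustration):
```py
from pxr import Usd
prim = Usd.Prim()
if not prim:
    print("Prim does not exist")
```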
``` {literalinclude} py_usd_var1.py
:language: py
``` | 214 | Markdown | 22.888886 | 109 | 0.728972 |
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/prims/check-prim-exists/config.toml | [core]
title = "Check if a Prim Exists"
[metadata]
description = "Universal Scene Description (OpenUSD) code samples for checking if a Prim exists on a Stage."
keywords = ["OpenUSD", "USD", "Python", "snippet", "code sample", "prim", "exists", "IsValid", "valid"] | 264 | TOML | 43.166659 | 108 | 0.69697 |